repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
gdub/django | django/utils/lru_cache.py | 270 | 7647 | try:
from functools import lru_cache
except ImportError:
# backport of Python's 3.3 lru_cache, written by Raymond Hettinger and
# licensed under MIT license, from:
# <http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/>
# Should be removed when Django only supports Python 3.2 and above.
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
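# A note on the helper above: _HashedSeq computes the tuple's hash once at
# construction time, so repeated dictionary lookups and insertions of the same
# key object never rehash the underlying sequence.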
def _make_key(args, kwds, typed,
kwd_mark = (object(),),
fasttypes = {int, str, frozenset, type(None)},
sorted=sorted, tuple=tuple, type=type, len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
def decorating_function(user_function):
cache = dict()
stats = [0, 0] # make statistics updateable non-locally
HITS, MISSES = 0, 1 # names for the stats fields
make_key = _make_key
cache_get = cache.get # bound method to lookup key or return None
_len = len # localize the global len() function
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
nonlocal_root = [root] # make updateable non-locally
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just do a statistics update after a successful call
result = user_function(*args, **kwds)
stats[MISSES] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, root) # root used here as a unique not-found sentinel
if result is not root:
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
stats[MISSES] += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed) if kwds or typed else args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it to the front of the list
root, = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
with lock:
root, = nonlocal_root
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif _len(cache) >= maxsize:
# use the old root to store the new key and result
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# empty the oldest link and make it the new root
root = nonlocal_root[0] = oldroot[NEXT]
oldkey = root[KEY]
oldvalue = root[RESULT]
root[KEY] = root[RESULT] = None
# now update the cache dictionary for the new links
del cache[oldkey]
cache[key] = oldroot
else:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
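# Illustrative usage of the decorator defined above (example only, not part of
# the original backport):
#
#     @lru_cache(maxsize=32)
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(20)            # fills the cache with fib(0) .. fib(20)
#     fib.cache_info()   # CacheInfo(hits=18, misses=21, maxsize=32, currsize=21)
#     fib.cache_clear()  # empties the cache and resets the statistics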
| bsd-3-clause |
AlvaroRQ/prototipo | test_objectdetector.py | 1 | 1138 | #!/usr/bin/env python2.7
import os
import argparse
import time
from ownLibraries.object_detection.objectdetect import ObjectDetection
if __name__ == '__main__':
# For pass argument file
parser = argparse.ArgumentParser(description='Add folder to process')
parser.add_argument('-f', '--checkImage', default = None, type=str, help="Add path to the folder to check")
args = parser.parse_args()
if args.checkImage != None:
rutaDeTrabajo = args.checkImage
print('Ruta a limpiar: {}'.format(rutaDeTrabajo))
else:
print('No se introdujo folder a revisar')
# Instantiate detection class
detect = ObjectDetection()
# Load the images into the target directory
fotografias = [f for f in os.listdir(rutaDeTrabajo) if '.jpg' in f]
print('Analizando {} imagenes'.format(len(fotografias)))
for fotografia in fotografias:
path_to_original_image = rutaDeTrabajo+'/'+fotografia
tiempoMedicion = time.time()
results = detect.roi_results(path_to_original_image)
print('TOTAL TIME IS...: ', time.time() - tiempoMedicion)
print(results) | gpl-3.0 |
cooljeanius/emacs | build-aux/vcstocl/frontend_c.py | 1 | 30556 | # The C Parser.
# Copyright (C) 2019-2020 Free Software Foundation, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from enum import Enum
import re
from vcstocl.misc_util import *
class block_flags(Enum):
''' Flags for the code block.
'''
else_block = 1
macro_defined = 2
macro_redefined = 3
class block_type(Enum):
''' Type of code block.
'''
file = 1
macro_cond = 2
macro_def = 3
macro_undef = 4
macro_include = 5
macro_info = 6
decl = 7
func = 8
composite = 9
macrocall = 10
fndecl = 11
assign = 12
struct = 13
union = 14
enum = 15
# A dictionary describing what each action (add, modify, delete) show up as in
# the ChangeLog output.
actions = {0:{'new': 'New', 'mod': 'Modified', 'del': 'Remove'},
block_type.file:{'new': 'New file', 'mod': 'Modified file',
'del': 'Remove file'},
block_type.macro_cond:{'new': 'New', 'mod': 'Modified',
'del': 'Remove'},
block_type.macro_def:{'new': 'New', 'mod': 'Modified',
'del': 'Remove'},
block_type.macro_include:{'new': 'Include file', 'mod': 'Modified',
'del': 'Remove include'},
block_type.macro_info:{'new': 'New preprocessor message',
'mod': 'Modified', 'del': 'Remove'},
block_type.decl:{'new': 'New', 'mod': 'Modified', 'del': 'Remove'},
block_type.func:{'new': 'New function', 'mod': 'Modified function',
'del': 'Remove function'},
block_type.composite:{'new': 'New', 'mod': 'Modified',
'del': 'Remove'},
block_type.struct:{'new': 'New struct', 'mod': 'Modified struct',
'del': 'Remove struct'},
block_type.union:{'new': 'New union', 'mod': 'Modified union',
'del': 'Remove union'},
block_type.enum:{'new': 'New enum', 'mod': 'Modified enum',
'del': 'Remove enum'},
block_type.macrocall:{'new': 'New', 'mod': 'Modified',
'del': 'Remove'},
block_type.fndecl:{'new': 'New function', 'mod': 'Modified',
'del': 'Remove'},
block_type.assign:{'new': 'New', 'mod': 'Modified', 'del': 'Remove'}}
def new_block(name, type, contents, parent, flags = 0):
''' Create a new code block with the parent as PARENT.
The code block is a basic structure around which the tree representation of
the source code is built. It has the following attributes:
- name: A name to refer it by in the ChangeLog
- type: Any one of the following types in BLOCK_TYPE.
- contents: The contents of the block. For a block of types file or
macro_cond, this would be a list of blocks that it nests. For other types
it is a list with a single string specifying its contents.
- parent: This is the parent of the current block, useful in setting up
#elif or #else blocks in the tree.
- flags: A special field to indicate some properties of the block. See
BLOCK_FLAGS for values.
'''
block = {}
block['matched'] = False
block['name'] = name
block['type'] = type
block['contents'] = contents
block['parent'] = parent
if parent:
parent['contents'].append(block)
block['flags'] = flags
block['actions'] = actions[type]
return block
class ExprParser:
''' Parent class of all of the C expression parsers.
It is necessary that the children override the parse_line() method.
'''
ATTRIBUTE = r'(((__attribute__\s*\(\([^;]+\)\))|(asm\s*\([^)]+\)))\s*)*'
def __init__(self, project_quirks, debug):
self.project_quirks = project_quirks
self.debug = debug
def fast_forward_scope(self, cur, op, loc):
''' Consume lines in a code block.
Consume all lines of a block of code such as a composite type declaration or
a function declaration.
- CUR is the string to consume this expression from
- OP is the string array for the file
- LOC is the first unread location in CUR
- Returns: The next location to be read in the array as well as the updated
value of CUR, which will now have the body of the function or composite
type.
'''
nesting = cur.count('{') - cur.count('}')
while nesting > 0 and loc < len(op):
cur = cur + ' ' + op[loc]
nesting = nesting + op[loc].count('{')
nesting = nesting - op[loc].count('}')
loc = loc + 1
return (cur, loc)
def parse_line(self, cur, op, loc, code, macros):
''' The parse method should always be overridden by the child.
'''
raise
class FuncParser(ExprParser):
REGEX = re.compile(ExprParser.ATTRIBUTE + r'\s*(\w+)\s*\([^(][^{]+\)\s*{')
def parse_line(self, cur, op, loc, code, macros):
''' Parse a function.
Match a function definition.
- CUR is the string to consume this expression from
- OP is the string array for the file
- LOC is the first unread location in CUR
- CODE is the block to which we add this
- Returns: The next location to be read in the array.
'''
found = re.search(self.REGEX, cur)
if not found:
return cur, loc
name = found.group(5)
self.debug.print('FOUND FUNC: %s' % name)
# Consume everything up to the ending brace of the function.
(cur, loc) = self.fast_forward_scope(cur, op, loc)
new_block(name, block_type.func, [cur], code)
return '', loc
class CompositeParser(ExprParser):
# Composite types such as structs and unions.
REGEX = re.compile(r'(struct|union|enum)\s*(\w*)\s*{')
def parse_line(self, cur, op, loc, code, macros):
''' Parse a composite type.
Match declaration of a composite type such as a struct or a union.
- CUR is the string to consume this expression from
- OP is the string array for the file
- LOC is the first unread location in CUR
- CODE is the block to which we add this
- Returns: The next location to be read in the array.
'''
found = re.search(self.REGEX, cur)
if not found:
return cur, loc
# Lap up all of the struct definition.
(cur, loc) = self.fast_forward_scope(cur, op, loc)
name = found.group(2)
if not name:
if 'typedef' in cur:
name = re.sub(r'.*}\s*(\w+);$', r'\1', cur)
else:
name = '<anonymous>'
ctype = found.group(1)
if ctype == 'struct':
blocktype = block_type.struct
if ctype == 'enum':
blocktype = block_type.enum
if ctype == 'union':
blocktype = block_type.union
new_block(name, block_type.composite, [cur], code)
return '', loc
class AssignParser(ExprParser):
# Static assignments.
REGEX = re.compile(r'(\w+)\s*(\[[^\]]*\])*\s*([^\s]*attribute[\s\w()]+)?\s*=')
def parse_line(self, cur, op, loc, code, macros):
''' Parse an assignment statement.
This includes array assignments.
- CUR is the string to consume this expression from
- OP is the string array for the file
- LOC is the first unread location in CUR
- CODE is the block to which we add this
- Returns: The next location to be read in the array.
'''
found = re.search(self.REGEX, cur)
if not found:
return cur, loc
name = found.group(1)
self.debug.print('FOUND ASSIGN: %s' % name)
# Lap up everything up to semicolon.
while ';' not in cur and loc < len(op):
cur = op[loc]
loc = loc + 1
new_block(name, block_type.assign, [cur], code)
return '', loc
class DeclParser(ExprParser):
# Function pointer typedefs.
TYPEDEF_FN_RE = re.compile(r'\(\*(\w+)\)\s*\([^)]+\);')
# Simple decls.
DECL_RE = re.compile(r'(\w+)(\[\w*\])*\s*' + ExprParser.ATTRIBUTE + ';')
# __typeof decls.
TYPEOF_RE = re.compile(r'__typeof\s*\([\w\s]+\)\s*(\w+)\s*' + \
ExprParser.ATTRIBUTE + ';')
# Function Declarations.
FNDECL_RE = re.compile(r'\s*(\w+)\s*\(([^\(][^;]*)?\)\s*' +
ExprParser.ATTRIBUTE + ';')
def __init__(self, regex, blocktype, project_quirks, debug):
# The regex for the current instance.
self.REGEX = regex
self.blocktype = blocktype
super().__init__(project_quirks, debug)
def parse_line(self, cur, op, loc, code, macros):
''' Parse a top level declaration.
All types of declarations except function declarations.
- CUR is the string to consume this expression from
- OP is the string array for the file
- LOC is the first unread location in CUR
- CODE is the block to which we add this function
- Returns: The next location to be read in the array.
'''
found = re.search(self.REGEX, cur)
if not found:
return cur, loc
# The name is the first group for all of the above regexes. This is a
# coincidence, so care must be taken if regexes are added or changed to
# ensure that this is true.
name = found.group(1)
self.debug.print('FOUND DECL: %s' % name)
new_block(name, self.blocktype, [cur], code)
return '', loc
class MacroParser(ExprParser):
# The macrocall_re peeks into the next line to ensure that it doesn't
# eat up a FUNC by accident. The func_re regex is also quite crude and
# only intends to ensure that the function name gets picked up
# correctly.
MACROCALL_RE = re.compile(r'(\w+)\s*(\(.*\))*$')
def parse_line(self, cur, op, loc, code, macros):
''' Parse a macro call.
Match symbol hack macro calls that get added without semicolons.
- CUR is the string to consume this expression from
- OP is the string array for the file
- LOC is the first unread location in CUR
- CODE is the block to which we add this
- MACROS is the regex match object.
- Returns: The next location to be read in the array.
'''
# First we have the macros for symbol hacks and all macros we identified so
# far.
if cur.count('(') != cur.count(')'):
return cur, loc
if loc < len(op) and '{' in op[loc]:
return cur, loc
found = re.search(self.MACROCALL_RE, cur)
if found:
sym = found.group(1)
name = found.group(2)
if sym in macros or self.project_quirks and \
sym in self.project_quirks.C_MACROS:
self.debug.print('FOUND MACROCALL: %s (%s)' % (sym, name))
new_block(sym, block_type.macrocall, [cur], code)
return '', loc
# Next, there could be macros that get called right inside their #ifdef, but
# without the semi-colon.
if cur.strip() == code['name'].strip():
self.debug.print('FOUND MACROCALL (without brackets): %s' % (cur))
new_block(cur, block_type.macrocall, [cur], code)
return '',loc
return cur, loc
class Frontend:
''' The C Frontend implementation.
'''
KNOWN_MACROS = []
def __init__(self, project_quirks, debug):
self.op = []
self.debug = debug
self.project_quirks = project_quirks
self.c_expr_parsers = [
CompositeParser(project_quirks, debug),
AssignParser(project_quirks, debug),
DeclParser(DeclParser.TYPEOF_RE, block_type.decl,
project_quirks, debug),
DeclParser(DeclParser.TYPEDEF_FN_RE, block_type.decl,
project_quirks, debug),
DeclParser(DeclParser.FNDECL_RE, block_type.fndecl,
project_quirks, debug),
FuncParser(project_quirks, debug),
DeclParser(DeclParser.DECL_RE, block_type.decl, project_quirks,
debug),
MacroParser(project_quirks, debug)]
def remove_extern_c(self):
''' Process extern "C"/"C++" block nesting.
The extern "C" nesting does not add much value so it's safe to almost always
drop it. Also drop extern "C++" blocks.
'''
new_op = []
nesting = 0
extern_nesting = 0
for l in self.op:
if '{' in l:
nesting = nesting + 1
if re.match(r'extern\s*"C"\s*{', l):
extern_nesting = nesting
continue
if '}' in l:
nesting = nesting - 1
if nesting < extern_nesting:
extern_nesting = 0
continue
new_op.append(l)
# Now drop all extern C++ blocks.
self.op = new_op
new_op = []
nesting = 0
extern_nesting = 0
in_cpp = False
for l in self.op:
if re.match(r'extern\s*"C\+\+"\s*{', l):
nesting = nesting + 1
in_cpp = True
if in_cpp:
if '{' in l:
nesting = nesting + 1
if '}' in l:
nesting = nesting - 1
if nesting == 0:
new_op.append(l)
self.op = new_op
def remove_comments(self, op):
''' Remove comments.
Return OP by removing all comments from it.
'''
self.debug.print('REMOVE COMMENTS')
sep='\n'
opstr = sep.join(op)
opstr = re.sub(r'/\*.*?\*/', r'', opstr, flags=re.MULTILINE | re.DOTALL)
opstr = re.sub(r'\\\n', r' ', opstr, flags=re.MULTILINE | re.DOTALL)
new_op = list(filter(None, opstr.split(sep)))
return new_op
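# Besides stripping /* ... */ comments, the second substitution above also
# joins backslash-continued lines, so multi-line macro definitions reach the
# rest of the parser as a single logical line.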
def normalize_condition(self, name):
''' Make some minor transformations on macro conditions to make them more
readable.
'''
# Negation with a redundant bracket.
name = re.sub(r'!\s*\(\s*(\w+)\s*\)', r'! \1', name)
# Pull in negation of equality.
name = re.sub(r'!\s*\(\s*(\w+)\s*==\s*(\w+)\)', r'\1 != \2', name)
# Pull in negation of inequality.
name = re.sub(r'!\s*\(\s*(\w+)\s*!=\s*(\w+)\)', r'\1 == \2', name)
# Fix simple double negation.
name = re.sub(r'!\s*\(\s*!\s*(\w+)\s*\)', r'\1', name)
# Similar, but nesting a complex expression. Because of the greedy match,
# this matches only the outermost brackets.
name = re.sub(r'!\s*\(\s*!\s*\((.*)\)\s*\)$', r'\1', name)
return name
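# For example, the rewrites above turn '! ( FOO )' into '! FOO', '!(A == B)'
# into 'A != B', and '!(!(COND))' into 'COND', which keeps the generated
# macro-scope names readable in the ChangeLog output.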
def parse_preprocessor(self, loc, code, start = ''):
''' Parse a preprocessor directive.
In case a preprocessor condition (i.e. if/elif/else), create a new code
block to nest code into and in other cases, identify and add entities such as
include files, defines, etc.
- OP is the string array for the file
- LOC is the first unread location in CUR
- CODE is the block to which we add this function
- START is the string that should continue to be expanded in case we step
into a new macro scope.
- Returns: The next location to be read in the array.
'''
cur = self.op[loc]
loc = loc + 1
endblock = False
self.debug.print('PARSE_MACRO: %s' % cur)
# Remove the # and strip spaces again.
cur = cur[1:].strip()
# Include file.
if cur.find('include') == 0:
m = re.search(r'include\s*["<]?([^">]+)[">]?', cur)
new_block(m.group(1), block_type.macro_include, [cur], code)
# Macro definition.
if cur.find('define') == 0:
m = re.search(r'define\s+([a-zA-Z0-9_]+)', cur)
name = m.group(1)
exists = False
# Find out if this is a redefinition.
for c in code['contents']:
if c['name'] == name and c['type'] == block_type.macro_def:
c['flags'] = block_flags.macro_redefined
exists = True
break
if not exists:
new_block(m.group(1), block_type.macro_def, [cur], code,
block_flags.macro_defined)
# Add macros as we encounter them.
self.KNOWN_MACROS.append(m.group(1))
# Macro undef.
if cur.find('undef') == 0:
m = re.search(r'undef\s+([a-zA-Z0-9_]+)', cur)
new_block(m.group(1), block_type.macro_def, [cur], code)
# #error and #warning macros.
if cur.find('error') == 0 or cur.find('warning') == 0:
m = re.search(r'(error|warning)\s+"?(.*)"?', cur)
if m:
name = m.group(2)
else:
name = '<blank>'
new_block(name, block_type.macro_info, [cur], code)
# Start of an #if or #ifdef block.
elif cur.find('if') == 0:
rem = re.sub(r'ifndef', r'!', cur).strip()
rem = re.sub(r'(ifdef|defined|if)', r'', rem).strip()
rem = self.normalize_condition(rem)
ifdef = new_block(rem, block_type.macro_cond, [], code)
ifdef['headcond'] = ifdef
ifdef['start'] = start
loc = self.parse_line(loc, ifdef, start)
# End the previous #if/#elif and begin a new block.
elif cur.find('elif') == 0 and code['parent']:
rem = self.normalize_condition(re.sub(r'(elif|defined)', r'', cur).strip())
# The #else and #elif blocks should go into the current block's parent.
ifdef = new_block(rem, block_type.macro_cond, [], code['parent'])
ifdef['headcond'] = code['headcond']
loc = self.parse_line(loc, ifdef, code['headcond']['start'])
endblock = True
# End the previous #if/#elif and begin a new block.
elif cur.find('else') == 0 and code['parent']:
name = self.normalize_condition('!(' + code['name'] + ')')
ifdef = new_block(name, block_type.macro_cond, [], code['parent'],
block_flags.else_block)
ifdef['headcond'] = code['headcond']
loc = self.parse_line(loc, ifdef, code['headcond']['start'])
endblock = True
elif cur.find('endif') == 0 and code['parent']:
# Insert an empty else block if there isn't one.
if code['flags'] != block_flags.else_block:
name = self.normalize_condition('!(' + code['name'] + ')')
ifdef = new_block(name, block_type.macro_cond, [], code['parent'],
block_flags.else_block)
ifdef['headcond'] = code['headcond']
loc = self.parse_line(loc - 1, ifdef, code['headcond']['start'])
endblock = True
return (loc, endblock)
def parse_c_expr(self, cur, loc, code):
''' Parse a C expression.
CUR is the string to be parsed, which continues to grow until a match is
found. OP is the string array and LOC is the first unread location in the
string array. CODE is the block in which any identified expressions should
be added.
'''
self.debug.print('PARSING: %s' % cur)
for p in self.c_expr_parsers:
cur, loc = p.parse_line(cur, self.op, loc, code, self.KNOWN_MACROS)
if not cur:
break
return cur, loc
def expand_problematic_macros(self, cur):
''' Replace problem macros with their substitutes in CUR.
'''
for p in self.project_quirks.MACRO_QUIRKS:
cur = re.sub(p['orig'], p['sub'], cur)
return cur
def parse_line(self, loc, code, start = ''):
'''
Parse the file line by line. The function assumes a mostly GNU coding
standard compliant input so it might barf with anything that is eligible for
the Obfuscated C code contest.
The basic idea of the parser is to identify macro conditional scopes and
definitions, includes, etc. and then parse the remaining C code in the
context of those macro scopes. The parser does not try to understand the
semantics of the code or even validate its syntax. It only records high
level symbols in the source and makes a tree structure to indicate the
declaration/definition of those symbols and their scope in the macro
definitions.
OP is the string array.
LOC is the first unparsed line.
CODE is the block scope within which the parsing is currently going on.
START is the string with which this parsing should start.
'''
cur = start
endblock = False
saved_cur = ''
saved_loc = 0
endblock_loc = loc
while loc < len(self.op):
nextline = self.op[loc]
# Macros.
if nextline[0] == '#':
(loc, endblock) = self.parse_preprocessor(loc, code, cur)
if endblock:
endblock_loc = loc
# Rest of C Code.
else:
cur = cur + ' ' + nextline
cur = self.expand_problematic_macros(cur).strip()
cur, loc = self.parse_c_expr(cur, loc + 1, code)
if endblock and not cur:
# If we are returning from the first #if block, we want to proceed
# beyond the current block, not repeat it for any preceding blocks.
if code['headcond'] == code:
return loc
else:
return endblock_loc
return loc
def drop_empty_blocks(self, tree):
''' Drop empty macro conditional blocks.
'''
newcontents = []
for x in tree['contents']:
if x['type'] != block_type.macro_cond or len(x['contents']) > 0:
newcontents.append(x)
for t in newcontents:
if t['type'] == block_type.macro_cond:
self.drop_empty_blocks(t)
tree['contents'] = newcontents
def consolidate_tree_blocks(self, tree):
''' Consolidate common macro conditional blocks.
Get macro conditional blocks at the same level but scatterred across the
file together into a single common block to allow for better comparison.
'''
# Nothing to do for non-nesting blocks.
if tree['type'] != block_type.macro_cond \
and tree['type'] != block_type.file:
return
# Now for nesting blocks, get the list of unique condition names and
# consolidate code under them. The result also bunches up all the
# conditions at the top.
newcontents = []
macros = [x for x in tree['contents'] \
if x['type'] == block_type.macro_cond]
macro_names = sorted(set([x['name'] for x in macros]))
for m in macro_names:
nc = [x['contents'] for x in tree['contents'] if x['name'] == m \
and x['type'] == block_type.macro_cond]
b = new_block(m, block_type.macro_cond, sum(nc, []), tree)
self.consolidate_tree_blocks(b)
newcontents.append(b)
newcontents.extend([x for x in tree['contents'] \
if x['type'] != block_type.macro_cond])
tree['contents'] = newcontents
def compact_tree(self, tree):
''' Try to reduce the tree to its minimal form.
A source code tree in its simplest form may have a lot of duplicated
information that may be difficult to compare and come up with a minimal
difference.
'''
# First, drop all empty blocks.
self.drop_empty_blocks(tree)
# Macro conditions that nest the entire file aren't very interesting. This
# should take care of the header guards.
if tree['type'] == block_type.file \
and len(tree['contents']) == 1 \
and tree['contents'][0]['type'] == block_type.macro_cond:
tree['contents'] = tree['contents'][0]['contents']
# Finally consolidate all macro conditional blocks.
self.consolidate_tree_blocks(tree)
def parse(self, op):
''' File parser.
Parse the input array of lines OP and generate a tree structure to
represent the file. This tree structure is then used for comparison between
the old and new file.
'''
self.KNOWN_MACROS = []
tree = new_block('', block_type.file, [], None)
self.op = self.remove_comments(op)
self.remove_extern_c()
self.op = [re.sub(r'#\s+', '#', x) for x in self.op]
self.parse_line(0, tree)
self.compact_tree(tree)
self.dump_tree(tree, 0)
return tree
def print_change(self, tree, action, prologue = ''):
''' Print the nature of the differences found in the tree compared to the
other tree. TREE is the tree that changed, action is what the change was
(Added, Removed, Modified) and prologue specifies the macro scope the change
is in. The function calls itself recursively for all macro condition tree
nodes.
'''
if tree['type'] != block_type.macro_cond:
print('\t%s(%s): %s.' % (prologue, tree['name'], action))
return
prologue = '%s[%s]' % (prologue, tree['name'])
for t in tree['contents']:
if t['type'] == block_type.macro_cond:
self.print_change(t, action, prologue)
else:
print('\t%s(%s): %s.' % (prologue, t['name'], action))
def compare_trees(self, left, right, prologue = ''):
''' Compare two trees and print the difference.
This routine is the entry point to compare two trees and print out their
differences. LEFT and RIGHT will always have the same name and type,
starting with block_type.file and '' at the top level.
'''
if left['type'] == block_type.macro_cond or left['type'] == block_type.file:
if left['type'] == block_type.macro_cond:
prologue = '%s[%s]' % (prologue, left['name'])
# Make sure that everything in the left tree exists in the right tree.
for cl in left['contents']:
found = False
for cr in right['contents']:
if not cl['matched'] and not cr['matched'] and \
cl['name'] == cr['name'] and cl['type'] == cr['type']:
cl['matched'] = cr['matched'] = True
self.compare_trees(cl, cr, prologue)
found = True
break
if not found:
self.print_change(cl, cl['actions']['del'], prologue)
# ... and vice versa. This time we only need to look at unmatched
# contents.
for cr in right['contents']:
if not cr['matched']:
self.print_change(cr, cr['actions']['new'], prologue)
else:
if left['contents'] != right['contents']:
self.print_change(left, left['actions']['mod'], prologue)
def dump_tree(self, tree, indent):
''' Print the entire tree.
'''
if not self.debug.debug:
return
if tree['type'] == block_type.macro_cond or tree['type'] == block_type.file:
print('%sScope: %s' % (' ' * indent, tree['name']))
for c in tree['contents']:
self.dump_tree(c, indent + 4)
print('%sEndScope: %s' % (' ' * indent, tree['name']))
else:
if tree['type'] == block_type.func:
print('%sFUNC: %s' % (' ' * indent, tree['name']))
elif tree['type'] == block_type.composite:
print('%sCOMPOSITE: %s' % (' ' * indent, tree['name']))
elif tree['type'] == block_type.assign:
print('%sASSIGN: %s' % (' ' * indent, tree['name']))
elif tree['type'] == block_type.fndecl:
print('%sFNDECL: %s' % (' ' * indent, tree['name']))
elif tree['type'] == block_type.decl:
print('%sDECL: %s' % (' ' * indent, tree['name']))
elif tree['type'] == block_type.macrocall:
print('%sMACROCALL: %s' % (' ' * indent, tree['name']))
elif tree['type'] == block_type.macro_def:
print('%sDEFINE: %s' % (' ' * indent, tree['name']))
elif tree['type'] == block_type.macro_include:
print('%sINCLUDE: %s' % (' ' * indent, tree['name']))
elif tree['type'] == block_type.macro_undef:
print('%sUNDEF: %s' % (' ' * indent, tree['name']))
else:
print('%sMACRO LEAF: %s' % (' ' * indent, tree['name']))
def compare(self, oldfile, newfile):
''' Entry point for the C backend.
Parse the two files into trees and compare them. Print the result of the
comparison in the ChangeLog-like format.
'''
self.debug.print('LEFT TREE')
self.debug.print('-' * 80)
left = self.parse(oldfile)
self.debug.print('RIGHT TREE')
self.debug.print('-' * 80)
right = self.parse(newfile)
self.compare_trees(left, right)
| gpl-3.0 |
apbard/scipy | scipy/sparse/linalg/dsolve/setup.py | 43 | 1728 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join, dirname
import sys
import os
import glob
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
from scipy._build_utils import get_sgemv_fix
from scipy._build_utils import numpy_nodepr_api
config = Configuration('dsolve',parent_package,top_path)
config.add_data_dir('tests')
lapack_opt = get_info('lapack_opt',notfound_action=2)
if sys.platform == 'win32':
superlu_defs = [('NO_TIMER',1)]
else:
superlu_defs = []
superlu_defs.append(('USE_VENDOR_BLAS',1))
superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')
sources = list(glob.glob(join(superlu_src, '*.c')))
headers = list(glob.glob(join(superlu_src, '*.h')))
config.add_library('superlu_src',
sources=sources,
macros=superlu_defs,
include_dirs=[superlu_src],
)
# Extension
ext_sources = ['_superlumodule.c',
'_superlu_utils.c',
'_superluobject.c']
ext_sources += get_sgemv_fix(lapack_opt)
config.add_extension('_superlu',
sources=ext_sources,
libraries=['superlu_src'],
depends=(sources + headers),
extra_info=lapack_opt,
**numpy_nodepr_api
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
nathanial/lettuce | tests/integration/lib/Django-1.3/django/contrib/databrowse/sites.py | 329 | 5628 | from django import http
from django.db import models
from django.contrib.databrowse.datastructures import EasyModel
from django.shortcuts import render_to_response
from django.utils.safestring import mark_safe
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class DatabrowsePlugin(object):
def urls(self, plugin_name, easy_instance_field):
"""
Given an EasyInstanceField object, returns a list of URLs for this
plugin's views of this object. These URLs should be absolute.
Returns None if the EasyInstanceField object doesn't get a
list of plugin-specific URLs.
"""
return None
def model_index_html(self, request, model, site):
"""
Returns a snippet of HTML to include on the model index page.
"""
return ''
def model_view(self, request, model_databrowse, url):
"""
Handles main URL routing for a plugin's model-specific pages.
"""
raise NotImplementedError
class ModelDatabrowse(object):
plugins = {}
def __init__(self, model, site):
self.model = model
self.site = site
def root(self, request, url):
"""
Handles main URL routing for the databrowse app.
`url` is the remainder of the URL -- e.g. 'objects/3'.
"""
# Delegate to the appropriate method, based on the URL.
if url is None:
return self.main_view(request)
try:
plugin_name, rest_of_url = url.split('/', 1)
except ValueError: # need more than 1 value to unpack
plugin_name, rest_of_url = url, None
try:
plugin = self.plugins[plugin_name]
except KeyError:
raise http.Http404('A plugin with the requested name does not exist.')
return plugin.model_view(request, self, rest_of_url)
def main_view(self, request):
easy_model = EasyModel(self.site, self.model)
html_snippets = mark_safe(u'\n'.join([p.model_index_html(request, self.model, self.site) for p in self.plugins.values()]))
return render_to_response('databrowse/model_detail.html', {
'model': easy_model,
'root_url': self.site.root_url,
'plugin_html': html_snippets,
})
class DatabrowseSite(object):
def __init__(self):
self.registry = {} # model_class -> databrowse_class
self.root_url = None
def register(self, model_or_iterable, databrowse_class=None, **options):
"""
Registers the given model(s) with the given databrowse site.
The model(s) should be Model classes, not instances.
If a databrowse class isn't given, it will use DefaultModelDatabrowse
(the default databrowse options).
If a model is already registered, this will raise AlreadyRegistered.
"""
databrowse_class = databrowse_class or DefaultModelDatabrowse
if issubclass(model_or_iterable, models.Model):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model in self.registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
self.registry[model] = databrowse_class
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if issubclass(model_or_iterable, models.Model):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self.registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self.registry[model]
def root(self, request, url):
"""
Handles main URL routing for the databrowse app.
`url` is the remainder of the URL -- e.g. 'comments/comment/'.
"""
self.root_url = request.path[:len(request.path) - len(url)]
url = url.rstrip('/') # Trim trailing slash, if it exists.
if url == '':
return self.index(request)
elif '/' in url:
return self.model_page(request, *url.split('/', 2))
raise http.Http404('The requested databrowse page does not exist.')
def index(self, request):
m_list = [EasyModel(self, m) for m in self.registry.keys()]
return render_to_response('databrowse/homepage.html', {'model_list': m_list, 'root_url': self.root_url})
def model_page(self, request, app_label, model_name, rest_of_url=None):
"""
Handles the model-specific functionality of the databrowse site, delegating
to the appropriate ModelDatabrowse class.
"""
model = models.get_model(app_label, model_name)
if model is None:
raise http.Http404("App %r, model %r, not found." % (app_label, model_name))
try:
databrowse_class = self.registry[model]
except KeyError:
raise http.Http404("This model exists but has not been registered with databrowse.")
return databrowse_class(model, self).root(request, rest_of_url)
site = DatabrowseSite()
from django.contrib.databrowse.plugins.calendars import CalendarPlugin
from django.contrib.databrowse.plugins.objects import ObjectDetailPlugin
from django.contrib.databrowse.plugins.fieldchoices import FieldChoicePlugin
class DefaultModelDatabrowse(ModelDatabrowse):
plugins = {'objects': ObjectDetailPlugin(), 'calendars': CalendarPlugin(), 'fields': FieldChoicePlugin()}
| gpl-3.0 |
bassettsj/dmc-three.js | utils/exporters/blender/2.63/scripts/addons/io_mesh_threejs/import_threejs.py | 23 | 16503 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
Blender importer for Three.js (ASCII JSON format).
"""
import os
import time
import json
import bpy
import mathutils
from mathutils.geometry import tesselate_polygon
from bpy_extras.image_utils import load_image
# #####################################################
# Generators
# #####################################################
def setColor(c, t):
c.r = t[0]
c.g = t[1]
c.b = t[2]
def create_texture(filename, modelpath):
name = filename
texture = bpy.data.textures.new(name, type='IMAGE')
image = load_image(filename, modelpath)
has_data = False
if image:
texture.image = image
has_data = image.has_data
return texture
def create_materials(data, modelpath):
materials = []
materials_data = data.get("materials", [])
for i, m in enumerate(materials_data):
name = m.get("DbgName", "material_%d" % i)
colorAmbient = m.get("colorAmbient", None)
colorDiffuse = m.get("colorDiffuse", None)
colorSpecular = m.get("colorSpecular", None)
alpha = m.get("transparency", 1.0)
specular_hardness = m.get("specularCoef", 0)
mapDiffuse = m.get("mapDiffuse", None)
mapLightmap = m.get("mapLightmap", None)
vertexColorsType = m.get("vertexColors", False)
useVertexColors = False
if vertexColorsType:
useVertexColors = True
material = bpy.data.materials.new(name)
material.THREE_useVertexColors = useVertexColors
if colorDiffuse:
setColor(material.diffuse_color, colorDiffuse)
material.diffuse_intensity = 1.0
if colorSpecular:
setColor(material.specular_color, colorSpecular)
material.specular_intensity = 1.0
if alpha < 1.0:
material.alpha = alpha
material.use_transparency = True
if specular_hardness:
material.specular_hardness = specular_hardness
if mapDiffuse:
texture = create_texture(mapDiffuse, modelpath)
mtex = material.texture_slots.add()
mtex.texture = texture
mtex.texture_coords = 'UV'
mtex.use = True
mtex.use_map_color_diffuse = True
material.active_texture = texture
materials.append(material)
return materials
def create_mesh_object(name, vertices, materials, face_data, flipYZ, recalculate_normals):
faces = face_data["faces"]
vertexNormals = face_data["vertexNormals"]
vertexColors = face_data["vertexColors"]
vertexUVs = face_data["vertexUVs"]
faceMaterials = face_data["materials"]
faceColors = face_data["faceColors"]
edges = []
# Create a new mesh
me = bpy.data.meshes.new(name)
me.from_pydata(vertices, edges, faces)
# Handle normals
if not recalculate_normals:
me.update(calc_edges = True)
if face_data["hasVertexNormals"]:
print("setting vertex normals")
for fi in range(len(faces)):
if vertexNormals[fi]:
#print("setting face %i with %i vertices" % (fi, len(normals[fi])))
# if me.update() is called after setting vertex normals
# setting face.use_smooth overrides these normals
# - this fixes weird shading artefacts (seems to come from sharing
# of vertices between faces, didn't find a way how to set vertex normals
# per face use of vertex as opposed to per vertex),
# - probably this just overrides all custom vertex normals
# - to preserve vertex normals from the original data
# call me.update() before setting them
me.faces[fi].use_smooth = True
if not recalculate_normals:
for j in range(len(vertexNormals[fi])):
vertexNormal = vertexNormals[fi][j]
x = vertexNormal[0]
y = vertexNormal[1]
z = vertexNormal[2]
if flipYZ:
tmp = y
y = -z
z = tmp
# flip normals (this make them look consistent with the original before export)
#x = -x
#y = -y
#z = -z
vi = me.faces[fi].vertices[j]
me.vertices[vi].normal.x = x
me.vertices[vi].normal.y = y
me.vertices[vi].normal.z = z
if recalculate_normals:
me.update(calc_edges = True)
# Handle colors
if face_data["hasVertexColors"]:
print("setting vertex colors")
me.vertex_colors.new("vertex_color_layer_0")
for fi in range(len(faces)):
if vertexColors[fi]:
face_colors = me.vertex_colors[0].data[fi]
face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
for vi in range(len(vertexColors[fi])):
r = vertexColors[fi][vi][0]
g = vertexColors[fi][vi][1]
b = vertexColors[fi][vi][2]
face_colors[vi].r = r
face_colors[vi].g = g
face_colors[vi].b = b
elif face_data["hasFaceColors"]:
print("setting vertex colors from face colors")
me.vertex_colors.new("vertex_color_layer_0")
for fi in range(len(faces)):
if faceColors[fi]:
r = faceColors[fi][0]
g = faceColors[fi][1]
b = faceColors[fi][2]
face_colors = me.vertex_colors[0].data[fi]
face_colors = face_colors.color1, face_colors.color2, face_colors.color3, face_colors.color4
for vi in range(len(faces[fi])):
face_colors[vi].r = r
face_colors[vi].g = g
face_colors[vi].b = b
# Handle uvs
if face_data["hasVertexUVs"]:
print("setting vertex uvs")
for li, layer in enumerate(vertexUVs):
me.uv_textures.new("uv_layer_%d" % li)
for fi in range(len(faces)):
if layer[fi]:
uv_face = me.uv_textures[li].data[fi]
face_uvs = uv_face.uv1, uv_face.uv2, uv_face.uv3, uv_face.uv4
for vi in range(len(layer[fi])):
u = layer[fi][vi][0]
v = layer[fi][vi][1]
face_uvs[vi].x = u
face_uvs[vi].y = v
active_texture = materials[faceMaterials[fi]].active_texture
if active_texture:
uv_face.image = active_texture.image
# Handle materials # 1
if face_data["hasMaterials"]:
print("setting materials (mesh)")
for m in materials:
me.materials.append(m)
print("setting materials (faces)")
for fi in range(len(faces)):
if faceMaterials[fi] >= 0:
me.faces[fi].material_index = faceMaterials[fi]
# Create a new object
ob = bpy.data.objects.new(name, me)
ob.data = me # link the mesh data to the object
scene = bpy.context.scene # get the current scene
scene.objects.link(ob) # link the object into the scene
ob.location = scene.cursor_location # position object at 3d-cursor
# #####################################################
# Faces
# #####################################################
def extract_faces(data):
result = {
"faces" : [],
"materials" : [],
"faceUVs" : [],
"vertexUVs" : [],
"faceNormals" : [],
"vertexNormals" : [],
"faceColors" : [],
"vertexColors" : [],
"hasVertexNormals" : False,
"hasVertexUVs" : False,
"hasVertexColors" : False,
"hasFaceColors" : False,
"hasMaterials" : False
}
faces = data.get("faces", [])
normals = data.get("normals", [])
colors = data.get("colors", [])
offset = 0
zLength = len(faces)
# disregard empty arrays
nUvLayers = 0
for layer in data["uvs"]:
if len(layer) > 0:
nUvLayers += 1
result["faceUVs"].append([])
result["vertexUVs"].append([])
while ( offset < zLength ):
type = faces[ offset ]
offset += 1
isQuad = isBitSet( type, 0 )
hasMaterial = isBitSet( type, 1 )
hasFaceUv = isBitSet( type, 2 )
hasFaceVertexUv = isBitSet( type, 3 )
hasFaceNormal = isBitSet( type, 4 )
hasFaceVertexNormal = isBitSet( type, 5 )
hasFaceColor = isBitSet( type, 6 )
hasFaceVertexColor = isBitSet( type, 7 )
#print("type", type, "bits", isQuad, hasMaterial, hasFaceUv, hasFaceVertexUv, hasFaceNormal, hasFaceVertexNormal, hasFaceColor, hasFaceVertexColor)
result["hasVertexUVs"] = result["hasVertexUVs"] or hasFaceVertexUv
result["hasVertexNormals"] = result["hasVertexNormals"] or hasFaceVertexNormal
result["hasVertexColors"] = result["hasVertexColors"] or hasFaceVertexColor
result["hasFaceColors"] = result["hasFaceColors"] or hasFaceColor
result["hasMaterials"] = result["hasMaterials"] or hasMaterial
# vertices
if isQuad:
a = faces[ offset ]
offset += 1
b = faces[ offset ]
offset += 1
c = faces[ offset ]
offset += 1
d = faces[ offset ]
offset += 1
face = [a, b, c, d]
nVertices = 4
else:
a = faces[ offset ]
offset += 1
b = faces[ offset ]
offset += 1
c = faces[ offset ]
offset += 1
face = [a, b, c]
nVertices = 3
result["faces"].append(face)
# material
if hasMaterial:
materialIndex = faces[ offset ]
offset += 1
else:
materialIndex = -1
result["materials"].append(materialIndex)
# uvs
for i in range(nUvLayers):
faceUv = None
if hasFaceUv:
uvLayer = data["uvs"][ i ]
uvIndex = faces[ offset ]
offset += 1
u = uvLayer[ uvIndex * 2 ]
v = uvLayer[ uvIndex * 2 + 1 ]
faceUv = [u, v]
result["faceUVs"][i].append(faceUv)
if hasFaceVertexUv:
uvLayer = data["uvs"][ i ]
vertexUvs = []
for j in range(nVertices):
uvIndex = faces[ offset ]
offset += 1
u = uvLayer[ uvIndex * 2 ]
v = uvLayer[ uvIndex * 2 + 1 ]
vertexUvs.append([u, v])
result["vertexUVs"][i].append(vertexUvs)
if hasFaceNormal:
normalIndex = faces[ offset ] * 3
offset += 1
x = normals[ normalIndex ]
y = normals[ normalIndex + 1 ]
z = normals[ normalIndex + 2 ]
faceNormal = [x, y, z]
else:
faceNormal = None
result["faceNormals"].append(faceNormal)
if hasFaceVertexNormal:
vertexNormals = []
for j in range(nVertices):
normalIndex = faces[ offset ] * 3
offset += 1
x = normals[ normalIndex ]
y = normals[ normalIndex + 1 ]
z = normals[ normalIndex + 2 ]
vertexNormals.append( [x, y, z] )
else:
vertexNormals = None
result["vertexNormals"].append(vertexNormals)
if hasFaceColor:
colorIndex = faces[ offset ]
offset += 1
faceColor = hexToTuple( colors[ colorIndex ] )
else:
faceColor = None
result["faceColors"].append(faceColor)
if hasFaceVertexColor:
vertexColors = []
for j in range(nVertices):
colorIndex = faces[ offset ]
offset += 1
color = hexToTuple( colors[ colorIndex ] )
vertexColors.append( color )
else:
vertexColors = None
result["vertexColors"].append(vertexColors)
return result
# #####################################################
# Utils
# #####################################################
def hexToTuple( hexColor ):
r = (( hexColor >> 16 ) & 0xff) / 255.0
g = (( hexColor >> 8 ) & 0xff) / 255.0
b = ( hexColor & 0xff) / 255.0
return (r, g, b)
def isBitSet(value, position):
return value & ( 1 << position )
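# In the Three.js ASCII JSON format each face record starts with a type
# bitmask: bit 0 marks a quad (4 vertex indices) rather than a triangle (3),
# and bits 1-7 flag the presence of a material index, a face UV, per-vertex
# UVs, a face normal, per-vertex normals, a face color and per-vertex colors.
# Those flags determine how many further entries extract_faces() consumes
# from the flat "faces" array for each face.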
def splitArray(data, chunkSize):
result = []
chunk = []
for i in range(len(data)):
if i > 0 and i % chunkSize == 0:
result.append(chunk)
chunk = []
chunk.append(data[i])
result.append(chunk)
return result
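# e.g. splitArray([x0, y0, z0, x1, y1, z1], 3) -> [[x0, y0, z0], [x1, y1, z1]];
# load() below uses this to regroup the flat vertex array into (x, y, z) triples.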
def extract_json_string(text):
marker_begin = "var model ="
marker_end = "postMessage"
start = text.find(marker_begin) + len(marker_begin)
end = text.find(marker_end)
end = text.rfind("}", start, end)
return text[start:end+1].strip()
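# Worker-format exports wrap the JSON in JavaScript ("var model = { ... };
# postMessage(...)"); this helper slices out just the object literal between
# those two markers.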
def get_name(filepath):
return os.path.splitext(os.path.basename(filepath))[0]
def get_path(filepath):
return os.path.dirname(filepath)
# #####################################################
# Parser
# #####################################################
def load(operator, context, filepath, option_flip_yz = True, recalculate_normals = True, option_worker = False):
print('\nimporting %r' % filepath)
time_main = time.time()
print("\tparsing JSON file...")
time_sub = time.time()
file = open(filepath, 'rU')
rawcontent = file.read()
file.close()
if option_worker:
json_string = extract_json_string(rawcontent)
else:
json_string = rawcontent
data = json.loads( json_string )
time_new = time.time()
print('parsing %.4f sec' % (time_new - time_sub))
time_sub = time_new
# flip YZ
vertices = splitArray(data["vertices"], 3)
if option_flip_yz:
vertices[:] = [(v[0], -v[2], v[1]) for v in vertices]
# extract faces
face_data = extract_faces(data)
# deselect all
bpy.ops.object.select_all(action='DESELECT')
nfaces = len(face_data["faces"])
nvertices = len(vertices)
nnormals = len(data.get("normals", [])) / 3
ncolors = len(data.get("colors", [])) / 3
nuvs = len(data.get("uvs", [])) / 2
nmaterials = len(data.get("materials", []))
print('\tbuilding geometry...\n\tfaces:%i, vertices:%i, vertex normals: %i, vertex uvs: %i, vertex colors: %i, materials: %i ...' % (
nfaces, nvertices, nnormals, nuvs, ncolors, nmaterials ))
# Create materials
materials = create_materials(data, get_path(filepath))
# Create new obj
create_mesh_object(get_name(filepath), vertices, materials, face_data, option_flip_yz, recalculate_normals)
scene = bpy.context.scene
scene.update()
time_new = time.time()
print('finished importing: %r in %.4f sec.' % (filepath, (time_new - time_main)))
return {'FINISHED'}
if __name__ == "__main__":
register()
| mit |
hilaskis/UAV_MissionPlanner | Lib/_weakrefset.py | 135 | 6389 | # Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard(object):
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet(object):
def __init__(self, data=None):
self.data = set()
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
l = self._pending_removals
discard = self.data.discard
while l:
discard(l.pop())
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
yield item
def __len__(self):
return sum(x() is not None for x in self.data)
def __contains__(self, item):
try:
wr = ref(item)
except TypeError:
return False
return wr in self.data
def __reduce__(self):
return (self.__class__, (list(self),),
getattr(self, '__dict__', None))
__hash__ = None
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet')
item = itemref()
if item is not None:
return item
def remove(self, item):
if self._pending_removals:
self._commit_removals()
self.data.remove(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
if isinstance(other, self.__class__):
self.data.update(other.data)
else:
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
# Helper functions for simple delegating methods.
def _apply(self, other, method):
if not isinstance(other, self.__class__):
other = self.__class__(other)
newdata = method(other.data)
newset = self.__class__()
newset.data = newdata
return newset
def difference(self, other):
return self._apply(other, self.data.difference)
__sub__ = difference
def difference_update(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self._apply(other, self.data.intersection)
__and__ = intersection
def intersection_update(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__lt__ = issubset
def __le__(self, other):
return self.data <= set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__gt__ = issuperset
def __ge__(self, other):
return self.data >= set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(ref(item) for item in other)
def symmetric_difference(self, other):
return self._apply(other, self.data.symmetric_difference)
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item) for item in other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item) for item in other)
return self
def union(self, other):
return self._apply(other, self.data.union)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
| gpl-2.0 |
goyalankit/po-compiler | object_files/pygraphviz-1.2/setup_egg.py | 2 | 1644 | #!/usr/bin/env python
"""
An alternate setup.py script for setuptools.
If you have setuptools and run this as
>>> python setup_egg.py bdist_egg
you will get a python egg.
Use
>>> python setup_egg.py test
to run the tests.
"""
# local import, might need modification for 2.6/3.0
from setup import *
# must occur after local import to override distutils.core.setup
from setuptools import setup, Extension
extension = [Extension("pygraphviz._graphviz",
["pygraphviz/graphviz_wrap.c"],
include_dirs=include_dirs,
library_dirs=library_dirs,
runtime_library_dirs=library_dirs,
libraries=["cgraph","cdt"])]
if __name__ == "__main__":
setup(
name = release.name,
version = release.version,
author = release.authors['Hagberg'][0],
author_email = release.authors['Hagberg'][1],
description = release.description,
keywords = release.keywords,
long_description = release.long_description,
license = release.license,
platforms = release.platforms,
url = release.url,
download_url = release.download_url,
classifiers = release.classifiers,
packages = packages,
data_files = data,
ext_modules = extension,
package_data = package_data,
install_requires=['setuptools'],
include_package_data = True,
test_suite = "pygraphviz.tests.test.test_suite",
)
| apache-2.0 |
0Chencc/CTFCrackTools | Lib/Lib/lib2to3/pgen2/token.py | 353 | 1244 | #! /usr/bin/env python
"""Token constants (from "token.h")."""
# Taken from Python (r53757) and modified to include some tokens
# originally monkeypatched in by pgen2.tokenize
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
OP = 51
COMMENT = 52
NL = 53
RARROW = 54
ERRORTOKEN = 55
N_TOKENS = 56
NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
| gpl-3.0 |
blackbliss/callme | flask/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.py | 250 | 4062 | from __future__ import absolute_import, division, unicode_literals
from gettext import gettext
_ = gettext
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
pass
class Filter(_base.Filter):
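# Pass-through sanity filter: every token is yielded unchanged, but a
# LintError is raised for the first structurally invalid token encountered.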
def __iter__(self):
open_elements = []
contentModelFlag = "PCDATA"
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("StartTag not in PCDATA content model flag: %s") % name)
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %r") % name)
if not name:
raise LintError(_("Empty tag name"))
if type == "StartTag" and name in voidElements:
raise LintError(_("Void element reported as StartTag token: %s") % name)
elif type == "EmptyTag" and name not in voidElements:
raise LintError(_("Non-void element reported as EmptyTag token: %s") % token["name"])
if type == "StartTag":
open_elements.append(name)
# use distinct loop variables so the tag name is not shadowed by attribute
# names; the tag name is checked against cdataElements/rcdataElements below
for attr_name, attr_value in token["data"]:
if not isinstance(attr_name, str):
raise LintError(_("Attribute name is not a string: %r") % attr_name)
if not attr_name:
raise LintError(_("Empty attribute name"))
if not isinstance(attr_value, str):
raise LintError(_("Attribute value is not a string: %r") % attr_value)
if name in cdataElements:
contentModelFlag = "CDATA"
elif name in rcdataElements:
contentModelFlag = "RCDATA"
elif name == "plaintext":
contentModelFlag = "PLAINTEXT"
elif type == "EndTag":
name = token["name"]
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %r") % name)
if not name:
raise LintError(_("Empty tag name"))
if name in voidElements:
raise LintError(_("Void element reported as EndTag token: %s") % name)
start_name = open_elements.pop()
if start_name != name:
raise LintError(_("EndTag (%s) does not match StartTag (%s)") % (name, start_name))
contentModelFlag = "PCDATA"
elif type == "Comment":
if contentModelFlag != "PCDATA":
raise LintError(_("Comment not in PCDATA content model flag"))
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
if not isinstance(data, str):
raise LintError(_("Attribute name is not a string: %r") % data)
if not data:
raise LintError(_("%s token with empty data") % type)
if type == "SpaceCharacters":
data = data.strip(spaceCharacters)
if data:
raise LintError(_("Non-space character(s) found in SpaceCharacters token: ") % data)
elif type == "Doctype":
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("Doctype not in PCDATA content model flag: %s") % name)
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %r") % name)
# XXX: what to do with token["data"] ?
elif type in ("ParseError", "SerializeError"):
pass
else:
raise LintError(_("Unknown token type: %s") % type)
yield token
| mit |
cysuncn/python | spark/crm/PROC_O_LNA_XDXT_CUSTOMER_MEMO.py | 1 | 6179 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_O_LNA_XDXT_CUSTOMER_MEMO').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
#dates needed for processing
etl_date = sys.argv[1]
#ETL date
V_DT = etl_date
#previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#first day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
O_CI_XDXT_CUSTOMER_MEMO = sqlContext.read.parquet(hdfs+'/O_CI_XDXT_CUSTOMER_MEMO/*')
O_CI_XDXT_CUSTOMER_MEMO.registerTempTable("O_CI_XDXT_CUSTOMER_MEMO")
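#Summary of the job flow (comment added for clarity): restore yesterday's full
#snapshot, overlay today's source records on top of it, write the merged result
#as today's snapshot, then rotate the main and backup snapshot directories.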
#Task [12] 001-01::
V_STEP = V_STEP + 1
#first delete all existing data from the target table
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_CUSTOMER_MEMO/*.parquet")
#copy yesterday's full snapshot over from the backup table
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_XDXT_CUSTOMER_MEMO_BK/"+V_DT_LD+".parquet /"+dbname+"/F_CI_XDXT_CUSTOMER_MEMO/"+V_DT+".parquet")
F_CI_XDXT_CUSTOMER_MEMO = sqlContext.read.parquet(hdfs+'/F_CI_XDXT_CUSTOMER_MEMO/*')
F_CI_XDXT_CUSTOMER_MEMO.registerTempTable("F_CI_XDXT_CUSTOMER_MEMO")
sql = """
SELECT A.CUSTOMERID AS CUSTOMERID
,A.SERIALNO AS SERIALNO
,A.OCCURDATE AS OCCURDATE
,A.EVENTTYPE AS EVENTTYPE
,A.STATEBEFORECHANGE AS STATEBEFORECHANGE
,A.STATEAFTERCHANGE AS STATEAFTERCHANGE
,A.EVENTNAME AS EVENTNAME
,A.EVENTSUM AS EVENTSUM
,A.EVENTCURRENCY AS EVENTCURRENCY
,A.EVENTDESCRIBE AS EVENTDESCRIBE
,A.INPUTORGID AS INPUTORGID
,A.INPUTUSERID AS INPUTUSERID
,A.INPUTDATE AS INPUTDATE
,A.REMARK AS REMARK
,A.DISPOSERESULT AS DISPOSERESULT
,A.FR_ID AS FR_ID
,V_DT AS ODS_ST_DATE
,'LNA' AS ODS_SYS_ID
FROM O_CI_XDXT_CUSTOMER_MEMO A --customer major-event (memo) table
"""
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_CUSTOMER_MEMO_INNTMP1 = sqlContext.sql(sql)
F_CI_XDXT_CUSTOMER_MEMO_INNTMP1.registerTempTable("F_CI_XDXT_CUSTOMER_MEMO_INNTMP1")
#F_CI_XDXT_CUSTOMER_MEMO = sqlContext.read.parquet(hdfs+'/F_CI_XDXT_CUSTOMER_MEMO/*')
#F_CI_XDXT_CUSTOMER_MEMO.registerTempTable("F_CI_XDXT_CUSTOMER_MEMO")
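#Anti-join: keep only the snapshot rows whose key (CUSTOMERID, SERIALNO) is not
#present in today's increment; the increment itself is unioned back in afterwards.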
sql = """
SELECT DST.CUSTOMERID --customer ID:src.CUSTOMERID
,DST.SERIALNO --serial number:src.SERIALNO
,DST.OCCURDATE --occurrence date:src.OCCURDATE
,DST.EVENTTYPE --event type:src.EVENTTYPE
,DST.STATEBEFORECHANGE --state before change:src.STATEBEFORECHANGE
,DST.STATEAFTERCHANGE --state after change:src.STATEAFTERCHANGE
,DST.EVENTNAME --event name:src.EVENTNAME
,DST.EVENTSUM --event amount:src.EVENTSUM
,DST.EVENTCURRENCY --currency:src.EVENTCURRENCY
,DST.EVENTDESCRIBE --event description and reason:src.EVENTDESCRIBE
,DST.INPUTORGID --registering organization:src.INPUTORGID
,DST.INPUTUSERID --registered by:src.INPUTUSERID
,DST.INPUTDATE --registration date:src.INPUTDATE
,DST.REMARK --remarks:src.REMARK
,DST.DISPOSERESULT --handling result:src.DISPOSERESULT
,DST.FR_ID --legal-entity ID:src.FR_ID
,DST.ODS_ST_DATE --system platform date:src.ODS_ST_DATE
,DST.ODS_SYS_ID --source system code:src.ODS_SYS_ID
FROM F_CI_XDXT_CUSTOMER_MEMO DST
LEFT JOIN F_CI_XDXT_CUSTOMER_MEMO_INNTMP1 SRC
ON SRC.CUSTOMERID = DST.CUSTOMERID
AND SRC.SERIALNO = DST.SERIALNO
WHERE SRC.CUSTOMERID IS NULL """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
F_CI_XDXT_CUSTOMER_MEMO_INNTMP2 = sqlContext.sql(sql)
dfn="F_CI_XDXT_CUSTOMER_MEMO/"+V_DT+".parquet"
F_CI_XDXT_CUSTOMER_MEMO_INNTMP2=F_CI_XDXT_CUSTOMER_MEMO_INNTMP2.unionAll(F_CI_XDXT_CUSTOMER_MEMO_INNTMP1)
F_CI_XDXT_CUSTOMER_MEMO_INNTMP1.cache()
F_CI_XDXT_CUSTOMER_MEMO_INNTMP2.cache()
nrowsi = F_CI_XDXT_CUSTOMER_MEMO_INNTMP1.count()
nrowsa = F_CI_XDXT_CUSTOMER_MEMO_INNTMP2.count()
F_CI_XDXT_CUSTOMER_MEMO_INNTMP2.write.save(path = hdfs + '/' + dfn, mode='overwrite')
F_CI_XDXT_CUSTOMER_MEMO_INNTMP1.unpersist()
F_CI_XDXT_CUSTOMER_MEMO_INNTMP2.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert F_CI_XDXT_CUSTOMER_MEMO lines %d, all lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrowsi, nrowsa)
ret = os.system("hdfs dfs -mv /"+dbname+"/F_CI_XDXT_CUSTOMER_MEMO/"+V_DT_LD+".parquet /"+dbname+"/F_CI_XDXT_CUSTOMER_MEMO_BK/")
#first delete today's data from the backup table
ret = os.system("hdfs dfs -rm -r /"+dbname+"/F_CI_XDXT_CUSTOMER_MEMO_BK/"+V_DT+".parquet")
#copy today's full snapshot from the main table to the backup table
ret = os.system("hdfs dfs -cp -f /"+dbname+"/F_CI_XDXT_CUSTOMER_MEMO/"+V_DT+".parquet /"+dbname+"/F_CI_XDXT_CUSTOMER_MEMO_BK/"+V_DT+".parquet")
| gpl-3.0 |
linlife/Python | lin_jumper/author_agent/paramiko-1.10.1/paramiko/channel.py | 5 | 46546 | # Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Abstraction for an SSH2 channel.
"""
import binascii
import sys
import time
import threading
import socket
import os
from paramiko.common import *
from paramiko import util
from paramiko.message import Message
from paramiko.ssh_exception import SSHException
from paramiko.file import BufferedFile
from paramiko.buffered_pipe import BufferedPipe, PipeTimeout
from paramiko import pipe
# lower bound on the max packet size we'll accept from the remote host
MIN_PACKET_SIZE = 1024
class Channel (object):
"""
A secure tunnel across an SSH L{Transport}. A Channel is meant to behave
like a socket, and has an API that should be indistinguishable from the
python socket API.
Because SSH2 has a windowing kind of flow control, if you stop reading data
from a Channel and its buffer fills up, the server will be unable to send
you any more data until you read some of it. (This won't affect other
channels on the same transport -- all channels on a single transport are
flow-controlled independently.) Similarly, if the server isn't reading
data you send, calls to L{send} may block, unless you set a timeout. This
is exactly like a normal network socket, so it shouldn't be too surprising.
"""
def __init__(self, chanid):
"""
Create a new channel. The channel is not associated with any
particular session or L{Transport} until the Transport attaches it.
Normally you would only call this method from the constructor of a
subclass of L{Channel}.
@param chanid: the ID of this channel, as passed by an existing
L{Transport}.
@type chanid: int
"""
self.chanid = chanid
self.remote_chanid = 0
self.transport = None
self.active = False
self.eof_received = 0
self.eof_sent = 0
self.in_buffer = BufferedPipe()
self.in_stderr_buffer = BufferedPipe()
self.timeout = None
self.closed = False
self.ultra_debug = False
self.lock = threading.Lock()
self.out_buffer_cv = threading.Condition(self.lock)
self.in_window_size = 0
self.out_window_size = 0
self.in_max_packet_size = 0
self.out_max_packet_size = 0
self.in_window_threshold = 0
self.in_window_sofar = 0
self.status_event = threading.Event()
self._name = str(chanid)
self.logger = util.get_logger('paramiko.transport')
self._pipe = None
self.event = threading.Event()
self.event_ready = False
self.combine_stderr = False
self.exit_status = -1
self.origin_addr = None
def __del__(self):
try:
self.close()
except:
pass
def __repr__(self):
"""
Return a string representation of this object, for debugging.
@rtype: str
"""
out = '<paramiko.Channel %d' % self.chanid
if self.closed:
out += ' (closed)'
elif self.active:
if self.eof_received:
out += ' (EOF received)'
if self.eof_sent:
out += ' (EOF sent)'
out += ' (open) window=%d' % (self.out_window_size)
if len(self.in_buffer) > 0:
out += ' in-buffer=%d' % (len(self.in_buffer),)
out += ' -> ' + repr(self.transport)
out += '>'
return out
def get_pty(self, term='vt100', width=80, height=24, width_pixels=0,
height_pixels=0):
"""
Request a pseudo-terminal from the server. This is usually used right
after creating a client channel, to ask the server to provide some
basic terminal semantics for a shell invoked with L{invoke_shell}.
It isn't necessary (or desirable) to call this method if you're going
to execute a single command with L{exec_command}.
@param term: the terminal type to emulate (for example, C{'vt100'})
@type term: str
@param width: width (in characters) of the terminal screen
@type width: int
@param height: height (in characters) of the terminal screen
@type height: int
@param width_pixels: width (in pixels) of the terminal screen
@type width_pixels: int
@param height_pixels: height (in pixels) of the terminal screen
@type height_pixels: int
@raise SSHException: if the request was rejected or the channel was
closed
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('pty-req')
m.add_boolean(True)
m.add_string(term)
m.add_int(width)
m.add_int(height)
m.add_int(width_pixels)
m.add_int(height_pixels)
m.add_string('')
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
def invoke_shell(self):
"""
Request an interactive shell session on this channel. If the server
allows it, the channel will then be directly connected to the stdin,
stdout, and stderr of the shell.
Normally you would call L{get_pty} before this, in which case the
shell will operate through the pty, and the channel will be connected
to the stdin and stdout of the pty.
When the shell exits, the channel will be closed and can't be reused.
You must open a new channel if you wish to open another shell.
@raise SSHException: if the request was rejected or the channel was
closed
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('shell')
m.add_boolean(1)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
def exec_command(self, command):
"""
Execute a command on the server. If the server allows it, the channel
will then be directly connected to the stdin, stdout, and stderr of
the command being executed.
When the command finishes executing, the channel will be closed and
can't be reused. You must open a new channel if you wish to execute
another command.
@param command: a shell command to execute.
@type command: str
@raise SSHException: if the request was rejected or the channel was
closed
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('exec')
m.add_boolean(True)
m.add_string(command)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
def invoke_subsystem(self, subsystem):
"""
Request a subsystem on the server (for example, C{sftp}). If the
server allows it, the channel will then be directly connected to the
requested subsystem.
When the subsystem finishes, the channel will be closed and can't be
reused.
@param subsystem: name of the subsystem being requested.
@type subsystem: str
@raise SSHException: if the request was rejected or the channel was
closed
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('subsystem')
m.add_boolean(True)
m.add_string(subsystem)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
def resize_pty(self, width=80, height=24, width_pixels=0, height_pixels=0):
"""
Resize the pseudo-terminal. This can be used to change the width and
height of the terminal emulation created in a previous L{get_pty} call.
@param width: new width (in characters) of the terminal screen
@type width: int
@param height: new height (in characters) of the terminal screen
@type height: int
@param width_pixels: new width (in pixels) of the terminal screen
@type width_pixels: int
@param height_pixels: new height (in pixels) of the terminal screen
@type height_pixels: int
@raise SSHException: if the request was rejected or the channel was
closed
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('window-change')
m.add_boolean(False)
m.add_int(width)
m.add_int(height)
m.add_int(width_pixels)
m.add_int(height_pixels)
self.transport._send_user_message(m)
def exit_status_ready(self):
"""
Return true if the remote process has exited and returned an exit
status. You may use this to poll the process status if you don't
want to block in L{recv_exit_status}. Note that the server may not
return an exit status in some cases (like bad servers).
@return: True if L{recv_exit_status} will return immediately
@rtype: bool
@since: 1.7.3
"""
return self.closed or self.status_event.isSet()
def recv_exit_status(self):
"""
Return the exit status from the process on the server. This is
mostly useful for retrieving the results of an L{exec_command}.
If the command hasn't finished yet, this method will wait until
it does, or until the channel is closed. If no exit status is
provided by the server, -1 is returned.
@return: the exit code of the process on the server.
@rtype: int
@since: 1.2
"""
self.status_event.wait()
assert self.status_event.isSet()
return self.exit_status
def send_exit_status(self, status):
"""
Send the exit status of an executed command to the client. (This
really only makes sense in server mode.) Many clients expect to
get some sort of status code back from an executed command after
it completes.
@param status: the exit code of the process
@type status: int
@since: 1.2
"""
# in many cases, the channel will not still be open here.
# that's fine.
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('exit-status')
m.add_boolean(False)
m.add_int(status)
self.transport._send_user_message(m)
def request_x11(self, screen_number=0, auth_protocol=None, auth_cookie=None,
single_connection=False, handler=None):
"""
Request an x11 session on this channel. If the server allows it,
further x11 requests can be made from the server to the client,
when an x11 application is run in a shell session.
From RFC4254::
It is RECOMMENDED that the 'x11 authentication cookie' that is
sent be a fake, random cookie, and that the cookie be checked and
replaced by the real cookie when a connection request is received.
If you omit the auth_cookie, a new secure random 128-bit value will be
generated, used, and returned. You will need to use this value to
verify incoming x11 requests and replace them with the actual local
x11 cookie (which requires some knowledge of the x11 protocol).
If a handler is passed in, the handler is called from another thread
whenever a new x11 connection arrives. The default handler queues up
incoming x11 connections, which may be retrieved using
L{Transport.accept}. The handler's calling signature is::
handler(channel: Channel, (address: str, port: int))
@param screen_number: the x11 screen number (0, 10, etc)
@type screen_number: int
@param auth_protocol: the name of the X11 authentication method used;
if none is given, C{"MIT-MAGIC-COOKIE-1"} is used
@type auth_protocol: str
@param auth_cookie: hexadecimal string containing the x11 auth cookie;
if none is given, a secure random 128-bit value is generated
@type auth_cookie: str
@param single_connection: if True, only a single x11 connection will be
forwarded (by default, any number of x11 connections can arrive
over this session)
@type single_connection: bool
@param handler: an optional handler to use for incoming X11 connections
@type handler: function
@return: the auth_cookie used
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
if auth_protocol is None:
auth_protocol = 'MIT-MAGIC-COOKIE-1'
if auth_cookie is None:
auth_cookie = binascii.hexlify(self.transport.rng.read(16))
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('x11-req')
m.add_boolean(True)
m.add_boolean(single_connection)
m.add_string(auth_protocol)
m.add_string(auth_cookie)
m.add_int(screen_number)
self._event_pending()
self.transport._send_user_message(m)
self._wait_for_event()
self.transport._set_x11_handler(handler)
return auth_cookie
def request_forward_agent(self, handler):
"""
Request for a forward SSH Agent on this channel.
This is only valid for an ssh-agent from OpenSSH.
@param handler: a required handler to use for incoming SSH Agent connections
@type handler: function
@return: C{True} if the request was sent (currently this is always the case)
@rtype: boolean
@raise SSHException: if the channel is not open
"""
if self.closed or self.eof_received or self.eof_sent or not self.active:
raise SSHException('Channel is not open')
m = Message()
m.add_byte(chr(MSG_CHANNEL_REQUEST))
m.add_int(self.remote_chanid)
m.add_string('[email protected]')
m.add_boolean(False)
self.transport._send_user_message(m)
self.transport._set_forward_agent_handler(handler)
return True
def get_transport(self):
"""
Return the L{Transport} associated with this channel.
@return: the L{Transport} that was used to create this channel.
@rtype: L{Transport}
"""
return self.transport
def set_name(self, name):
"""
Set a name for this channel. Currently it's only used to set the name
of the channel in logfile entries. The name can be fetched with the
L{get_name} method.
@param name: new channel name
@type name: str
"""
self._name = name
def get_name(self):
"""
Get the name of this channel that was previously set by L{set_name}.
@return: the name of this channel.
@rtype: str
"""
return self._name
def get_id(self):
"""
Return the ID # for this channel. The channel ID is unique across
a L{Transport} and usually a small number. It's also the number
passed to L{ServerInterface.check_channel_request} when determining
whether to accept a channel request in server mode.
@return: the ID of this channel.
@rtype: int
"""
return self.chanid
def set_combine_stderr(self, combine):
"""
Set whether stderr should be combined into stdout on this channel.
The default is C{False}, but in some cases it may be convenient to
have both streams combined.
If this is C{False}, and L{exec_command} is called (or C{invoke_shell}
with no pty), output to stderr will not show up through the L{recv}
and L{recv_ready} calls. You will have to use L{recv_stderr} and
L{recv_stderr_ready} to get stderr output.
If this is C{True}, data will never show up via L{recv_stderr} or
L{recv_stderr_ready}.
@param combine: C{True} if stderr output should be combined into
stdout on this channel.
@type combine: bool
@return: previous setting.
@rtype: bool
@since: 1.1
"""
data = ''
self.lock.acquire()
try:
old = self.combine_stderr
self.combine_stderr = combine
if combine and not old:
# copy old stderr buffer into primary buffer
data = self.in_stderr_buffer.empty()
finally:
self.lock.release()
if len(data) > 0:
self._feed(data)
return old
### socket API
def settimeout(self, timeout):
"""
Set a timeout on blocking read/write operations. The C{timeout}
argument can be a nonnegative float expressing seconds, or C{None}. If
a float is given, subsequent channel read/write operations will raise
a timeout exception if the timeout period value has elapsed before the
operation has completed. Setting a timeout of C{None} disables
timeouts on socket operations.
C{chan.settimeout(0.0)} is equivalent to C{chan.setblocking(0)};
C{chan.settimeout(None)} is equivalent to C{chan.setblocking(1)}.
@param timeout: seconds to wait for a pending read/write operation
before raising C{socket.timeout}, or C{None} for no timeout.
@type timeout: float
"""
self.timeout = timeout
def gettimeout(self):
"""
Returns the timeout in seconds (as a float) associated with socket
operations, or C{None} if no timeout is set. This reflects the last
call to L{setblocking} or L{settimeout}.
@return: timeout in seconds, or C{None}.
@rtype: float
"""
return self.timeout
def setblocking(self, blocking):
"""
Set blocking or non-blocking mode of the channel: if C{blocking} is 0,
the channel is set to non-blocking mode; otherwise it's set to blocking
mode. Initially all channels are in blocking mode.
In non-blocking mode, if a L{recv} call doesn't find any data, or if a
L{send} call can't immediately dispose of the data, an error exception
is raised. In blocking mode, the calls block until they can proceed. An
EOF condition is considered "immediate data" for L{recv}, so if the
channel is closed in the read direction, it will never block.
C{chan.setblocking(0)} is equivalent to C{chan.settimeout(0)};
C{chan.setblocking(1)} is equivalent to C{chan.settimeout(None)}.
@param blocking: 0 to set non-blocking mode; non-0 to set blocking
mode.
@type blocking: int
"""
if blocking:
self.settimeout(None)
else:
self.settimeout(0.0)
def getpeername(self):
"""
Return the address of the remote side of this Channel, if possible.
This is just a wrapper around C{'getpeername'} on the Transport, used
to provide enough of a socket-like interface to allow asyncore to work.
(asyncore likes to call C{'getpeername'}.)
@return: the address of the remote host, if known
@rtype: tuple(str, int)
"""
return self.transport.getpeername()
def close(self):
"""
Close the channel. All future read/write operations on the channel
will fail. The remote end will receive no more data (after queued data
is flushed). Channels are automatically closed when their L{Transport}
is closed or when they are garbage collected.
"""
self.lock.acquire()
try:
# only close the pipe when the user explicitly closes the channel.
# otherwise they will get unpleasant surprises. (and do it before
# checking self.closed, since the remote host may have already
# closed the connection.)
if self._pipe is not None:
self._pipe.close()
self._pipe = None
if not self.active or self.closed:
return
msgs = self._close_internal()
finally:
self.lock.release()
for m in msgs:
if m is not None:
self.transport._send_user_message(m)
def recv_ready(self):
"""
Returns true if data is buffered and ready to be read from this
channel. A C{False} result does not mean that the channel has closed;
it means you may need to wait before more data arrives.
@return: C{True} if a L{recv} call on this channel would immediately
return at least one byte; C{False} otherwise.
@rtype: boolean
"""
return self.in_buffer.read_ready()
def recv(self, nbytes):
"""
Receive data from the channel. The return value is a string
representing the data received. The maximum amount of data to be
received at once is specified by C{nbytes}. If a string of length zero
is returned, the channel stream has closed.
@param nbytes: maximum number of bytes to read.
@type nbytes: int
@return: data.
@rtype: str
@raise socket.timeout: if no data is ready before the timeout set by
L{settimeout}.
"""
try:
out = self.in_buffer.read(nbytes, self.timeout)
except PipeTimeout, e:
raise socket.timeout()
ack = self._check_add_window(len(out))
# no need to hold the channel lock when sending this
if ack > 0:
m = Message()
m.add_byte(chr(MSG_CHANNEL_WINDOW_ADJUST))
m.add_int(self.remote_chanid)
m.add_int(ack)
self.transport._send_user_message(m)
return out
def recv_stderr_ready(self):
"""
Returns true if data is buffered and ready to be read from this
channel's stderr stream. Only channels using L{exec_command} or
L{invoke_shell} without a pty will ever have data on the stderr
stream.
@return: C{True} if a L{recv_stderr} call on this channel would
immediately return at least one byte; C{False} otherwise.
@rtype: boolean
@since: 1.1
"""
return self.in_stderr_buffer.read_ready()
def recv_stderr(self, nbytes):
"""
Receive data from the channel's stderr stream. Only channels using
L{exec_command} or L{invoke_shell} without a pty will ever have data
on the stderr stream. The return value is a string representing the
data received. The maximum amount of data to be received at once is
specified by C{nbytes}. If a string of length zero is returned, the
channel stream has closed.
@param nbytes: maximum number of bytes to read.
@type nbytes: int
@return: data.
@rtype: str
@raise socket.timeout: if no data is ready before the timeout set by
L{settimeout}.
@since: 1.1
"""
try:
out = self.in_stderr_buffer.read(nbytes, self.timeout)
except PipeTimeout, e:
raise socket.timeout()
ack = self._check_add_window(len(out))
# no need to hold the channel lock when sending this
if ack > 0:
m = Message()
m.add_byte(chr(MSG_CHANNEL_WINDOW_ADJUST))
m.add_int(self.remote_chanid)
m.add_int(ack)
self.transport._send_user_message(m)
return out
def send_ready(self):
"""
Returns true if data can be written to this channel without blocking.
This means the channel is either closed (so any write attempt would
return immediately) or there is at least one byte of space in the
outbound buffer. If there is at least one byte of space in the
outbound buffer, a L{send} call will succeed immediately and return
the number of bytes actually written.
@return: C{True} if a L{send} call on this channel would immediately
succeed or fail
@rtype: boolean
"""
self.lock.acquire()
try:
if self.closed or self.eof_sent:
return True
return self.out_window_size > 0
finally:
self.lock.release()
def send(self, s):
"""
Send data to the channel. Returns the number of bytes sent, or 0 if
the channel stream is closed. Applications are responsible for
checking that all data has been sent: if only some of the data was
transmitted, the application needs to attempt delivery of the remaining
data.
@param s: data to send
@type s: str
@return: number of bytes actually sent
@rtype: int
@raise socket.timeout: if no data could be sent before the timeout set
by L{settimeout}.
"""
size = len(s)
self.lock.acquire()
try:
size = self._wait_for_send_window(size)
if size == 0:
# eof or similar
return 0
m = Message()
m.add_byte(chr(MSG_CHANNEL_DATA))
m.add_int(self.remote_chanid)
m.add_string(s[:size])
finally:
self.lock.release()
# Note: We release self.lock before calling _send_user_message.
# Otherwise, we can deadlock during re-keying.
self.transport._send_user_message(m)
return size
def send_stderr(self, s):
"""
Send data to the channel on the "stderr" stream. This is normally
only used by servers to send output from shell commands -- clients
won't use this. Returns the number of bytes sent, or 0 if the channel
stream is closed. Applications are responsible for checking that all
data has been sent: if only some of the data was transmitted, the
application needs to attempt delivery of the remaining data.
@param s: data to send.
@type s: str
@return: number of bytes actually sent.
@rtype: int
@raise socket.timeout: if no data could be sent before the timeout set
by L{settimeout}.
@since: 1.1
"""
size = len(s)
self.lock.acquire()
try:
size = self._wait_for_send_window(size)
if size == 0:
# eof or similar
return 0
m = Message()
m.add_byte(chr(MSG_CHANNEL_EXTENDED_DATA))
m.add_int(self.remote_chanid)
m.add_int(1)
m.add_string(s[:size])
finally:
self.lock.release()
# Note: We release self.lock before calling _send_user_message.
# Otherwise, we can deadlock during re-keying.
self.transport._send_user_message(m)
return size
def sendall(self, s):
"""
Send data to the channel, without allowing partial results. Unlike
L{send}, this method continues to send data from the given string until
either all data has been sent or an error occurs. Nothing is returned.
@param s: data to send.
@type s: str
@raise socket.timeout: if sending stalled for longer than the timeout
set by L{settimeout}.
@raise socket.error: if an error occurred before the entire string was
sent.
@note: If the channel is closed while only part of the data has been
sent, there is no way to determine how much data (if any) was sent.
This is irritating, but it exactly mirrors Python's socket API.
"""
while s:
if self.closed:
# this doesn't seem useful, but it is the documented behavior of Socket
raise socket.error('Socket is closed')
sent = self.send(s)
s = s[sent:]
return None
def sendall_stderr(self, s):
"""
Send data to the channel's "stderr" stream, without allowing partial
results. Unlike L{send_stderr}, this method continues to send data
from the given string until all data has been sent or an error occurs.
Nothing is returned.
@param s: data to send to the client as "stderr" output.
@type s: str
@raise socket.timeout: if sending stalled for longer than the timeout
set by L{settimeout}.
@raise socket.error: if an error occurred before the entire string was
sent.
@since: 1.1
"""
while s:
if self.closed:
raise socket.error('Socket is closed')
sent = self.send_stderr(s)
s = s[sent:]
return None
def makefile(self, *params):
"""
Return a file-like object associated with this channel. The optional
C{mode} and C{bufsize} arguments are interpreted the same way as by
the built-in C{file()} function in python.
@return: object which can be used for python file I/O.
@rtype: L{ChannelFile}
"""
return ChannelFile(*([self] + list(params)))
def makefile_stderr(self, *params):
"""
Return a file-like object associated with this channel's stderr
stream. Only channels using L{exec_command} or L{invoke_shell}
without a pty will ever have data on the stderr stream.
The optional C{mode} and C{bufsize} arguments are interpreted the
same way as by the built-in C{file()} function in python. For a
client, it only makes sense to open this file for reading. For a
server, it only makes sense to open this file for writing.
@return: object which can be used for python file I/O.
@rtype: L{ChannelFile}
@since: 1.1
"""
return ChannelStderrFile(*([self] + list(params)))
def fileno(self):
"""
Returns an OS-level file descriptor which can be used for polling, but
I{not} for reading or writing. This is primarily to allow python's
C{select} module to work.
The first time C{fileno} is called on a channel, a pipe is created to
simulate real OS-level file descriptor (FD) behavior. Because of this,
two OS-level FDs are created, which will use up FDs faster than normal.
(You won't notice this effect unless you have hundreds of channels
open at the same time.)
@return: an OS-level file descriptor
@rtype: int
@warning: This method causes channel reads to be slightly less
efficient.
"""
self.lock.acquire()
try:
if self._pipe is not None:
return self._pipe.fileno()
# create the pipe and feed in any existing data
self._pipe = pipe.make_pipe()
p1, p2 = pipe.make_or_pipe(self._pipe)
self.in_buffer.set_event(p1)
self.in_stderr_buffer.set_event(p2)
return self._pipe.fileno()
finally:
self.lock.release()
def shutdown(self, how):
"""
Shut down one or both halves of the connection. If C{how} is 0,
further receives are disallowed. If C{how} is 1, further sends
are disallowed. If C{how} is 2, further sends and receives are
disallowed. This closes the stream in one or both directions.
@param how: 0 (stop receiving), 1 (stop sending), or 2 (stop
receiving and sending).
@type how: int
"""
if (how == 0) or (how == 2):
# feign "read" shutdown
self.eof_received = 1
if (how == 1) or (how == 2):
self.lock.acquire()
try:
m = self._send_eof()
finally:
self.lock.release()
if m is not None:
self.transport._send_user_message(m)
def shutdown_read(self):
"""
Shutdown the receiving side of this socket, closing the stream in
the incoming direction. After this call, future reads on this
channel will fail instantly. This is a convenience method, equivalent
to C{shutdown(0)}, for people who don't make it a habit to
memorize unix constants from the 1970s.
@since: 1.2
"""
self.shutdown(0)
def shutdown_write(self):
"""
Shutdown the sending side of this socket, closing the stream in
the outgoing direction. After this call, future writes on this
channel will fail instantly. This is a convenience method, equivalent
to C{shutdown(1)}, for people who don't make it a habit to
memorize unix constants from the 1970s.
@since: 1.2
"""
self.shutdown(1)
### calls from Transport
def _set_transport(self, transport):
self.transport = transport
self.logger = util.get_logger(self.transport.get_log_channel())
def _set_window(self, window_size, max_packet_size):
self.in_window_size = window_size
self.in_max_packet_size = max_packet_size
# threshold of bytes we receive before we bother to send a window update
self.in_window_threshold = window_size // 10
self.in_window_sofar = 0
self._log(DEBUG, 'Max packet in: %d bytes' % max_packet_size)
def _set_remote_channel(self, chanid, window_size, max_packet_size):
self.remote_chanid = chanid
self.out_window_size = window_size
self.out_max_packet_size = max(max_packet_size, MIN_PACKET_SIZE)
self.active = 1
self._log(DEBUG, 'Max packet out: %d bytes' % max_packet_size)
def _request_success(self, m):
self._log(DEBUG, 'Secsh channel %d request ok' % self.chanid)
self.event_ready = True
self.event.set()
return
def _request_failed(self, m):
self.lock.acquire()
try:
msgs = self._close_internal()
finally:
self.lock.release()
for m in msgs:
if m is not None:
self.transport._send_user_message(m)
def _feed(self, m):
if type(m) is str:
# passed from _feed_extended
s = m
else:
s = m.get_string()
self.in_buffer.feed(s)
def _feed_extended(self, m):
code = m.get_int()
s = m.get_string()
if code != 1:
self._log(ERROR, 'unknown extended_data type %d; discarding' % code)
return
if self.combine_stderr:
self._feed(s)
else:
self.in_stderr_buffer.feed(s)
def _window_adjust(self, m):
nbytes = m.get_int()
self.lock.acquire()
try:
if self.ultra_debug:
self._log(DEBUG, 'window up %d' % nbytes)
self.out_window_size += nbytes
self.out_buffer_cv.notifyAll()
finally:
self.lock.release()
def _handle_request(self, m):
key = m.get_string()
want_reply = m.get_boolean()
server = self.transport.server_object
ok = False
if key == 'exit-status':
self.exit_status = m.get_int()
self.status_event.set()
ok = True
elif key == 'xon-xoff':
# ignore
ok = True
elif key == 'pty-req':
term = m.get_string()
width = m.get_int()
height = m.get_int()
pixelwidth = m.get_int()
pixelheight = m.get_int()
modes = m.get_string()
if server is None:
ok = False
else:
ok = server.check_channel_pty_request(self, term, width, height, pixelwidth,
pixelheight, modes)
elif key == 'shell':
if server is None:
ok = False
else:
ok = server.check_channel_shell_request(self)
elif key == 'exec':
cmd = m.get_string()
if server is None:
ok = False
else:
ok = server.check_channel_exec_request(self, cmd)
elif key == 'subsystem':
name = m.get_string()
if server is None:
ok = False
else:
ok = server.check_channel_subsystem_request(self, name)
elif key == 'window-change':
width = m.get_int()
height = m.get_int()
pixelwidth = m.get_int()
pixelheight = m.get_int()
if server is None:
ok = False
else:
ok = server.check_channel_window_change_request(self, width, height, pixelwidth,
pixelheight)
elif key == 'x11-req':
single_connection = m.get_boolean()
auth_proto = m.get_string()
auth_cookie = m.get_string()
screen_number = m.get_int()
if server is None:
ok = False
else:
ok = server.check_channel_x11_request(self, single_connection,
auth_proto, auth_cookie, screen_number)
elif key == '[email protected]':
if server is None:
ok = False
else:
ok = server.check_channel_forward_agent_request(self)
else:
self._log(DEBUG, 'Unhandled channel request "%s"' % key)
ok = False
if want_reply:
m = Message()
if ok:
m.add_byte(chr(MSG_CHANNEL_SUCCESS))
else:
m.add_byte(chr(MSG_CHANNEL_FAILURE))
m.add_int(self.remote_chanid)
self.transport._send_user_message(m)
def _handle_eof(self, m):
self.lock.acquire()
try:
if not self.eof_received:
self.eof_received = True
self.in_buffer.close()
self.in_stderr_buffer.close()
if self._pipe is not None:
self._pipe.set_forever()
finally:
self.lock.release()
self._log(DEBUG, 'EOF received (%s)', self._name)
def _handle_close(self, m):
self.lock.acquire()
try:
msgs = self._close_internal()
self.transport._unlink_channel(self.chanid)
finally:
self.lock.release()
for m in msgs:
if m is not None:
self.transport._send_user_message(m)
### internals...
def _log(self, level, msg, *args):
self.logger.log(level, "[chan " + self._name + "] " + msg, *args)
def _event_pending(self):
self.event.clear()
self.event_ready = False
def _wait_for_event(self):
self.event.wait()
assert self.event.isSet()
if self.event_ready:
return
e = self.transport.get_exception()
if e is None:
e = SSHException('Channel closed.')
raise e
def _set_closed(self):
# you are holding the lock.
self.closed = True
self.in_buffer.close()
self.in_stderr_buffer.close()
self.out_buffer_cv.notifyAll()
# Notify any waiters that we are closed
self.event.set()
self.status_event.set()
if self._pipe is not None:
self._pipe.set_forever()
def _send_eof(self):
# you are holding the lock.
if self.eof_sent:
return None
m = Message()
m.add_byte(chr(MSG_CHANNEL_EOF))
m.add_int(self.remote_chanid)
self.eof_sent = True
self._log(DEBUG, 'EOF sent (%s)', self._name)
return m
def _close_internal(self):
# you are holding the lock.
if not self.active or self.closed:
return None, None
m1 = self._send_eof()
m2 = Message()
m2.add_byte(chr(MSG_CHANNEL_CLOSE))
m2.add_int(self.remote_chanid)
self._set_closed()
# can't unlink from the Transport yet -- the remote side may still
# try to send meta-data (exit-status, etc)
return m1, m2
def _unlink(self):
# server connection could die before we become active: still signal the close!
if self.closed:
return
self.lock.acquire()
try:
self._set_closed()
self.transport._unlink_channel(self.chanid)
finally:
self.lock.release()
def _check_add_window(self, n):
self.lock.acquire()
try:
if self.closed or self.eof_received or not self.active:
return 0
if self.ultra_debug:
self._log(DEBUG, 'addwindow %d' % n)
self.in_window_sofar += n
if self.in_window_sofar <= self.in_window_threshold:
return 0
if self.ultra_debug:
self._log(DEBUG, 'addwindow send %d' % self.in_window_sofar)
out = self.in_window_sofar
self.in_window_sofar = 0
return out
finally:
self.lock.release()
def _wait_for_send_window(self, size):
"""
(You are already holding the lock.)
Wait for the send window to open up, and allocate up to C{size} bytes
for transmission. If no space opens up before the timeout, a timeout
exception is raised. Returns the number of bytes available to send
(may be less than requested).
"""
# you are already holding the lock
if self.closed or self.eof_sent:
return 0
if self.out_window_size == 0:
# should we block?
if self.timeout == 0.0:
raise socket.timeout()
# loop here in case we get woken up but a different thread has filled the buffer
timeout = self.timeout
while self.out_window_size == 0:
if self.closed or self.eof_sent:
return 0
then = time.time()
self.out_buffer_cv.wait(timeout)
if timeout is not None:
timeout -= time.time() - then
if timeout <= 0.0:
raise socket.timeout()
# we have some window to squeeze into
if self.closed or self.eof_sent:
return 0
if self.out_window_size < size:
size = self.out_window_size
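# leave some slack below the negotiated maximum packet size for per-packet overhead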
if self.out_max_packet_size - 64 < size:
size = self.out_max_packet_size - 64
self.out_window_size -= size
if self.ultra_debug:
self._log(DEBUG, 'window down to %d' % self.out_window_size)
return size
class ChannelFile (BufferedFile):
"""
A file-like wrapper around L{Channel}. A ChannelFile is created by calling
L{Channel.makefile}.
@bug: To correctly emulate the file object created from a socket's
C{makefile} method, a L{Channel} and its C{ChannelFile} should be able
to be closed or garbage-collected independently. Currently, closing
the C{ChannelFile} does nothing but flush the buffer.
"""
def __init__(self, channel, mode = 'r', bufsize = -1):
self.channel = channel
BufferedFile.__init__(self)
self._set_mode(mode, bufsize)
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
@rtype: str
"""
return '<paramiko.ChannelFile from ' + repr(self.channel) + '>'
def _read(self, size):
return self.channel.recv(size)
def _write(self, data):
self.channel.sendall(data)
return len(data)
class ChannelStderrFile (ChannelFile):
def __init__(self, channel, mode = 'r', bufsize = -1):
ChannelFile.__init__(self, channel, mode, bufsize)
def _read(self, size):
return self.channel.recv_stderr(size)
def _write(self, data):
self.channel.sendall_stderr(data)
return len(data)
# vim: set shiftwidth=4 expandtab :
| apache-2.0 |
mitodl/PyLmod | pylmod/base.py | 1 | 5995 | """
Python class representing interface to MIT Learning Modules Web service.
"""
import json
import logging
import requests
from requests.adapters import HTTPAdapter
log = logging.getLogger(__name__) # pylint: disable=C0103
class Base(object):
"""
Base provides the transport for accessing the MIT Learning Modules (LMod).
The Base class implements the functions that underlie the HTTP calls to
the MIT Learning Modules (LMod) Web service. It shouldn't be
instantiated directly as it is inherited by the classes that
implement the API.
Attributes:
cert (unicode): File path to the certificate used to
authenticate access to LMod Web service
urlbase (str): The URL of the LMod Web service, e.g.
``learning-modules.mit.edu`` or ``learning-modules-test.mit.edu``
"""
#: connection timeout, seconds
TIMEOUT = 200
#: Number of connection retries
RETRIES = 10
verbose = True
gradebookid = None
def __init__(
self,
cert,
urlbase='https://learning-modules.mit.edu:8443/',
):
"""Initialize Base instance.
Args:
cert (unicode): File path to the certificate used to
authenticate access to LMod Web service
urlbase (str): The URL of the LMod Web service, e.g.
``learning-modules.mit.edu`` or
``learning-modules-test.mit.edu``
"""
# pem with private and public key application certificate for access
self.cert = cert
self.urlbase = urlbase
if not urlbase.endswith('/'):
self.urlbase += '/'
self._session = requests.Session()
self._session.cert = cert
self._session.timeout = self.TIMEOUT # connection timeout
self._session.verify = True # verify site certificate
# Mount the retry adapter to the base url
self._session.mount(urlbase, HTTPAdapter(max_retries=self.RETRIES))
log.debug("------------------------------------------------------")
log.info("[PyLmod] init urlbase=%s", urlbase)
@staticmethod
def _data_to_json(data):
"""Convert to json if it isn't already a string.
Args:
data (str): data to convert to json
"""
if not isinstance(data, str):
data = json.dumps(data)
return data
def _url_format(self, service):
"""Generate URL from urlbase and service.
Args:
service (str): The endpoint service to use, i.e. gradebook
Returns:
str: URL to where the request should be made
"""
base_service_url = '{base}{service}'.format(
base=self.urlbase,
service=service
)
return base_service_url
def rest_action(self, func, url, **kwargs):
"""Routine to do low-level REST operation, with retry.
Args:
func (callable): API function to call
url (str): service URL endpoint
kwargs (dict): additional parameters to add to the call
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
list: the json-encoded content of the response
"""
try:
response = func(url, timeout=self.TIMEOUT, **kwargs)
except requests.RequestException as err:
log.exception(
"[PyLmod] Error - connection error in "
"rest_action, err=%s", err
)
raise err
try:
return response.json()
except ValueError as err:
log.exception('Unable to decode %s', response.content)
raise err
def get(self, service, params=None):
"""Generic GET operation for retrieving data from Learning Modules API.
.. code-block:: python
gbk.get('students/{gradebookId}', params=params, gradebookId=gbid)
Args:
service (str): The endpoint service to use, i.e. gradebook
params (dict): additional parameters to add to the call
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
list: the json-encoded content of the response
"""
url = self._url_format(service)
if params is None:
params = {}
return self.rest_action(self._session.get, url, params=params)
def post(self, service, data):
"""Generic POST operation for sending data to Learning Modules API.
Data should be a JSON string or a dict. If it is not a string,
it is turned into a JSON string for the POST body.
Args:
service (str): The endpoint service to use, i.e. gradebook
data (json or dict): the data payload
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
list: the json-encoded content of the response
"""
url = self._url_format(service)
data = Base._data_to_json(data)
# Add content-type for body in POST.
headers = {'content-type': 'application/json'}
return self.rest_action(self._session.post, url,
data=data, headers=headers)
def delete(self, service):
"""Generic DELETE operation for Learning Modules API.
Args:
service (str): The endpoint service to use, i.e. gradebook
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
list: the json-encoded content of the response
"""
url = self._url_format(service)
return self.rest_action(
self._session.delete, url
)
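# Illustrative usage sketch (not part of the original module). Base is meant to be
# used through a subclass such as pylmod.GradeBook; the certificate path, URL and
# gradebook id below are placeholders:
#
#     from pylmod import GradeBook
#     gb = GradeBook('/path/to/cert.pem', 'https://learning-modules-test.mit.edu:8443/')
#     students = gb.get('students/{gradebookId}'.format(gradebookId=1234))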
| bsd-2-clause |
slickqa/slickqaweb | slickqaweb/model/systemConfiguration/emailSubscription.py | 1 | 1443 | __author__ = 'jcorbett'
from mongoengine import *
from .types import AllTypes
from .subscriptionInfo import SubscriptionInfo
EmailSubscriptionSystemConfigurationType = "email-subscription"
# this is the default class name, kept for compatibility reasons
EmailSubscriptionSystemConfigurationClassName = "org.tcrun.slickij.api.data.EmailSubscription"
class EmailSubscriptionSystemConfiguration(Document):
meta = {'collection': 'system-configurations'}
name = StringField()
subscriptions = ListField(EmbeddedDocumentField(SubscriptionInfo))
enabled = BooleanField()
globalSubscription = BooleanField()
configurationType = StringField(required=True, default=EmailSubscriptionSystemConfigurationType, choices=[EmailSubscriptionSystemConfigurationType,])
className = StringField(required=True, default=EmailSubscriptionSystemConfigurationClassName, choices=[EmailSubscriptionSystemConfigurationClassName,])
dynamic_types = {
'typeName': StringField()
}
def dynamic_fields(self):
return {
'typeName': 'EmailSubscriptionSystemConfiguration'
}
@queryset_manager
def objects(doc_cls, queryset):
"""Custom QuerySet Manager that filters based on the configurationType"""
return queryset.filter(configurationType=EmailSubscriptionSystemConfigurationType)
AllTypes[EmailSubscriptionSystemConfigurationType] = EmailSubscriptionSystemConfiguration
| apache-2.0 |
Barmaley-exe/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial_fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
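# The Olivetti faces dataset: 400 grayscale face images of 64x64 pixels.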
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
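# center and scale the current batch of patches before the online k-means update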
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
Omegaphora/external_chromium_org | chrome/test/chromedriver/archive.py | 25 | 2935 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Downloads items from the Chromium continuous archive."""
import os
import platform
import urllib
import util
CHROME_35_REVISION = '260135'
CHROME_36_REVISION = '269696'
CHROME_37_REVISION = '278933'
_SITE = 'http://commondatastorage.googleapis.com'
class Site(object):
CONTINUOUS = _SITE + '/chromium-browser-continuous'
CHROMIUM_SNAPSHOT = _SITE + '/chromium-browser-snapshots'
BLINK_SNAPSHOT = _SITE + '/chromium-webkit-snapshots'
def GetLatestRevision(site=Site.CONTINUOUS):
"""Returns the latest revision (as a string) available for this platform.
Args:
site: the archive site to check against; defaults to the continuous one.
"""
url = site + '/%s/LAST_CHANGE'
return urllib.urlopen(url % _GetDownloadPlatform()).read()
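# Example (illustrative): GetLatestRevision() returns a plain revision string,
# e.g. '278933' (compare CHROME_37_REVISION above).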
def DownloadChrome(revision, dest_dir, site=Site.CONTINUOUS):
"""Downloads the packaged Chrome from the archive to the given directory.
Args:
revision: the revision of Chrome to download.
dest_dir: the directory to download Chrome to.
site: the archive site to download from; defaults to the continuous one.
Returns:
The path to the unzipped Chrome binary.
"""
def GetZipName():
if util.IsWindows():
return 'chrome-win32'
elif util.IsMac():
return 'chrome-mac'
elif util.IsLinux():
return 'chrome-linux'
def GetChromePathFromPackage():
if util.IsWindows():
return 'chrome.exe'
elif util.IsMac():
return 'Chromium.app/Contents/MacOS/Chromium'
elif util.IsLinux():
return 'chrome'
zip_path = os.path.join(dest_dir, 'chrome-%s.zip' % revision)
if not os.path.exists(zip_path):
url = site + '/%s/%s/%s.zip' % (_GetDownloadPlatform(), revision,
GetZipName())
print 'Downloading', url, '...'
urllib.urlretrieve(url, zip_path)
util.Unzip(zip_path, dest_dir)
return os.path.join(dest_dir, GetZipName(), GetChromePathFromPackage())
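# Example (illustrative): download the pinned Chrome 37 build and get the path to
# the unpacked browser binary; the destination directory below is a placeholder.
#
#     chrome_path = DownloadChrome(CHROME_37_REVISION, '/tmp/chrome-download')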
def _GetDownloadPlatform():
"""Returns the name for this platform on the archive site."""
if util.IsWindows():
return 'Win'
elif util.IsMac():
return 'Mac'
elif util.IsLinux():
if platform.architecture()[0] == '64bit':
return 'Linux_x64'
else:
return 'Linux'
def GetLatestSnapshotVersion():
"""Returns the latest revision of snapshot build."""
return GetLatestRevision(GetSnapshotDownloadSite())
def GetSnapshotDownloadSite():
"""Returns the site to download snapshot build according to the platform.
For Linux 32-bit, it is chromium snapshot build.
For other platform, it is blink snapshot build.
Because there is no linux32 blink snapshot build.
"""
if _GetDownloadPlatform() in ('Linux', 'Linux_x64', 'Mac'):
return Site.CHROMIUM_SNAPSHOT
else:
return Site.BLINK_SNAPSHOT
| bsd-3-clause |
benpatterson/edx-platform | pavelib/utils/envs.py | 39 | 6325 | """
Helper functions for loading environment settings.
"""
from __future__ import print_function
import os
import sys
import json
from lazy import lazy
from path import path
import memcache
class Env(object):
"""
Load information about the execution environment.
"""
# Root of the git repository (edx-platform)
REPO_ROOT = path(__file__).abspath().parent.parent.parent
# Reports Directory
REPORT_DIR = REPO_ROOT / 'reports'
METRICS_DIR = REPORT_DIR / 'metrics'
# Python unittest dirs
PYTHON_COVERAGERC = REPO_ROOT / ".coveragerc"
# Bok_choy dirs
BOK_CHOY_DIR = REPO_ROOT / "common" / "test" / "acceptance"
BOK_CHOY_LOG_DIR = REPO_ROOT / "test_root" / "log"
BOK_CHOY_REPORT_DIR = REPORT_DIR / "bok_choy"
BOK_CHOY_COVERAGERC = BOK_CHOY_DIR / ".coveragerc"
# If set, put reports for run in "unique" directories.
# The main purpose of this is to ensure that the reports can be 'slurped'
# in the main jenkins flow job without overwriting the reports from other
# build steps. For local development/testing, this shouldn't be needed.
if os.environ.get("SHARD", None):
shard_str = "shard_{}".format(os.environ.get("SHARD"))
BOK_CHOY_REPORT_DIR = BOK_CHOY_REPORT_DIR / shard_str
BOK_CHOY_LOG_DIR = BOK_CHOY_LOG_DIR / shard_str
# For the time being, stubs are used by both the bok-choy and lettuce acceptance tests
# For this reason, the stubs package is currently located in the Django app called "terrain"
# where other lettuce configuration is stored.
BOK_CHOY_STUB_DIR = REPO_ROOT / "common" / "djangoapps" / "terrain"
# Directory that videos are served from
VIDEO_SOURCE_DIR = REPO_ROOT / "test_root" / "data" / "video"
BOK_CHOY_SERVERS = {
'lms': {
'port': 8003,
'log': BOK_CHOY_LOG_DIR / "bok_choy_lms.log"
},
'cms': {
'port': 8031,
'log': BOK_CHOY_LOG_DIR / "bok_choy_studio.log"
}
}
BOK_CHOY_STUBS = {
'xqueue': {
'port': 8040,
'log': BOK_CHOY_LOG_DIR / "bok_choy_xqueue.log",
'config': 'register_submission_url=http://0.0.0.0:8041/test/register_submission',
},
'ora': {
'port': 8041,
'log': BOK_CHOY_LOG_DIR / "bok_choy_ora.log",
'config': '',
},
'comments': {
'port': 4567,
'log': BOK_CHOY_LOG_DIR / "bok_choy_comments.log",
},
'video': {
'port': 8777,
'log': BOK_CHOY_LOG_DIR / "bok_choy_video_sources.log",
'config': "root_dir={}".format(VIDEO_SOURCE_DIR),
},
'youtube': {
'port': 9080,
'log': BOK_CHOY_LOG_DIR / "bok_choy_youtube.log",
},
'edxnotes': {
'port': 8042,
'log': BOK_CHOY_LOG_DIR / "bok_choy_edxnotes.log",
}
}
# Mongo databases that will be dropped before/after the tests run
BOK_CHOY_MONGO_DATABASE = "test"
BOK_CHOY_CACHE = memcache.Client(['0.0.0.0:11211'], debug=0)
# Test Ids Directory
TEST_DIR = REPO_ROOT / ".testids"
# Files used to run each of the js test suites
# TODO: Store this as a dict. Order seems to matter for some
# reason. See issue TE-415.
JS_TEST_ID_FILES = [
REPO_ROOT / 'lms/static/js_test.yml',
REPO_ROOT / 'lms/static/js_test_coffee.yml',
REPO_ROOT / 'cms/static/js_test.yml',
REPO_ROOT / 'cms/static/js_test_squire.yml',
REPO_ROOT / 'common/lib/xmodule/xmodule/js/js_test.yml',
REPO_ROOT / 'common/static/js_test.yml',
REPO_ROOT / 'common/static/js_test_requirejs.yml',
]
JS_TEST_ID_KEYS = [
'lms',
'lms-coffee',
'cms',
'cms-squire',
'xmodule',
'common',
'common-requirejs'
]
JS_REPORT_DIR = REPORT_DIR / 'javascript'
# Directories used for common/lib/ tests
LIB_TEST_DIRS = []
for item in (REPO_ROOT / "common/lib").listdir():
if (REPO_ROOT / 'common/lib' / item).isdir():
LIB_TEST_DIRS.append(path("common/lib") / item.basename())
LIB_TEST_DIRS.append(path("pavelib/paver_tests"))
# Directory for i18n test reports
I18N_REPORT_DIR = REPORT_DIR / 'i18n'
# Service variant (lms, cms, etc.) configured with an environment variable
# We use this to determine which envs.json file to load.
SERVICE_VARIANT = os.environ.get('SERVICE_VARIANT', None)
# If service variant not configured in env, then pass the correct
# environment for lms / cms
if not SERVICE_VARIANT: # this will intentionally catch "";
if any(i in sys.argv[1:] for i in ('cms', 'studio')):
SERVICE_VARIANT = 'cms'
else:
SERVICE_VARIANT = 'lms'
@lazy
def env_tokens(self):
"""
Return a dict of environment settings.
If we couldn't find the JSON file, issue a warning and return an empty dict.
"""
# Find the env JSON file
if self.SERVICE_VARIANT:
env_path = self.REPO_ROOT.parent / "{service}.env.json".format(service=self.SERVICE_VARIANT)
else:
env_path = path("env.json").abspath()
# If the file does not exist, here or one level up,
# issue a warning and return an empty dict
if not env_path.isfile():
env_path = env_path.parent.parent / env_path.basename()
if not env_path.isfile():
print(
"Warning: could not find environment JSON file "
"at '{path}'".format(path=env_path),
file=sys.stderr,
)
return dict()
# Otherwise, load the file as JSON and return the resulting dict
try:
with open(env_path) as env_file:
return json.load(env_file)
except ValueError:
print(
"Error: Could not parse JSON "
"in {path}".format(path=env_path),
file=sys.stderr,
)
sys.exit(1)
@lazy
def feature_flags(self):
"""
Return a dictionary of feature flags configured by the environment.
"""
return self.env_tokens.get('FEATURES', dict())
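# Illustrative sketch (not part of the original module): reading a single
# feature flag through Env. The flag name 'ENABLE_EXAMPLE_FEATURE' is only an
# assumed example, not a real edx-platform feature flag.
def _example_feature_enabled(flag_name='ENABLE_EXAMPLE_FEATURE'):
    """Return True if the given feature flag is enabled in the env config."""
    return bool(Env().feature_flags.get(flag_name, False))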
| agpl-3.0 |
brandonPurvis/osf.io | framework/exceptions/__init__.py | 6 | 3538 | # -*- coding: utf-8 -*-
'''Custom exceptions for the framework.'''
import copy
import httplib as http
from flask import request
class FrameworkError(Exception):
"""Base class from which framework-related errors inherit."""
pass
class HTTPError(FrameworkError):
error_msgs = {
http.BAD_REQUEST: {
'message_short': 'Bad request',
'message_long': ('If this should not have occurred and the issue persists, '
'please report it to <a href="mailto:[email protected]">[email protected]</a>.'),
},
http.UNAUTHORIZED: {
'message_short': 'Unauthorized',
'message_long': 'You must <a href="/login/">log in</a> to access this resource.',
},
http.FORBIDDEN: {
'message_short': 'Forbidden',
'message_long': ('You do not have permission to perform this action. '
'If this should not have occurred and the issue persists, '
'please report it to <a href="mailto:[email protected]">[email protected]</a>.'),
},
http.NOT_FOUND: {
'message_short': 'Page not found',
'message_long': ('The requested resource could not be found. If this '
'should not have occurred and the issue persists, please report it '
'to <a href="mailto:[email protected]">[email protected]</a>.'),
},
http.GONE: {
'message_short': 'Resource deleted',
'message_long': ('The requested resource has been deleted. If this should '
'not have occurred and the issue persists, please report it to '
'<a href="mailto:[email protected]">[email protected]</a>.'),
},
http.SERVICE_UNAVAILABLE: {
'message_short': 'Service is currently unavailable',
'message_long': ('The requested service is unavailable. If this should '
'not have occurred and the issue persists, please report it to '
'<a href="mailto:[email protected]">[email protected]</a>.'),
},
}
def __init__(self, code, message=None, redirect_url=None, data=None):
super(HTTPError, self).__init__(message)
self.code = code
self.redirect_url = redirect_url
self.data = data or {}
try:
self.referrer = request.referrer
except RuntimeError:
self.referrer = None
def __repr__(self):
class_name = self.__class__.__name__
return '{ClassName}(code={code}, data={data})'.format(
ClassName=class_name,
code=self.code,
data=self.to_data(),
)
def __str__(self):
return repr(self)
def to_data(self):
data = copy.deepcopy(self.data)
if self.code in self.error_msgs:
data = {
'message_short': self.error_msgs[self.code]['message_short'],
'message_long': self.error_msgs[self.code]['message_long']
}
else:
data['message_short'] = 'Unable to resolve'
data['message_long'] = ('OSF was unable to resolve your request. If this '
'issue persists, please report it to '
'<a href="mailto:[email protected]">[email protected]</a>.')
data.update(self.data)
data['code'] = self.code
data['referrer'] = self.referrer
return data
class PermissionsError(FrameworkError):
"""Raised if an action cannot be performed due to insufficient permissions
"""
pass
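# Illustrative sketch (not part of the original module): raising an HTTPError
# and rendering it, e.g. from a view. Shown only to demonstrate the shape of
# to_data(); the 'resource' key is an assumed example.
def _example_not_found_payload():
    """Build the serializable payload for a 404 error."""
    try:
        raise HTTPError(http.NOT_FOUND, data={'resource': 'node'})
    except HTTPError as error:
        return error.to_data()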
| apache-2.0 |
alqfahad/odoo | addons/l10n_pe/__openerp__.py | 260 | 1762 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Peru Localization Chart Account',
'version': '1.0',
'description': """
Peruvian accounting chart and tax localization. Based on the PCGE 2010.
========================================================================
Peruvian chart of accounts and taxes according to the SUNAT regulations in
force in 2011 (PCGE 2010).
""",
'author': ['Cubic ERP'],
'website': 'http://cubicERP.com',
'category': 'Localization/Account Charts',
'depends': ['account_chart'],
'data':[
'account_tax_code.xml',
'l10n_pe_chart.xml',
'account_tax.xml',
'l10n_pe_wizard.xml',
],
'demo': [],
'active': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aleen42/three.js | utils/converters/msgpack/msgpack/__init__.py | 659 | 1385 | # coding: utf-8
from msgpack._version import version
from msgpack.exceptions import *
from collections import namedtuple
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
import os
if os.environ.get('MSGPACK_PUREPYTHON'):
from msgpack.fallback import Packer, unpack, unpackb, Unpacker
else:
try:
from msgpack._packer import Packer
from msgpack._unpacker import unpack, unpackb, Unpacker
except ImportError:
from msgpack.fallback import Packer, unpack, unpackb, Unpacker
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
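# Illustrative sketch (not part of the original package): a minimal round trip
# through the module-level helpers defined above.
def _example_roundtrip():
    """Pack a small list and unpack it again; the value survives unchanged."""
    return unpackb(packb([1, 2, 3])) == [1, 2, 3]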
| mit |
w1ll1am23/home-assistant | tests/components/mqtt/test_light_template.py | 3 | 39144 | """The tests for the MQTT Template light platform.
Configuration example with all features:
light:
platform: mqtt_template
name: mqtt_template_light_1
state_topic: 'home/rgb1'
command_topic: 'home/rgb1/set'
command_on_template: >
on,{{ brightness|d }},{{ red|d }}-{{ green|d }}-{{ blue|d }}
command_off_template: 'off'
state_template: '{{ value.split(",")[0] }}'
brightness_template: '{{ value.split(",")[1] }}'
color_temp_template: '{{ value.split(",")[2] }}'
white_value_template: '{{ value.split(",")[3] }}'
red_template: '{{ value.split(",")[4].split("-")[0] }}'
green_template: '{{ value.split(",")[4].split("-")[1] }}'
blue_template: '{{ value.split(",")[4].split("-")[2] }}'
If your light doesn't support the brightness feature, omit `brightness_template`.
If your light doesn't support the color temp feature, omit `color_temp_template`.
If your light doesn't support the white value feature, omit `white_value_template`.
If your light doesn't support the RGB feature, omit `(red|green|blue)_template`.
"""
from unittest.mock import patch
import pytest
from homeassistant.components import light
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import assert_setup_component, async_fire_mqtt_message
from tests.components.light import common
DEFAULT_CONFIG = {
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test-topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
}
}
async def test_setup_fails(hass, mqtt_mock):
"""Test that setup fails with missing required configuration items."""
with assert_setup_component(0, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{light.DOMAIN: {"platform": "mqtt", "schema": "template", "name": "test"}},
)
await hass.async_block_till_done()
assert hass.states.get("light.test") is None
with assert_setup_component(0, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_topic",
}
},
)
await hass.async_block_till_done()
assert hass.states.get("light.test") is None
with assert_setup_component(0, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_topic",
"command_on_template": "on",
}
},
)
await hass.async_block_till_done()
assert hass.states.get("light.test") is None
with assert_setup_component(0, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_topic",
"command_off_template": "off",
}
},
)
await hass.async_block_till_done()
assert hass.states.get("light.test") is None
async def test_rgb_light(hass, mqtt_mock):
"""Test RGB light flags brightness support."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on",
"command_off_template": "off",
"red_template": '{{ value.split(",")[4].' 'split("-")[0] }}',
"green_template": '{{ value.split(",")[4].' 'split("-")[1] }}',
"blue_template": '{{ value.split(",")[4].' 'split("-")[2] }}',
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
expected_features = (
light.SUPPORT_TRANSITION
| light.SUPPORT_COLOR
| light.SUPPORT_FLASH
| light.SUPPORT_BRIGHTNESS
)
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == expected_features
async def test_state_change_via_topic(hass, mqtt_mock):
"""Test state change via topic."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ white_value|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }}",
"command_off_template": "off",
"state_template": '{{ value.split(",")[0] }}',
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("white_value") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "test_light_rgb", "on")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("white_value") is None
async def test_state_brightness_color_effect_temp_white_change_via_topic(
hass, mqtt_mock
):
"""Test state, bri, color, effect, color temp, white val change."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"effect_list": ["rainbow", "colorloop"],
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ white_value|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }},"
"{{ effect|d }}",
"command_off_template": "off",
"state_template": '{{ value.split(",")[0] }}',
"brightness_template": '{{ value.split(",")[1] }}',
"color_temp_template": '{{ value.split(",")[2] }}',
"white_value_template": '{{ value.split(",")[3] }}',
"red_template": '{{ value.split(",")[4].' 'split("-")[0] }}',
"green_template": '{{ value.split(",")[4].' 'split("-")[1] }}',
"blue_template": '{{ value.split(",")[4].' 'split("-")[2] }}',
"effect_template": '{{ value.split(",")[5] }}',
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("white_value") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# turn on the light, full white
async_fire_mqtt_message(hass, "test_light_rgb", "on,255,145,123,255-128-64,")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") == (255, 128, 63)
assert state.attributes.get("brightness") == 255
assert state.attributes.get("color_temp") == 145
assert state.attributes.get("white_value") == 123
assert state.attributes.get("effect") is None
# turn the light off
async_fire_mqtt_message(hass, "test_light_rgb", "off")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# lower the brightness
async_fire_mqtt_message(hass, "test_light_rgb", "on,100")
light_state = hass.states.get("light.test")
assert light_state.attributes["brightness"] == 100
# change the color temp
async_fire_mqtt_message(hass, "test_light_rgb", "on,,195")
light_state = hass.states.get("light.test")
assert light_state.attributes["color_temp"] == 195
# change the color
async_fire_mqtt_message(hass, "test_light_rgb", "on,,,,41-42-43")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("rgb_color") == (243, 249, 255)
# change the white value
async_fire_mqtt_message(hass, "test_light_rgb", "on,,,134")
light_state = hass.states.get("light.test")
assert light_state.attributes["white_value"] == 134
# change the effect
async_fire_mqtt_message(hass, "test_light_rgb", "on,,,,41-42-43,rainbow")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("effect") == "rainbow"
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
"""Test the sending of command in optimistic mode."""
fake_state = ha.State(
"light.test",
"on",
{
"brightness": 95,
"hs_color": [100, 100],
"effect": "random",
"color_temp": 100,
"white_value": 50,
},
)
with patch(
"homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
return_value=fake_state,
), assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ white_value|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }}",
"command_off_template": "off",
"effect_list": ["colorloop", "random"],
"optimistic": True,
"state_template": '{{ value.split(",")[0] }}',
"color_temp_template": '{{ value.split(",")[2] }}',
"white_value_template": '{{ value.split(",")[3] }}',
"red_template": '{{ value.split(",")[4].' 'split("-")[0] }}',
"green_template": '{{ value.split(",")[4].' 'split("-")[1] }}',
"blue_template": '{{ value.split(",")[4].' 'split("-")[2] }}',
"effect_template": '{{ value.split(",")[5] }}',
"qos": 2,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("hs_color") == (100, 100)
assert state.attributes.get("effect") == "random"
assert state.attributes.get("color_temp") == 100
assert state.attributes.get("white_value") == 50
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "off", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,,--", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
# Set color_temp
await common.async_turn_on(hass, "light.test", color_temp=70)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,70,,--", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_temp") == 70
# Set full brightness
await common.async_turn_on(hass, "light.test", brightness=255)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,255,,,--", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
# Full brightness - no scaling of RGB values sent over MQTT
await common.async_turn_on(
hass, "light.test", rgb_color=[255, 128, 0], white_value=80
)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,80,255-128-0", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("white_value") == 80
assert state.attributes.get("rgb_color") == (255, 128, 0)
# Full brightness - normalization of RGB values sent over MQTT
await common.async_turn_on(hass, "light.test", rgb_color=[128, 64, 0])
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,,255-127-0", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") == (255, 127, 0)
# Set half brightness
await common.async_turn_on(hass, "light.test", brightness=128)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,128,,,--", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
# Half brightness - scaling of RGB values sent over MQTT
await common.async_turn_on(
hass, "light.test", rgb_color=[0, 255, 128], white_value=40
)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,40,0-128-64", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("white_value") == 40
assert state.attributes.get("rgb_color") == (0, 255, 128)
# Half brightness - normalization+scaling of RGB values sent over MQTT
await common.async_turn_on(
hass, "light.test", rgb_color=[0, 32, 16], white_value=40
)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,40,0-128-64", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("white_value") == 40
assert state.attributes.get("rgb_color") == (0, 255, 127)
async def test_sending_mqtt_commands_non_optimistic_brightness_template(
hass, mqtt_mock
):
"""Test the sending of command in optimistic mode."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"effect_list": ["rainbow", "colorloop"],
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ white_value|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }}",
"command_off_template": "off",
"state_template": '{{ value.split(",")[0] }}',
"brightness_template": '{{ value.split(",")[1] }}',
"color_temp_template": '{{ value.split(",")[2] }}',
"white_value_template": '{{ value.split(",")[3] }}',
"red_template": '{{ value.split(",")[4].' 'split("-")[0] }}',
"green_template": '{{ value.split(",")[4].' 'split("-")[1] }}',
"blue_template": '{{ value.split(",")[4].' 'split("-")[2] }}',
"effect_template": '{{ value.split(",")[5] }}',
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get("brightness")
assert not state.attributes.get("hs_color")
assert not state.attributes.get("effect")
assert not state.attributes.get("color_temp")
assert not state.attributes.get("white_value")
assert not state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "off", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,,--", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# Set color_temp
await common.async_turn_on(hass, "light.test", color_temp=70)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,70,,--", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get("color_temp")
# Set full brightness
await common.async_turn_on(hass, "light.test", brightness=255)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,255,,,--", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get("brightness")
# Full brightness - no scaling of RGB values sent over MQTT
await common.async_turn_on(
hass, "light.test", rgb_color=[255, 128, 0], white_value=80
)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,80,255-128-0", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert not state.attributes.get("white_value")
assert not state.attributes.get("rgb_color")
# Full brightness - normalization of RGB values sent over MQTT
await common.async_turn_on(hass, "light.test", rgb_color=[128, 64, 0])
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,,255-127-0", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Set half brightness
await common.async_turn_on(hass, "light.test", brightness=128)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,128,,,--", 0, False
)
mqtt_mock.async_publish.reset_mock()
# Half brightness - no scaling of RGB values sent over MQTT
await common.async_turn_on(
hass, "light.test", rgb_color=[0, 255, 128], white_value=40
)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,40,0-255-128", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
# Half brightness - normalization but no scaling of RGB values sent over MQTT
await common.async_turn_on(
hass, "light.test", rgb_color=[0, 32, 16], white_value=40
)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,,,40,0-255-127", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
async def test_effect(hass, mqtt_mock):
"""Test effect sent over MQTT in optimistic mode."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"effect_list": ["rainbow", "colorloop"],
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,{{ effect }}",
"command_off_template": "off",
"qos": 0,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 44
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert not state.attributes.get("effect")
await common.async_turn_on(hass, "light.test", effect="rainbow")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,rainbow", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("effect") == "rainbow"
await common.async_turn_on(hass, "light.test", effect="colorloop")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,colorloop", 0, False
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("effect") == "colorloop"
async def test_flash(hass, mqtt_mock):
"""Test flash sent over MQTT in optimistic mode."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,{{ flash }}",
"command_off_template": "off",
"qos": 0,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
await common.async_turn_on(hass, "light.test", flash="short")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,short", 0, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
await common.async_turn_on(hass, "light.test", flash="long")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,long", 0, False
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
async def test_transition(hass, mqtt_mock):
"""Test for transition time being sent when included."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|int|d }}",
"qos": 1,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 40
await common.async_turn_on(hass, "light.test", transition=10.0)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on,10.0", 1, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
await common.async_turn_off(hass, "light.test", transition=20.0)
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "off,20", 1, False
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async def test_invalid_values(hass, mqtt_mock):
"""Test that invalid values are ignored."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"effect_list": ["rainbow", "colorloop"],
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }},"
"{{ effect|d }}",
"command_off_template": "off",
"state_template": '{{ value.split(",")[0] }}',
"brightness_template": '{{ value.split(",")[1] }}',
"color_temp_template": '{{ value.split(",")[2] }}',
"white_value_template": '{{ value.split(",")[3] }}',
"red_template": '{{ value.split(",")[4].' 'split("-")[0] }}',
"green_template": '{{ value.split(",")[4].' 'split("-")[1] }}',
"blue_template": '{{ value.split(",")[4].' 'split("-")[2] }}',
"effect_template": '{{ value.split(",")[5] }}',
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("white_value") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# turn on the light, full white
async_fire_mqtt_message(
hass, "test_light_rgb", "on,255,215,222,255-255-255,rainbow"
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 255
assert state.attributes.get("color_temp") == 215
assert state.attributes.get("rgb_color") == (255, 255, 255)
assert state.attributes.get("white_value") == 222
assert state.attributes.get("effect") == "rainbow"
# bad state value
async_fire_mqtt_message(hass, "test_light_rgb", "offf")
# state should not have changed
state = hass.states.get("light.test")
assert state.state == STATE_ON
# bad brightness values
async_fire_mqtt_message(hass, "test_light_rgb", "on,off,255-255-255")
# brightness should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("brightness") == 255
# bad color temp values
async_fire_mqtt_message(hass, "test_light_rgb", "on,,off,255-255-255")
# color temp should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("color_temp") == 215
# bad color values
async_fire_mqtt_message(hass, "test_light_rgb", "on,255,a-b-c")
# color should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("rgb_color") == (255, 255, 255)
# bad white value values
async_fire_mqtt_message(hass, "test_light_rgb", "on,,,off,255-255-255")
# white value should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("white_value") == 222
# bad effect value
async_fire_mqtt_message(hass, "test_light_rgb", "on,255,a-b-c,white")
# effect should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("effect") == "rainbow"
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one light per unique_id."""
config = {
light.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"schema": "template",
"state_topic": "test-topic",
"command_topic": "test_topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"schema": "template",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, light.DOMAIN, config)
async def test_discovery_removal(hass, mqtt_mock, caplog):
"""Test removal of discovered mqtt_json lights."""
data = (
'{ "name": "test",'
' "schema": "template",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
await help_test_discovery_removal(hass, mqtt_mock, caplog, light.DOMAIN, data)
async def test_discovery_update_light(hass, mqtt_mock, caplog):
"""Test update of discovered light."""
data1 = (
'{ "name": "Beer",'
' "schema": "template",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
data2 = (
'{ "name": "Milk",'
' "schema": "template",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
await help_test_discovery_update(
hass, mqtt_mock, caplog, light.DOMAIN, data1, data2
)
async def test_discovery_update_unchanged_light(hass, mqtt_mock, caplog):
"""Test update of discovered light."""
data1 = (
'{ "name": "Beer",'
' "schema": "template",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
with patch(
"homeassistant.components.mqtt.light.schema_template.MqttLightTemplate.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, light.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer" }'
data2 = (
'{ "name": "Milk",'
' "schema": "template",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
await help_test_discovery_broken(
hass, mqtt_mock, caplog, light.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT light device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT light device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test-topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"state_template": '{{ value.split(",")[0] }}',
}
}
await help_test_entity_debug_info_message(hass, mqtt_mock, light.DOMAIN, config)
async def test_max_mireds(hass, mqtt_mock):
"""Test setting min_mireds and max_mireds."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_max_mireds/set",
"command_on_template": "on",
"command_off_template": "off",
"color_temp_template": "{{ value }}",
"max_mireds": 370,
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.attributes.get("min_mireds") == 153
assert state.attributes.get("max_mireds") == 370
| apache-2.0 |
dpetzold/django | django/conf/locale/en_GB/formats.py | 504 | 2117 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j M Y' # '25 Oct 2006'
TIME_FORMAT = 'P' # '2:30 p.m.'
DATETIME_FORMAT = 'j M Y, P' # '25 Oct 2006, 2:30 p.m.'
YEAR_MONTH_FORMAT = 'F Y' # 'October 2006'
MONTH_DAY_FORMAT = 'j F' # '25 October'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 p.m.'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
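# Illustrative sketch (not part of the original settings module): how these
# strings are typically consumed. It assumes a configured Django project with
# USE_L10N enabled and the en-gb locale active; the helper is only an example.
def _example_short_date():
    from datetime import date
    from django.utils import formats
    return formats.date_format(date(2006, 10, 25), 'SHORT_DATE_FORMAT')  # '25/10/2006'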
| bsd-3-clause |
strets123/rdkit | rdkit/Chem/Draw/qtCanvas.py | 4 | 3963 | # $Id$
#
# Copyright (C) 2014 Seiji Matsuoka
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit.Chem.Draw.canvasbase import CanvasBase
from PySide import QtGui, QtCore
class Canvas(CanvasBase):
def __init__(self, size):
self.size = size
self.qsize = QtCore.QSize(*size)
self.pixmap = QtGui.QPixmap(self.qsize)
self.painter = QtGui.QPainter(self.pixmap)
self.painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
self.painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform, True)
self.painter.fillRect(0, 0, size[0], size[1], QtCore.Qt.white)
def addCanvasLine(self, p1, p2, color=(0, 0, 0), color2=None, **kwargs):
if 'dash' in kwargs:
line_type = QtCore.Qt.DashLine
else:
line_type = QtCore.Qt.SolidLine
qp1 = QtCore.QPointF(*p1)
qp2 = QtCore.QPointF(*p2)
qpm = QtCore.QPointF((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)
if color2 and color2 != color:
rgb = [int(c * 255) for c in color]
pen = QtGui.QPen(QtGui.QColor(*rgb), 1, line_type)
self.painter.setPen(pen)
self.painter.drawLine(qp1, qpm)
rgb2 = [int(c * 255) for c in color2]
pen.setColor(QtGui.QColor(*rgb2))
self.painter.setPen(pen)
self.painter.drawLine(qpm, qp2)
else:
rgb = [int(c * 255) for c in color]
pen = QtGui.QPen(QtGui.QColor(*rgb), 1, line_type)
self.painter.setPen(pen)
self.painter.drawLine(qp1, qp2)
def addCanvasText(self, text, pos, font, color=(0, 0, 0), **kwargs):
orientation = kwargs.get('orientation', 'E')
qfont = QtGui.QFont("Helvetica", font.size * 1.5)
qtext = QtGui.QTextDocument()
qtext.setDefaultFont(qfont)
colored = [int(c * 255) for c in color]
colored.append(text)
html_format = "<span style='color:rgb({},{},{})'>{}</span>"
formatted = html_format.format(*colored)
qtext.setHtml(formatted)
if orientation == 'N':
qpos = QtCore.QPointF(pos[0] - qtext.idealWidth() / 2,
pos[1] - font.size)
elif orientation == 'W':
qpos = QtCore.QPointF(pos[0] - qtext.idealWidth() + font.size,
pos[1] - font.size)
else:
qpos = QtCore.QPointF(pos[0] - font.size, pos[1] - font.size)
self.painter.save()
self.painter.translate(qpos)
qtext.drawContents(self.painter)
self.painter.restore()
return font.size * 1.8, font.size * 1.8, 0
def addCanvasPolygon(self, ps, color=(0, 0, 0), fill=True,
stroke=False, **kwargs):
polygon = QtGui.QPolygonF()
for ver in ps:
polygon.append(QtCore.QPointF(*ver))
pen = QtGui.QPen(QtGui.QColor(*color), 1, QtCore.Qt.SolidLine)
self.painter.setPen(pen)
self.painter.setBrush(QtGui.QColor(0, 0, 0))
self.painter.drawPolygon(polygon)
def addCanvasDashedWedge(self, p1, p2, p3, dash=(2, 2), color=(0, 0, 0),
color2=None, **kwargs):
rgb = [int(c * 255) for c in color]
pen = QtGui.QPen(QtGui.QColor(*rgb), 1, QtCore.Qt.SolidLine)
self.painter.setPen(pen)
dash = (4, 4)
pts1 = self._getLinePoints(p1, p2, dash)
pts2 = self._getLinePoints(p1, p3, dash)
if len(pts2) < len(pts1):
pts2, pts1 = pts1, pts2
for i in range(len(pts1)):
qp1 = QtCore.QPointF(pts1[i][0], pts1[i][1])
qp2 = QtCore.QPointF(pts2[i][0], pts2[i][1])
self.painter.drawLine(qp1, qp2)
def flush(self):
self.painter.end()
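# Illustrative sketch (not part of the original module): a minimal drawing
# session with this canvas. It assumes a QApplication already exists (Qt needs
# one before QPixmap can be used); 'out.png' is an assumed example path.
def _example_draw(path='out.png'):
  canvas = Canvas((200, 200))
  canvas.addCanvasLine((10, 10), (190, 190), color=(1, 0, 0))
  canvas.addCanvasLine((10, 190), (190, 10), color=(0, 0, 1), dash=(2, 2))
  canvas.flush()
  canvas.pixmap.save(path)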
| bsd-3-clause |
charlesccychen/incubator-beam | sdks/python/apache_beam/runners/worker/sdk_worker_main.py | 3 | 7687 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK Fn Harness entry point."""
from __future__ import absolute_import
import http.server
import json
import logging
import os
import re
import sys
import threading
import traceback
from builtins import object
from google.protobuf import text_format
from apache_beam.internal import pickler
from apache_beam.options import pipeline_options
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.internal import names
from apache_beam.runners.worker.log_handler import FnApiLogRecordHandler
from apache_beam.runners.worker.sdk_worker import SdkHarness
from apache_beam.utils import profiler
# This module is experimental. No backwards-compatibility guarantees.
class StatusServer(object):
@classmethod
def get_thread_dump(cls):
lines = []
frames = sys._current_frames() # pylint: disable=protected-access
for t in threading.enumerate():
lines.append('--- Thread #%s name: %s ---\n' % (t.ident, t.name))
lines.append(''.join(traceback.format_stack(frames[t.ident])))
return lines
def start(self, status_http_port=0):
"""Executes the serving loop for the status server.
Args:
status_http_port(int): Binding port for the debug server.
Default is 0 which means any free unsecured port
"""
class StatusHttpHandler(http.server.BaseHTTPRequestHandler):
"""HTTP handler for serving stacktraces of all threads."""
def do_GET(self): # pylint: disable=invalid-name
"""Return all thread stacktraces information for GET request."""
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
for line in StatusServer.get_thread_dump():
self.wfile.write(line)
def log_message(self, f, *args):
"""Do not log any messages."""
pass
self.httpd = httpd = http.server.HTTPServer(
('localhost', status_http_port), StatusHttpHandler)
logging.info('Status HTTP server running at %s:%s', httpd.server_name,
httpd.server_port)
httpd.serve_forever()
def main(unused_argv):
"""Main entry point for SDK Fn Harness."""
if 'LOGGING_API_SERVICE_DESCRIPTOR' in os.environ:
logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['LOGGING_API_SERVICE_DESCRIPTOR'],
logging_service_descriptor)
# Send all logs to the runner.
fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
# TODO(BEAM-5468): This should be picked up from pipeline options.
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(fn_log_handler)
logging.info('Logging handler created.')
else:
fn_log_handler = None
# Start status HTTP server thread.
thread = threading.Thread(target=StatusServer().start)
thread.daemon = True
thread.setName('status-server-demon')
thread.start()
if 'PIPELINE_OPTIONS' in os.environ:
sdk_pipeline_options = _parse_pipeline_options(
os.environ['PIPELINE_OPTIONS'])
else:
sdk_pipeline_options = PipelineOptions.from_dictionary({})
if 'SEMI_PERSISTENT_DIRECTORY' in os.environ:
semi_persistent_directory = os.environ['SEMI_PERSISTENT_DIRECTORY']
else:
semi_persistent_directory = None
logging.info('semi_persistent_directory: %s', semi_persistent_directory)
try:
_load_main_session(semi_persistent_directory)
except Exception: # pylint: disable=broad-except
exception_details = traceback.format_exc()
logging.error(
'Could not load main session: %s', exception_details, exc_info=True)
try:
logging.info('Python sdk harness started with pipeline_options: %s',
sdk_pipeline_options.get_all_options(drop_default=True))
service_descriptor = endpoints_pb2.ApiServiceDescriptor()
text_format.Merge(os.environ['CONTROL_API_SERVICE_DESCRIPTOR'],
service_descriptor)
# TODO(robertwb): Support credentials.
assert not service_descriptor.oauth2_client_credentials_grant.url
SdkHarness(
control_address=service_descriptor.url,
worker_count=_get_worker_count(sdk_pipeline_options),
profiler_factory=profiler.Profile.factory_from_options(
sdk_pipeline_options.view_as(pipeline_options.ProfilingOptions))
).run()
logging.info('Python sdk harness exiting.')
except: # pylint: disable=broad-except
logging.exception('Python sdk harness failed: ')
raise
finally:
if fn_log_handler:
fn_log_handler.close()
def _parse_pipeline_options(options_json):
options = json.loads(options_json)
# Check the options field first for backward compatibility.
if 'options' in options:
return PipelineOptions.from_dictionary(options.get('options'))
else:
# Remove extra urn part from the key.
portable_option_regex = r'^beam:option:(?P<key>.*):v1$'
return PipelineOptions.from_dictionary({
re.match(portable_option_regex, k).group('key')
if re.match(portable_option_regex, k) else k: v
for k, v in options.items()
})
def _get_worker_count(pipeline_options):
"""Extract worker count from the pipeline_options.
This defines how many SdkWorkers will be started in this Python process.
And each SdkWorker will have its own thread to process data. Name of the
experimental parameter is 'worker_threads'
Example Usage in the Command Line:
--experimental worker_threads=1
Note: worker_threads is an experimental flag and might not be available in
future releases.
Returns:
an int containing the worker_threads to use. Default is 1
"""
experiments = pipeline_options.view_as(DebugOptions).experiments
experiments = experiments if experiments else []
for experiment in experiments:
# There should only be 1 match so returning from the loop
if re.match(r'worker_threads=', experiment):
return int(
re.match(r'worker_threads=(?P<worker_threads>.*)',
experiment).group('worker_threads'))
return 12
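# Illustrative sketch (not part of the original module): how the experiment
# flag described above can be set and read back. The value 4 is an arbitrary
# example.
def _example_worker_count():
  """Build options carrying the worker_threads experiment and read it back."""
  options = PipelineOptions(['--experiments', 'worker_threads=4'])
  return _get_worker_count(options)  # 4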
def _load_main_session(semi_persistent_directory):
"""Loads a pickled main session from the path specified."""
if semi_persistent_directory:
session_file = os.path.join(semi_persistent_directory, 'staged',
names.PICKLED_MAIN_SESSION_FILE)
if os.path.isfile(session_file):
pickler.load_session(session_file)
else:
logging.warning(
'No session file found: %s. Functions defined in __main__ '
'(interactive session) may fail.', session_file)
else:
logging.warning(
'No semi_persistent_directory found: Functions defined in __main__ '
'(interactive session) may fail.')
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
nparley/mylatitude | lib/chardet/sjisprober.py | 290 | 3774 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJIS_SM_MODEL
from .enums import ProbingState, MachineState
class SJISProber(MultiByteCharSetProber):
def __init__(self):
super(SJISProber, self).__init__()
self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
self.distribution_analyzer = SJISDistributionAnalysis()
self.context_analyzer = SJISContextAnalysis()
self.reset()
def reset(self):
super(SJISProber, self).reset()
self.context_analyzer.reset()
@property
def charset_name(self):
return self.context_analyzer.charset_name
@property
def language(self):
return "Japanese"
def feed(self, byte_str):
for i in range(len(byte_str)):
coding_state = self.coding_sm.next_state(byte_str[i])
if coding_state == MachineState.ERROR:
self.logger.debug('%s %s prober hit error at byte %s',
self.charset_name, self.language, i)
self._state = ProbingState.NOT_ME
break
elif coding_state == MachineState.ITS_ME:
self._state = ProbingState.FOUND_IT
break
elif coding_state == MachineState.START:
char_len = self.coding_sm.get_current_charlen()
if i == 0:
self._last_char[1] = byte_str[0]
self.context_analyzer.feed(self._last_char[2 - char_len:],
char_len)
self.distribution_analyzer.feed(self._last_char, char_len)
else:
self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3
- char_len], char_len)
self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
char_len)
self._last_char[0] = byte_str[-1]
if self.state == ProbingState.DETECTING:
if (self.context_analyzer.got_enough_data() and
(self.get_confidence() > self.SHORTCUT_THRESHOLD)):
self._state = ProbingState.FOUND_IT
return self.state
def get_confidence(self):
context_conf = self.context_analyzer.get_confidence()
distrib_conf = self.distribution_analyzer.get_confidence()
return max(context_conf, distrib_conf)
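# Illustrative sketch (not part of the original module): feeding raw bytes to
# the prober directly, much as UniversalDetector would, and reading the result.
def _example_probe(byte_str):
    """Return the detected charset name and confidence for the given bytes."""
    prober = SJISProber()
    prober.feed(bytearray(byte_str))
    return prober.charset_name, prober.get_confidence()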
| mit |
chromium2014/src | ppapi/generators/idl_diff.py | 180 | 9073 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import subprocess
import sys
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
#
# IDLDiff
#
# IDLDiff is a tool for comparing sets of IDL generated header files
# with the standard checked in headers. It does this by capturing the
# output of the standard diff tool, parsing it into separate changes, then
# ignoring changes that are known to be safe, such as adding or removing
# blank lines, etc...
#
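#
# Example invocation (a sketch; the exact flag syntax depends on how
# idl_option.ParseOptions interprets the options declared below):
#   idl_diff.py --gen=hdir --src=../c --diff=diff
#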
Option('gen', 'IDL generated files', default='hdir')
Option('src', 'Original ".h" files', default='../c')
Option('halt', 'Stop if a difference is found')
Option('diff', 'Directory holding acceptable diffs', default='diff')
Option('ok', 'Write out the diff file.')
# Change
#
# A Change object contains the previous lines, the new lines, and the change type.
#
class Change(object):
def __init__(self, mode, was, now):
self.mode = mode
self.was = was
self.now = now
def Dump(self):
if not self.was:
print 'Adding %s' % self.mode
elif not self.now:
print 'Missing %s' % self.mode
else:
print 'Modifying %s' % self.mode
for line in self.was:
print 'src: >>%s<<' % line
for line in self.now:
print 'gen: >>%s<<' % line
print
#
# IsCopyright
#
# Return True if this change is only a one line change in the copyright notice
# such as non-matching years.
#
def IsCopyright(change):
if len(change.now) != 1 or len(change.was) != 1: return False
if 'Copyright (c)' not in change.now[0]: return False
if 'Copyright (c)' not in change.was[0]: return False
return True
#
# IsBlankComment
#
# Return True if this change only removes a blank line from a comment
#
def IsBlankComment(change):
if change.now: return False
if len(change.was) != 1: return False
if change.was[0].strip() != '*': return False
return True
#
# IsBlank
#
# Return True if this change only adds or removes blank lines
#
def IsBlank(change):
for line in change.now:
if line: return False
for line in change.was:
if line: return False
return True
#
# IsToCppComment
#
# Return True if this change only converts C++ style comments to C style
#
def IsToCppComment(change):
if not len(change.now) or len(change.now) != len(change.was):
return False
for index in range(len(change.now)):
was = change.was[index].strip()
if was[:2] != '//':
return False
was = was[2:].strip()
now = change.now[index].strip()
if now[:2] != '/*':
return False
now = now[2:-2].strip()
if now != was:
return False
return True
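#
# IsMergeComment
#
# Return True if this change only replaces a blank comment line with lines
# that open, close, or continue a comment (adjacent comment blocks merged)
#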
def IsMergeComment(change):
if len(change.was) != 1: return False
if change.was[0].strip() != '*': return False
for line in change.now:
stripped = line.strip()
if stripped != '*' and stripped[:2] != '/*' and stripped[-2:] != '*/':
return False
return True
#
# IsSpacing
#
# Return True if this change is only different in the way 'words' are spaced
# such as in an enum:
# ENUM_XXX = 1,
# ENUM_XYY_Y = 2,
# vs
# ENUM_XXX = 1,
# ENUM_XYY_Y = 2,
#
def IsSpacing(change):
if len(change.now) != len(change.was): return False
for i in range(len(change.now)):
# Also ignore right side comments
line = change.was[i]
offs = line.find('//')
if offs == -1:
offs = line.find('/*')
if offs >-1:
line = line[:offs-1]
words1 = change.now[i].split()
words2 = line.split()
if words1 != words2: return False
return True
#
# IsInclude
#
# Return True if change has extra includes
#
def IsInclude(change):
for line in change.was:
if line.strip().find('struct'): return False
for line in change.now:
if line and '#include' not in line: return False
return True
#
# IsCppComment
#
# Return True if the change is only missing C++ comments
#
def IsCppComment(change):
if len(change.now): return False
for line in change.was:
line = line.strip()
if line[:2] != '//': return False
return True
#
# ValidChange
#
# Return True if the change is not one of the known "bogus" change types above.
#
def ValidChange(change):
if IsToCppComment(change): return False
if IsCopyright(change): return False
if IsBlankComment(change): return False
if IsMergeComment(change): return False
if IsBlank(change): return False
if IsSpacing(change): return False
if IsInclude(change): return False
if IsCppComment(change): return False
return True
#
# Swapped
#
# Check if the combination of last + next change signals they are both
# invalid, such as a swap of lines around an invalid block.
#
def Swapped(last, next):
if not last.now and not next.was and len(last.was) == len(next.now):
cnt = len(last.was)
for i in range(cnt):
match = True
for j in range(cnt):
if last.was[j] != next.now[(i + j) % cnt]:
match = False
          break
if match: return True
if not last.was and not next.now and len(last.now) == len(next.was):
cnt = len(last.now)
for i in range(cnt):
match = True
for j in range(cnt):
        if last.now[j] != next.was[(i + j) % cnt]:
          match = False
          break
if match: return True
return False
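#
# FilterLinesIn
#
# Remove pairs of '<' and '>' lines with identical text (lines that merely
# moved) from the raw diff output before it is parsed into changes
#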
def FilterLinesIn(output):
was = []
now = []
filter = []
for index in range(len(output)):
filter.append(False)
line = output[index]
if len(line) < 2: continue
if line[0] == '<':
if line[2:].strip() == '': continue
was.append((index, line[2:]))
elif line[0] == '>':
if line[2:].strip() == '': continue
now.append((index, line[2:]))
for windex, wline in was:
for nindex, nline in now:
if filter[nindex]: continue
if filter[windex]: continue
if wline == nline:
filter[nindex] = True
filter[windex] = True
if GetOption('verbose'):
print "Found %d, %d >>%s<<" % (windex + 1, nindex + 1, wline)
out = []
for index in range(len(output)):
if not filter[index]:
out.append(output[index])
return out
#
# GetChanges
#
# Parse the output into discrete change blocks.
#
def GetChanges(output):
  # Split into lines, adding an END marker to simplify the logic
lines = output.split('\n')
lines = FilterLinesIn(lines)
lines.append('END')
changes = []
was = []
now = []
mode = ''
last = None
for line in lines:
# print "LINE=%s" % line
if not line: continue
elif line[0] == '<':
if line[2:].strip() == '': continue
# Ignore prototypes
if len(line) > 10:
words = line[2:].split()
if len(words) == 2 and words[1][-1] == ';':
if words[0] == 'struct' or words[0] == 'union':
continue
was.append(line[2:])
elif line[0] == '>':
if line[2:].strip() == '': continue
if line[2:10] == '#include': continue
now.append(line[2:])
elif line[0] == '-':
continue
else:
change = Change(line, was, now)
was = []
now = []
if ValidChange(change):
changes.append(change)
if line == 'END':
break
return FilterChanges(changes)
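#
# FilterChanges
#
# Drop pairs of changes that Swapped() identifies as cancelling each other out
#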
def FilterChanges(changes):
if len(changes) < 2: return changes
out = []
filter = [False for change in changes]
for cur in range(len(changes)):
for cmp in range(cur+1, len(changes)):
if filter[cmp]:
continue
if Swapped(changes[cur], changes[cmp]):
filter[cur] = True
filter[cmp] = True
for cur in range(len(changes)):
if filter[cur]: continue
out.append(changes[cur])
return out
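#
# Main
#
# Diff each generated header against its checked-in counterpart, report any
# changes that are not known to be safe, and optionally write out the diff
# when the 'ok' option is set
#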
def Main(args):
filenames = ParseOptions(args)
if not filenames:
gendir = os.path.join(GetOption('gen'), '*.h')
filenames = sorted(glob.glob(gendir))
srcdir = os.path.join(GetOption('src'), '*.h')
srcs = sorted(glob.glob(srcdir))
for name in srcs:
name = os.path.split(name)[1]
name = os.path.join(GetOption('gen'), name)
if name not in filenames:
print 'Missing: %s' % name
for filename in filenames:
gen = filename
filename = filename[len(GetOption('gen')) + 1:]
src = os.path.join(GetOption('src'), filename)
diff = os.path.join(GetOption('diff'), filename)
p = subprocess.Popen(['diff', src, gen], stdout=subprocess.PIPE)
output, errors = p.communicate()
try:
input = open(diff, 'rt').read()
except:
input = ''
if input != output:
changes = GetChanges(output)
else:
changes = []
if changes:
print "\n\nDelta between:\n src=%s\n gen=%s\n" % (src, gen)
for change in changes:
change.Dump()
print 'Done with %s\n\n' % src
if GetOption('ok'):
open(diff, 'wt').write(output)
if GetOption('halt'):
return 1
else:
print "\nSAME:\n src=%s\n gen=%s" % (src, gen)
if input: print ' ** Matched expected diff. **'
print '\n'
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
goriccardo/pylibconfig | test/testio.py | 1 | 3930 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Riccardo Gori <[email protected]>
# License: GPL-3 http://www.gnu.org/licenses/gpl.txt
def main():
read_test()
write_test()
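# Read back the reference config file shipped with the tests and check a
# sample of typed values (groups, arrays, booleans, unicode, big integers).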
def read_test():
from pylibconfig import libconfigFile
test = libconfigFile('test/cfgfiles/test.cfg')
appwin = test.application.window
assert appwin.title == "My Application"
assert appwin.size.w == 640
assert appwin.size.h == 480
assert appwin.pos.x == 350
assert appwin.pos.y == 250
app = test.application
assert app.a == 5
assert app.b == 6
assert app.ff == 1e6
assert test.get('application.test-comment') == "/* hello\n \"there\"*/"
assert test.get('application.test-long-string') == "A very long string that spans multiple lines. " \
"Adjacent strings are automatically concatenated."
assert test.get('application.test-escaped-string') == "\"This is\n a test.\""
gr1 = test.application.group1
assert gr1.x == 5
assert gr1.y == 10
assert gr1.my_array == range(10,23)
assert gr1.flag == True
assert gr1.group2.zzz == "this is a test"
assert gr1.states == ["CT", "CA", "TX", "NV", "FL"]
assert test.binary == [0xAA, 0xBB, 0xCC]
#TODO: Not working tests!
#assert test.get('list') == [ ["abc", 123, True], 1.234, [], [1,2,3]]
#assert test.books
msc = test.misc
assert msc.port == 5000
assert msc.pi == 3.14159265
assert msc.enabled == False
assert msc.mask == 0xAABBCCDD
assert msc.unicode == "STARGΛ̊TE SG-1"
assert msc.bigint == 9223372036854775807
assert msc.bighex == 0x1122334455667788
def write_test():
from pylibconfig import libconfigFile
test = libconfigFile('/tmp/pylibconfigtest.cfg', True)
#Test strings
assert test.set('string.short', 'hi', True)
lls = """Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Vestibulum fermentum, eros nec lacinia convallis, lectus est consequat
tellus, in condimentum risus metus èu tellus. Suspendisse potenti.
Proin bibendum, sapién at feugiat auctor, turpis nisi molestie lorem,
at pretium dolor leo vel eros. In commodo ultricies tortor at sagittis.
Vestibulum at nunc vel mi adipiscing dapibùs. Morbi velit justo,
luctus congue commodo eget, sodales non màssa. Mauris ac mauris sem.
Integer semper fermentum suscipit. Nunc eu purus urna.
Nam nec ultrices urna. Quisque eu mauris egestas nisl faucibus semper
eget malesuada purus. Etìam dignissim ligula at tellus consequat aliquam.
Nam hendrerit, magna ac placerat tincidunt, lorem liberò laoreet lacus,
nec tempor tellus eros a odio. Integer lectus nisi, ultricies ut rutrum
sed, sodales in quam.
"""
assert test.set('string.long-long-string', lls, True)
assert test.set('math.unicode', '⨕⨘dA≉⥁ℜ', True)
#Test numbers
assert test.set('math.integer', -3400, True)
assert test.set('math.smallfact', reduce(lambda x,y:x*y, range(1,10)), True)
hugeness = 21
assert test.set('math.hugefact', reduce(lambda x,y:x*y, range(1,hugeness)), True)
assert test.math.hugefact == reduce(lambda x,y:x*y, range(1,hugeness))
#TODO: solve problems with longlong integers
assert test.set('math.floats.small', 1.452e-16, True)
assert test.set('math.floats.big', 140301e156, True)
#Test bools
assert test.set('math.is.a.nice.thing.right.question.mark', True, add=True)
assert test.set('The.Cretans.are.always.liars', False, add=True)
#Test lists and arrays
assert test.set('math.fibonacci', [1,2,3,5,8,13], True)
assert test.set('personal.mynames', ['John', 'Patrick', 'Michel', 'Jack'], True)
assert test.set('personal.mynames', ['Richard', 'Lagrange'])
assert test.get('personal.mynames') == ['Richard', 'Lagrange']
#Dump the file
test.write()
if __name__ == '__main__':
main()
| lgpl-3.0 |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/pip-7.1.0-py3.4.egg/pip/_vendor/html5lib/treebuilders/dom.py | 920 | 8469 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import minidom, Node
import weakref
from . import _base
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
def getDomBuilder(DomImplementation):
Dom = DomImplementation
class AttrList(object):
def __init__(self, element):
self.element = element
def __iter__(self):
return list(self.element.attributes.items()).__iter__()
def __setitem__(self, name, value):
self.element.setAttribute(name, value)
def __len__(self):
return len(list(self.element.attributes.items()))
def items(self):
return [(item[0], item[1]) for item in
list(self.element.attributes.items())]
def keys(self):
return list(self.element.attributes.keys())
def __getitem__(self, name):
return self.element.getAttribute(name)
def __contains__(self, name):
if isinstance(name, tuple):
raise NotImplementedError
else:
return self.element.hasAttribute(name)
class NodeBuilder(_base.Node):
def __init__(self, element):
_base.Node.__init__(self, element.nodeName)
self.element = element
namespace = property(lambda self: hasattr(self.element, "namespaceURI")
and self.element.namespaceURI or None)
def appendChild(self, node):
node.parent = self
self.element.appendChild(node.element)
def insertText(self, data, insertBefore=None):
text = self.element.ownerDocument.createTextNode(data)
if insertBefore:
self.element.insertBefore(text, insertBefore.element)
else:
self.element.appendChild(text)
def insertBefore(self, node, refNode):
self.element.insertBefore(node.element, refNode.element)
node.parent = self
def removeChild(self, node):
if node.element.parentNode == self.element:
self.element.removeChild(node.element)
node.parent = None
def reparentChildren(self, newParent):
while self.element.hasChildNodes():
child = self.element.firstChild
self.element.removeChild(child)
newParent.element.appendChild(child)
self.childNodes = []
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes:
for name, value in list(attributes.items()):
if isinstance(name, tuple):
if name[0] is not None:
qualifiedName = (name[0] + ":" + name[1])
else:
qualifiedName = name[1]
self.element.setAttributeNS(name[2], qualifiedName,
value)
else:
self.element.setAttribute(
name, value)
attributes = property(getAttributes, setAttributes)
def cloneNode(self):
return NodeBuilder(self.element.cloneNode(False))
def hasContent(self):
return self.element.hasChildNodes()
def getNameTuple(self):
if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TreeBuilder(_base.TreeBuilder):
def documentClass(self):
self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
return weakref.proxy(self)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
domimpl = Dom.getDOMImplementation()
doctype = domimpl.createDocumentType(name, publicId, systemId)
self.document.appendChild(NodeBuilder(doctype))
if Dom == minidom:
doctype.ownerDocument = self.dom
def elementClass(self, name, namespace=None):
if namespace is None and self.defaultNamespace is None:
node = self.dom.createElement(name)
else:
node = self.dom.createElementNS(namespace, name)
return NodeBuilder(node)
def commentClass(self, data):
return NodeBuilder(self.dom.createComment(data))
def fragmentClass(self):
return NodeBuilder(self.dom.createDocumentFragment())
def appendChild(self, node):
self.dom.appendChild(node.element)
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
return self.dom
def getFragment(self):
return _base.TreeBuilder.getFragment(self).element
def insertText(self, data, parent=None):
if parent != self:
_base.TreeBuilder.insertText(self, data, parent)
else:
# HACK: allow text nodes as children of the document node
if hasattr(self.dom, '_child_node_types'):
if Node.TEXT_NODE not in self.dom._child_node_types:
self.dom._child_node_types = list(self.dom._child_node_types)
self.dom._child_node_types.append(Node.TEXT_NODE)
self.dom.appendChild(self.dom.createTextNode(data))
implementation = DomImplementation
name = None
def testSerializer(element):
element.normalize()
rv = []
def serializeElement(element, indent=0):
if element.nodeType == Node.DOCUMENT_TYPE_NODE:
if element.name:
if element.publicId or element.systemId:
publicId = element.publicId or ""
systemId = element.systemId or ""
rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
(' ' * indent, element.name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
else:
rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
elif element.nodeType == Node.DOCUMENT_NODE:
rv.append("#document")
elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
rv.append("#document-fragment")
elif element.nodeType == Node.COMMENT_NODE:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
elif element.nodeType == Node.TEXT_NODE:
rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
else:
if (hasattr(element, "namespaceURI") and
element.namespaceURI is not None):
name = "%s %s" % (constants.prefixes[element.namespaceURI],
element.nodeName)
else:
name = element.nodeName
rv.append("|%s<%s>" % (' ' * indent, name))
if element.hasAttributes():
attributes = []
for i in range(len(element.attributes)):
attr = element.attributes.item(i)
name = attr.nodeName
value = attr.value
ns = attr.namespaceURI
if ns:
name = "%s %s" % (constants.prefixes[ns], attr.localName)
else:
name = attr.nodeName
attributes.append((name, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
indent += 2
for child in element.childNodes:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
return locals()
# The actual means to get a module!
getDomModule = moduleFactoryFactory(getDomBuilder)
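# Example usage (a sketch, not part of the original module): with the standard
# library's minidom implementation, the returned module exposes a TreeBuilder
# class that html5lib can use, e.g.:
#   from xml.dom import minidom
#   DOMTreeBuilder = getDomModule(minidom).TreeBuilder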
| mit |
brandonPurvis/osf.io | website/addons/figshare/messages.py | 24 | 2988 | # MODEL MESSAGES :model.py
BEFORE_PAGE_LOAD_PRIVATE_NODE_MIXED_FS = 'Warning: This OSF {category} is private but figshare project {project_id} may contain some public files or filesets.'
BEFORE_PAGE_LOAD_PUBLIC_NODE_MIXED_FS = 'Warning: This OSF {category} is public but figshare project {project_id} may contain some private files or filesets.'
BEFORE_PAGE_LOAD_PERM_MISMATCH = 'Warning: This OSF {category} is {node_perm}, but the figshare article {figshare_id} is {figshare_perm}. '
BEFORE_PAGE_LOAD_PUBLIC_NODE_PRIVATE_FS = 'Users can view the contents of this private figshare article. '
BEFORE_REMOVE_CONTRIBUTOR = 'The figshare add-on for this {category} is authenticated by {user}. Removing this user will also remove write access to the {category} unless another contributor re-authenticates. '
BEFORE_FORK_OWNER = 'Because you have authenticated the figshare add-on for this {category}, forking it will also transfer your authorization to the forked {category}. '
BEFORE_FORK_NOT_OWNER = 'Because this figshare add-on has been authenticated by a different user, forking it will not transfer authentication to the forked {category}. '
AFTER_FORK_OWNER = 'figshare authorization copied to forked {category}. '
AFTER_FORK_NOT_OWNER = 'figshare authorization not copied to forked {category}. You may authorize this fork on the <u><a href={url}>Settings</a></u> page. '
BEFORE_REGISTER = 'The contents of figshare projects cannot be registered at this time. The figshare data associated with this {category} will not be included as part of this registration. '
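# These templates appear to be filled in with str.format(); e.g. (with
# hypothetical values) BEFORE_REMOVE_CONTRIBUTOR.format(category='project',
# user='Jane Doe').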
# END MODEL MESSAGES
# MFR MESSAGES :views/crud.py
FIGSHARE_VIEW_FILE_PRIVATE = 'Since this figshare file is unpublished we cannot render it. In order to access this content you will need to log into the <u><a href="{url}">figshare page</a></u> and view it there. '
FIGSHARE_VIEW_FILE_OVERSIZED = 'This figshare file is too large to render; <u><a href="{url}">download file</a></u> to view it. '
'''
Publishing this article is an irreversible operation. Once a figshare article is published it can never be deleted. Proceed with caution.
<br /><br />
Also, figshare requires some additional info before this article can be published: <br />
<form id='figsharePublishForm' action='${nodeApiUrl}figshare/publish/article/${parent_id}/'>
<h3><label>Title:</label></h3>
<input name='title' type='text' value='${figshare_title}'>
<h3><label>Category:</label></h3>
<select name='cat' id='figshareCategory' value='${figshare_category}'>${figshare_categories}</select><br />
<h3><label>Tag(s):</label></h3>
<input name='tags' type='text' value='${figshare_tags}' placeholder='e.g. neuroscience, cognition'><br />
<h3><label>Description</label></h3>
<textarea name='description' placeholder='Please type a description of this file here'>${figshare_desc}</textarea>
</form>
'''
OAUTH_INVALID = 'Your OAuth key for figshare is no longer valid. Please re-authenticate. '
# END MFR MESSAGES
| apache-2.0 |
Gussy/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/applelink.py | 61 | 2851 | """SCons.Tool.applelink
Tool-specific initialization for the Apple gnu-like linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/applelink.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Util
# Even though the Mac is based on the GNU toolchain, it doesn't understand
# the -rpath option, so we use the "link" tool instead of "gnulink".
import link
def generate(env):
"""Add Builders and construction variables for applelink to an
Environment."""
link.generate(env)
env['FRAMEWORKPATHPREFIX'] = '-F'
env['_FRAMEWORKPATH'] = '${_concat(FRAMEWORKPATHPREFIX, FRAMEWORKPATH, "", __env__)}'
env['_FRAMEWORKS'] = '${_concat("-framework ", FRAMEWORKS, "", __env__)}'
env['LINKCOM'] = env['LINKCOM'] + ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -dynamiclib')
env['SHLINKCOM'] = env['SHLINKCOM'] + ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
# override the default for loadable modules, which are different
# on OS X than dynamic shared libs. echoing what XCode does for
# pre/suffixes:
env['LDMODULEPREFIX'] = ''
env['LDMODULESUFFIX'] = ''
env['LDMODULEFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -bundle')
env['LDMODULECOM'] = '$LDMODULE -o ${TARGET} $LDMODULEFLAGS $SOURCES $_LIBDIRFLAGS $_LIBFLAGS $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
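# The Apple linker is only available on Mac OS X, so only report the tool as
# existing on the 'darwin' platform.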
def exists(env):
return env['PLATFORM'] == 'darwin'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
webostin/django-btc | tests/aggregation_regress/tests.py | 9 | 48568 | from __future__ import unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.core.exceptions import FieldError
from django.contrib.contenttypes.models import ContentType
from django.db.models import Count, Max, Avg, Sum, StdDev, Variance, F, Q
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from .models import (Author, Book, Publisher, Clues, Entries, HardbackBook,
ItemTag, WithManualPK, Alfa, Bravo, Charlie)
class AggregationTests(TestCase):
fixtures = ["aggregation_regress.json"]
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in six.iteritems(kwargs):
self.assertEqual(getattr(obj, attr), value)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Tests that the subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
# Oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)}
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')),
{'pages__sum': 3703}
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(select={"manufacture_cost": "price * .5"}).get(pk=2)
self.assertObjectAttrs(obj,
contact_id=3,
id=2,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=2,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Order of the annotate/extra in the query doesn't matter
obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate(mean_auth_age=Avg('authors__age')).get(pk=2)
self.assertObjectAttrs(obj,
contact_id=3,
id=2,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=2,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Values queries can be combined with annotate and extra
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost': 'price * .5'}).values().get(pk=2)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
"contact_id": 3,
"id": 2,
"isbn": "067232959",
"mean_auth_age": 45.0,
"name": "Sams Teach Yourself Django in 24 Hours",
"pages": 528,
"price": Decimal("23.09"),
"pubdate": datetime.date(2008, 3, 3),
"publisher_id": 2,
"rating": 3.0,
})
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost': 'price * .5'}).get(pk=2)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'contact_id': 3,
'id': 2,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal("23.09"),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': 2,
'rating': 3.0
})
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page': 'price / pages'}).values('name').get(pk=1)
self.assertEqual(obj, {
"name": 'The Definitive Guide to Django: Web Development Done Right',
})
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=1)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
self.assertQuerysetEqual(
qs, [
{"name": 'Python Web Development with Django'}
],
lambda b: b,
)
# The annotations are added to values output if values() precedes
# annotate()
obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page': 'price / pages'}).get(pk=1)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# Check that all of the objects are getting counted (allow_nulls) and
# that values respects the amount of objects
self.assertEqual(
len(Author.objects.annotate(Avg('friends__age')).values()),
9
)
# Check that consecutive calls to annotate accumulate in the query
qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
self.assertQuerysetEqual(
qs, [
{'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
{'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
{'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
{'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
{'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
],
lambda b: b,
)
    def test_aggregate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
self.assertEqual(vals, {
'num_authors__sum': 10,
'num_authors__avg': Approximate(1.666, places=2),
'pages__max': 1132,
'price__max': Decimal("82.80")
})
# Regression for #15624 - Missing SELECT columns when using values, annotate
# and aggregate in a single query
self.assertEqual(
Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')),
{'c__max': 3}
)
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(
Book.objects.annotate(num_authors=Count('authors')).count(),
6
)
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
self.assertEqual(
vals,
{'num_authors__max': 3}
)
vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
self.assertEqual(
vals,
{'avg_price__max': 75.0}
)
        # Aliases are quoted to protect aliases that might be reserved names
vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
self.assertEqual(
vals,
{'number': 1132, 'select': 1132}
)
# Regression for #10064: select_related() plays nice with aggregates
obj = Book.objects.select_related('publisher').annotate(num_authors=Count('authors')).values()[0]
self.assertEqual(obj, {
'contact_id': 8,
'id': 5,
'isbn': '013790395',
'name': 'Artificial Intelligence: A Modern Approach',
'num_authors': 2,
'pages': 1132,
'price': Decimal("82.8"),
'pubdate': datetime.date(1995, 1, 15),
'publisher_id': 3,
'rating': 4.0,
})
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors'))),
6
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
1
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
5
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
2
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
2
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
# ... and where the F() references an aggregate
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2 * F('num_books')).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
def test_boolean_conversion(self):
# Aggregates mixed up ordering of columns for backend's convert_values
# method. Refs #21126.
e = Entries.objects.create(Entry='foo')
c = Clues.objects.create(EntryID=e, Clue='bar')
qs = Clues.objects.select_related('EntryID').annotate(Count('ID'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].EntryID, e)
self.assertIs(qs[0].EntryID.Exclude, False)
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(
Book.objects.filter(id__in=[]).count(),
0
)
vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
self.assertEqual(
vals,
{'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
)
qs = Publisher.objects.filter(pk=5).annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
self.assertQuerysetEqual(
qs, [
{'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': 5, 'avg_authors': None}
],
lambda p: p
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name
)
# Regression for #10127 - Empty select_related() works with annotate
qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
self.assertQuerysetEqual(
qs, [
('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
('Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier'),
('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': 1, 'id__count': 2},
{'pub': 2, 'id__count': 1},
{'pub': 3, 'id__count': 2},
{'pub': 4, 'id__count': 1}
],
lambda b: b
)
qs = Book.objects.extra(select={'pub': 'publisher_id', 'foo': 'pages'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': 1, 'id__count': 2},
{'pub': 2, 'id__count': 1},
{'pub': 3, 'id__count': 2},
{'pub': 4, 'id__count': 1}
],
lambda b: b
)
# Regression for #10182 - Queries with aggregate calls are correctly
# realiased when used in a subquery
ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids), [
"Python Web Development with Django",
],
lambda b: b.name
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qs = Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by()
grouping, gb_params = qs.query.get_compiler(qs.db).get_grouping([], [])
self.assertEqual(len(grouping), 1)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
self.assertRaises(ValueError, Book.objects.all().annotate, Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a field name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with an m2m name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
def test_values_queryset_non_conflict(self):
# Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
# age is a field on Author, so it shouldn't be allowed as an aggregate.
# But age isn't included in the ValuesQuerySet, so it is.
results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 1)
# Same problem, but aggregating over m2m fields
results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 32.0)
# Same problem, but colliding with an m2m field
results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a reverse-related name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(), [
'Artificial Intelligence: A Modern Approach',
'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
'Practical Django Projects',
'Python Web Development with Django',
'Sams Teach Yourself Django in 24 Hours',
'The Definitive Guide to Django: Web Development Done Right'
],
lambda b: b.name
)
# Regression for #10248 - Annotations work with DateQuerySets
qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
self.assertQuerysetEqual(
qs, [
datetime.date(1995, 1, 15),
datetime.date(2007, 12, 6),
],
lambda b: b
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
self.assertQuerysetEqual(
qs, [
150,
175,
224,
264,
473,
566
],
lambda b: int(b["sheets"])
)
# Regression for 10425 - annotations don't get in the way of a count()
# clause
self.assertEqual(
Book.objects.values('publisher').annotate(Count('publisher')).count(),
4
)
self.assertEqual(
Book.objects.annotate(Count('publisher')).values('publisher').count(),
6
)
# Note: intentionally no order_by(), that case needs tests, too.
publishers = Publisher.objects.filter(id__in=[1, 2])
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
publishers = publishers.annotate(n_books=Count("book"))
sorted_publishers = sorted(publishers, key=lambda x: x.name)
self.assertEqual(
sorted_publishers[0].n_books,
2
)
self.assertEqual(
sorted_publishers[1].n_books,
1
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books, [
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
# Regression for 10666 - inherited fields work with annotations and
# aggregations
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
{'n_pages': 2078}
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('pages')),
{'n_pages': 2078},
)
qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h
)
qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h,
)
        # Regression for #10766 - Shouldn't be able to reference aggregate
# fields in an aggregate() call.
self.assertRaises(
FieldError,
lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
)
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
{"pk__count": None}
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg('age')),
{'age__avg': None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
Author.objects.count()
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).filter(
pages__lt=F("n_authors") * 200
).values_list("pk")
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs), [
"Python Web Development with Django"
],
attrgetter("name")
)
def test_values_annotate_values(self):
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).values_list("pk", flat=True)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
# Test that when a field occurs on the LHS of a HAVING clause that it
# appears correctly in the GROUP BY clause
qs = Book.objects.values_list("name").annotate(
n_authors=Count("authors")
).filter(
pages__gt=F("n_authors")
).values_list("name", flat=True)
# Results should be the same, all Books have more pages than authors
self.assertEqual(
list(qs), list(Book.objects.values_list("name", flat=True))
)
def test_values_list_annotation_args_ordering(self):
"""
Annotate *args ordering should be preserved in values_list results.
**kwargs comes after *args.
Regression test for #23659.
"""
books = Book.objects.values_list("publisher__name").annotate(
Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
).order_by("-publisher__name")
self.assertEqual(books[0], ('Sams', 1, 23.09, 45.0, 528.0))
def test_annotation_disjunction(self):
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(n_authors=2) | Q(name="Python Web Development with Django")
)
self.assertQuerysetEqual(
qs, [
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
)
self.assertQuerysetEqual(
qs, [
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
).order_by('pk')
self.assertQuerysetEqual(
qs, [
"Apress",
"Prentice Hall",
"Jonno's House of Books",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(pk__lt=F("book_count")) | Q(rating_sum=None)
).order_by("pk")
self.assertQuerysetEqual(
qs, [
"Apress",
"Jonno's House of Books",
],
attrgetter("name")
)
def test_quoting_aggregate_order_by(self):
qs = Book.objects.filter(
name="Python Web Development with Django"
).annotate(
authorCount=Count("authors")
).order_by("authorCount")
self.assertQuerysetEqual(
qs, [
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount)
)
@skipUnlessDBFeature('supports_stddev')
def test_stddev(self):
self.assertEqual(
Book.objects.aggregate(StdDev('pages')),
{'pages__stddev': Approximate(311.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating')),
{'rating__stddev': Approximate(0.60, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price')),
{'price__stddev': Approximate(24.16, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('pages', sample=True)),
{'pages__stddev': Approximate(341.19, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating', sample=True)),
{'rating__stddev': Approximate(0.66, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price', sample=True)),
{'price__stddev': Approximate(26.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages')),
{'pages__variance': Approximate(97010.80, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating')),
{'rating__variance': Approximate(0.36, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price')),
{'price__variance': Approximate(583.77, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages', sample=True)),
{'pages__variance': Approximate(116412.96, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating', sample=True)),
{'rating__variance': Approximate(0.44, 2)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price', sample=True)),
{'price__variance': Approximate(700.53, 2)}
)
def test_filtering_by_annotation_name(self):
# Regression test for #14476
# The name of the explicitly provided annotation name in this case
# poses no problem
qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# Neither in this case
qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
def test_annotate_joins(self):
"""
Test that the base table's join isn't promoted to LOUTER. This could
cause the query generation to fail if there is an exclude() for fk-field
in the query, too. Refs #19087.
"""
qs = Book.objects.annotate(n=Count('pk'))
self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
# Check that the query executes without problems.
self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns(self):
# Regression test for #17144
results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
# There should only be one GROUP BY clause, for the `id` column.
# `name` and `age` should not be grouped on.
grouping, gb_params = results.query.get_compiler(using='default').get_grouping([], [])
self.assertEqual(len(grouping), 1)
assert 'id' in grouping[0]
assert 'name' not in grouping[0]
assert 'age' not in grouping[0]
# The query group_by property should also only show the `id`.
self.assertEqual(results.query.group_by, [('aggregation_regress_author', 'id')])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns_only(self):
# Works with only() too.
results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
grouping, gb_params = results.query.get_compiler(using='default').get_grouping([], [])
self.assertEqual(len(grouping), 1)
assert 'id' in grouping[0]
assert 'name' not in grouping[0]
assert 'age' not in grouping[0]
# The query group_by property should also only show the `id`.
self.assertEqual(results.query.group_by, [('aggregation_regress_author', 'id')])
# Ensure that we get correct results.
self.assertEqual(
[(a.name, a.num_contacts) for a in results.order_by('name')],
[
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 0),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 0),
('Peter Norvig', 2),
('Stuart Russell', 0),
('Wesley J. Chun', 0),
]
)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns_select_related(self):
# And select_related()
results = Book.objects.select_related('contact').annotate(
num_authors=Count('authors'))
grouping, gb_params = results.query.get_compiler(using='default').get_grouping([], [])
self.assertEqual(len(grouping), 1)
assert 'id' in grouping[0]
assert 'name' not in grouping[0]
assert 'contact' not in grouping[0]
# The query group_by property should also only show the `id`.
self.assertEqual(results.query.group_by, [('aggregation_regress_book', 'id')])
# Ensure that we get correct results.
self.assertEqual(
[(b.name, b.num_authors) for b in results.order_by('name')],
[
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
]
)
def test_reverse_join_trimming(self):
qs = Author.objects.annotate(Count('book_contact_set__contact'))
self.assertIn(' JOIN ', str(qs.query))
def test_aggregation_with_generic_reverse_relation(self):
"""
Regression test for #10870: Aggregates with joins ignore extra
filters provided by setup_joins
tests aggregations with generic reverse relations
"""
django_book = Book.objects.get(name='Practical Django Projects')
ItemTag.objects.create(object_id=django_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(django_book))
ItemTag.objects.create(object_id=django_book.id, tag='django',
content_type=ContentType.objects.get_for_model(django_book))
# Assign a tag to model with same PK as the book above. If the JOIN
# used in aggregation doesn't have content type as part of the
# condition the annotation will also count the 'hi mom' tag for b.
wmpk = WithManualPK.objects.create(id=django_book.pk)
ItemTag.objects.create(object_id=wmpk.id, tag='hi mom',
content_type=ContentType.objects.get_for_model(wmpk))
ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
ItemTag.objects.create(object_id=ai_book.id, tag='intermediate',
content_type=ContentType.objects.get_for_model(ai_book))
self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
self.assertEqual(
[(b.name, b.tags__count) for b in results],
[
('Practical Django Projects', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Artificial Intelligence: A Modern Approach', 0),
('Python Web Development with Django', 0),
('Sams Teach Yourself Django in 24 Hours', 0),
('The Definitive Guide to Django: Web Development Done Right', 0)
]
)
def test_negated_aggregation(self):
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
).order_by('name')
expected_results = [a.name for a in expected_results]
qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name')
self.assertQuerysetEqual(
qs,
expected_results,
lambda b: b.name
)
def test_name_filters(self):
qs = Author.objects.annotate(Count('book')).filter(
Q(book__count__exact=2) | Q(name='Adrian Holovaty')
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_name_expressions(self):
# Test that aggregates are spotted correctly from F objects.
# Note that Adrian's age is 34 in the fixtures, and he has one book
# so both conditions match one author.
qs = Author.objects.annotate(Count('book')).filter(
Q(name='Peter Norvig') | Q(age=F('book__count') + 33)
).order_by('name')
self.assertQuerysetEqual(
qs,
['Adrian Holovaty', 'Peter Norvig'],
lambda b: b.name
)
def test_ticket_11293(self):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors')).filter(
q1 | q2).order_by('pk')
self.assertQuerysetEqual(
query, [1, 4, 5, 6],
lambda b: b.pk)
def test_ticket_11293_q_immutable(self):
"""
Check that splitting a q object to parts for where/having doesn't alter
the original q-object.
"""
q1 = Q(isbn='')
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count('authors'))
query.filter(q1 | q2)
self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
"""
Check that an F() object referring to related column works correctly
in group by.
"""
qs = Book.objects.annotate(
acount=Count('authors')
).filter(
acount=F('publisher__num_awards')
)
self.assertQuerysetEqual(
qs, ['Sams Teach Yourself Django in 24 Hours'],
lambda b: b.name)
def test_annotate_reserved_word(self):
"""
Regression #18333 - Ensure annotated column name is properly quoted.
"""
vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select'))
self.assertEqual(vals, {
'select__sum': 10,
'select__avg': Approximate(1.666, places=2),
})
class JoinPromotionTests(TestCase):
def test_ticket_21150(self):
b = Bravo.objects.create()
c = Charlie.objects.create(bravo=b)
qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertIs(qs[0].alfa, None)
a = Alfa.objects.create()
c.alfa = a
c.save()
# Force re-evaluation
qs = qs.all()
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].alfa, a)
def test_existing_join_not_promoted(self):
# No promotion for existing joins
qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
# Also, the existing join is unpromoted when doing filtering for already
# promoted join.
qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False)
self.assertIn(' INNER JOIN ', str(qs.query))
# But, as the join is nullable first use by annotate will be LOUTER
qs = Charlie.objects.annotate(Count('alfa__name'))
self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
def test_non_nullable_fk_not_promoted(self):
qs = Book.objects.annotate(Count('contact__name'))
self.assertIn(' INNER JOIN ', str(qs.query))
class AggregationOnRelationTest(TestCase):
def setUp(self):
self.a = Author.objects.create(name='Anssi', age=33)
self.p = Publisher.objects.create(name='Manning', num_awards=3)
Book.objects.create(isbn='asdf', name='Foo', pages=10, rating=0.1, price="0.0",
contact=self.a, publisher=self.p, pubdate=datetime.date.today())
def test_annotate_on_relation(self):
qs = Book.objects.annotate(avg_price=Avg('price'), publisher_name=F('publisher__name'))
self.assertEqual(qs[0].avg_price, 0.0)
self.assertEqual(qs[0].publisher_name, "Manning")
def test_aggregate_on_relation(self):
# A query with an existing annotation aggregation on a relation should
# succeed.
qs = Book.objects.annotate(avg_price=Avg('price')).aggregate(
publisher_awards=Sum('publisher__num_awards')
)
self.assertEqual(qs['publisher_awards'], 3)
Book.objects.create(isbn='asdf', name='Foo', pages=10, rating=0.1, price="0.0",
contact=self.a, publisher=self.p, pubdate=datetime.date.today())
qs = Book.objects.annotate(avg_price=Avg('price')).aggregate(
publisher_awards=Sum('publisher__num_awards')
)
self.assertEqual(qs['publisher_awards'], 6)
| bsd-3-clause |
gorcz/mercurial | mercurial/bundle2.py | 2 | 45956 | # bundle2.py - generic container format to transmit arbitrary data.
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Handling of the new bundle2 format
The goal of bundle2 is to act as an atomic packet to transmit a set of
payloads in an application agnostic way. It consists of a sequence of "parts"
that will be handed to and processed by the application layer.
General format architecture
===========================
The format is structured as follows
- magic string
- stream level parameters
- payload parts (any number)
- end of stream marker.
the Binary format
============================
All numbers are unsigned and big-endian.
stream level parameters
------------------------
The binary format is as follows
:params size: int32
The total number of Bytes used by the parameters
:params value: arbitrary number of Bytes
A blob of `params size` containing the serialized version of all stream level
parameters.
The blob contains a space separated list of parameters. Parameters with value
are stored in the form `<name>=<value>`. Both name and value are urlquoted.
Empty names are obviously forbidden.
Names MUST start with a letter. If this first letter is lower case, the
parameter is advisory and can be safely ignored. However, when the first
letter is capital, the parameter is mandatory and the bundling process MUST
stop if it is not able to process it.
Stream parameters use a simple textual format for two main reasons:
- Stream level parameters should remain simple and we want to discourage any
crazy usage.
- Textual data allow easy human inspection of a bundle2 header in case of
troubles.
Any Applicative level options MUST go into a bundle2 part instead.
Payload part
------------------------
The binary format is as follows
:header size: int32
The total number of Bytes used by the part headers. When the header is empty
(size = 0) this is interpreted as the end of stream marker.
:header:
The header defines how to interpret the part. It contains two pieces of
data: the part type, and the part parameters.
The part type is used to route the part to an application level handler that
can interpret the payload.
Part parameters are passed to the application level handler. They are
meant to convey information that will help the application level object to
interpret the part payload.
The binary format of the header is as follows
:typesize: (one byte)
:parttype: alphanumerical part name (restricted to [a-zA-Z0-9_:-]*)
:partid: A 32bits integer (unique in the bundle) that can be used to refer
to this part.
:parameters:
Part's parameter may have arbitrary content, the binary structure is::
<mandatory-count><advisory-count><param-sizes><param-data>
:mandatory-count: 1 byte, number of mandatory parameters
:advisory-count: 1 byte, number of advisory parameters
:param-sizes:
N couples of bytes, where N is the total number of parameters. Each
couple contains (<size-of-key>, <size-of-value>) for one parameter.
:param-data:
A blob of bytes from which each parameter key and value can be
retrieved using the list of size couples stored in the previous
field.
Mandatory parameters come first, then the advisory ones.
Each parameter's key MUST be unique within the part.
:payload:
payload is a series of `<chunksize><chunkdata>`.
`chunksize` is an int32, `chunkdata` are plain bytes (as much as
`chunksize` says). The payload part is concluded by a zero size chunk.
The current implementation always produces either zero or one chunk.
This is an implementation limitation that will ultimately be lifted.
`chunksize` can be negative to trigger special case processing. No such
processing is in place yet.
Bundle processing
============================
Each part is processed in order using a "part handler". Handlers are registered
for a certain part type.
The matching of a part to its handler is case insensitive. The case of the
part type is used to know if a part is mandatory or advisory. If the Part type
contains any uppercase char it is considered mandatory. When no handler is
known for a Mandatory part, the process is aborted and an exception is raised.
If the part is advisory and no handler is known, the part is ignored. When the
process is aborted, the full bundle is still read from the stream to keep the
channel usable. But none of the parts read after an abort are processed. In the
future, dropping the stream may become an option for channels we do not care to
preserve.
"""
import errno
import sys
import util
import struct
import urllib
import string
import obsolete
import pushkey
import url
import re
import changegroup, error
from i18n import _
_pack = struct.pack
_unpack = struct.unpack
_fstreamparamsize = '>i'
_fpartheadersize = '>i'
_fparttypesize = '>B'
_fpartid = '>I'
_fpayloadsize = '>i'
_fpartparamcount = '>BB'
preferedchunksize = 4096
_parttypeforbidden = re.compile('[^a-zA-Z0-9_:-]')
def validateparttype(parttype):
"""raise ValueError if a parttype contains invalid character"""
if _parttypeforbidden.search(parttype):
raise ValueError(parttype)
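    # For example (illustrative): validateparttype('changegroup') passes,
    # while validateparttype('bad part!') raises ValueError because of the
    # space and '!' characters.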
def _makefpartparamsizes(nbparams):
"""return a struct format to read part parameter sizes
    The number of parameters is variable so we need to build that format
dynamically.
"""
return '>'+('BB'*nbparams)
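    # For instance, _makefpartparamsizes(2) returns '>BBBB': two (key, value)
    # size couples read as four unsigned bytes.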
parthandlermapping = {}
def parthandler(parttype, params=()):
"""decorator that register a function as a bundle2 part handler
eg::
@parthandler('myparttype', ('mandatory', 'param', 'handled'))
def myparttypehandler(...):
'''process a part of type "my part".'''
...
"""
validateparttype(parttype)
def _decorator(func):
lparttype = parttype.lower() # enforce lower case matching.
assert lparttype not in parthandlermapping
parthandlermapping[lparttype] = func
func.params = frozenset(params)
return func
return _decorator
class unbundlerecords(object):
"""keep record of what happens during and unbundle
New records are added using `records.add('cat', obj)`. Where 'cat' is a
category of record and obj is an arbitrary object.
`records['cat']` will return all entries of this category 'cat'.
Iterating on the object itself will yield `('category', obj)` tuples
for all entries.
    All iterations happen in chronological order.
"""
def __init__(self):
self._categories = {}
self._sequences = []
self._replies = {}
def add(self, category, entry, inreplyto=None):
"""add a new record of a given category.
The entry can then be retrieved in the list returned by
self['category']."""
self._categories.setdefault(category, []).append(entry)
self._sequences.append((category, entry))
if inreplyto is not None:
self.getreplies(inreplyto).add(category, entry)
def getreplies(self, partid):
"""get the records that are replies to a specific part"""
return self._replies.setdefault(partid, unbundlerecords())
def __getitem__(self, cat):
return tuple(self._categories.get(cat, ()))
def __iter__(self):
return iter(self._sequences)
def __len__(self):
return len(self._sequences)
def __nonzero__(self):
return bool(self._sequences)
class bundleoperation(object):
"""an object that represents a single bundling process
Its purpose is to carry unbundle-related objects and states.
A new object should be created at the beginning of each bundle processing.
The object is to be returned by the processing function.
    The object has very little content now; it will ultimately contain:
* an access to the repo the bundle is applied to,
* a ui object,
* a way to retrieve a transaction to add changes to the repo,
* a way to record the result of processing each part,
* a way to construct a bundle response when applicable.
"""
def __init__(self, repo, transactiongetter, captureoutput=True):
self.repo = repo
self.ui = repo.ui
self.records = unbundlerecords()
self.gettransaction = transactiongetter
self.reply = None
self.captureoutput = captureoutput
class TransactionUnavailable(RuntimeError):
pass
def _notransaction():
"""default method to get a transaction while processing a bundle
Raise an exception to highlight the fact that no transaction was expected
to be created"""
raise TransactionUnavailable()
def processbundle(repo, unbundler, transactiongetter=None, op=None):
"""This function process a bundle, apply effect to/from a repo
It iterates over each part then searches for and uses the proper handling
code to process the part. Parts are processed in order.
    This is a very early version of this function that will be strongly reworked
    before final usage.
Unknown Mandatory part will abort the process.
It is temporarily possible to provide a prebuilt bundleoperation to the
function. This is used to ensure output is properly propagated in case of
an error during the unbundling. This output capturing part will likely be
reworked and this ability will probably go away in the process.
"""
if op is None:
if transactiongetter is None:
transactiongetter = _notransaction
op = bundleoperation(repo, transactiongetter)
# todo:
    # - replace this with an init function soon.
# - exception catching
unbundler.params
iterparts = unbundler.iterparts()
part = None
try:
for part in iterparts:
_processpart(op, part)
except Exception, exc:
for part in iterparts:
# consume the bundle content
part.seek(0, 2)
# Small hack to let caller code distinguish exceptions from bundle2
# processing from processing the old format. This is mostly
# needed to handle different return codes to unbundle according to the
# type of bundle. We should probably clean up or drop this return code
# craziness in a future version.
exc.duringunbundle2 = True
salvaged = []
if op.reply is not None:
salvaged = op.reply.salvageoutput()
exc._bundle2salvagedoutput = salvaged
raise
return op
def _processpart(op, part):
"""process a single part from a bundle
The part is guaranteed to have been fully consumed when the function exits
(even if an exception is raised)."""
try:
try:
handler = parthandlermapping.get(part.type)
if handler is None:
raise error.UnsupportedPartError(parttype=part.type)
op.ui.debug('found a handler for part %r\n' % part.type)
unknownparams = part.mandatorykeys - handler.params
if unknownparams:
unknownparams = list(unknownparams)
unknownparams.sort()
raise error.UnsupportedPartError(parttype=part.type,
params=unknownparams)
except error.UnsupportedPartError, exc:
if part.mandatory: # mandatory parts
raise
op.ui.debug('ignoring unsupported advisory part %s\n' % exc)
return # skip to part processing
# handler is called outside the above try block so that we don't
# risk catching KeyErrors from anything other than the
# parthandlermapping lookup (any KeyError raised by handler()
# itself represents a defect of a different variety).
output = None
if op.captureoutput and op.reply is not None:
op.ui.pushbuffer(error=True, subproc=True)
output = ''
try:
handler(op, part)
finally:
if output is not None:
output = op.ui.popbuffer()
if output:
outpart = op.reply.newpart('output', data=output,
mandatory=False)
outpart.addparam('in-reply-to', str(part.id), mandatory=False)
finally:
# consume the part content to not corrupt the stream.
part.seek(0, 2)
def decodecaps(blob):
"""decode a bundle2 caps bytes blob into a dictionary
The blob is a list of capabilities (one per line)
Capabilities may have values using a line of the form::
capability=value1,value2,value3
The values are always a list."""
caps = {}
for line in blob.splitlines():
if not line:
continue
if '=' not in line:
key, vals = line, ()
else:
key, vals = line.split('=', 1)
vals = vals.split(',')
key = urllib.unquote(key)
vals = [urllib.unquote(v) for v in vals]
caps[key] = vals
return caps
def encodecaps(caps):
"""encode a bundle2 caps dictionary into a bytes blob"""
chunks = []
for ca in sorted(caps):
vals = caps[ca]
ca = urllib.quote(ca)
vals = [urllib.quote(v) for v in vals]
if vals:
ca = "%s=%s" % (ca, ','.join(vals))
chunks.append(ca)
return '\n'.join(chunks)
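    # Round-trip sketch for the two helpers above (values are illustrative):
    #   encodecaps({'HG20': (), 'digests': ('sha1',)}) == 'HG20\ndigests=sha1'
    #   decodecaps('HG20\ndigests=sha1') == {'HG20': [], 'digests': ['sha1']}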
class bundle20(object):
"""represent an outgoing bundle2 container
    Use the `addparam` method to add stream level parameters and `newpart` to
    populate it. Then call `getchunks` to retrieve all the binary chunks of
data that compose the bundle2 container."""
_magicstring = 'HG20'
def __init__(self, ui, capabilities=()):
self.ui = ui
self._params = []
self._parts = []
self.capabilities = dict(capabilities)
@property
def nbparts(self):
"""total number of parts added to the bundler"""
return len(self._parts)
    # methods used to define the bundle2 content
def addparam(self, name, value=None):
"""add a stream level parameter"""
if not name:
raise ValueError('empty parameter name')
if name[0] not in string.letters:
raise ValueError('non letter first character: %r' % name)
self._params.append((name, value))
def addpart(self, part):
"""add a new part to the bundle2 container
        Parts contain the actual applicative payload.
assert part.id is None
part.id = len(self._parts) # very cheap counter
self._parts.append(part)
def newpart(self, typeid, *args, **kwargs):
"""create a new part and add it to the containers
As the part is directly added to the containers. For now, this means
that any failure to properly initialize the part after calling
``newpart`` should result in a failure of the whole bundling process.
You can still fall back to manually create and add if you need better
control."""
part = bundlepart(typeid, *args, **kwargs)
self.addpart(part)
return part
# methods used to generate the bundle2 stream
def getchunks(self):
self.ui.debug('start emission of %s stream\n' % self._magicstring)
yield self._magicstring
param = self._paramchunk()
self.ui.debug('bundle parameter: %s\n' % param)
yield _pack(_fstreamparamsize, len(param))
if param:
yield param
self.ui.debug('start of parts\n')
for part in self._parts:
self.ui.debug('bundle part: "%s"\n' % part.type)
for chunk in part.getchunks():
yield chunk
self.ui.debug('end of bundle\n')
yield _pack(_fpartheadersize, 0)
def _paramchunk(self):
"""return a encoded version of all stream parameters"""
blocks = []
for par, value in self._params:
par = urllib.quote(par)
if value is not None:
value = urllib.quote(value)
par = '%s=%s' % (par, value)
blocks.append(par)
return ' '.join(blocks)
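        # For example, with self._params == [('check', None), ('obsmarkers', 'V1')]
        # (hypothetical names), this returns 'check obsmarkers=V1', with names
        # and values urlquoted.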
def salvageoutput(self):
"""return a list with a copy of all output parts in the bundle
This is meant to be used during error handling to make sure we preserve
server output"""
salvaged = []
for part in self._parts:
if part.type.startswith('output'):
salvaged.append(part.copy())
return salvaged
class unpackermixin(object):
"""A mixin to extract bytes and struct data from a stream"""
def __init__(self, fp):
self._fp = fp
self._seekable = (util.safehasattr(fp, 'seek') and
util.safehasattr(fp, 'tell'))
def _unpack(self, format):
"""unpack this struct format from the stream"""
data = self._readexact(struct.calcsize(format))
return _unpack(format, data)
def _readexact(self, size):
"""read exactly <size> bytes from the stream"""
return changegroup.readexactly(self._fp, size)
def seek(self, offset, whence=0):
"""move the underlying file pointer"""
if self._seekable:
return self._fp.seek(offset, whence)
else:
raise NotImplementedError(_('File pointer is not seekable'))
def tell(self):
"""return the file offset, or None if file is not seekable"""
if self._seekable:
try:
return self._fp.tell()
except IOError, e:
if e.errno == errno.ESPIPE:
self._seekable = False
else:
raise
return None
def close(self):
"""close underlying file"""
if util.safehasattr(self._fp, 'close'):
return self._fp.close()
def getunbundler(ui, fp, header=None):
"""return a valid unbundler object for a given header"""
if header is None:
header = changegroup.readexactly(fp, 4)
magic, version = header[0:2], header[2:4]
if magic != 'HG':
raise util.Abort(_('not a Mercurial bundle'))
unbundlerclass = formatmap.get(version)
if unbundlerclass is None:
raise util.Abort(_('unknown bundle version %s') % version)
unbundler = unbundlerclass(ui, fp)
ui.debug('start processing of %s stream\n' % header)
return unbundler
class unbundle20(unpackermixin):
"""interpret a bundle2 stream
This class is fed with a binary stream and yields parts through its
    `iterparts` method."""
def __init__(self, ui, fp):
"""If header is specified, we do not read it out of the stream."""
self.ui = ui
super(unbundle20, self).__init__(fp)
@util.propertycache
def params(self):
"""dictionary of stream level parameters"""
self.ui.debug('reading bundle2 stream parameters\n')
params = {}
paramssize = self._unpack(_fstreamparamsize)[0]
if paramssize < 0:
raise error.BundleValueError('negative bundle param size: %i'
% paramssize)
if paramssize:
for p in self._readexact(paramssize).split(' '):
p = p.split('=', 1)
p = [urllib.unquote(i) for i in p]
if len(p) < 2:
p.append(None)
self._processparam(*p)
params[p[0]] = p[1]
return params
def _processparam(self, name, value):
"""process a parameter, applying its effect if needed
        Parameters starting with a lower case letter are advisory and will be
        ignored when unknown. Those starting with an upper case letter are
        mandatory; for those, this function raises an UnsupportedPartError when
        the parameter is unknown.
        Note: no options are currently supported. Any input will either be
        ignored or will fail.
"""
if not name:
raise ValueError('empty parameter name')
if name[0] not in string.letters:
raise ValueError('non letter first character: %r' % name)
# Some logic will be later added here to try to process the option for
# a dict of known parameter.
if name[0].islower():
self.ui.debug("ignoring unknown parameter %r\n" % name)
else:
raise error.UnsupportedPartError(params=(name,))
def iterparts(self):
"""yield all parts contained in the stream"""
# make sure param have been loaded
self.params
self.ui.debug('start extraction of bundle2 parts\n')
headerblock = self._readpartheader()
while headerblock is not None:
part = unbundlepart(self.ui, headerblock, self._fp)
yield part
part.seek(0, 2)
headerblock = self._readpartheader()
self.ui.debug('end of bundle2 stream\n')
def _readpartheader(self):
"""reads a part header size and return the bytes blob
returns None if empty"""
headersize = self._unpack(_fpartheadersize)[0]
if headersize < 0:
raise error.BundleValueError('negative part header size: %i'
% headersize)
self.ui.debug('part header size: %i\n' % headersize)
if headersize:
return self._readexact(headersize)
return None
def compressed(self):
return False
formatmap = {'20': unbundle20}
class bundlepart(object):
"""A bundle2 part contains application level payload
The part `type` is used to route the part to the application level
handler.
The part payload is contained in ``part.data``. It could be raw bytes or a
generator of byte chunks.
You can add parameters to the part using the ``addparam`` method.
Parameters can be either mandatory (default) or advisory. Remote side
should be able to safely ignore the advisory ones.
Both data and parameters cannot be modified after the generation has begun.
"""
def __init__(self, parttype, mandatoryparams=(), advisoryparams=(),
data='', mandatory=True):
validateparttype(parttype)
self.id = None
self.type = parttype
self._data = data
self._mandatoryparams = list(mandatoryparams)
self._advisoryparams = list(advisoryparams)
# checking for duplicated entries
self._seenparams = set()
for pname, __ in self._mandatoryparams + self._advisoryparams:
if pname in self._seenparams:
raise RuntimeError('duplicated params: %s' % pname)
self._seenparams.add(pname)
# status of the part's generation:
# - None: not started,
# - False: currently generated,
# - True: generation done.
self._generated = None
self.mandatory = mandatory
def copy(self):
"""return a copy of the part
        The new part has the very same content but no partid assigned yet.
Parts with generated data cannot be copied."""
assert not util.safehasattr(self.data, 'next')
return self.__class__(self.type, self._mandatoryparams,
self._advisoryparams, self._data, self.mandatory)
    # methods used to define the part content
def __setdata(self, data):
if self._generated is not None:
raise error.ReadOnlyPartError('part is being generated')
self._data = data
def __getdata(self):
return self._data
data = property(__getdata, __setdata)
@property
def mandatoryparams(self):
# make it an immutable tuple to force people through ``addparam``
return tuple(self._mandatoryparams)
@property
def advisoryparams(self):
# make it an immutable tuple to force people through ``addparam``
return tuple(self._advisoryparams)
def addparam(self, name, value='', mandatory=True):
if self._generated is not None:
raise error.ReadOnlyPartError('part is being generated')
if name in self._seenparams:
raise ValueError('duplicated params: %s' % name)
self._seenparams.add(name)
params = self._advisoryparams
if mandatory:
params = self._mandatoryparams
params.append((name, value))
    # methods used to generate the bundle2 stream
def getchunks(self):
if self._generated is not None:
raise RuntimeError('part can only be consumed once')
self._generated = False
#### header
if self.mandatory:
parttype = self.type.upper()
else:
parttype = self.type.lower()
## parttype
header = [_pack(_fparttypesize, len(parttype)),
parttype, _pack(_fpartid, self.id),
]
## parameters
# count
manpar = self.mandatoryparams
advpar = self.advisoryparams
header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
# size
parsizes = []
for key, value in manpar:
parsizes.append(len(key))
parsizes.append(len(value))
for key, value in advpar:
parsizes.append(len(key))
parsizes.append(len(value))
paramsizes = _pack(_makefpartparamsizes(len(parsizes) / 2), *parsizes)
header.append(paramsizes)
# key, value
for key, value in manpar:
header.append(key)
header.append(value)
for key, value in advpar:
header.append(key)
header.append(value)
## finalize header
headerchunk = ''.join(header)
yield _pack(_fpartheadersize, len(headerchunk))
yield headerchunk
## payload
try:
for chunk in self._payloadchunks():
yield _pack(_fpayloadsize, len(chunk))
yield chunk
except Exception, exc:
# backup exception data for later
exc_info = sys.exc_info()
msg = 'unexpected error: %s' % exc
interpart = bundlepart('error:abort', [('message', msg)],
mandatory=False)
interpart.id = 0
yield _pack(_fpayloadsize, -1)
for chunk in interpart.getchunks():
yield chunk
# abort current part payload
yield _pack(_fpayloadsize, 0)
raise exc_info[0], exc_info[1], exc_info[2]
# end of payload
yield _pack(_fpayloadsize, 0)
self._generated = True
def _payloadchunks(self):
"""yield chunks of a the part payload
Exists to handle the different methods to provide data to a part."""
# we only support fixed size data now.
# This will be improved in the future.
if util.safehasattr(self.data, 'next'):
buff = util.chunkbuffer(self.data)
chunk = buff.read(preferedchunksize)
while chunk:
yield chunk
chunk = buff.read(preferedchunksize)
elif len(self.data):
yield self.data
flaginterrupt = -1
class interrupthandler(unpackermixin):
"""read one part and process it with restricted capability
    This allows transmitting exceptions raised on the producer side during part
    iteration while the consumer is reading a part.
    Parts processed in this manner only have access to a ui object."""
def __init__(self, ui, fp):
super(interrupthandler, self).__init__(fp)
self.ui = ui
def _readpartheader(self):
"""reads a part header size and return the bytes blob
returns None if empty"""
headersize = self._unpack(_fpartheadersize)[0]
if headersize < 0:
raise error.BundleValueError('negative part header size: %i'
% headersize)
self.ui.debug('part header size: %i\n' % headersize)
if headersize:
return self._readexact(headersize)
return None
def __call__(self):
self.ui.debug('bundle2 stream interruption, looking for a part.\n')
headerblock = self._readpartheader()
if headerblock is None:
self.ui.debug('no part found during interruption.\n')
return
part = unbundlepart(self.ui, headerblock, self._fp)
op = interruptoperation(self.ui)
_processpart(op, part)
class interruptoperation(object):
"""A limited operation to be use by part handler during interruption
It only have access to an ui object.
"""
def __init__(self, ui):
self.ui = ui
self.reply = None
self.captureoutput = False
@property
def repo(self):
raise RuntimeError('no repo access from stream interruption')
def gettransaction(self):
raise TransactionUnavailable('no repo access from stream interruption')
class unbundlepart(unpackermixin):
"""a bundle part read from a bundle"""
def __init__(self, ui, header, fp):
super(unbundlepart, self).__init__(fp)
self.ui = ui
# unbundle state attr
self._headerdata = header
self._headeroffset = 0
self._initialized = False
self.consumed = False
# part data
self.id = None
self.type = None
self.mandatoryparams = None
self.advisoryparams = None
self.params = None
self.mandatorykeys = ()
self._payloadstream = None
self._readheader()
self._mandatory = None
self._chunkindex = [] #(payload, file) position tuples for chunk starts
self._pos = 0
def _fromheader(self, size):
"""return the next <size> byte from the header"""
offset = self._headeroffset
data = self._headerdata[offset:(offset + size)]
self._headeroffset = offset + size
return data
def _unpackheader(self, format):
"""read given format from header
        This automatically computes the size of the format to read."""
data = self._fromheader(struct.calcsize(format))
return _unpack(format, data)
def _initparams(self, mandatoryparams, advisoryparams):
"""internal function to setup all logic related parameters"""
# make it read only to prevent people touching it by mistake.
self.mandatoryparams = tuple(mandatoryparams)
self.advisoryparams = tuple(advisoryparams)
# user friendly UI
self.params = dict(self.mandatoryparams)
self.params.update(dict(self.advisoryparams))
self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)
def _payloadchunks(self, chunknum=0):
'''seek to specified chunk and start yielding data'''
if len(self._chunkindex) == 0:
assert chunknum == 0, 'Must start with chunk 0'
self._chunkindex.append((0, super(unbundlepart, self).tell()))
else:
assert chunknum < len(self._chunkindex), \
'Unknown chunk %d' % chunknum
super(unbundlepart, self).seek(self._chunkindex[chunknum][1])
pos = self._chunkindex[chunknum][0]
payloadsize = self._unpack(_fpayloadsize)[0]
self.ui.debug('payload chunk size: %i\n' % payloadsize)
while payloadsize:
if payloadsize == flaginterrupt:
# interruption detection, the handler will now read a
# single part and process it.
interrupthandler(self.ui, self._fp)()
elif payloadsize < 0:
msg = 'negative payload chunk size: %i' % payloadsize
raise error.BundleValueError(msg)
else:
result = self._readexact(payloadsize)
chunknum += 1
pos += payloadsize
if chunknum == len(self._chunkindex):
self._chunkindex.append((pos,
super(unbundlepart, self).tell()))
yield result
payloadsize = self._unpack(_fpayloadsize)[0]
self.ui.debug('payload chunk size: %i\n' % payloadsize)
def _findchunk(self, pos):
'''for a given payload position, return a chunk number and offset'''
for chunk, (ppos, fpos) in enumerate(self._chunkindex):
if ppos == pos:
return chunk, 0
elif ppos > pos:
return chunk - 1, pos - self._chunkindex[chunk - 1][0]
raise ValueError('Unknown chunk')
def _readheader(self):
"""read the header and setup the object"""
typesize = self._unpackheader(_fparttypesize)[0]
self.type = self._fromheader(typesize)
self.ui.debug('part type: "%s"\n' % self.type)
self.id = self._unpackheader(_fpartid)[0]
self.ui.debug('part id: "%s"\n' % self.id)
# extract mandatory bit from type
self.mandatory = (self.type != self.type.lower())
self.type = self.type.lower()
## reading parameters
# param count
mancount, advcount = self._unpackheader(_fpartparamcount)
self.ui.debug('part parameters: %i\n' % (mancount + advcount))
# param size
fparamsizes = _makefpartparamsizes(mancount + advcount)
paramsizes = self._unpackheader(fparamsizes)
# make it a list of couple again
paramsizes = zip(paramsizes[::2], paramsizes[1::2])
# split mandatory from advisory
mansizes = paramsizes[:mancount]
advsizes = paramsizes[mancount:]
# retrieve param value
manparams = []
for key, value in mansizes:
manparams.append((self._fromheader(key), self._fromheader(value)))
advparams = []
for key, value in advsizes:
advparams.append((self._fromheader(key), self._fromheader(value)))
self._initparams(manparams, advparams)
## part payload
self._payloadstream = util.chunkbuffer(self._payloadchunks())
# we read the data, tell it
self._initialized = True
def read(self, size=None):
"""read payload data"""
if not self._initialized:
self._readheader()
if size is None:
data = self._payloadstream.read()
else:
data = self._payloadstream.read(size)
if size is None or len(data) < size:
self.consumed = True
self._pos += len(data)
return data
def tell(self):
return self._pos
def seek(self, offset, whence=0):
if whence == 0:
newpos = offset
elif whence == 1:
newpos = self._pos + offset
elif whence == 2:
if not self.consumed:
self.read()
newpos = self._chunkindex[-1][0] - offset
else:
raise ValueError('Unknown whence value: %r' % (whence,))
if newpos > self._chunkindex[-1][0] and not self.consumed:
self.read()
if not 0 <= newpos <= self._chunkindex[-1][0]:
raise ValueError('Offset out of range')
if self._pos != newpos:
chunk, internaloffset = self._findchunk(newpos)
self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
adjust = self.read(internaloffset)
if len(adjust) != internaloffset:
raise util.Abort(_('Seek failed\n'))
self._pos = newpos
capabilities = {'HG20': (),
'listkeys': (),
'pushkey': (),
'digests': tuple(sorted(util.DIGESTS.keys())),
'remote-changegroup': ('http', 'https'),
}
def getrepocaps(repo, allowpushback=False):
"""return the bundle2 capabilities for a given repo
Exists to allow extensions (like evolution) to mutate the capabilities.
"""
caps = capabilities.copy()
caps['changegroup'] = tuple(sorted(changegroup.packermap.keys()))
if obsolete.isenabled(repo, obsolete.exchangeopt):
supportedformat = tuple('V%i' % v for v in obsolete.formats)
caps['obsmarkers'] = supportedformat
if allowpushback:
caps['pushback'] = ()
return caps
def bundle2caps(remote):
"""return the bundle capabilities of a peer as dict"""
raw = remote.capable('bundle2')
if not raw and raw != '':
return {}
capsblob = urllib.unquote(remote.capable('bundle2'))
return decodecaps(capsblob)
def obsmarkersversion(caps):
"""extract the list of supported obsmarkers versions from a bundle2caps dict
"""
obscaps = caps.get('obsmarkers', ())
return [int(c[1:]) for c in obscaps if c.startswith('V')]
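    # e.g. (illustrative) obsmarkersversion({'obsmarkers': ('V0', 'V1')}) == [0, 1]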
@parthandler('changegroup', ('version',))
def handlechangegroup(op, inpart):
"""apply a changegroup part on the repo
    This is a very early implementation that will be massively reworked before
    being inflicted on any end-user.
"""
# Make sure we trigger a transaction creation
#
# The addchangegroup function will get a transaction object by itself, but
# we need to make sure we trigger the creation of a transaction object used
# for the whole processing scope.
op.gettransaction()
unpackerversion = inpart.params.get('version', '01')
# We should raise an appropriate exception here
unpacker = changegroup.packermap[unpackerversion][1]
cg = unpacker(inpart, 'UN')
# the source and url passed here are overwritten by the one contained in
# the transaction.hookargs argument. So 'bundle2' is a placeholder
ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2')
op.records.add('changegroup', {'return': ret})
if op.reply is not None:
# This is definitely not the final form of this
        # return. But one needs to start somewhere.
part = op.reply.newpart('reply:changegroup', mandatory=False)
part.addparam('in-reply-to', str(inpart.id), mandatory=False)
part.addparam('return', '%i' % ret, mandatory=False)
assert not inpart.read()
_remotechangegroupparams = tuple(['url', 'size', 'digests'] +
['digest:%s' % k for k in util.DIGESTS.keys()])
@parthandler('remote-changegroup', _remotechangegroupparams)
def handleremotechangegroup(op, inpart):
"""apply a bundle10 on the repo, given an url and validation information
    All the information about the remote bundle to import is given as
parameters. The parameters include:
- url: the url to the bundle10.
- size: the bundle10 file size. It is used to validate what was
retrieved by the client matches the server knowledge about the bundle.
- digests: a space separated list of the digest types provided as
parameters.
- digest:<digest-type>: the hexadecimal representation of the digest with
that name. Like the size, it is used to validate what was retrieved by
the client matches what the server knows about the bundle.
When multiple digest types are given, all of them are checked.
"""
try:
raw_url = inpart.params['url']
except KeyError:
raise util.Abort(_('remote-changegroup: missing "%s" param') % 'url')
parsed_url = util.url(raw_url)
if parsed_url.scheme not in capabilities['remote-changegroup']:
raise util.Abort(_('remote-changegroup does not support %s urls') %
parsed_url.scheme)
try:
size = int(inpart.params['size'])
except ValueError:
raise util.Abort(_('remote-changegroup: invalid value for param "%s"')
% 'size')
except KeyError:
raise util.Abort(_('remote-changegroup: missing "%s" param') % 'size')
digests = {}
for typ in inpart.params.get('digests', '').split():
param = 'digest:%s' % typ
try:
value = inpart.params[param]
except KeyError:
raise util.Abort(_('remote-changegroup: missing "%s" param') %
param)
digests[typ] = value
real_part = util.digestchecker(url.open(op.ui, raw_url), size, digests)
# Make sure we trigger a transaction creation
#
# The addchangegroup function will get a transaction object by itself, but
# we need to make sure we trigger the creation of a transaction object used
# for the whole processing scope.
op.gettransaction()
import exchange
cg = exchange.readbundle(op.repo.ui, real_part, raw_url)
if not isinstance(cg, changegroup.cg1unpacker):
raise util.Abort(_('%s: not a bundle version 1.0') %
util.hidepassword(raw_url))
ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2')
op.records.add('changegroup', {'return': ret})
if op.reply is not None:
# This is definitely not the final form of this
        # return. But one needs to start somewhere.
part = op.reply.newpart('reply:changegroup')
part.addparam('in-reply-to', str(inpart.id), mandatory=False)
part.addparam('return', '%i' % ret, mandatory=False)
try:
real_part.validate()
except util.Abort, e:
raise util.Abort(_('bundle at %s is corrupted:\n%s') %
(util.hidepassword(raw_url), str(e)))
assert not inpart.read()
@parthandler('reply:changegroup', ('return', 'in-reply-to'))
def handlereplychangegroup(op, inpart):
ret = int(inpart.params['return'])
replyto = int(inpart.params['in-reply-to'])
op.records.add('changegroup', {'return': ret}, replyto)
@parthandler('check:heads')
def handlecheckheads(op, inpart):
"""check that head of the repo did not change
This is used to detect a push race when using unbundle.
This replaces the "heads" argument of unbundle."""
h = inpart.read(20)
heads = []
while len(h) == 20:
heads.append(h)
h = inpart.read(20)
assert not h
if heads != op.repo.heads():
raise error.PushRaced('repository changed while pushing - '
'please try again')
@parthandler('output')
def handleoutput(op, inpart):
"""forward output captured on the server to the client"""
for line in inpart.read().splitlines():
op.ui.status(('remote: %s\n' % line))
@parthandler('replycaps')
def handlereplycaps(op, inpart):
"""Notify that a reply bundle should be created
The payload contains the capabilities information for the reply"""
caps = decodecaps(inpart.read())
if op.reply is None:
op.reply = bundle20(op.ui, caps)
@parthandler('error:abort', ('message', 'hint'))
def handleerrorabort(op, inpart):
"""Used to transmit abort error over the wire"""
raise util.Abort(inpart.params['message'], hint=inpart.params.get('hint'))
@parthandler('error:unsupportedcontent', ('parttype', 'params'))
def handleerrorunsupportedcontent(op, inpart):
"""Used to transmit unknown content error over the wire"""
kwargs = {}
parttype = inpart.params.get('parttype')
if parttype is not None:
kwargs['parttype'] = parttype
params = inpart.params.get('params')
if params is not None:
kwargs['params'] = params.split('\0')
raise error.UnsupportedPartError(**kwargs)
@parthandler('error:pushraced', ('message',))
def handleerrorpushraced(op, inpart):
"""Used to transmit push race error over the wire"""
raise error.ResponseError(_('push failed:'), inpart.params['message'])
@parthandler('listkeys', ('namespace',))
def handlelistkeys(op, inpart):
"""retrieve pushkey namespace content stored in a bundle2"""
namespace = inpart.params['namespace']
r = pushkey.decodekeys(inpart.read())
op.records.add('listkeys', (namespace, r))
@parthandler('pushkey', ('namespace', 'key', 'old', 'new'))
def handlepushkey(op, inpart):
"""process a pushkey request"""
dec = pushkey.decode
namespace = dec(inpart.params['namespace'])
key = dec(inpart.params['key'])
old = dec(inpart.params['old'])
new = dec(inpart.params['new'])
ret = op.repo.pushkey(namespace, key, old, new)
record = {'namespace': namespace,
'key': key,
'old': old,
'new': new}
op.records.add('pushkey', record)
if op.reply is not None:
rpart = op.reply.newpart('reply:pushkey')
rpart.addparam('in-reply-to', str(inpart.id), mandatory=False)
rpart.addparam('return', '%i' % ret, mandatory=False)
@parthandler('reply:pushkey', ('return', 'in-reply-to'))
def handlepushkeyreply(op, inpart):
"""retrieve the result of a pushkey request"""
ret = int(inpart.params['return'])
partid = int(inpart.params['in-reply-to'])
op.records.add('pushkey', {'return': ret}, partid)
@parthandler('obsmarkers')
def handleobsmarker(op, inpart):
"""add a stream of obsmarkers to the repo"""
tr = op.gettransaction()
markerdata = inpart.read()
if op.ui.config('experimental', 'obsmarkers-exchange-debug', False):
op.ui.write(('obsmarker-exchange: %i bytes received\n')
% len(markerdata))
new = op.repo.obsstore.mergemarkers(tr, markerdata)
if new:
op.repo.ui.status(_('%i new obsolescence markers\n') % new)
op.records.add('obsmarkers', {'new': new})
if op.reply is not None:
rpart = op.reply.newpart('reply:obsmarkers')
rpart.addparam('in-reply-to', str(inpart.id), mandatory=False)
rpart.addparam('new', '%i' % new, mandatory=False)
@parthandler('reply:obsmarkers', ('new', 'in-reply-to'))
def handleobsmarkerreply(op, inpart):
    """retrieve the result of a pushed obsmarkers part"""
ret = int(inpart.params['new'])
partid = int(inpart.params['in-reply-to'])
op.records.add('obsmarkers', {'new': ret}, partid)
| gpl-2.0 |
apechimp/servo | tests/wpt/web-platform-tests/webdriver/javascript/execute_script_test.py | 142 | 5724 | import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
from selenium.webdriver.remote.webelement import WebElement
class ExecuteScriptTest(base_test.WebDriverBaseTest):
def test_ecmascript_translates_null_return_to_none(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
result = self.driver.execute_script("return null;")
self.assertIsNone(result)
def test_ecmascript_translates_undefined_return_to_none(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
result = self.driver.execute_script("var undef; return undef;")
self.assertIsNone(result)
def test_can_return_numbers_from_scripts(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
self.assertEquals(1, self.driver.execute_script("return 1;"))
self.assertEquals(3.14, self.driver.execute_script("return 3.14;"))
def test_can_return_strings_from_scripts(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
self.assertEquals("hello, world!",
self.driver.execute_script("return 'hello, world!'"))
def test_can_return_booleans_from_scripts(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
self.assertTrue(self.driver.execute_script("return true;"))
self.assertFalse(self.driver.execute_script("return false;"))
def test_can_return_an_array_of_primitives(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
result = self.driver.execute_script("return [1, false, null, 3.14]")
self.assertListEqual([1, False, None, 3.14], result)
def test_can_return_nested_arrays(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
result = self.driver.execute_script("return [[1, 2, [3]]]")
self.assertIsInstance(result, list)
self.assertEquals(1, len(result))
result = result[0]
self.assertListEqual([1, 2], result[:2])
self.assertListEqual([3], result[2])
def test_can_return_object_literals(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
result = self.driver.execute_script("return {}")
self.assertDictEqual({}, result)
result = self.driver.execute_script("return {a: 1, b: false, c: null}")
self.assertDictEqual({
"a": 1,
"b": False,
"c": None
}, result)
def test_can_return_complex_object_literals(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
result = self.driver.execute_script("return {a:{b: 'hello'}}")
self.assertIsInstance(result, dict)
self.assertIsInstance(result['a'], dict)
self.assertDictEqual({"b": "hello"}, result["a"])
def test_dom_element_return_value_is_translated_to_a_web_element(self):
self.driver.get(self.webserver.where_is(
"javascript/res/return_document_body.html"))
result = self.driver.execute_script("return document.body")
self.assertEquals(result.text, "Hello, world!")
def test_return_an_array_of_dom_elements(self):
self.driver.get(self.webserver.where_is(
"javascript/res/return_array_of_dom_elements.html"))
result = self.driver.execute_script(
"var nodes = document.getElementsByTagName('div');"
"return [nodes[0], nodes[1]]")
self.assertIsInstance(result, list)
self.assertEquals(2, len(result))
self.assertEquals("a", result[0].text)
self.assertEquals("b", result[1].text)
def test_node_list_return_value_is_translated_to_list_of_web_elements(self):
self.driver.get(self.webserver.where_is(
"javascript/res/return_array_of_dom_elements.html"))
result = self.driver.execute_script(
"return document.getElementsByTagName('div');")
self.assertIsInstance(result, list)
self.assertEquals(2, len(result))
self.assertEquals("a", result[0].text)
self.assertEquals("b", result[1].text)
def test_return_object_literal_with_dom_element_property(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
result = self.driver.execute_script("return {a: document.body}")
self.assertIsInstance(result, dict)
self.assertEquals("body", result["a"].tag_name)
def test_scripts_execute_in_anonymous_function_and_do_not_pollute_global_scope(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
self.driver.execute_script("var x = 1;")
self.assertEquals("undefined", self.driver.execute_script("return typeof x;"));
def test_scripts_can_modify_context_window_object(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
self.driver.execute_script("window.x = 1;")
self.assertEquals("number", self.driver.execute_script("return typeof x;"));
self.assertEquals(1, self.driver.execute_script("return x;"));
def test_that_ecmascript_returns_document_title(self):
self.driver.get(self.webserver.where_is("javascript/res/execute_script_test.html"))
result = self.driver.execute_script("return document.title;")
self.assertEquals("executeScript test", result)
if __name__ == "__main__":
unittest.main()
| mpl-2.0 |
Lessica/shadowsocks | shadowsocks/shell.py | 652 | 12736 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork
from shadowsocks import encrypt
VERBOSE_LEVEL = 5
verbose = 0
def check_python():
info = sys.version_info
if info[0] == 2 and not info[1] >= 6:
print('Python 2.6+ required')
sys.exit(1)
elif info[0] == 3 and not info[1] >= 3:
print('Python 3.3+ required')
sys.exit(1)
elif info[0] not in [2, 3]:
print('Python version not supported')
sys.exit(1)
def print_exception(e):
global verbose
logging.error(e)
if verbose > 0:
import traceback
traceback.print_exc()
def print_shadowsocks():
version = ''
try:
import pkg_resources
version = pkg_resources.get_distribution('shadowsocks').version
except Exception:
pass
print('Shadowsocks %s' % version)
def find_config():
config_path = 'config.json'
if os.path.exists(config_path):
return config_path
config_path = os.path.join(os.path.dirname(__file__), '../', 'config.json')
if os.path.exists(config_path):
return config_path
return None
def check_config(config, is_local):
if config.get('daemon', None) == 'stop':
# no need to specify configuration for daemon stop
return
if is_local and not config.get('password', None):
logging.error('password not specified')
print_help(is_local)
sys.exit(2)
if not is_local and not config.get('password', None) \
and not config.get('port_password', None) \
and not config.get('manager_address'):
logging.error('password or port_password not specified')
print_help(is_local)
sys.exit(2)
if 'local_port' in config:
config['local_port'] = int(config['local_port'])
if config.get('server_port', None) and type(config['server_port']) != list:
config['server_port'] = int(config['server_port'])
if config.get('local_address', '') in [b'0.0.0.0']:
logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe')
if config.get('server', '') in ['127.0.0.1', 'localhost']:
logging.warn('warning: server set to listen on %s:%s, are you sure?' %
(to_str(config['server']), config['server_port']))
if (config.get('method', '') or '').lower() == 'table':
logging.warn('warning: table is not safe; please use a safer cipher, '
'like AES-256-CFB')
if (config.get('method', '') or '').lower() == 'rc4':
logging.warn('warning: RC4 is not safe; please use a safer cipher, '
'like AES-256-CFB')
if config.get('timeout', 300) < 100:
logging.warn('warning: your timeout %d seems too short' %
int(config.get('timeout')))
if config.get('timeout', 300) > 600:
logging.warn('warning: your timeout %d seems too long' %
int(config.get('timeout')))
if config.get('password') in [b'mypassword']:
logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
'config.json!')
sys.exit(1)
if config.get('user', None) is not None:
if os.name != 'posix':
logging.error('user can be used only on Unix')
sys.exit(1)
encrypt.try_cipher(config['password'], config['method'])
def get_config(is_local):
global verbose
logging.basicConfig(level=logging.INFO,
format='%(levelname)-s: %(message)s')
if is_local:
shortopts = 'hd:s:b:p:k:l:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
'version']
else:
shortopts = 'hd:s:p:k:m:c:t:vq'
longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
'forbidden-ip=', 'user=', 'manager-address=', 'version']
try:
config_path = find_config()
optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
for key, value in optlist:
if key == '-c':
config_path = value
if config_path:
logging.info('loading config from %s' % config_path)
with open(config_path, 'rb') as f:
try:
config = parse_json_in_str(f.read().decode('utf8'))
except ValueError as e:
logging.error('found an error in config.json: %s',
e.message)
sys.exit(1)
else:
config = {}
v_count = 0
for key, value in optlist:
if key == '-p':
config['server_port'] = int(value)
elif key == '-k':
config['password'] = to_bytes(value)
elif key == '-l':
config['local_port'] = int(value)
elif key == '-s':
config['server'] = to_str(value)
elif key == '-m':
config['method'] = to_str(value)
elif key == '-b':
config['local_address'] = to_str(value)
elif key == '-v':
v_count += 1
# '-vv' turns on more verbose mode
config['verbose'] = v_count
elif key == '-t':
config['timeout'] = int(value)
elif key == '--fast-open':
config['fast_open'] = True
elif key == '--workers':
config['workers'] = int(value)
elif key == '--manager-address':
config['manager_address'] = value
elif key == '--user':
config['user'] = to_str(value)
elif key == '--forbidden-ip':
config['forbidden_ip'] = to_str(value).split(',')
elif key in ('-h', '--help'):
if is_local:
print_local_help()
else:
print_server_help()
sys.exit(0)
elif key == '--version':
print_shadowsocks()
sys.exit(0)
elif key == '-d':
config['daemon'] = to_str(value)
elif key == '--pid-file':
config['pid-file'] = to_str(value)
elif key == '--log-file':
config['log-file'] = to_str(value)
elif key == '-q':
v_count -= 1
config['verbose'] = v_count
except getopt.GetoptError as e:
print(e, file=sys.stderr)
print_help(is_local)
sys.exit(2)
if not config:
logging.error('config not specified')
print_help(is_local)
sys.exit(2)
config['password'] = to_bytes(config.get('password', b''))
config['method'] = to_str(config.get('method', 'aes-256-cfb'))
config['port_password'] = config.get('port_password', None)
config['timeout'] = int(config.get('timeout', 300))
config['fast_open'] = config.get('fast_open', False)
config['workers'] = config.get('workers', 1)
config['pid-file'] = config.get('pid-file', '/var/run/shadowsocks.pid')
config['log-file'] = config.get('log-file', '/var/log/shadowsocks.log')
config['verbose'] = config.get('verbose', False)
config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
config['local_port'] = config.get('local_port', 1080)
if is_local:
if config.get('server', None) is None:
logging.error('server addr not specified')
print_local_help()
sys.exit(2)
else:
config['server'] = to_str(config['server'])
else:
config['server'] = to_str(config.get('server', '0.0.0.0'))
try:
config['forbidden_ip'] = \
IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
except Exception as e:
logging.error(e)
sys.exit(2)
config['server_port'] = config.get('server_port', None)
logging.getLogger('').handlers = []
logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
if config['verbose'] >= 2:
level = VERBOSE_LEVEL
elif config['verbose'] == 1:
level = logging.DEBUG
elif config['verbose'] == -1:
level = logging.WARN
elif config['verbose'] <= -2:
level = logging.ERROR
else:
level = logging.INFO
verbose = config['verbose']
logging.basicConfig(level=level,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
check_config(config, is_local)
return config
def print_help(is_local):
if is_local:
print_local_help()
else:
print_server_help()
def print_local_help():
print('''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address
-p SERVER_PORT server port, default: 8388
-b LOCAL_ADDR local binding address, default: 127.0.0.1
-l LOCAL_PORT local port, default: 1080
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def print_server_help():
print('''usage: ssserver [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.
You can supply configurations via either config file or command line arguments.
Proxy options:
-c CONFIG path to config file
-s SERVER_ADDR server address, default: 0.0.0.0
-p SERVER_PORT server port, default: 8388
-k PASSWORD password
-m METHOD encryption method, default: aes-256-cfb
-t TIMEOUT timeout in seconds, default: 300
--fast-open use TCP_FASTOPEN, requires Linux 3.7+
--workers WORKERS number of workers, available on Unix/Linux
  --forbidden-ip IPLIST  comma separated IP list forbidden to connect
--manager-address ADDR optional server manager UDP address, see wiki
General options:
-h, --help show this help message and exit
-d start/stop/restart daemon mode
--pid-file PID_FILE pid file for daemon mode
--log-file LOG_FILE log file for daemon mode
--user USER username to run as
-v, -vv verbose mode
-q, -qq quiet mode, only show warnings/errors
--version show version information
Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
rv = []
for item in data:
if hasattr(item, 'encode'):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.items():
if hasattr(value, 'encode'):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
def parse_json_in_str(data):
# parse json and convert everything from unicode to str
return json.loads(data, object_hook=_decode_dict)
| apache-2.0 |
kinpa200296/python_labs | lab2/tests/MyCollectionsTests/FilteredCollectionTests.py | 1 | 1548 | #!/usr/bin/env python
from mycollections import FilteredCollection
import unittest
__author__ = 'kinpa200296'
def get_sequence_of_size(sequence, size):
    # Take the first `size` items from a (possibly infinite) iterable.
    res = []
    for x in sequence:
        if size <= 0:
            return res
        res.append(x)
        size -= 1
    return res
class TestFilteredCollection(unittest.TestCase):
def test_empty_iterable(self):
with self.assertRaises(ValueError):
FilteredCollection([])
def test_not_iterable(self):
with self.assertRaises(TypeError):
FilteredCollection(None)
def test_cycle(self):
a = FilteredCollection(xrange(5))
self.assertEqual(get_sequence_of_size(a, 10), [i for i in xrange(5)] * 2)
def test_filter(self):
a = FilteredCollection(xrange(10))
def odd(x):
return (x & 1) == 1
def even(x):
return (x & 1) == 0
def divisible_by_three(x):
return x % 3 == 0
def divisible_by_four(x):
return x % 4 == 0
self.assertEqual(get_sequence_of_size(a.filter(even), 10), [0, 2, 4, 6, 8] * 2)
self.assertEqual(get_sequence_of_size(a.filter(odd), 10), [1, 3, 5, 7, 9] * 2)
self.assertEqual(get_sequence_of_size(a.filter(odd).filter(divisible_by_three), 10), [3, 9] * 5)
self.assertEqual(get_sequence_of_size(a.filter(even).filter(divisible_by_four), 9), [0, 4, 8] * 3)
suite = unittest.TestLoader().loadTestsFromTestCase(TestFilteredCollection)
unittest.TextTestRunner(verbosity=2).run(suite)
| mit |
Weicong-Lin/pymo-global | android/pgs4a-0.9.6/python-install/lib/python2.7/encodings/bz2_codec.py | 501 | 2993 | """ Python 'bz2_codec' Codec - bz2 compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Adapted by Raymond Hettinger from zlib_codec.py which was written
by Marc-Andre Lemburg ([email protected]).
"""
import codecs
import bz2 # this codec needs the optional bz2 module !
### Codec APIs
def bz2_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = bz2.compress(input)
return (output, len(input))
def bz2_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = bz2.decompress(input)
return (output, len(input))
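# A minimal round-trip sketch for the two helpers above (illustrative only;
# the exact compressed bytes depend on the underlying bz2 library):
#
#     >>> data = 'hello world'                 # Python 2 str, not unicode
#     >>> compressed, consumed = bz2_encode(data)
#     >>> consumed == len(data)
#     True
#     >>> bz2_decode(compressed) == (data, len(compressed))
#     True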
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return bz2_encode(input, errors)
def decode(self, input, errors='strict'):
return bz2_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.compressobj = bz2.BZ2Compressor()
def encode(self, input, final=False):
if final:
c = self.compressobj.compress(input)
return c + self.compressobj.flush()
else:
return self.compressobj.compress(input)
def reset(self):
self.compressobj = bz2.BZ2Compressor()
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict'):
assert errors == 'strict'
self.errors = errors
self.decompressobj = bz2.BZ2Decompressor()
def decode(self, input, final=False):
try:
return self.decompressobj.decompress(input)
except EOFError:
return ''
def reset(self):
self.decompressobj = bz2.BZ2Decompressor()
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name="bz2",
encode=bz2_encode,
decode=bz2_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| mit |
yewang15215/django | tests/serializers/models/data.py | 9 | 7680 | """
******** Models for test_data.py ***********
The following classes are for testing basic data marshalling, including
NULL values, where allowed.
The basic idea is to have a model for each Django data type.
"""
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from .base import BaseModel
class BinaryData(models.Model):
data = models.BinaryField(null=True)
class BooleanData(models.Model):
data = models.BooleanField(default=False)
class CharData(models.Model):
data = models.CharField(max_length=30, null=True)
class DateData(models.Model):
data = models.DateField(null=True)
class DateTimeData(models.Model):
data = models.DateTimeField(null=True)
class DecimalData(models.Model):
data = models.DecimalField(null=True, decimal_places=3, max_digits=5)
class EmailData(models.Model):
data = models.EmailField(null=True)
class FileData(models.Model):
data = models.FileField(null=True)
class FilePathData(models.Model):
data = models.FilePathField(null=True)
class FloatData(models.Model):
data = models.FloatField(null=True)
class IntegerData(models.Model):
data = models.IntegerField(null=True)
class BigIntegerData(models.Model):
data = models.BigIntegerField(null=True)
# class ImageData(models.Model):
# data = models.ImageField(null=True)
class GenericIPAddressData(models.Model):
data = models.GenericIPAddressField(null=True)
class NullBooleanData(models.Model):
data = models.NullBooleanField(null=True)
class PositiveIntegerData(models.Model):
data = models.PositiveIntegerField(null=True)
class PositiveSmallIntegerData(models.Model):
data = models.PositiveSmallIntegerField(null=True)
class SlugData(models.Model):
data = models.SlugField(null=True)
class SmallData(models.Model):
data = models.SmallIntegerField(null=True)
class TextData(models.Model):
data = models.TextField(null=True)
class TimeData(models.Model):
data = models.TimeField(null=True)
class Tag(models.Model):
"""A tag on an item."""
data = models.SlugField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
ordering = ["data"]
class GenericData(models.Model):
data = models.CharField(max_length=30)
tags = GenericRelation(Tag)
# The following test classes are all for validation
# of related objects; in particular, forward, backward,
# and self references.
class Anchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(max_length=30)
class Meta:
ordering = ('id',)
class UniqueAnchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(unique=True, max_length=30)
class FKData(models.Model):
data = models.ForeignKey(Anchor, models.SET_NULL, null=True)
class M2MData(models.Model):
data = models.ManyToManyField(Anchor)
class O2OData(models.Model):
# One to one field can't be null here, since it is a PK.
data = models.OneToOneField(Anchor, models.CASCADE, primary_key=True)
class FKSelfData(models.Model):
data = models.ForeignKey('self', models.CASCADE, null=True)
class M2MSelfData(models.Model):
data = models.ManyToManyField('self', symmetrical=False)
class FKDataToField(models.Model):
data = models.ForeignKey(UniqueAnchor, models.SET_NULL, null=True, to_field='data')
class FKDataToO2O(models.Model):
data = models.ForeignKey(O2OData, models.SET_NULL, null=True)
class M2MIntermediateData(models.Model):
data = models.ManyToManyField(Anchor, through='Intermediate')
class Intermediate(models.Model):
left = models.ForeignKey(M2MIntermediateData, models.CASCADE)
right = models.ForeignKey(Anchor, models.CASCADE)
extra = models.CharField(max_length=30, blank=True, default="doesn't matter")
# The following test classes are for validating the
# deserialization of objects that use a user-defined
# field as the primary key.
# Some of these data types have been commented out
# because they can't be used as a primary key on one
# or all database backends.
class BooleanPKData(models.Model):
data = models.BooleanField(primary_key=True, default=False)
class CharPKData(models.Model):
data = models.CharField(max_length=30, primary_key=True)
# class DatePKData(models.Model):
# data = models.DateField(primary_key=True)
# class DateTimePKData(models.Model):
# data = models.DateTimeField(primary_key=True)
class DecimalPKData(models.Model):
data = models.DecimalField(primary_key=True, decimal_places=3, max_digits=5)
class EmailPKData(models.Model):
data = models.EmailField(primary_key=True)
# class FilePKData(models.Model):
# data = models.FileField(primary_key=True)
class FilePathPKData(models.Model):
data = models.FilePathField(primary_key=True)
class FloatPKData(models.Model):
data = models.FloatField(primary_key=True)
class IntegerPKData(models.Model):
data = models.IntegerField(primary_key=True)
# class ImagePKData(models.Model):
# data = models.ImageField(primary_key=True)
class GenericIPAddressPKData(models.Model):
data = models.GenericIPAddressField(primary_key=True)
# This is just a Boolean field with null=True, and we can't test a PK value of NULL.
# class NullBooleanPKData(models.Model):
# data = models.NullBooleanField(primary_key=True)
class PositiveIntegerPKData(models.Model):
data = models.PositiveIntegerField(primary_key=True)
class PositiveSmallIntegerPKData(models.Model):
data = models.PositiveSmallIntegerField(primary_key=True)
class SlugPKData(models.Model):
data = models.SlugField(primary_key=True)
class SmallPKData(models.Model):
data = models.SmallIntegerField(primary_key=True)
# class TextPKData(models.Model):
# data = models.TextField(primary_key=True)
# class TimePKData(models.Model):
# data = models.TimeField(primary_key=True)
class UUIDData(models.Model):
data = models.UUIDField(primary_key=True)
class FKToUUID(models.Model):
data = models.ForeignKey(UUIDData, models.CASCADE)
# Tests for handling fields with pre_save functions, or
# models with save functions that modify data
class AutoNowDateTimeData(models.Model):
data = models.DateTimeField(null=True, auto_now=True)
class ModifyingSaveData(models.Model):
data = models.IntegerField(null=True)
def save(self, *args, **kwargs):
"""
A save method that modifies the data in the object.
A user-defined save() method isn't called when objects are deserialized
(#4459).
"""
self.data = 666
super(ModifyingSaveData, self).save(*args, **kwargs)
# Tests for serialization of models using inheritance.
# Regression for #7202, #7350
class AbstractBaseModel(models.Model):
parent_data = models.IntegerField()
class Meta:
abstract = True
class InheritAbstractModel(AbstractBaseModel):
child_data = models.IntegerField()
class InheritBaseModel(BaseModel):
child_data = models.IntegerField()
class ExplicitInheritBaseModel(BaseModel):
parent = models.OneToOneField(BaseModel, models.CASCADE, parent_link=True)
child_data = models.IntegerField()
class LengthModel(models.Model):
data = models.IntegerField()
def __len__(self):
return self.data
| bsd-3-clause |
shaufi/odoo | openerp/report/preprocess.py | 443 | 4700 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import re
rml_parents = ['tr','story','section']
html_parents = ['tr','body','div']
sxw_parents = ['{http://openoffice.org/2000/table}table-row','{http://openoffice.org/2000/office}body','{http://openoffice.org/2000/text}section']
odt_parents = ['{urn:oasis:names:tc:opendocument:xmlns:office:1.0}body','{urn:oasis:names:tc:opendocument:xmlns:table:1.0}table-row','{urn:oasis:names:tc:opendocument:xmlns:text:1.0}section']
class report(object):
def preprocess_rml(self, root_node,type='pdf'):
_regex1 = re.compile("\[\[(.*?)(repeatIn\(.*?\s*,\s*[\'\"].*?[\'\"]\s*(?:,\s*(.*?)\s*)?\s*\))(.*?)\]\]")
_regex11= re.compile("\[\[(.*?)(repeatIn\(.*?\s*\(.*?\s*[\'\"].*?[\'\"]\s*\),[\'\"].*?[\'\"](?:,\s*(.*?)\s*)?\s*\))(.*?)\]\]")
_regex2 = re.compile("\[\[(.*?)(removeParentNode\(\s*(?:['\"](.*?)['\"])\s*\))(.*?)\]\]")
_regex3 = re.compile("\[\[\s*(.*?setTag\(\s*['\"](.*?)['\"]\s*,\s*['\"].*?['\"]\s*(?:,.*?)?\).*?)\s*\]\]")
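        # The four patterns above pick out the report directives embedded in
        # [[ ... ]] expressions: _regex1/_regex11 match repeatIn(...) loops,
        # _regex2 matches removeParentNode('tag') calls and _regex3 matches
        # setTag('from', 'to', ...) calls. The _sub* helpers below rewrite the
        # enclosing XML nodes accordingly.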
for node in root_node:
if node.tag == etree.Comment:
continue
if node.text or node.tail:
def _sub3(txt):
n = node
while n.tag != txt.group(2):
n = n.getparent()
n.set('rml_tag', txt.group(1))
return "[[ '' ]]"
def _sub2(txt):
if txt.group(3):
n = node
try:
while n.tag != txt.group(3):
n = n.getparent()
except Exception:
n = node
else:
n = node.getparent()
n.set('rml_except', txt.group(0)[2:-2])
return txt.group(0)
def _sub1(txt):
if len(txt.group(4)) > 1:
return " "
match = rml_parents
if type == 'odt':
match = odt_parents
if type == 'sxw':
match = sxw_parents
if type =='html2html':
match = html_parents
if txt.group(3):
group_3 = txt.group(3)
if group_3.startswith("'") or group_3.startswith('"'):
group_3 = group_3[1:-1]
match = [group_3]
n = node
while n.tag not in match:
n = n.getparent()
n.set('rml_loop', txt.group(2))
return '[['+txt.group(1)+"''"+txt.group(4)+']]'
t = _regex1.sub(_sub1, node.text or node.tail)
if t == " ":
t = _regex11.sub(_sub1, node.text or node.tail)
t = _regex3.sub(_sub3, t)
node.text = _regex2.sub(_sub2, t)
self.preprocess_rml(node,type)
return root_node
if __name__=='__main__':
node = etree.XML('''<story>
<para>This is a test[[ setTag('para','xpre') ]]</para>
<blockTable>
<tr>
<td><para>Row 1 [[ setTag('tr','tr',{'style':'TrLevel'+str(a['level']), 'paraStyle':('Level'+str(a['level']))}) ]] </para></td>
<td>Row 2 [[ True and removeParentNode('td') ]] </td>
</tr><tr>
<td>Row 1 [[repeatIn(o.order_line,'o')]] </td>
<td>Row 2</td>
</tr>
</blockTable>
    <p>This is a test</p>
</story>''')
a = report()
result = a.preprocess_rml(node)
print etree.tostring(result)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
andrewmegaris/APM_OcPoC_Zynq | Tools/ardupilotwaf/cmake.py | 16 | 13101 | #!/usr/bin/env python
# encoding: utf-8
# Copyright (C) 2015-2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Waf tool for external builds with cmake. This tool defines the feature
'cmake_build', for building through the cmake interface.
You can set CMAKE_MIN_VERSION on the configuration environment before loading
this tool to require a minimum cmake version. Example::
    def configure(cfg):
        cfg.env.CMAKE_MIN_VERSION = '3.5.2'
cfg.load('cmake')
Usage example::
def build(bld):
# cmake configuration
foo = bld.cmake(
name='foo',
            cmake_src='path/to/foosrc', # where the source tree is
cmake_bld='path/to/foobld', # where to generate the build system
cmake_vars=dict(
CMAKE_BUILD_TYPE='Release',
...
),
)
# cmake build for external target 'bar'
bld(
features='cmake_build',
cmake_config='foo', # this build depends on the cmake generation above defined
cmake_target='bar', # what to pass to option --target of cmake
)
# cmake build for target 'baz' (syntactic sugar)
foo.build('baz')
The keys of cmake_vars are sorted so that unnecessary execution is avoided. If
you want to ensure an order in which the variables are passed to cmake, use an
OrderedDict. Example::
def build(bld):
foo_vars = OrderedDict()
foo_vars['CMAKE_BUILD_TYPE'] = 'Release'
foo_vars['FOO'] = 'value_of_foo'
foo_vars['BAR'] = 'value_of_bar'
# cmake configuration
foo = bld.cmake(
cmake_vars=foo_vars,
...
)
There may be cases when you want to establish a dependency between other tasks and
the external build system's products (headers and libraries, for example). In
that case, you can specify the specific files in the option 'target' of your
cmake_build task generator. Example::
def build(bld):
...
# declaring on target only what I'm interested in
foo.build('baz', target='path/to/foobld/include/baz.h')
# myprogram.c includes baz.h, so the dependency is (implicitly)
# established
bld.program(target='myprogram', source='myprogram.c')
# another example
foo.build('another', target='another.txt')
bld(
rule='${CP} ${SRC} ${TGT}',
source=bld.bldnode.find_or_declare('another.txt'),
target='another_copied.txt',
)
You can also establish the dependency directly on a task object::
@feature('myfeature')
def process_myfeature(self):
baz_taskgen = self.bld.get_tgen_by_name('baz')
baz_taskgen.post()
# every cmake_build taskgen stores its task in cmake_build_task
baz_task = baz_taskgen.cmake_build_task
tsk = self.create_task('mytask')
tsk.set_run_after(baz_task)
# tsk is run whenever baz_task changes its outputs, namely,
# path/to/foobld/include/baz.h
tsk.dep_nodes.extend(baz_task.outputs)
If your cmake build creates several files (that may be dependency for several
tasks), you can use the parameter cmake_output_patterns. It receives a pattern
or a list of patterns relative to the cmake build directory. After the build
task is run, the files that match those patterns are set as output of the cmake
build task, so that they get a signature. Example::
def build(bld):
...
foo.build('baz', cmake_output_patterns='include/*.h')
...
"""
from waflib import Context, Node, Task, Utils
from waflib.Configure import conf
from waflib.TaskGen import feature, taskgen_method
from collections import OrderedDict
import os
import re
import sys
class cmake_configure_task(Task.Task):
vars = ['CMAKE_BLD_DIR']
run_str = '${CMAKE} ${CMAKE_SRC_DIR} ${CMAKE_VARS} ${CMAKE_GENERATOR_OPTION}'
color = 'BLUE'
def exec_command(self, cmd, **kw):
kw['stdout'] = sys.stdout
return super(cmake_configure_task, self).exec_command(cmd, **kw)
def uid(self):
if not hasattr(self, 'uid_'):
m = Utils.md5()
def u(s):
m.update(s.encode('utf-8'))
u(self.__class__.__name__)
u(self.env.get_flat('CMAKE_SRC_DIR'))
u(self.env.get_flat('CMAKE_BLD_DIR'))
u(self.env.get_flat('CMAKE_VARS'))
self.uid_ = m.digest()
return self.uid_
def __str__(self):
return self.cmake.name
def keyword(self):
return 'CMake Configure'
# Clean cmake configuration
cmake_configure_task._original_run = cmake_configure_task.run
def _cmake_configure_task_run(self):
cmakecache_path = self.outputs[0].abspath()
if os.path.exists(cmakecache_path):
os.remove(cmakecache_path)
self._original_run()
cmake_configure_task.run = _cmake_configure_task_run
class cmake_build_task(Task.Task):
run_str = '${CMAKE} --build ${CMAKE_BLD_DIR} --target ${CMAKE_TARGET}'
color = 'BLUE'
# the cmake-generated build system is responsible of managing its own
# dependencies
always_run = True
def exec_command(self, cmd, **kw):
kw['stdout'] = sys.stdout
return super(cmake_build_task, self).exec_command(cmd, **kw)
def uid(self):
if not hasattr(self, 'uid_'):
m = Utils.md5()
def u(s):
m.update(s.encode('utf-8'))
u(self.__class__.__name__)
u(self.env.get_flat('CMAKE_BLD_DIR'))
u(self.env.get_flat('CMAKE_TARGET'))
self.uid_ = m.digest()
return self.uid_
def __str__(self):
return '%s %s' % (self.cmake.name, self.cmake_target)
def keyword(self):
return 'CMake Build'
# allow tasks to depend on possible headers or other resources if the user
# declares outputs for the cmake build
cmake_build_task = Task.update_outputs(cmake_build_task)
cmake_build_task.original_post_run = cmake_build_task.post_run
def _cmake_build_task_post_run(self):
self.output_patterns = Utils.to_list(self.output_patterns)
if not self.output_patterns:
return self.original_post_run()
bldnode = self.cmake.bldnode
for node in bldnode.ant_glob(self.output_patterns, remove=False):
self.set_outputs(node)
return self.original_post_run()
cmake_build_task.post_run = _cmake_build_task_post_run
class CMakeConfig(object):
'''
CMake configuration. This object shouldn't be instantiated directly. Use
bld.cmake().
'''
def __init__(self, bld, name, srcnode, bldnode, cmake_vars):
self.bld = bld
self.name = name
self.srcnode = srcnode
self.bldnode = bldnode
self.vars = cmake_vars
self._config_task = None
self.last_build_task = None
def vars_keys(self):
keys = list(self.vars.keys())
if not isinstance(self.vars, OrderedDict):
keys.sort()
return keys
def config_sig(self):
m = Utils.md5()
def u(s):
m.update(s.encode('utf-8'))
u(self.srcnode.abspath())
u(self.bldnode.abspath())
keys = self.vars_keys()
for k in keys:
u(k)
u(self.vars[k])
return m.digest()
def config_task(self, taskgen):
sig = self.config_sig()
if self._config_task and self._config_task.cmake_config_sig == sig:
return self._config_task
self._config_task = taskgen.create_task('cmake_configure_task')
self._config_task.cwd = self.bldnode
self._config_task.cmake = self
self._config_task.cmake_config_sig = sig
env = self._config_task.env
env.CMAKE_BLD_DIR = self.bldnode.abspath()
env.CMAKE_SRC_DIR = self.srcnode.abspath()
keys = self.vars_keys()
env.CMAKE_VARS = ["-D%s='%s'" % (k, self.vars[k]) for k in keys]
self._config_task.set_outputs(
self.bldnode.find_or_declare('CMakeCache.txt'),
)
if self.last_build_task:
self._config_task.set_run_after(self.last_build_task)
self.bldnode.mkdir()
return self._config_task
def build(self, cmake_target, **kw):
return self.bld.cmake_build(self.name, cmake_target, **kw)
_cmake_instances = {}
def get_cmake(name):
if name not in _cmake_instances:
raise Exception('cmake: configuration named "%s" not found' % name)
return _cmake_instances[name]
@conf
def cmake(bld, name, cmake_src=None, cmake_bld=None, cmake_vars={}):
'''
This function has two signatures:
- bld.cmake(name, cmake_src, cmake_bld, cmake_vars):
Create a cmake configuration.
- bld.cmake(name):
Get the cmake configuration with name.
'''
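    # For example (paths are placeholders, mirroring the module docstring):
    #
    #     foo = bld.cmake('foo', cmake_src='path/to/foosrc',
    #                     cmake_vars={'CMAKE_BUILD_TYPE': 'Release'})
    #     bld.cmake('foo')  # later calls with only the name return the same config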
if not cmake_src and not cmake_bld and not cmake_vars:
return get_cmake(name)
if name in _cmake_instances:
bld.fatal('cmake: configuration named "%s" already exists' % name)
if not isinstance(cmake_src, Node.Node):
cmake_src = bld.path.find_dir(cmake_src)
if not cmake_bld:
cmake_bld = cmake_src.get_bld()
elif not isinstance(cmake_bld, Node.Node):
cmake_bld = bld.bldnode.make_node(cmake_bld)
c = CMakeConfig(bld, name, cmake_src, cmake_bld, cmake_vars)
_cmake_instances[name] = c
return c
@feature('cmake_build')
def process_cmake_build(self):
if not hasattr(self, 'cmake_target'):
self.bld.fatal('cmake_build: taskgen is missing cmake_target')
if not hasattr(self, 'cmake_config'):
self.bld.fatal('cmake_build: taskgen is missing cmake_config')
tsk = self.create_cmake_build_task(self.cmake_config, self.cmake_target)
self.cmake_build_task = tsk
outputs = Utils.to_list(getattr(self, 'target', ''))
if not isinstance(outputs, list):
outputs = [outputs]
for o in outputs:
if not isinstance(o, Node.Node):
o = self.path.find_or_declare(o)
tsk.set_outputs(o)
tsk.output_patterns = getattr(self, 'cmake_output_patterns', [])
@conf
def cmake_build(bld, cmake_config, cmake_target, **kw):
kw['cmake_config'] = cmake_config
kw['cmake_target'] = cmake_target
kw['features'] = Utils.to_list(kw.get('features', [])) + ['cmake_build']
if 'name' not in kw:
kw['name'] = '%s_%s' % (cmake_config, cmake_target)
return bld(**kw)
@taskgen_method
def create_cmake_build_task(self, cmake_config, cmake_target):
cmake = get_cmake(cmake_config)
tsk = self.create_task('cmake_build_task')
tsk.cmake = cmake
tsk.cmake_target = cmake_target
tsk.output_patterns = []
tsk.env.CMAKE_BLD_DIR = cmake.bldnode.abspath()
tsk.env.CMAKE_TARGET = cmake_target
self.cmake_config_task = cmake.config_task(self)
tsk.set_run_after(self.cmake_config_task)
if cmake.last_build_task:
tsk.set_run_after(cmake.last_build_task)
cmake.last_build_task = tsk
return tsk
def _check_min_version(cfg):
cfg.start_msg('Checking cmake version')
cmd = cfg.env.get_flat('CMAKE'), '--version'
out = cfg.cmd_and_log(cmd, quiet=Context.BOTH)
m = re.search(r'\d+\.\d+(\.\d+(\.\d+)?)?', out)
if not m:
cfg.end_msg(
'unable to parse version, build is not guaranteed to succeed',
color='YELLOW',
)
else:
version = Utils.num2ver(m.group(0))
minver_str = cfg.env.get_flat('CMAKE_MIN_VERSION')
minver = Utils.num2ver(minver_str)
if version < minver:
cfg.fatal('cmake must be at least at version %s' % minver_str)
cfg.end_msg(m.group(0))
generators = dict(
default=[
(['ninja', 'ninja-build'], 'Ninja'),
(['make'], 'Unix Makefiles'),
],
win32=[
(['ninja', 'ninja-build'], 'Ninja'),
(['nmake'], 'NMake Makefiles'),
],
)
def configure(cfg):
cfg.find_program('cmake')
if cfg.env.CMAKE_MIN_VERSION:
_check_min_version(cfg)
l = generators.get(Utils.unversioned_sys_platform(), generators['default'])
for names, generator in l:
if cfg.find_program(names, mandatory=False):
cfg.env.CMAKE_GENERATOR_OPTION = '-G%s' % generator
break
else:
cfg.fatal("cmake: couldn't find a suitable CMake generator. " +
"The ones supported by this Waf tool for this platform are: %s" % ', '.join(g for _, g in l))
| gpl-3.0 |
aselle/tensorflow | tensorflow/examples/learn/hdf5_classification.py | 75 | 2899 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # Note that we are saving and loading iris data in h5 format as a simple
# demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
BuildingLink/sentry | src/sentry/api/endpoints/organization_index.py | 2 | 7988 | from __future__ import absolute_import
import six
from django.db import IntegrityError, transaction
from django.db.models import Count, Q, Sum
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry import features, options, roles
from sentry.app import ratelimiter
from sentry.api.base import DocSection, Endpoint
from sentry.api.bases.organization import OrganizationPermission
from sentry.api.paginator import DateTimePaginator, OffsetPaginator
from sentry.api.serializers import serialize
from sentry.db.models.query import in_iexact
from sentry.models import (
AuditLogEntryEvent, Organization, OrganizationMember,
OrganizationMemberTeam, OrganizationStatus, ProjectPlatform
)
from sentry.search.utils import tokenize_query
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario('ListYourOrganizations')
def list_your_organizations_scenario(runner):
runner.request(
method='GET',
path='/organizations/'
)
class OrganizationSerializer(serializers.Serializer):
name = serializers.CharField(max_length=64, required=True)
slug = serializers.RegexField(r'^[a-z0-9_\-]+$', max_length=50,
required=False)
defaultTeam = serializers.BooleanField(required=False)
class OrganizationIndexEndpoint(Endpoint):
doc_section = DocSection.ORGANIZATIONS
permission_classes = (OrganizationPermission,)
@attach_scenarios([list_your_organizations_scenario])
def get(self, request):
"""
List your Organizations
```````````````````````
Return a list of organizations available to the authenticated
        session. This is particularly useful for requests with a
        user bound context. For API key based requests this will
only return the organization that belongs to the key.
:qparam bool member: restrict results to organizations which you have
membership
:auth: required
"""
member_only = request.GET.get('member') in ('1', 'true')
queryset = Organization.objects.filter(
status=OrganizationStatus.VISIBLE,
)
if request.auth and not request.user.is_authenticated():
if hasattr(request.auth, 'project'):
queryset = queryset.filter(
id=request.auth.project.organization_id
)
elif request.auth.organization is not None:
queryset = queryset.filter(
id=request.auth.organization.id
)
elif member_only or not request.is_superuser():
queryset = queryset.filter(
id__in=OrganizationMember.objects.filter(
user=request.user,
).values('organization'),
)
query = request.GET.get('query')
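        # A free-form "query" string is tokenized into key:value terms; the
        # recognised keys (slug, email, platform, id) become targeted filters,
        # while bare terms are matched against name, slug and member email.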
if query:
tokens = tokenize_query(query)
for key, value in six.iteritems(tokens):
if key == 'query':
value = ' '.join(value)
queryset = queryset.filter(
Q(name__icontains=value) |
Q(slug__icontains=value) |
Q(members__email__iexact=value)
)
elif key == 'slug':
queryset = queryset.filter(
in_iexact('slug', value)
)
elif key == 'email':
queryset = queryset.filter(
in_iexact('members__email', value)
)
elif key == 'platform':
queryset = queryset.filter(
project__in=ProjectPlatform.objects.filter(
platform__in=value,
).values('project_id')
)
elif key == 'id':
queryset = queryset.filter(id__in=value)
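        # sortBy=members/projects/events orders by the corresponding annotated
        # counts (using an offset paginator); the default is newest first by
        # date_added.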
sort_by = request.GET.get('sortBy')
if sort_by == 'members':
queryset = queryset.annotate(
member_count=Count('member_set'),
)
order_by = '-member_count'
paginator_cls = OffsetPaginator
elif sort_by == 'projects':
queryset = queryset.annotate(
project_count=Count('project'),
)
order_by = '-project_count'
paginator_cls = OffsetPaginator
elif sort_by == 'events':
queryset = queryset.annotate(
event_count=Sum('stats__events_24h'),
).filter(
stats__events_24h__isnull=False,
)
order_by = '-event_count'
paginator_cls = OffsetPaginator
else:
order_by = '-date_added'
paginator_cls = DateTimePaginator
return self.paginate(
request=request,
queryset=queryset,
order_by=order_by,
on_results=lambda x: serialize(x, request.user),
paginator_cls=paginator_cls,
)
# XXX: endpoint useless for end-users as it needs user context.
def post(self, request):
"""
Create a New Organization
`````````````````````````
Create a new organization owned by the request's user. To create
an organization only the name is required.
:param string name: the human readable name for the new organization.
:param string slug: the unique URL slug for this organization. If
this is not provided a slug is automatically
generated based on the name.
:auth: required, user-context-needed
"""
if not request.user.is_authenticated():
return Response({'detail': 'This endpoint requires user info'},
status=401)
if not features.has('organizations:create', actor=request.user):
return Response({
'detail': 'Organizations are not allowed to be created by this user.'
}, status=401)
limit = options.get('api.rate-limit.org-create')
if limit and ratelimiter.is_limited(
u'org-create:{}'.format(request.user.id),
limit=limit, window=3600,
):
return Response({
'detail': 'You are attempting to create too many organizations too quickly.'
}, status=429)
serializer = OrganizationSerializer(data=request.DATA)
if serializer.is_valid():
result = serializer.object
try:
with transaction.atomic():
org = Organization.objects.create(
name=result['name'],
slug=result.get('slug'),
)
except IntegrityError:
return Response(
{'detail': 'An organization with this slug already exists.'},
status=409,
)
om = OrganizationMember.objects.create(
organization=org,
user=request.user,
role=roles.get_top_dog().id,
)
if result.get('defaultTeam'):
team = org.team_set.create(
name=org.name,
)
OrganizationMemberTeam.objects.create(
team=team,
organizationmember=om,
is_active=True
)
self.create_audit_entry(
request=request,
organization=org,
target_object=org.id,
event=AuditLogEntryEvent.ORG_ADD,
data=org.get_audit_log_data(),
)
return Response(serialize(org, request.user), status=201)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| bsd-3-clause |
JacobJacob/volatility | volatility/plugins/linux/ifconfig.py | 44 | 3460 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.plugins.linux.common as linux_common
import volatility.debug as debug
import volatility.obj as obj
class linux_ifconfig(linux_common.AbstractLinuxCommand):
"""Gathers active interfaces"""
def _get_devs_base(self):
net_device_ptr = obj.Object("Pointer", offset = self.addr_space.profile.get_symbol("dev_base"), vm = self.addr_space)
net_device = net_device_ptr.dereference_as("net_device")
for net_dev in linux_common.walk_internal_list("net_device", "next", net_device):
yield net_dev
def _get_devs_namespace(self):
nslist_addr = self.addr_space.profile.get_symbol("net_namespace_list")
nethead = obj.Object("list_head", offset = nslist_addr, vm = self.addr_space)
# walk each network namespace
# http://www.linuxquestions.org/questions/linux-kernel-70/accessing-ip-address-from-kernel-ver-2-6-31-13-module-815578/
for net in nethead.list_of_type("net", "list"):
# walk each device in the current namespace
for net_dev in net.dev_base_head.list_of_type("net_device", "dev_list"):
yield net_dev
def _gather_net_dev_info(self, net_dev):
mac_addr = net_dev.mac_addr
promisc = str(net_dev.promisc)
in_dev = obj.Object("in_device", offset = net_dev.ip_ptr, vm = self.addr_space)
for dev in in_dev.devices():
ip_addr = dev.ifa_address.cast('IpAddress')
name = dev.ifa_label
yield (name, ip_addr, mac_addr, promisc)
def calculate(self):
linux_common.set_plugin_members(self)
# newer kernels
if self.addr_space.profile.get_symbol("net_namespace_list"):
for net_dev in self._get_devs_namespace():
for ip_addr_info in self._gather_net_dev_info(net_dev):
yield ip_addr_info
elif self.addr_space.profile.get_symbol("dev_base"):
for net_dev in self._get_devs_base():
for ip_addr_info in self._gather_net_dev_info(net_dev):
yield ip_addr_info
else:
debug.error("Unable to determine ifconfig information")
def render_text(self, outfd, data):
self.table_header(outfd, [("Interface", "16"),
("IP Address", "20"),
("MAC Address", "18"),
("Promiscous Mode", "5")])
for (name, ip_addr, mac_addr, promisc) in data:
self.table_row(outfd, name, ip_addr, mac_addr, promisc)
| gpl-2.0 |
kartoza/watchkeeper | django_project/event_mapper/migrations/0015_auto_20150511_0858.py | 5 | 2161 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('event_mapper', '0014_auto_20150508_1133'),
]
operations = [
migrations.CreateModel(
name='Movement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('region', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
('notes', models.TextField(help_text=b'Notes for the movement.', null=True, verbose_name=b'Notes', blank=True)),
('notified_immediately', models.BooleanField(default=False, help_text=b'If True, there will be immediate notification.', verbose_name=b'Notified Immediately')),
('notification_sent', models.BooleanField(default=False, help_text=b'If True, a notification has been sent for this event.', verbose_name=b'Notification Sent')),
('last_updater', models.ForeignKey(verbose_name=b'Last Updater', to=settings.AUTH_USER_MODEL, help_text=b'The last user who update the movement.')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('label', models.CharField(help_text=b'The name of the rating.', max_length=100, verbose_name=b'Rating label')),
('level', models.IntegerField(help_text=b'The level of the rating.', verbose_name=b'Level')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='movement',
name='rating',
field=models.ForeignKey(verbose_name=b'Rating', to='event_mapper.Rating', help_text=b'The rating of the movement.'),
preserve_default=True,
),
]
| bsd-2-clause |
chenyyx/scikit-learn-doc-zh | examples/zh/linear_model/plot_sparse_logistic_regression_20newsgroups.py | 56 | 4172 | """
=====================================================
Multiclass sparse logistic regression on 20newsgroups
=====================================================
Comparison of multinomial logistic L1 vs one-versus-rest L1 logistic regression
to classify documents from the 20 newsgroups dataset. Multinomial logistic
regression yields more accurate results and is faster to train on the larger
scale dataset.
Here we use the l1 sparsity that trims the weights of not informative
features to zero. This is good if the goal is to extract the strongly
discriminative vocabulary of each class. If the goal is to get the best
predictive accuracy, it is better to use the non sparsity-inducing l2 penalty
instead.
A more traditional (and possibly better) way to predict on a sparse subset of
input features would be to use univariate feature selection followed by a
traditional (l2-penalised) logistic regression model.
"""
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
print(__doc__)
# Author: Arthur Mensch
t0 = time.clock()
# We use SAGA solver
solver = 'saga'
# Turn down for faster run time
n_samples = 10000
# Fetch the vectorized 20 newsgroups dataset
dataset = fetch_20newsgroups_vectorized('all')
X = dataset.data
y = dataset.target
X = X[:n_samples]
y = y[:n_samples]
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42,
stratify=y,
test_size=0.1)
train_samples, n_features = X_train.shape
n_classes = np.unique(y).shape[0]
print('Dataset 20newsgroup, train_samples=%i, n_features=%i, n_classes=%i'
% (train_samples, n_features, n_classes))
models = {'ovr': {'name': 'One versus Rest', 'iters': [1, 3]},
'multinomial': {'name': 'Multinomial', 'iters': [1, 3, 7]}}
for model in models:
# Add initial chance-level values for plotting purpose
accuracies = [1 / n_classes]
times = [0]
densities = [1]
model_params = models[model]
# Small number of epochs for fast runtime
for this_max_iter in model_params['iters']:
print('[model=%s, solver=%s] Number of epochs: %s' %
(model_params['name'], solver, this_max_iter))
lr = LogisticRegression(solver=solver,
multi_class=model,
C=1,
penalty='l1',
fit_intercept=True,
max_iter=this_max_iter,
random_state=42,
)
t1 = time.clock()
lr.fit(X_train, y_train)
train_time = time.clock() - t1
y_pred = lr.predict(X_test)
accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
density = np.mean(lr.coef_ != 0, axis=1) * 100
accuracies.append(accuracy)
densities.append(density)
times.append(train_time)
models[model]['times'] = times
models[model]['densities'] = densities
models[model]['accuracies'] = accuracies
print('Test accuracy for model %s: %.4f' % (model, accuracies[-1]))
print('%% non-zero coefficients for model %s, '
'per class:\n %s' % (model, densities[-1]))
print('Run time (%i epochs) for model %s:'
'%.2f' % (model_params['iters'][-1], model, times[-1]))
fig = plt.figure()
ax = fig.add_subplot(111)
for model in models:
name = models[model]['name']
times = models[model]['times']
accuracies = models[model]['accuracies']
ax.plot(times, accuracies, marker='o',
label='Model: %s' % name)
ax.set_xlabel('Train time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
fig.suptitle('Multinomial vs One-vs-Rest Logistic L1\n'
'Dataset %s' % '20newsgroups')
fig.tight_layout()
fig.subplots_adjust(top=0.85)
run_time = time.clock() - t0
print('Example run in %.3f s' % run_time)
plt.show()
| gpl-3.0 |
swilly22/redis-module-graph | tests/flow/base.py | 2 | 3552 | import os
import warnings
from rmtest import ModuleTestCase
class FlowTestsBase(ModuleTestCase(os.path.dirname(os.path.abspath(__file__)) + '/../../src/redisgraph.so')):
def _skip_header_row(self, resultset):
self.assertGreaterEqual(len(resultset), 1)
return resultset[1:]
def _assert_number_of_results(self, actual_resultset, expected_resultset):
self.assertEqual(len(actual_resultset), len(expected_resultset))
# Make sure resultset is as expected, but we don't care for the order
# of the columns.
def _assert_results_ignore_col_order(self, actual_result, query_info):
actual_result_set = self._skip_header_row(actual_result.result_set)
# assert num results
self._assert_number_of_results(actual_result_set, query_info.expected_result)
# As we don't care for the order of the columns
# replace row array representation with set.
for i in range(len(query_info.expected_result)):
query_info.expected_result[i] = set(query_info.expected_result[i])
actual_result_set[i] = set(actual_result_set[i])
# assert actual values vs expected values
for res in query_info.expected_result:
self.assertTrue(res in actual_result_set,
'The item %s is NOT in the actual result\n'
'The actual result: %s\nThe expected result: %s' %
(str(res), str(actual_result_set), str(query_info.expected_result)))
def _assert_only_expected_resuls_are_in_actual_results(self,
actual_result,
query_info):
actual_result_set = self._skip_header_row(actual_result.result_set)
# assert num results
self._assert_number_of_results(actual_result_set, query_info.expected_result)
# assert actual values vs expected values
for res in query_info.expected_result:
self.assertTrue(res in actual_result_set,
'The item %s is NOT in the actual result\n'
'The actual result: %s\nThe expected result: %s' %
(str(res), str(actual_result_set), str(query_info.expected_result)))
def _assert_actual_results_contained_in_expected_results(self,
actual_result,
query_info,
num_contained_results):
actual_result_set = self._skip_header_row(actual_result.result_set)
# assert num results
self.assertEqual(len(actual_result_set), num_contained_results)
# assert actual values vs expected values
expected_result = query_info.expected_result
count = len([res for res in expected_result if res in actual_result_set])
# assert number of different results is as expected
self.assertEqual(count,
num_contained_results,
'The actual result is: %s\nThe expected result is: %s' %
(str(actual_result_set), str(query_info.expected_result)))
def _assert_run_time(self, actual_result, query_info):
if actual_result.run_time_ms > query_info.max_run_time_ms:
warnings.warn('Query \"%s\" execution took too long' % query_info.description,
Warning)
| agpl-3.0 |
timohtey/mediadrop_copy | mediacore_env/Lib/site-packages/setuptools/tests/test_packageindex.py | 377 | 7625 | """Package Index Tests
"""
import sys
import os
import unittest
import pkg_resources
from setuptools.compat import urllib2, httplib, HTTPError, unicode, pathname2url
import distutils.errors
import setuptools.package_index
from setuptools.tests.server import IndexServer
class TestPackageIndex(unittest.TestCase):
def test_bad_url_bad_port(self):
index = setuptools.package_index.PackageIndex()
url = 'http://127.0.0.1:0/nonesuch/test_package_index'
try:
v = index.open_url(url)
except Exception:
v = sys.exc_info()[1]
self.assertTrue(url in str(v))
else:
self.assertTrue(isinstance(v, HTTPError))
def test_bad_url_typo(self):
# issue 16
# easy_install inquant.contentmirror.plone breaks because of a typo
# in its home URL
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
url = 'url:%20https://svn.plone.org/svn/collective/inquant.contentmirror.plone/trunk'
try:
v = index.open_url(url)
except Exception:
v = sys.exc_info()[1]
self.assertTrue(url in str(v))
else:
self.assertTrue(isinstance(v, HTTPError))
def test_bad_url_bad_status_line(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
def _urlopen(*args):
raise httplib.BadStatusLine('line')
index.opener = _urlopen
url = 'http://example.com'
try:
v = index.open_url(url)
except Exception:
v = sys.exc_info()[1]
self.assertTrue('line' in str(v))
else:
raise AssertionError('Should have raise here!')
def test_bad_url_double_scheme(self):
"""
A bad URL with a double scheme should raise a DistutilsError.
"""
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
# issue 20
url = 'http://http://svn.pythonpaste.org/Paste/wphp/trunk'
try:
index.open_url(url)
except distutils.errors.DistutilsError:
error = sys.exc_info()[1]
msg = unicode(error)
assert 'nonnumeric port' in msg or 'getaddrinfo failed' in msg or 'Name or service not known' in msg
return
raise RuntimeError("Did not raise")
def test_bad_url_screwy_href(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
# issue #160
if sys.version_info[0] == 2 and sys.version_info[1] == 7:
# this should not fail
url = 'http://example.com'
page = ('<a href="http://www.famfamfam.com]('
'http://www.famfamfam.com/">')
index.process_index(url, page)
def test_url_ok(self):
index = setuptools.package_index.PackageIndex(
hosts=('www.example.com',)
)
url = 'file:///tmp/test_package_index'
self.assertTrue(index.url_ok(url, True))
def test_links_priority(self):
"""
Download links from the pypi simple index should be used before
external download links.
https://bitbucket.org/tarek/distribute/issue/163
        Use case:
        - someone uploads a package on pypi, an md5 is generated
- someone manually copies this link (with the md5 in the url) onto an
external page accessible from the package page.
- someone reuploads the package (with a different md5)
- while easy_installing, an MD5 error occurs because the external link
is used
-> Setuptools should use the link from pypi, not the external one.
"""
if sys.platform.startswith('java'):
# Skip this test on jython because binding to :0 fails
return
# start an index server
server = IndexServer()
server.start()
index_url = server.base_url() + 'test_links_priority/simple/'
# scan a test index
pi = setuptools.package_index.PackageIndex(index_url)
requirement = pkg_resources.Requirement.parse('foobar')
pi.find_packages(requirement)
server.stop()
# the distribution has been found
self.assertTrue('foobar' in pi)
# we have only one link, because links are compared without md5
self.assertTrue(len(pi['foobar'])==1)
# the link should be from the index
self.assertTrue('correct_md5' in pi['foobar'][0].location)
def test_parse_bdist_wininst(self):
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win32-py2.4.exe'), ('reportlab-2.5', '2.4', 'win32'))
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win32.exe'), ('reportlab-2.5', None, 'win32'))
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win-amd64-py2.7.exe'), ('reportlab-2.5', '2.7', 'win-amd64'))
self.assertEqual(setuptools.package_index.parse_bdist_wininst(
'reportlab-2.5.win-amd64.exe'), ('reportlab-2.5', None, 'win-amd64'))
def test__vcs_split_rev_from_url(self):
"""
Test the basic usage of _vcs_split_rev_from_url
"""
vsrfu = setuptools.package_index.PackageIndex._vcs_split_rev_from_url
url, rev = vsrfu('https://example.com/bar@2995')
self.assertEqual(url, 'https://example.com/bar')
self.assertEqual(rev, '2995')
def test_local_index(self):
"""
local_open should be able to read an index from the file system.
"""
f = open('index.html', 'w')
f.write('<div>content</div>')
f.close()
try:
url = 'file:' + pathname2url(os.getcwd()) + '/'
res = setuptools.package_index.local_open(url)
finally:
os.remove('index.html')
assert 'content' in res.read()
class TestContentCheckers(unittest.TestCase):
def test_md5(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
checker.feed('You should probably not be using MD5'.encode('ascii'))
self.assertEqual(checker.hash.hexdigest(),
'f12895fdffbd45007040d2e44df98478')
self.assertTrue(checker.is_valid())
def test_other_fragment(self):
"Content checks should succeed silently if no hash is present"
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#something%20completely%20different')
checker.feed('anything'.encode('ascii'))
self.assertTrue(checker.is_valid())
def test_blank_md5(self):
"Content checks should succeed if a hash is empty"
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=')
checker.feed('anything'.encode('ascii'))
self.assertTrue(checker.is_valid())
def test_get_hash_name_md5(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
self.assertEqual(checker.hash_name, 'md5')
def test_report(self):
checker = setuptools.package_index.HashChecker.from_url(
'http://foo/bar#md5=f12895fdffbd45007040d2e44df98478')
rep = checker.report(lambda x: x, 'My message about %s')
self.assertEqual(rep, 'My message about md5')
| gpl-3.0 |
akash1808/nova_test_latest | nova/tests/unit/objects/test_instance_info_cache.py | 40 | 5861 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_utils import timeutils
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
from nova.network import model as network_model
from nova.objects import instance_info_cache
from nova.tests.unit.objects import test_objects
fake_info_cache = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'instance_uuid': 'fake-uuid',
'network_info': '[]',
}
class _TestInstanceInfoCacheObject(object):
def test_get_by_instance_uuid(self):
nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
self.mox.StubOutWithMock(db, 'instance_info_cache_get')
db.instance_info_cache_get(self.context, 'fake-uuid').AndReturn(
dict(fake_info_cache, network_info=nwinfo.json()))
self.mox.ReplayAll()
obj = instance_info_cache.InstanceInfoCache.get_by_instance_uuid(
self.context, 'fake-uuid')
self.assertEqual(obj.instance_uuid, 'fake-uuid')
self.assertEqual(obj.network_info, nwinfo)
def test_get_by_instance_uuid_no_entries(self):
self.mox.StubOutWithMock(db, 'instance_info_cache_get')
db.instance_info_cache_get(self.context, 'fake-uuid').AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(
exception.InstanceInfoCacheNotFound,
instance_info_cache.InstanceInfoCache.get_by_instance_uuid,
self.context, 'fake-uuid')
def test_new(self):
obj = instance_info_cache.InstanceInfoCache.new(self.context,
'fake-uuid')
self.assertEqual(set(['instance_uuid', 'network_info']),
obj.obj_what_changed())
self.assertEqual('fake-uuid', obj.instance_uuid)
self.assertIsNone(obj.network_info)
def _save_helper(self, cell_type, update_cells):
obj = instance_info_cache.InstanceInfoCache()
cells_api = cells_rpcapi.CellsAPI()
self.mox.StubOutWithMock(db, 'instance_info_cache_update')
self.mox.StubOutWithMock(cells_opts, 'get_cell_type')
self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
use_mock_anything=True)
self.mox.StubOutWithMock(cells_api,
'instance_info_cache_update_at_top')
nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
new_info_cache = fake_info_cache.copy()
new_info_cache['network_info'] = nwinfo.json()
db.instance_info_cache_update(
self.context, 'fake-uuid',
{'network_info': nwinfo.json()}).AndReturn(new_info_cache)
if update_cells:
cells_opts.get_cell_type().AndReturn(cell_type)
if cell_type == 'compute':
cells_rpcapi.CellsAPI().AndReturn(cells_api)
cells_api.instance_info_cache_update_at_top(
self.context, 'foo')
self.mox.ReplayAll()
obj._context = self.context
obj.instance_uuid = 'fake-uuid'
obj.network_info = nwinfo
obj.save(update_cells=update_cells)
def test_save_with_update_cells_and_compute_cell(self):
self._save_helper('compute', True)
def test_save_with_update_cells_and_non_compute_cell(self):
self._save_helper(None, True)
def test_save_without_update_cells(self):
self._save_helper(None, False)
@mock.patch.object(db, 'instance_info_cache_update')
def test_save_updates_self(self, mock_update):
fake_updated_at = datetime.datetime(2015, 1, 1)
nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
nwinfo_json = nwinfo.json()
new_info_cache = fake_info_cache.copy()
new_info_cache['id'] = 1
new_info_cache['updated_at'] = fake_updated_at
new_info_cache['network_info'] = nwinfo_json
mock_update.return_value = new_info_cache
obj = instance_info_cache.InstanceInfoCache(context=self.context)
obj.instance_uuid = 'fake-uuid'
obj.network_info = nwinfo_json
obj.save()
mock_update.assert_called_once_with(self.context, 'fake-uuid',
{'network_info': nwinfo_json})
self.assertEqual(timeutils.normalize_time(fake_updated_at),
timeutils.normalize_time(obj.updated_at))
def test_refresh(self):
obj = instance_info_cache.InstanceInfoCache.new(self.context,
'fake-uuid1')
self.mox.StubOutWithMock(db, 'instance_info_cache_get')
db.instance_info_cache_get(self.context, 'fake-uuid1').AndReturn(
fake_info_cache)
self.mox.ReplayAll()
obj.refresh()
self.assertEqual(fake_info_cache['instance_uuid'], obj.instance_uuid)
class TestInstanceInfoCacheObject(test_objects._LocalTest,
_TestInstanceInfoCacheObject):
pass
class TestInstanceInfoCacheObjectRemote(test_objects._RemoteTest,
_TestInstanceInfoCacheObject):
pass
| apache-2.0 |
javierag/samba | source4/heimdal/lib/wind/rfc3454.py | 88 | 2296 | #!/usr/local/bin/python
# -*- coding: iso-8859-1 -*-
# $Id$
# Copyright (c) 2004 Kungliga Tekniska Högskolan
# (Royal Institute of Technology, Stockholm, Sweden).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import re
import string
def read(filename):
"""return a dict of tables from rfc3454"""
f = open(filename, 'r')
inTable = False
ret = {}
while True:
l = f.readline()
if not l:
break
if inTable:
m = re.search('^ *----- End Table ([A-Z0-9\.]+) ----- *$', l)
if m:
ret[m.group(1)] = t
inTable = False
else:
t.append(l)
if re.search('^ *----- Start Table ([A-Z0-9\.]+) ----- *$', l):
inTable = True
t = []
f.close()
return ret
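# Minimal usage sketch (assumes a local copy of the RFC text; the file name and
# the table name are placeholders):
#
#   tables = read('rfc3454.txt')
#   print(tables['A.1'][:5])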
| gpl-3.0 |
christoph-buente/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/config/queues.py | 116 | 1805 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Eventually the list of queues may be stored in the data store.
all_queue_names = [
"commit-queue",
"style-queue",
"qt-ews",
"qt-wk2-ews",
"gtk-ews",
"gtk-wk2-ews",
"mac-ews",
"mac-wk2-ews",
"win-ews",
"efl-ews",
"efl-wk2-ews",
]
| bsd-3-clause |
jlnaudin/x-drone | MissionPlanner-master/Lib/lib2to3/fixes/fix_raise.py | 327 | 2934 | """Fixer for 'raise E, V, T'
raise -> raise
raise E -> raise E
raise E, V -> raise E(V)
raise E, V, T -> raise E(V).with_traceback(T)
raise E, None, T -> raise E.with_traceback(T)
raise (((E, E'), E''), E'''), V -> raise E(V)
raise "foo", V, T -> warns about string exceptions
CAVEATS:
1) "raise E, V" will be incorrectly translated if V is an exception
instance. The correct Python 3 idiom is
raise E from V
but since we can't detect instance-hood by syntax alone and since
any client code would have to be changed as well, we don't automate
this.
"""
# Author: Collin Winter
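# Illustrative sketch (not part of lib2to3 itself): the fixer can be exercised
# through RefactoringTool; the input string below is an arbitrary example.
#
#   from lib2to3.refactor import RefactoringTool
#   rt = RefactoringTool(["lib2to3.fixes.fix_raise"])
#   print(rt.refactor_string(u"raise E, V, T\n", "<example>"))
#   # -> raise E(V).with_traceback(T)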
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Attr, ArgList, is_tuple
class FixRaise(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
"""
def transform(self, node, results):
syms = self.syms
exc = results["exc"].clone()
if exc.type == token.STRING:
msg = "Python 3 does not support string exceptions"
self.cannot_convert(node, msg)
return
# Python 2 supports
# raise ((((E1, E2), E3), E4), E5), V
# as a synonym for
# raise E1, V
# Since Python 3 will not support this, we recurse down any tuple
# literals, always taking the first element.
if is_tuple(exc):
while is_tuple(exc):
# exc.children[1:-1] is the unparenthesized tuple
# exc.children[1].children[0] is the first element of the tuple
exc = exc.children[1].children[0].clone()
exc.prefix = u" "
if "val" not in results:
# One-argument raise
new = pytree.Node(syms.raise_stmt, [Name(u"raise"), exc])
new.prefix = node.prefix
return new
val = results["val"].clone()
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
val.prefix = u""
args = [val]
if "tb" in results:
tb = results["tb"].clone()
tb.prefix = u""
e = exc
# If there's a traceback and None is passed as the value, then don't
# add a call, since the user probably just wants to add a
# traceback. See issue #9661.
if val.type != token.NAME or val.value != u"None":
e = Call(exc, args)
with_tb = Attr(e, Name(u'with_traceback')) + [ArgList([tb])]
new = pytree.Node(syms.simple_stmt, [Name(u"raise")] + with_tb)
new.prefix = node.prefix
return new
else:
return pytree.Node(syms.raise_stmt,
[Name(u"raise"), Call(exc, args)],
prefix=node.prefix)
| gpl-3.0 |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/tutorials/plot_introduction.py | 9 | 13464 | """
.. _intro_tutorial:
Basic MEG and EEG data processing
=================================
MNE-Python reimplements most of MNE-C's (the original MNE command line utils)
functionality and offers transparent scripting.
On top of that it extends MNE-C's functionality considerably (customize events,
compute contrasts, group statistics, time-frequency analysis, EEG-sensor space
analyses, etc.). It uses the same files as standard MNE unix commands:
no need to convert your files to a new system or database.
What you can do with MNE Python
-------------------------------
- **Raw data visualization** to visualize recordings, can also use
*mne_browse_raw* for extended functionality (see :ref:`ch_browse`)
- **Epoching**: Define epochs, baseline correction, handle conditions etc.
- **Averaging** to get Evoked data
  - **Compute SSP projectors** to remove ECG and EOG artifacts
- **Compute ICA** to remove artifacts or select latent sources.
- **Boundary Element Modeling**: single and three-layer BEM model
creation and solution computation.
- **Forward modeling**: BEM computation and mesh creation
(see :ref:`ch_forward`)
- **Linear inverse solvers** (dSPM, sLORETA, MNE, LCMV, DICS)
- **Sparse inverse solvers** (L1/L2 mixed norm MxNE, Gamma Map,
Time-Frequency MxNE)
- **Connectivity estimation** in sensor and source space
- **Visualization of sensor and source space data**
- **Time-frequency** analysis with Morlet wavelets (induced power,
intertrial coherence, phase lock value) also in the source space
- **Spectrum estimation** using multi-taper method
- **Mixed Source Models** combining cortical and subcortical structures
- **Dipole Fitting**
  - **Decoding** multivariate pattern analysis of M/EEG topographies
- **Compute contrasts** between conditions, between sensors, across
subjects etc.
- **Non-parametric statistics** in time, space and frequency
(including cluster-level)
- **Scripting** (batch and parallel computing)
What you're not supposed to do with MNE Python
----------------------------------------------
- **Brain and head surface segmentation** for use with BEM models -- use Freesurfer.
.. note:: The package is based on the FIF file format from Neuromag. It can read and
convert CTF, BTI/4D, KIT and various EEG formats to FIF.
Installation of the required materials
---------------------------------------
See :ref:`getting_started` with Python.
.. note:: The expected location for the MNE-sample data is
my-path-to/mne-python/examples. If you downloaded data and an example asks
    you whether to download it again, make sure the data reside in the
    examples directory and that you run the script from its own directory.
From IPython e.g. say::
cd examples/preprocessing
%run plot_find_ecg_artifacts.py
From raw data to evoked data
----------------------------
.. _ipython: http://ipython.scipy.org/
Now, launch `ipython`_ (Advanced Python shell) using the QT backend, which is
best supported across systems::
$ ipython --pylab -qt
First, load the mne package:
"""
import mne
##############################################################################
# If you'd like to turn information status messages off:
mne.set_log_level('WARNING')
##############################################################################
# But it's generally a good idea to leave them on:
mne.set_log_level('INFO')
##############################################################################
# You can set the default level by setting the environment variable
# "MNE_LOGGING_LEVEL", or by having mne-python write preferences to a file:
mne.set_config('MNE_LOGGING_LEVEL','WARNING')
##############################################################################
# Note that the location of the mne-python preferences file (for easier manual
# editing) can be found using:
mne.get_config_path()
##############################################################################
# By default logging messages print to the console, but look at
# mne.set_log_file() to save output to a file.
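# For example, subsequent log messages could be redirected to a file of your
# choosing (the file name here is arbitrary)::
#
#     mne.set_log_file('mne_tutorial.log')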
#
# Access raw data
# ^^^^^^^^^^^^^^^
from mne.datasets import sample
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
print(raw_fname)
##############################################################################
# .. note:: The MNE sample dataset should be downloaded automatically but be
# patient (approx. 2GB)
#
# Read data from file:
raw = mne.io.Raw(raw_fname)
print(raw)
print(raw.info)
##############################################################################
# Look at the channels in raw:
print(raw.ch_names)
##############################################################################
# Read and plot a segment of raw data
start, stop = raw.time_as_index([100, 115]) # 100 s to 115 s data segment
data, times = raw[:, start:stop]
print(data.shape)
print(times.shape)
data, times = raw[2:20:3, start:stop] # access underlying data
raw.plot()
##############################################################################
# Save a segment of 150s of raw data (MEG only):
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True,
exclude='bads')
raw.save('sample_audvis_meg_raw.fif', tmin=0, tmax=150, picks=picks,
overwrite=True)
##############################################################################
# Define and read epochs
# ^^^^^^^^^^^^^^^^^^^^^^
#
# First extract events:
events = mne.find_events(raw, stim_channel='STI 014')
print(events[:5])
##############################################################################
# Note that, by default, we use stim_channel='STI 014'. If you have a different
# system (e.g., a newer system that uses channel 'STI101' by default), you can
# use the following to set the default stim channel to use for finding events:
mne.set_config('MNE_STIM_CHANNEL', 'STI101')
##############################################################################
# Events are stored as 2D numpy array where the first column is the time
# instant and the last one is the event number. It is therefore easy to
# manipulate.
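# For instance, one could count the left-auditory events (id 1 in this dataset)
# or convert the event samples to seconds; this is only an illustration and is
# not needed for the rest of the tutorial::
#
#     n_aud_l = (events[:, 2] == 1).sum()
#     event_times = events[:, 0] / raw.info['sfreq']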
#
# Define epochs parameters:
event_id = dict(aud_l=1, aud_r=2) # event trigger and conditions
tmin = -0.2 # start of each epoch (200ms before the trigger)
tmax = 0.5 # end of each epoch (500ms after the trigger)
##############################################################################
# Exclude some channels (original bads + 2 more):
raw.info['bads'] += ['MEG 2443', 'EEG 053']
##############################################################################
# The variable raw.info['bads'] is just a python list.
#
# Pick the good channels, excluding raw.info['bads']:
picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True, stim=False,
exclude='bads')
##############################################################################
# Alternatively one can restrict to magnetometers or gradiometers with:
mag_picks = mne.pick_types(raw.info, meg='mag', eog=True, exclude='bads')
grad_picks = mne.pick_types(raw.info, meg='grad', eog=True, exclude='bads')
##############################################################################
# Define the baseline period:
baseline = (None, 0) # means from the first instant to t = 0
##############################################################################
# Define peak-to-peak rejection parameters for gradiometers, magnetometers and EOG:
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
##############################################################################
# Read epochs:
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=baseline, preload=False, reject=reject)
print(epochs)
##############################################################################
# Get single epochs for one condition:
epochs_data = epochs['aud_l'].get_data()
print(epochs_data.shape)
##############################################################################
# epochs_data is a 3D array of dimension (55 epochs, 365 channels, 106 time
# instants).
#
# Scipy supports read and write of matlab files. You can save your single
# trials with:
from scipy import io
io.savemat('epochs_data.mat', dict(epochs_data=epochs_data), oned_as='row')
##############################################################################
# or if you want to keep all the information about the data you can save your
# epochs in a fif file:
epochs.save('sample-epo.fif')
##############################################################################
# and read them later with:
saved_epochs = mne.read_epochs('sample-epo.fif')
##############################################################################
# Compute evoked responses for auditory responses by averaging and plot it:
evoked = epochs['aud_l'].average()
print(evoked)
evoked.plot()
##############################################################################
# .. topic:: Exercise
#
# 1. Extract the max value of each epoch
max_in_each_epoch = [e.max() for e in epochs['aud_l']] # doctest:+ELLIPSIS
print(max_in_each_epoch[:4]) # doctest:+ELLIPSIS
##############################################################################
# It is also possible to read evoked data stored in a fif file:
evoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
evoked1 = mne.read_evokeds(
evoked_fname, condition='Left Auditory', baseline=(None, 0), proj=True)
##############################################################################
# Or another one stored in the same file:
evoked2 = mne.read_evokeds(
evoked_fname, condition='Right Auditory', baseline=(None, 0), proj=True)
##############################################################################
# Compute a contrast:
contrast = evoked1 - evoked2
print(contrast)
##############################################################################
# Time-Frequency: Induced power and inter trial coherence
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Define parameters:
import numpy as np
n_cycles = 2 # number of cycles in Morlet wavelet
freqs = np.arange(7, 30, 3) # frequencies of interest
##############################################################################
# Compute induced power and phase-locking values and plot gradiometers:
from mne.time_frequency import tfr_morlet
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
return_itc=True, decim=3, n_jobs=1)
# power.plot()
##############################################################################
# Inverse modeling: MNE and dSPM on evoked and raw data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Import the required functions:
from mne.minimum_norm import apply_inverse, read_inverse_operator
##############################################################################
# Read the inverse operator:
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
inverse_operator = read_inverse_operator(fname_inv)
##############################################################################
# Define the inverse parameters:
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM"
##############################################################################
# Compute the inverse solution:
stc = apply_inverse(evoked, inverse_operator, lambda2, method)
##############################################################################
# Save the source time courses to disk:
stc.save('mne_dSPM_inverse')
##############################################################################
# Now, let's compute dSPM on a raw file within a label:
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
label = mne.read_label(fname_label)
##############################################################################
# Compute inverse solution during the first 15s:
from mne.minimum_norm import apply_inverse_raw
start, stop = raw.time_as_index([0, 15]) # read the first 15s of data
stc = apply_inverse_raw(raw, inverse_operator, lambda2, method, label,
start, stop)
##############################################################################
# Save result in stc files:
stc.save('mne_dSPM_raw_inverse_Aud')
##############################################################################
# What else can you do?
# ^^^^^^^^^^^^^^^^^^^^^
#
# - detect heart beat QRS component
# - detect eye blinks and EOG artifacts
# - compute SSP projections to remove ECG or EOG artifacts
# - compute Independent Component Analysis (ICA) to remove artifacts or
# select latent sources
# - estimate noise covariance matrix from Raw and Epochs
# - visualize cross-trial response dynamics using epochs images
# - compute forward solutions
# - estimate power in the source space
# - estimate connectivity in sensor and source space
# - morph stc from one brain to another for group studies
# - compute mass univariate statistics base on custom contrasts
# - visualize source estimates
# - export raw, epochs, and evoked data to other python data analysis
# libraries e.g. pandas
# - and many more things ...
#
# Want to know more ?
# ^^^^^^^^^^^^^^^^^^^
#
# Browse :ref:`examples-index` gallery.
print("Done!")
| bsd-3-clause |
anotherpyr/pushmac | pushcast/pcfilter.py | 1 | 1066 | '''
Created on Aug 24, 2015
@author: anotherpyr
'''
class SimpleDescription():
def filter(self, lines):
output = u""
append = False
for k in range(0, len(lines)):
line = lines[k]
# Remove excess URLs from descriptions
if line.find("://") < 0:
if append:
output += u" "
else:
append = True
output += line
# if this removed all of the lines
if len(output) < 1:
            # Then remove just the URLs (which could be humorous)
for k in range(0, len(lines)):
if append:
output += u" "
else:
append = True
output += lines[k]
output = output.replace("<p>", "")
output = output.replace("</p>", "")
output = output.replace("<span>", "")
output = output.replace("</span>", "")
return output | apache-2.0 |
dl1ksv/gnuradio | gr-digital/python/digital/qa_ofdm_txrx.py | 5 | 6704 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import random
import numpy
import pmt
from gnuradio import gr, gr_unittest
from gnuradio import digital
from gnuradio import blocks
from gnuradio import channels
from gnuradio.digital.ofdm_txrx import ofdm_tx, ofdm_rx
from gnuradio.digital.utils import tagged_streams
# Set this to true if you need to write out data
LOG_DEBUG_INFO = False
class ofdm_tx_fg (gr.top_block):
def __init__(
self,
data,
len_tag_key,
scramble_bits=False,
additional_tags=[]):
gr.top_block.__init__(self, "ofdm_tx")
tx_data, tags = tagged_streams.packets_to_vectors((data,), len_tag_key)
src = blocks.vector_source_b(data, False, 1, tags + additional_tags)
self.tx = ofdm_tx(
packet_length_tag_key=len_tag_key,
debug_log=LOG_DEBUG_INFO,
scramble_bits=scramble_bits)
self.sink = blocks.vector_sink_c()
self.connect(src, self.tx, self.sink)
def get_tx_samples(self):
return self.sink.data()
class ofdm_rx_fg (gr.top_block):
def __init__(
self,
samples,
len_tag_key,
channel=None,
prepend_zeros=100,
scramble_bits=False):
gr.top_block.__init__(self, "ofdm_rx")
if prepend_zeros:
samples = (0,) * prepend_zeros + tuple(samples)
src = blocks.vector_source_c(list(samples) + [0, ] * 1000)
self.rx = ofdm_rx(
frame_length_tag_key=len_tag_key,
debug_log=LOG_DEBUG_INFO,
scramble_bits=scramble_bits)
if channel is not None:
self.connect(src, channel, self.rx)
else:
self.connect(src, self.rx)
self.sink = blocks.vector_sink_b()
self.connect(self.rx, self.sink)
def get_rx_bytes(self):
return self.sink.data()
class test_ofdm_txrx (gr_unittest.TestCase):
def setUp(self):
random.seed(0)
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001_tx(self):
""" Just make sure the Tx works in general """
# This tag gets put onto the first item of the transmit data,
# it should be transmitted first, too
timing_tag = gr.tag_t()
timing_tag.offset = 0
timing_tag.key = pmt.string_to_symbol('tx_timing')
timing_tag.value = pmt.to_pmt('now')
len_tag_key = 'frame_len'
n_bytes = 52
n_samples_expected = (numpy.ceil(1.0 * (n_bytes + 4) / 6) + 3) * 80
test_data = [random.randint(0, 255) for x in range(n_bytes)]
tx_fg = ofdm_tx_fg(
test_data,
len_tag_key,
additional_tags=[
timing_tag,
])
tx_fg.run()
self.assertEqual(len(tx_fg.get_tx_samples()), n_samples_expected)
tags_rx = [gr.tag_to_python(x) for x in tx_fg.sink.tags()]
tags_rx = sorted([(x.offset, x.key, x.value) for x in tags_rx])
tags_expected = [
(0, 'frame_len', n_samples_expected),
(0, 'tx_timing', 'now'),
]
self.assertEqual(tags_rx, tags_expected)
def test_002_rx_only_noise(self):
""" Run the RX with only noise, check it doesn't crash
or return a burst. """
len_tag_key = 'frame_len'
samples = (0,) * 1000
channel = channels.channel_model(0.1)
rx_fg = ofdm_rx_fg(samples, len_tag_key, channel)
rx_fg.run()
self.assertEqual(len(rx_fg.get_rx_bytes()), 0)
def test_003_tx1packet(self):
""" Transmit one packet, with slight AWGN and slight frequency + timing offset.
Check packet is received and no bit errors have occurred. """
len_tag_key = 'frame_len'
n_bytes = 21
fft_len = 64
test_data = list([random.randint(0, 255) for x in range(n_bytes)])
# 1.0/fft_len is one sub-carrier, a fine freq offset stays below that
freq_offset = 1.0 / fft_len * 0.7
#channel = channels.channel_model(0.01, freq_offset)
channel = None
# Tx
tx_fg = ofdm_tx_fg(test_data, len_tag_key)
tx_fg.run()
tx_samples = tx_fg.get_tx_samples()
# Rx
rx_fg = ofdm_rx_fg(tx_samples, len_tag_key, channel, prepend_zeros=100)
rx_fg.run()
rx_data = rx_fg.get_rx_bytes()
self.assertEqual(list(tx_fg.tx.sync_word1), list(rx_fg.rx.sync_word1))
self.assertEqual(list(tx_fg.tx.sync_word2), list(rx_fg.rx.sync_word2))
self.assertEqual(test_data, rx_data)
def test_003_tx1packet_scramble(self):
""" Same as before, use scrambler. """
len_tag_key = 'frame_len'
n_bytes = 21
fft_len = 64
test_data = list([random.randint(0, 255) for x in range(n_bytes)])
# 1.0/fft_len is one sub-carrier, a fine freq offset stays below that
freq_offset = 1.0 / fft_len * 0.7
#channel = channels.channel_model(0.01, freq_offset)
channel = None
# Tx
tx_fg = ofdm_tx_fg(test_data, len_tag_key, scramble_bits=True)
tx_fg.run()
tx_samples = tx_fg.get_tx_samples()
# Rx
rx_fg = ofdm_rx_fg(
tx_samples,
len_tag_key,
channel,
prepend_zeros=100,
scramble_bits=True)
rx_fg.run()
rx_data = rx_fg.get_rx_bytes()
self.assertEqual(list(tx_fg.tx.sync_word1), list(rx_fg.rx.sync_word1))
self.assertEqual(list(tx_fg.tx.sync_word2), list(rx_fg.rx.sync_word2))
self.assertEqual(test_data, rx_data)
def test_004_tx1packet_large_fO(self):
""" Transmit one packet, with slight AWGN and large frequency offset.
Check packet is received and no bit errors have occurred. """
fft_len = 64
len_tag_key = 'frame_len'
n_bytes = 21
test_data = list([random.randint(0, 255) for x in range(n_bytes)])
#test_data = tuple([255 for x in range(n_bytes)])
# 1.0/fft_len is one sub-carrier
frequency_offset = 1.0 / fft_len * 2.5
channel = channels.channel_model(0.00001, frequency_offset)
# Tx
tx_fg = ofdm_tx_fg(test_data, len_tag_key)
tx_fg.run()
tx_samples = tx_fg.get_tx_samples()
# Rx
rx_fg = ofdm_rx_fg(tx_samples, len_tag_key, channel, prepend_zeros=100)
rx_fg.run()
rx_data = rx_fg.get_rx_bytes()
self.assertEqual(test_data, rx_data)
if __name__ == '__main__':
gr_unittest.run(test_ofdm_txrx)
| gpl-3.0 |
kll334477/NewsScrapy | thepaper/thepaper/spiders/cnta_spider.py | 2 | 3849 | # -*- coding: utf-8 -*-
__author__ = 'k'
import re
import scrapy
from bs4 import BeautifulSoup
import logging
from thepaper.items import NewsItem
import json
logger = logging.getLogger("NbdSpider")
from thepaper.settings import *
from thepaper.util import judge_news_crawl
class CntaSpider(scrapy.spiders.Spider):
domain = "http://www.cnta.gov.cn"
name = "cnta"
allowed_domains = ["cnta.gov.cn",]
flag = {}
start_urls = [
"http://www.cnta.gov.cn/xxfb/mrgx/",
"http://www.cnta.gov.cn/xxfb/jjgat/index.shtml",
"http://www.cnta.gov.cn/xxfb/xwlb/index.shtml",
"http://www.cnta.gov.cn/zwgk/tzggnew/gztz/index.shtml",
]
def parse(self,response):
origin_url = response.url
if "index" not in origin_url:
soup = BeautifulSoup(response.body,"lxml")
catalogue = soup.find("a",class_ = "blue CurrChnlCls").get("title").strip()
news_list = soup.find("div", class_ = "lie_main_m").find_all("li")
for news in news_list:
title = news.find("a").text.strip()
news_url = "http://www.cnta.gov.cn/xxfb" + news.find("a").get("href")[2:]
news_no = news_url.rsplit("/",1)[-1].split(".")[0]
item = NewsItem(
news_url =news_url,
title = title,
news_no = news_no,
catalogue = catalogue,
)
yield scrapy.Request(item["news_url"],callback=self.parse_news,meta={'item':item})
else:
topic_url = origin_url.rsplit(".",1)[0]
self.flag.setdefault(topic_url,0)
yield scrapy.Request(origin_url,callback=self.parse_topic)
def parse_topic(self,response):
origin_url = response.url
if "_" not in origin_url:
pageindex = 0
topic_url = origin_url.rsplit(".",1)[0]
else:
temp = origin_url.rsplit("_",1)
pageindex = temp[-1].split(".",1)[0]
topic_url = temp[0]
soup = BeautifulSoup(response.body,"lxml")
catalogue = soup.find("a",class_ = "blue CurrChnlCls").get("title").strip()
news_list = soup.find("div", class_ = "lie_main_m").find_all("li")
for news in news_list:
news_date = news.find("span").text.strip() + " 00:00:00"
title = news.find("a").text.strip()[10:]
news_url = topic_url.rsplit("/",1)[0] + news.find("a").get("href")[1:]
news_no = news_url.rsplit("/",1)[-1].split(".")[0]
item = NewsItem(
news_date = news_date,
news_url =news_url,
title = title,
news_no = news_no,
catalogue = catalogue,
)
item = judge_news_crawl(item)
if item:
yield scrapy.Request(item["news_url"],callback=self.parse_news,meta={'item':item})
else:
self.flag[topic_url] = pageindex
if not self.flag[topic_url]:
next_url = topic_url + "_" + str(int(pageindex) + 1) + ".shtml"
yield scrapy.Request(next_url,callback=self.parse_topic)
def parse_news(self,response):
item = response.meta.get("item",NewsItem())
soup = BeautifulSoup(response.body,"lxml")
temp = soup.find("div",class_ = "main_t").find_all("span")
news_date = temp[0].text
referer_web = temp[1].text.split(u":")[1]
temp = soup.find("div",class_ = "TRS_Editor")
content = "\n\n".join([ t.text.strip() for t in temp.find_all("p")])
item["news_date"] = news_date
item["referer_web"] = referer_web
item["content"] = content
item['crawl_date'] = NOW
yield item
| lgpl-3.0 |
DJMelonz/basic-blog | django/utils/dateformat.py | 234 | 8956 | """
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print df.format('jS F Y H:i')
7th October 2003 11:39
>>>
"""
import re
import time
import calendar
from django.utils.dates import MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR
from django.utils.tzinfo import LocalTimezone
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDEfFgGhHiIjlLmMnNOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(force_unicode(formatstr))):
if i % 2:
pieces.append(force_unicode(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r'\1', piece))
return u''.join(pieces)
class TimeFormat(Formatter):
def __init__(self, t):
self.data = t
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _('p.m.')
return _('a.m.')
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _('PM')
return _('AM')
def B(self):
"Swatch Internet time"
raise NotImplementedError
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
if self.data.minute == 0:
return self.g()
return u'%s:%s' % (self.g(), self.i())
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
if self.data.hour == 0:
return 12
if self.data.hour > 12:
return self.data.hour - 12
return self.data.hour
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return u'%02d' % self.g()
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return u'%02d' % self.G()
def i(self):
"Minutes; i.e. '00' to '59'"
return u'%02d' % self.data.minute
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _('midnight')
if self.data.minute == 0 and self.data.hour == 12:
return _('noon')
return u'%s %s' % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return u'%02d' % self.data.second
def u(self):
"Microseconds"
return self.data.microsecond
class DateFormat(TimeFormat):
year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
def __init__(self, dt):
# Accepts either a datetime or date object.
self.data = dt
self.timezone = getattr(dt, 'tzinfo', None)
if hasattr(self.data, 'hour') and not self.timezone:
self.timezone = LocalTimezone(dt)
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return u'%02d' % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def E(self):
"Alternative month names as required by some locales. Proprietary extension."
return MONTHS_ALT[self.data.month]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self):
"'1' if Daylight Savings Time, '0' otherwise."
if self.timezone and self.timezone.dst(self.data):
return u'1'
else:
return u'0'
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self):
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return u'%02d' % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def O(self):
"Difference to Greenwich time in hours; e.g. '+0200'"
seconds = self.Z()
return u"%+03d%02d" % (seconds // 3600, (seconds // 60) % 60)
def r(self):
"RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
return self.format('D, j M Y H:i:s O')
def S(self):
"English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
if self.data.day in (11, 12, 13): # Special case
return u'th'
last = self.data.day % 10
if last == 1:
return u'st'
if last == 2:
return u'nd'
if last == 3:
return u'rd'
return u'th'
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return u'%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
def T(self):
"Time zone of this machine; e.g. 'EST' or 'MDT'"
name = self.timezone and self.timezone.tzname(self.data) or None
if name is None:
name = self.format('O')
return unicode(name)
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
if getattr(self.data, 'tzinfo', None):
return int(calendar.timegm(self.data.utctimetuple()))
else:
return int(time.mktime(self.data.timetuple()))
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
# Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
week_number = None
jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
weekday = self.data.weekday() + 1
day_of_year = self.z()
if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year-1)):
week_number = 53
else:
week_number = 52
else:
if calendar.isleap(self.data.year):
i = 366
else:
i = 365
if (i - day_of_year) < (4 - weekday):
week_number = 1
else:
j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
week_number = j // 7
if jan1_weekday > 4:
week_number -= 1
return week_number
def y(self):
"Year, 2 digits; e.g. '99'"
return unicode(self.data.year)[2:]
def Y(self):
"Year, 4 digits; e.g. '1999'"
return self.data.year
def z(self):
"Day of the year; i.e. '0' to '365'"
doy = self.year_days[self.data.month] + self.data.day
if self.L() and self.data.month > 2:
doy += 1
return doy
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
"""
if not self.timezone:
return 0
offset = self.timezone.utcoffset(self.data)
# Only days can be negative, so negative offsets have days=-1 and
# seconds positive. Positive offsets have days=0
return offset.days * 86400 + offset.seconds
def format(value, format_string):
"Convenience function"
df = DateFormat(value)
return df.format(format_string)
def time_format(value, format_string):
"Convenience function"
tf = TimeFormat(value)
return tf.format(format_string)
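# Usage sketch for the convenience wrappers, mirroring the class-based example
# in the module docstring:
#
#   import datetime
#   print(format(datetime.datetime.now(), 'jS F Y H:i'))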
| bsd-3-clause |
hhm0/supysonic | supysonic/watcher.py | 2 | 7119 | # coding: utf-8
# This file is part of Supysonic.
#
# Supysonic is a Python implementation of the Subsonic server API.
# Copyright (C) 2014 Alban 'spl0k' Féron
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import logging
from signal import signal, SIGTERM
from threading import Thread, Condition, Timer
from logging.handlers import TimedRotatingFileHandler
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from supysonic import config, db
from supysonic.scanner import Scanner
OP_SCAN = 1
OP_REMOVE = 2
OP_MOVE = 4
class SupysonicWatcherEventHandler(PatternMatchingEventHandler):
def __init__(self, queue, logger):
extensions = config.get('base', 'scanner_extensions')
patterns = map(lambda e: "*." + e.lower(), extensions.split()) if extensions else None
super(SupysonicWatcherEventHandler, self).__init__(patterns = patterns, ignore_directories = True)
self.__queue = queue
self.__logger = logger
def dispatch(self, event):
try:
super(SupysonicWatcherEventHandler, self).dispatch(event)
except Exception, e:
self.__logger.critical(e)
def on_created(self, event):
self.__logger.debug("File created: '%s'", event.src_path)
self.__queue.put(event.src_path, OP_SCAN)
def on_deleted(self, event):
self.__logger.debug("File deleted: '%s'", event.src_path)
self.__queue.put(event.src_path, OP_REMOVE)
def on_modified(self, event):
self.__logger.debug("File modified: '%s'", event.src_path)
self.__queue.put(event.src_path, OP_SCAN)
def on_moved(self, event):
self.__logger.debug("File moved: '%s' -> '%s'", event.src_path, event.dest_path)
self.__queue.put(event.dest_path, OP_MOVE, src_path = event.src_path)
class Event(object):
def __init__(self, path, operation, **kwargs):
if operation & (OP_SCAN | OP_REMOVE) == (OP_SCAN | OP_REMOVE):
raise Exception("Flags SCAN and REMOVE both set")
self.__path = path
self.__time = time.time()
self.__op = operation
self.__src = kwargs.get("src_path")
def set(self, operation, **kwargs):
if operation & (OP_SCAN | OP_REMOVE) == (OP_SCAN | OP_REMOVE):
raise Exception("Flags SCAN and REMOVE both set")
self.__time = time.time()
if operation & OP_SCAN:
self.__op &= ~OP_REMOVE
if operation & OP_REMOVE:
self.__op &= ~OP_SCAN
self.__op |= operation
src_path = kwargs.get("src_path")
if src_path:
self.__src = src_path
@property
def path(self):
return self.__path
@property
def time(self):
return self.__time
@property
def operation(self):
return self.__op
@property
def src_path(self):
return self.__src
class ScannerProcessingQueue(Thread):
def __init__(self, logger):
super(ScannerProcessingQueue, self).__init__()
self.__logger = logger
self.__cond = Condition()
self.__timer = None
self.__queue = {}
self.__running = True
def run(self):
try:
self.__run()
except Exception, e:
self.__logger.critical(e)
def __run(self):
while self.__running:
time.sleep(0.1)
with self.__cond:
self.__cond.wait()
if not self.__queue:
continue
self.__logger.debug("Instantiating scanner")
store = db.get_store(config.get('base', 'database_uri'))
scanner = Scanner(store)
item = self.__next_item()
while item:
if item.operation & OP_MOVE:
self.__logger.info("Moving: '%s' -> '%s'", item.src_path, item.path)
scanner.move_file(item.src_path, item.path)
if item.operation & OP_SCAN:
self.__logger.info("Scanning: '%s'", item.path)
scanner.scan_file(item.path)
if item.operation & OP_REMOVE:
self.__logger.info("Removing: '%s'", item.path)
scanner.remove_file(item.path)
item = self.__next_item()
scanner.finish()
store.commit()
store.close()
self.__logger.debug("Freeing scanner")
del scanner
def stop(self):
self.__running = False
with self.__cond:
self.__cond.notify()
def put(self, path, operation, **kwargs):
if not self.__running:
raise RuntimeError("Trying to put an item in a stopped queue")
with self.__cond:
if path in self.__queue:
event = self.__queue[path]
event.set(operation, **kwargs)
else:
event = Event(path, operation, **kwargs)
self.__queue[path] = event
if operation & OP_MOVE and kwargs["src_path"] in self.__queue:
previous = self.__queue[kwargs["src_path"]]
event.set(previous.operation, src_path = previous.src_path)
del self.__queue[kwargs["src_path"]]
if self.__timer:
self.__timer.cancel()
self.__timer = Timer(5, self.__wakeup)
self.__timer.start()
def __wakeup(self):
with self.__cond:
self.__cond.notify()
self.__timer = None
def __next_item(self):
with self.__cond:
if not self.__queue:
return None
next = min(self.__queue.iteritems(), key = lambda i: i[1].time)
if not self.__running or next[1].time + 5 <= time.time():
del self.__queue[next[0]]
return next[1]
return None
class SupysonicWatcher(object):
def run(self):
if not config.check():
return
logger = logging.getLogger(__name__)
if config.get('daemon', 'log_file'):
log_handler = TimedRotatingFileHandler(config.get('daemon', 'log_file'), when = 'midnight')
else:
log_handler = logging.NullHandler()
log_handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
logger.addHandler(log_handler)
if config.get('daemon', 'log_level'):
mapping = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
                'CRITICAL': logging.CRITICAL
}
logger.setLevel(mapping.get(config.get('daemon', 'log_level').upper(), logging.NOTSET))
store = db.get_store(config.get('base', 'database_uri'))
folders = store.find(db.Folder, db.Folder.root == True)
if not folders.count():
logger.info("No folder set. Exiting.")
store.close()
return
queue = ScannerProcessingQueue(logger)
handler = SupysonicWatcherEventHandler(queue, logger)
observer = Observer()
for folder in folders:
logger.info("Starting watcher for %s", folder.path)
observer.schedule(handler, folder.path, recursive = True)
store.close()
signal(SIGTERM, self.__terminate)
self.__running = True
queue.start()
observer.start()
while self.__running:
time.sleep(2)
logger.info("Stopping watcher")
observer.stop()
observer.join()
queue.stop()
queue.join()
def stop(self):
self.__running = False
def __terminate(self, signum, frame):
self.stop()
| agpl-3.0 |
mwiebe/blaze | blaze/datadescriptor/tests/test_csv_data_descriptor.py | 2 | 5241 | from __future__ import absolute_import, division, print_function
import unittest
import tempfile
import os
import datashape
from blaze.datadescriptor import (
CSVDataDescriptor, DyNDDataDescriptor, IDataDescriptor, dd_as_py)
# A CSV toy example
csv_buf = u"""k1,v1,1,False
k2,v2,2,True
k3,v3,3,False
"""
csv_schema = "{ f0: string, f1: string, f2: int16, f3: bool }"
class TestCSVDataDescriptor(unittest.TestCase):
def setUp(self):
handle, self.csv_file = tempfile.mkstemp(".csv")
with os.fdopen(handle, "w") as f:
f.write(csv_buf)
def tearDown(self):
os.remove(self.csv_file)
def test_basic_object_type(self):
self.assertTrue(issubclass(CSVDataDescriptor, IDataDescriptor))
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
self.assertTrue(isinstance(dd, IDataDescriptor))
self.assertTrue(isinstance(dd.dshape.shape[0], datashape.Var))
self.assertEqual(dd_as_py(dd), [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_iter(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
# Iteration should produce DyNDDataDescriptor instances
vals = []
for el in dd:
self.assertTrue(isinstance(el, DyNDDataDescriptor))
self.assertTrue(isinstance(el, IDataDescriptor))
vals.append(dd_as_py(el))
self.assertEqual(vals, [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_iterchunks(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
# Iteration should produce DyNDDataDescriptor instances
vals = []
for el in dd.iterchunks(blen=2):
self.assertTrue(isinstance(el, DyNDDataDescriptor))
self.assertTrue(isinstance(el, IDataDescriptor))
vals.extend(dd_as_py(el))
self.assertEqual(vals, [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_iterchunks_start(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
vals = []
for el in dd.iterchunks(blen=2, start=1):
vals.extend(dd_as_py(el))
self.assertEqual(vals, [
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_iterchunks_stop(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
vals = [dd_as_py(v) for v in dd.iterchunks(blen=1, stop=2)]
self.assertEqual(vals, [
[{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False}],
[{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True}]])
def test_iterchunks_start_stop(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
vals = [dd_as_py(v) for v in dd.iterchunks(blen=1, start=1, stop=2)]
self.assertEqual(vals, [[
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True}]])
def test_append(self):
# Get a private file so as to not mess the original one
handle, csv_file = tempfile.mkstemp(".csv")
with os.fdopen(handle, "w") as f:
f.write(csv_buf)
dd = CSVDataDescriptor(csv_file, schema=csv_schema)
dd.append(["k4", "v4", 4, True])
vals = [dd_as_py(v) for v in dd.iterchunks(blen=1, start=3)]
self.assertEqual(vals, [[
{u'f0': u'k4', u'f1': u'v4', u'f2': 4, u'f3': True}]])
os.remove(csv_file)
def test_getitem_start(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
el = dd[0]
self.assertTrue(isinstance(el, DyNDDataDescriptor))
vals = dd_as_py(el)
self.assertEqual(vals, [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False}])
def test_getitem_stop(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
el = dd[:1]
self.assertTrue(isinstance(el, DyNDDataDescriptor))
vals = dd_as_py(el)
self.assertEqual(vals, [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False}])
def test_getitem_step(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
el = dd[::2]
self.assertTrue(isinstance(el, DyNDDataDescriptor))
vals = dd_as_py(el)
self.assertEqual(vals, [
{u'f0': u'k1', u'f1': u'v1', u'f2': 1, u'f3': False},
{u'f0': u'k3', u'f1': u'v3', u'f2': 3, u'f3': False}])
def test_getitem_start_step(self):
dd = CSVDataDescriptor(self.csv_file, schema=csv_schema)
el = dd[1::2]
self.assertTrue(isinstance(el, DyNDDataDescriptor))
vals = dd_as_py(el)
self.assertEqual(vals, [
{u'f0': u'k2', u'f1': u'v2', u'f2': 2, u'f3': True}])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
jmanero/mesos-service | zookeeper-3.4.6/src/contrib/rest/src/python/zkrest.py | 115 | 7227 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib2
import urllib
import simplejson
from contextlib import contextmanager
class RequestWithMethod(urllib2.Request):
""" Request class that know how to set the method name """
def __init__(self, *args, **kwargs):
urllib2.Request.__init__(self, *args, **kwargs)
self._method = None
def get_method(self):
return self._method or \
urllib2.Request.get_method(self)
def set_method(self, method):
self._method = method
class ZooKeeper(object):
class Error(Exception): pass
class NotFound(Error): pass
class ZNodeExists(Error): pass
class InvalidSession(Error): pass
class WrongVersion(Error): pass
def __init__(self, uri = 'http://localhost:9998'):
self._base = uri
self._session = None
def start_session(self, expire=5, id=None):
""" Create a session and return the ID """
if id is None:
url = "%s/sessions/v1/?op=create&expire=%d" % (self._base, expire)
self._session = self._do_post(url)['id']
else:
self._session = id
return self._session
def close_session(self):
""" Close the session on the server """
if self._session is not None:
url = '%s/sessions/v1/%s' % (self._base, self._session)
self._do_delete(url)
self._session = None
def heartbeat(self):
""" Send a heartbeat request. This is needed in order to keep a session alive """
if self._session is not None:
url = '%s/sessions/v1/%s' % (self._base, self._session)
self._do_put(url, '')
@contextmanager
def session(self, *args, **kwargs):
""" Session handling using a context manager """
yield self.start_session(*args, **kwargs)
self.close_session()
def get(self, path):
""" Get a node """
url = "%s/znodes/v1%s" % (self._base, path)
return self._do_get(url)
def get_children(self, path):
""" Get all the children for a given path. This function creates a generator """
url = "%s/znodes/v1%s?view=children" % (self._base, path)
resp = self._do_get(url)
for child in resp.get('children', []):
try:
yield self._do_get(resp['child_uri_template']\
.replace('{child}', urllib2.quote(child)))
except ZooKeeper.NotFound:
continue
def create(self, path, data=None, sequence=False, ephemeral=False):
""" Create a new node. By default this call creates a persistent znode.
You can also create an ephemeral or a sequential znode.
"""
ri = path.rindex('/')
head, name = path[:ri+1], path[ri+1:]
if head != '/': head = head[:-1]
flags = {
'null': 'true' if data is None else 'false',
'ephemeral': 'true' if ephemeral else 'false',
'sequence': 'true' if sequence else 'false'
}
if ephemeral:
if self._session:
flags['session'] = self._session
else:
raise ZooKeeper.Error, 'You need a session '\
'to create an ephemeral node'
flags = urllib.urlencode(flags)
url = "%s/znodes/v1%s?op=create&name=%s&%s" % \
(self._base, head, name, flags)
return self._do_post(url, data)
def set(self, path, data=None, version=-1, null=False):
""" Set the value of node """
url = "%s/znodes/v1%s?%s" % (self._base, path, \
urllib.urlencode({
'version': version,
'null': 'true' if null else 'false'
}))
return self._do_put(url, data)
def delete(self, path, version=-1):
""" Delete a znode """
if type(path) is list:
map(lambda el: self.delete(el, version), path)
return
url = '%s/znodes/v1%s?%s' % (self._base, path, \
urllib.urlencode({
'version':version
}))
try:
return self._do_delete(url)
except urllib2.HTTPError, e:
if e.code == 412:
raise ZooKeeper.WrongVersion(path)
elif e.code == 404:
raise ZooKeeper.NotFound(path)
raise
def exists(self, path):
""" Do a znode exists """
try:
self.get(path)
return True
except ZooKeeper.NotFound:
return False
def _do_get(self, uri):
""" Send a GET request and convert errors to exceptions """
try:
req = urllib2.urlopen(uri)
resp = simplejson.load(req)
if 'Error' in resp:
raise ZooKeeper.Error(resp['Error'])
return resp
except urllib2.HTTPError, e:
if e.code == 404:
raise ZooKeeper.NotFound(uri)
raise
def _do_post(self, uri, data=None):
""" Send a POST request and convert errors to exceptions """
try:
req = urllib2.Request(uri, {})
req.add_header('Content-Type', 'application/octet-stream')
if data is not None:
req.add_data(data)
resp = simplejson.load(urllib2.urlopen(req))
if 'Error' in resp:
raise ZooKeeper.Error(resp['Error'])
return resp
except urllib2.HTTPError, e:
if e.code == 201:
return True
elif e.code == 409:
raise ZooKeeper.ZNodeExists(uri)
elif e.code == 401:
raise ZooKeeper.InvalidSession(uri)
raise
def _do_delete(self, uri):
""" Send a DELETE request """
req = RequestWithMethod(uri)
req.set_method('DELETE')
req.add_header('Content-Type', 'application/octet-stream')
return urllib2.urlopen(req).read()
def _do_put(self, uri, data):
""" Send a PUT request """
try:
req = RequestWithMethod(uri)
req.set_method('PUT')
req.add_header('Content-Type', 'application/octet-stream')
if data is not None:
req.add_data(data)
return urllib2.urlopen(req).read()
except urllib2.HTTPError, e:
if e.code == 412: # precondition failed
raise ZooKeeper.WrongVersion(uri)
raise
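# Illustrative usage sketch (assumes a ZooKeeper REST proxy listening on the
# default http://localhost:9998; the znode path and payload are placeholders):
#
#   zk = ZooKeeper()
#   with zk.session(expire=10):
#       zk.create('/demo', 'hello', ephemeral=True)
#       print(zk.get('/demo'))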
| mit |
krikru/tensorflow-opencl | tensorflow/contrib/tfprof/python/tools/tfprof/tfprof_logger.py | 48 | 5809 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging tensorflow::tfprof::OpLog.
OpLog is used to add extra model information for offline analysis by tfprof.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.tools.tfprof import tfprof_log_pb2
TRAINABLE_VARIABLES = '_trainable_variables'
REGISTERED_FLOP_STATS = 'flops'
def _fill_missing_graph_shape(graph, run_meta):
"""Fill Tensor shapes in 'graph' with run time shape from 'run_meta'."""
for dev_stat in run_meta.step_stats.dev_stats:
for node_stat in dev_stat.node_stats:
if not node_stat.output:
continue
try:
op = graph.get_operation_by_name(node_stat.node_name)
except KeyError as e:
# Graph doesn't contains the node_stat, usually RecvTensor.
continue
if len(node_stat.output) != len(op.outputs):
# For example, conditional op has only 1 output at run time.
continue
for (i, node_stat_out) in enumerate(node_stat.output):
if op.outputs[i].get_shape().is_fully_defined():
continue
node_stat_dims = node_stat_out.tensor_description.shape.dim
node_stat_shape = tensor_shape.TensorShape(
[d.size for d in node_stat_dims])
try:
op.outputs[i].set_shape(op.outputs[i].get_shape().merge_with(
node_stat_shape))
except ValueError as e:
sys.stderr.write('Node %s incompatible shapes: %s.\n' %
(node_stat.node_name, e))
return graph
def _get_logged_ops(graph, run_meta=None):
"""Extract trainable model parameters and FLOPs for ops from a Graph.
Args:
graph: tf.Graph.
run_meta: RunMetadata proto used to complete shape information.
Returns:
logged_ops: dict mapping from op_name to OpLogEntry.
"""
if run_meta:
graph = _fill_missing_graph_shape(graph, run_meta)
op_missing_shape = 0
logged_ops = {}
graph_def = graph.as_graph_def()
for node in graph_def.node:
try:
stats = ops.get_stats_for_node_def(graph, node, REGISTERED_FLOP_STATS)
except ValueError:
      # Catch the exception when the shape is incomplete and skip the node.
op_missing_shape += 1
stats = None
if not stats or not stats.value:
continue
if node.name not in logged_ops:
entry = tfprof_log_pb2.OpLogEntry()
entry.name = node.name
entry.float_ops = int(stats.value)
logged_ops[entry.name] = entry
for v in graph.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES):
if v.op.name not in logged_ops:
entry = tfprof_log_pb2.OpLogEntry()
entry.name = v.op.name
entry.types.append(TRAINABLE_VARIABLES)
logged_ops[entry.name] = entry
else:
logged_ops[v.op.name].types.append(TRAINABLE_VARIABLES)
if op_missing_shape > 0 and not run_meta:
sys.stderr.write('%d ops no flops stats due to incomplete shapes. '
'Consider passing run_meta to use run_time shapes.\n' %
op_missing_shape)
return logged_ops
def _merge_default_with_oplog(graph, op_log=None, run_meta=None):
"""Merge the tfprof default extra info with caller's op_log.
Args:
graph: tf.Graph.
op_log: OpLog proto.
run_meta: RunMetadata proto used to complete shape information.
Returns:
tmp_op_log: Merged OpLog proto.
"""
tmp_op_log = tfprof_log_pb2.OpLog()
logged_ops = _get_logged_ops(graph, run_meta)
if not op_log:
tmp_op_log.log_entries.extend(logged_ops.values())
else:
all_ops = dict()
for entry in op_log.log_entries:
all_ops[entry.name] = entry
for op_name, entry in six.iteritems(logged_ops):
if op_name in all_ops:
all_ops[op_name].types.extend(entry.types)
if entry.float_ops > 0 and all_ops[op_name].float_ops == 0:
all_ops[op_name].float_ops = entry.float_ops
else:
all_ops[op_name] = entry
tmp_op_log.log_entries.extend(all_ops.values())
return tmp_op_log
def write_op_log(graph, log_dir, op_log=None, run_meta=None):
"""Log provided 'op_log', and add additional model information below.
The API also assigns ops in tf.trainable_variables() an op type called
'_trainable_variables'.
The API also logs 'flops' statistics for ops with op.RegisterStatistics()
defined. flops calculation depends on Tensor shapes defined in 'graph',
  which might not be complete. 'run_meta', if provided, completes the shape
information with best effort.
Args:
graph: tf.Graph.
log_dir: directory to write the log file.
    op_log: (Optional) OpLog proto to be written. If not provided, a new
one is created.
run_meta: (Optional) RunMetadata proto that helps flops computation using
run time shape information.
"""
op_log = _merge_default_with_oplog(graph, op_log, run_meta)
with gfile.Open(os.path.join(log_dir, 'tfprof_log'), 'w') as log:
log.write(op_log.SerializeToString())
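# Illustrative usage sketch (not part of the original module): writing an
# OpLog for a graph using shapes collected at run time. The session, fetches
# and log directory below are assumptions for the example only, and the
# caller is assumed to have `import tensorflow as tf`.
#
#   run_meta = tf.RunMetadata()
#   sess.run(fetches,
#            options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
#            run_metadata=run_meta)
#   write_op_log(sess.graph, '/tmp/tfprof_logs', run_meta=run_meta)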
| apache-2.0 |
sasukeh/neutron | neutron/tests/unit/api/rpc/handlers/test_dvr_rpc.py | 34 | 2111 | # Copyright (c) 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.api.rpc.handlers import dvr_rpc
from neutron.tests import base
class DVRServerRpcApiTestCase(base.BaseTestCase):
def setUp(self):
self.client_p = mock.patch.object(dvr_rpc.n_rpc, "get_client")
self.client = self.client_p.start()
self.rpc = dvr_rpc.DVRServerRpcApi('fake_topic')
self.mock_cctxt = self.rpc.client.prepare.return_value
self.ctxt = mock.ANY
super(DVRServerRpcApiTestCase, self).setUp()
def test_get_dvr_mac_address_by_host(self):
self.rpc.get_dvr_mac_address_by_host(self.ctxt, 'foo_host')
self.mock_cctxt.call.assert_called_with(
self.ctxt, 'get_dvr_mac_address_by_host', host='foo_host')
def test_get_dvr_mac_address_list(self):
self.rpc.get_dvr_mac_address_list(self.ctxt)
self.mock_cctxt.call.assert_called_with(
self.ctxt, 'get_dvr_mac_address_list')
def test_get_ports_on_host_by_subnet(self):
self.rpc.get_ports_on_host_by_subnet(
self.ctxt, 'foo_host', 'foo_subnet')
self.mock_cctxt.call.assert_called_with(
self.ctxt, 'get_ports_on_host_by_subnet',
host='foo_host', subnet='foo_subnet')
def test_get_subnet_for_dvr(self):
self.rpc.get_subnet_for_dvr(
self.ctxt, 'foo_subnet', fixed_ips='foo_fixed_ips')
self.mock_cctxt.call.assert_called_with(
self.ctxt, 'get_subnet_for_dvr',
subnet='foo_subnet',
fixed_ips='foo_fixed_ips')
| apache-2.0 |
infoxchange/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/admin_views/tests.py | 38 | 111514 | # coding: utf-8
import re
import datetime
import urlparse
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.files import temp as tempfile
from django.core.urlresolvers import reverse
# Register auth models with the admin.
from django.contrib.auth import REDIRECT_FIELD_NAME, admin
from django.contrib.auth.models import User, Permission, UNUSABLE_PASSWORD
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin.models import LogEntry, DELETION
from django.contrib.admin.sites import LOGIN_FORM_KEY
from django.contrib.admin.util import quote
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.forms.util import ErrorList
from django.test import TestCase
from django.utils import formats
from django.utils.cache import get_max_age
from django.utils.encoding import iri_to_uri
from django.utils.html import escape
from django.utils.translation import activate, deactivate
import django.template.context
# local test models
from models import Article, BarAccount, CustomArticle, EmptyModel, \
FooAccount, Gallery, ModelWithStringPrimaryKey, \
Person, Persona, Picture, Podcast, Section, Subscriber, Vodcast, \
Language, Collector, Widget, Grommet, DooHickey, FancyDoodad, Whatsit, \
Category, Post, Plot, FunkyTag, WorkHour, Employee, Inquisition, Actor
class AdminViewBasicTest(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-colors.xml', 'admin-views-fabrics.xml']
# Store the bit of the URL where the admin is registered as a class
# variable. That way we can test a second AdminSite just by subclassing
# this test case and changing urlbit.
urlbit = 'admin'
def setUp(self):
self.old_USE_I18N = settings.LANGUAGE_CODE
self.old_USE_L10N = settings.USE_L10N
self.old_LANGUAGE_CODE = settings.LANGUAGE_CODE
self.client.login(username='super', password='secret')
settings.USE_I18N = True
def tearDown(self):
settings.USE_I18N = self.old_USE_I18N
settings.USE_L10N = self.old_USE_L10N
settings.LANGUAGE_CODE = self.old_LANGUAGE_CODE
self.client.logout()
formats.reset_format_cache()
def testTrailingSlashRequired(self):
"""
        If you leave off the trailing slash, the app should redirect and add it.
"""
request = self.client.get('/test_admin/%s/admin_views/article/add' % self.urlbit)
self.assertRedirects(request,
'/test_admin/%s/admin_views/article/add/' % self.urlbit, status_code=301
)
def testBasicAddGet(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get('/test_admin/%s/admin_views/section/add/' % self.urlbit)
self.assertEqual(response.status_code, 200)
def testAddWithGETArgs(self):
response = self.client.get('/test_admin/%s/admin_views/section/add/' % self.urlbit, {'name': 'My Section'})
self.assertEqual(response.status_code, 200)
self.assertTrue(
'value="My Section"' in response.content,
"Couldn't find an input with the right value in the response."
)
def testBasicEditGet(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get('/test_admin/%s/admin_views/section/1/' % self.urlbit)
self.assertEqual(response.status_code, 200)
def testBasicEditGetStringPK(self):
"""
A smoke test to ensure GET on the change_view works (returns an HTTP
404 error, see #11191) when passing a string as the PK argument for a
model with an integer PK field.
"""
response = self.client.get('/test_admin/%s/admin_views/section/abc/' % self.urlbit)
self.assertEqual(response.status_code, 404)
def testBasicAddPost(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": u"Another Section",
# inline data
"article_set-TOTAL_FORMS": u"3",
"article_set-INITIAL_FORMS": u"0",
"article_set-MAX_NUM_FORMS": u"0",
}
response = self.client.post('/test_admin/%s/admin_views/section/add/' % self.urlbit, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def testPopupAddPost(self):
"""
Ensure http response from a popup is properly escaped.
"""
post_data = {
'_popup': u'1',
'title': u'title with a new\nline',
'content': u'some content',
'date_0': u'2010-09-10',
'date_1': u'14:55:39',
}
response = self.client.post('/test_admin/%s/admin_views/article/add/' % self.urlbit, post_data)
self.failUnlessEqual(response.status_code, 200)
self.assertContains(response, 'dismissAddAnotherPopup')
self.assertContains(response, 'title with a new\u000Aline')
# Post data for edit inline
inline_post_data = {
"name": u"Test section",
# inline data
"article_set-TOTAL_FORMS": u"6",
"article_set-INITIAL_FORMS": u"3",
"article_set-MAX_NUM_FORMS": u"0",
"article_set-0-id": u"1",
        # there is no title in the database, give one here or the formset will fail.
"article_set-0-title": u"Norske bostaver æøå skaper problemer",
"article_set-0-content": u"<p>Middle content</p>",
"article_set-0-date_0": u"2008-03-18",
"article_set-0-date_1": u"11:54:58",
"article_set-0-section": u"1",
"article_set-1-id": u"2",
"article_set-1-title": u"Need a title.",
"article_set-1-content": u"<p>Oldest content</p>",
"article_set-1-date_0": u"2000-03-18",
"article_set-1-date_1": u"11:54:58",
"article_set-2-id": u"3",
"article_set-2-title": u"Need a title.",
"article_set-2-content": u"<p>Newest content</p>",
"article_set-2-date_0": u"2009-03-18",
"article_set-2-date_1": u"11:54:58",
"article_set-3-id": u"",
"article_set-3-title": u"",
"article_set-3-content": u"",
"article_set-3-date_0": u"",
"article_set-3-date_1": u"",
"article_set-4-id": u"",
"article_set-4-title": u"",
"article_set-4-content": u"",
"article_set-4-date_0": u"",
"article_set-4-date_1": u"",
"article_set-5-id": u"",
"article_set-5-title": u"",
"article_set-5-content": u"",
"article_set-5-date_0": u"",
"article_set-5-date_1": u"",
}
def testBasicEditPost(self):
"""
A smoke test to ensure POST on edit_view works.
"""
response = self.client.post('/test_admin/%s/admin_views/section/1/' % self.urlbit, self.inline_post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def testEditSaveAs(self):
"""
Test "save as".
"""
post_data = self.inline_post_data.copy()
post_data.update({
'_saveasnew': u'Save+as+new',
"article_set-1-section": u"1",
"article_set-2-section": u"1",
"article_set-3-section": u"1",
"article_set-4-section": u"1",
"article_set-5-section": u"1",
})
response = self.client.post('/test_admin/%s/admin_views/section/1/' % self.urlbit, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def testChangeListSortingCallable(self):
"""
Ensure we can sort on a list_display field that is a callable
(column 2 is callable_year in ArticleAdmin)
"""
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'ot': 'asc', 'o': 2})
self.assertEqual(response.status_code, 200)
self.assertTrue(
response.content.index('Oldest content') < response.content.index('Middle content') and
response.content.index('Middle content') < response.content.index('Newest content'),
"Results of sorting on callable are out of order."
)
def testChangeListSortingModel(self):
"""
Ensure we can sort on a list_display field that is a Model method
        (column 3 is 'model_year' in ArticleAdmin)
"""
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'ot': 'dsc', 'o': 3})
self.assertEqual(response.status_code, 200)
self.assertTrue(
response.content.index('Newest content') < response.content.index('Middle content') and
response.content.index('Middle content') < response.content.index('Oldest content'),
"Results of sorting on Model method are out of order."
)
def testChangeListSortingModelAdmin(self):
"""
Ensure we can sort on a list_display field that is a ModelAdmin method
        (column 4 is 'modeladmin_year' in ArticleAdmin)
"""
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'ot': 'asc', 'o': 4})
self.assertEqual(response.status_code, 200)
self.assertTrue(
response.content.index('Oldest content') < response.content.index('Middle content') and
response.content.index('Middle content') < response.content.index('Newest content'),
"Results of sorting on ModelAdmin method are out of order."
)
def testLimitedFilter(self):
"""Ensure admin changelist filters do not contain objects excluded via limit_choices_to."""
response = self.client.get('/test_admin/%s/admin_views/thing/' % self.urlbit)
self.assertEqual(response.status_code, 200)
self.assertTrue(
'<div id="changelist-filter">' in response.content,
"Expected filter not found in changelist view."
)
self.assertFalse(
'<a href="?color__id__exact=3">Blue</a>' in response.content,
"Changelist filter not correctly limited by limit_choices_to."
)
def testIncorrectLookupParameters(self):
"""Ensure incorrect lookup parameters are handled gracefully."""
response = self.client.get('/test_admin/%s/admin_views/thing/' % self.urlbit, {'notarealfield': '5'})
self.assertRedirects(response, '/test_admin/%s/admin_views/thing/?e=1' % self.urlbit)
response = self.client.get('/test_admin/%s/admin_views/thing/' % self.urlbit, {'color__id__exact': 'StringNotInteger!'})
self.assertRedirects(response, '/test_admin/%s/admin_views/thing/?e=1' % self.urlbit)
def testIsNullLookups(self):
"""Ensure is_null is handled correctly."""
Article.objects.create(title="I Could Go Anywhere", content="Versatile", date=datetime.datetime.now())
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit)
self.assertTrue('4 articles' in response.content, '"4 articles" missing from response')
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'section__isnull': 'false'})
self.assertTrue('3 articles' in response.content, '"3 articles" missing from response')
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'section__isnull': 'true'})
self.assertTrue('1 article' in response.content, '"1 article" missing from response')
def testLogoutAndPasswordChangeURLs(self):
response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit)
self.assertFalse('<a href="/test_admin/%s/logout/">' % self.urlbit not in response.content)
self.assertFalse('<a href="/test_admin/%s/password_change/">' % self.urlbit not in response.content)
def testNamedGroupFieldChoicesChangeList(self):
"""
Ensures the admin changelist shows correct values in the relevant column
for rows corresponding to instances of a model in which a named group
has been used in the choices option of a field.
"""
response = self.client.get('/test_admin/%s/admin_views/fabric/' % self.urlbit)
self.assertEqual(response.status_code, 200)
self.assertTrue(
'<a href="1/">Horizontal</a>' in response.content and
'<a href="2/">Vertical</a>' in response.content,
"Changelist table isn't showing the right human-readable values set by a model field 'choices' option named group."
)
def testNamedGroupFieldChoicesFilter(self):
"""
Ensures the filter UI shows correctly when at least one named group has
been used in the choices option of a model field.
"""
response = self.client.get('/test_admin/%s/admin_views/fabric/' % self.urlbit)
self.assertEqual(response.status_code, 200)
self.assertTrue(
'<div id="changelist-filter">' in response.content,
"Expected filter not found in changelist view."
)
self.assertTrue(
'<a href="?surface__exact=x">Horizontal</a>' in response.content and
'<a href="?surface__exact=y">Vertical</a>' in response.content,
"Changelist filter isn't showing options contained inside a model field 'choices' option named group."
)
def testChangeListNullBooleanDisplay(self):
Post.objects.create(public=None)
        # This hard-codes the URL because it'll fail if it runs
# against the 'admin2' custom admin (which doesn't have the
# Post model).
response = self.client.get("/test_admin/admin/admin_views/post/")
self.assertTrue('icon-unknown.gif' in response.content)
def testI18NLanguageNonEnglishDefault(self):
"""
Check if the Javascript i18n view returns an empty language catalog
if the default language is non-English but the selected language
is English. See #13388 and #3594 for more details.
"""
try:
settings.LANGUAGE_CODE = 'fr'
activate('en-us')
response = self.client.get('/test_admin/admin/jsi18n/')
self.assertNotContains(response, 'Choisir une heure')
finally:
deactivate()
def testI18NLanguageNonEnglishFallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
try:
settings.LANGUAGE_CODE = 'fr'
activate('none')
response = self.client.get('/test_admin/admin/jsi18n/')
self.assertContains(response, 'Choisir une heure')
finally:
deactivate()
def testL10NDeactivated(self):
"""
        Check that, if L10N is deactivated, the Javascript i18n view doesn't
return localized date/time formats. Refs #14824.
"""
try:
settings.LANGUAGE_CODE = 'ru'
settings.USE_L10N = False
activate('ru')
response = self.client.get('/test_admin/admin/jsi18n/')
self.assertNotContains(response, '%d.%m.%Y %H:%M:%S')
self.assertContains(response, '%Y-%m-%d %H:%M:%S')
finally:
deactivate()
def test_disallowed_filtering(self):
self.assertRaises(SuspiciousOperation,
self.client.get, "/test_admin/admin/admin_views/album/?owner__email__startswith=fuzzy"
)
try:
self.client.get("/test_admin/admin/admin_views/person/?age__gt=30")
except SuspiciousOperation:
self.fail("Filters should be allowed if they involve a local field without the need to whitelist them in list_filter or date_hierarchy.")
e1 = Employee.objects.create(name='Anonymous', gender=1, age=22, alive=True, code='123')
e2 = Employee.objects.create(name='Visitor', gender=2, age=19, alive=True, code='124')
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e1)
WorkHour.objects.create(datum=datetime.datetime.now(), employee=e2)
response = self.client.get("/test_admin/admin/admin_views/workhour/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'employee__person_ptr__exact')
response = self.client.get("/test_admin/admin/admin_views/workhour/?employee__person_ptr__exact=%d" % e1.pk)
self.assertEqual(response.status_code, 200)
def test_allowed_filtering_15103(self):
"""
Regressions test for ticket 15103 - filtering on fields defined in a
ForeignKey 'limit_choices_to' should be allowed, otherwise raw_id_fields
can break.
"""
try:
self.client.get("/test_admin/admin/admin_views/inquisition/?leader__name=Palin&leader__age=27")
except SuspiciousOperation:
self.fail("Filters should be allowed if they are defined on a ForeignKey pointing to this model")
class AdminJavaScriptTest(AdminViewBasicTest):
def testSingleWidgetFirsFieldFocus(self):
"""
JavaScript-assisted auto-focus on first field.
"""
response = self.client.get('/test_admin/%s/admin_views/picture/add/' % self.urlbit)
self.assertContains(
response,
'<script type="text/javascript">document.getElementById("id_name").focus();</script>'
)
def testMultiWidgetFirsFieldFocus(self):
"""
JavaScript-assisted auto-focus should work if a model/ModelAdmin setup
is such that the first form field has a MultiWidget.
"""
response = self.client.get('/test_admin/%s/admin_views/reservation/add/' % self.urlbit)
self.assertContains(
response,
'<script type="text/javascript">document.getElementById("id_start_date_0").focus();</script>'
)
class SaveAsTests(TestCase):
fixtures = ['admin-views-users.xml','admin-views-person.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_save_as_duplication(self):
"""Ensure save as actually creates a new person"""
post_data = {'_saveasnew':'', 'name':'John M', 'gender':1, 'age': 42}
response = self.client.post('/test_admin/admin/admin_views/person/1/', post_data)
self.assertEqual(len(Person.objects.filter(name='John M')), 1)
self.assertEqual(len(Person.objects.filter(id=1)), 1)
def test_save_as_display(self):
"""
        Ensure that 'save as' is displayed when activated and that, after
        submitting invalid data alongside save_as_new, the form shown does not
        overwrite the initial model.
"""
response = self.client.get('/test_admin/admin/admin_views/person/1/')
self.assert_(response.context['save_as'])
post_data = {'_saveasnew':'', 'name':'John M', 'gender':3, 'alive':'checked'}
response = self.client.post('/test_admin/admin/admin_views/person/1/', post_data)
self.assertEqual(response.context['form_url'], '../add/')
class CustomModelAdminTest(AdminViewBasicTest):
urlbit = "admin2"
def testCustomAdminSiteLoginTemplate(self):
self.client.logout()
request = self.client.get('/test_admin/admin2/')
self.assertTemplateUsed(request, 'custom_admin/login.html')
self.assert_('Hello from a custom login template' in request.content)
def testCustomAdminSiteLogoutTemplate(self):
request = self.client.get('/test_admin/admin2/logout/')
self.assertTemplateUsed(request, 'custom_admin/logout.html')
self.assert_('Hello from a custom logout template' in request.content)
def testCustomAdminSiteIndexViewAndTemplate(self):
request = self.client.get('/test_admin/admin2/')
self.assertTemplateUsed(request, 'custom_admin/index.html')
self.assert_('Hello from a custom index template *bar*' in request.content)
def testCustomAdminSitePasswordChangeTemplate(self):
request = self.client.get('/test_admin/admin2/password_change/')
self.assertTemplateUsed(request, 'custom_admin/password_change_form.html')
self.assert_('Hello from a custom password change form template' in request.content)
def testCustomAdminSitePasswordChangeDoneTemplate(self):
request = self.client.get('/test_admin/admin2/password_change/done/')
self.assertTemplateUsed(request, 'custom_admin/password_change_done.html')
self.assert_('Hello from a custom password change done template' in request.content)
def testCustomAdminSiteView(self):
self.client.login(username='super', password='secret')
response = self.client.get('/test_admin/%s/my_view/' % self.urlbit)
self.assert_(response.content == "Django is a magical pony!", response.content)
def get_perm(Model, perm):
"""Return the permission object, for the Model"""
ct = ContentType.objects.get_for_model(Model)
return Permission.objects.get(content_type=ct, codename=perm)
class AdminViewPermissionsTest(TestCase):
"""Tests for Admin Views Permissions."""
fixtures = ['admin-views-users.xml']
def setUp(self):
"""Test setup."""
# Setup permissions, for our users who can add, change, and delete.
# We can't put this into the fixture, because the content type id
# and the permission id could be different on each run of the test.
opts = Article._meta
# User who can add Articles
add_user = User.objects.get(username='adduser')
add_user.user_permissions.add(get_perm(Article,
opts.get_add_permission()))
# User who can change Articles
change_user = User.objects.get(username='changeuser')
change_user.user_permissions.add(get_perm(Article,
opts.get_change_permission()))
# User who can delete Articles
delete_user = User.objects.get(username='deleteuser')
delete_user.user_permissions.add(get_perm(Article,
opts.get_delete_permission()))
delete_user.user_permissions.add(get_perm(Section,
Section._meta.get_delete_permission()))
# login POST dicts
self.super_login = {
LOGIN_FORM_KEY: 1,
'username': 'super',
'password': 'secret'}
self.super_email_login = {
LOGIN_FORM_KEY: 1,
'username': '[email protected]',
'password': 'secret'}
self.super_email_bad_login = {
LOGIN_FORM_KEY: 1,
'username': '[email protected]',
'password': 'notsecret'}
self.adduser_login = {
LOGIN_FORM_KEY: 1,
'username': 'adduser',
'password': 'secret'}
self.changeuser_login = {
LOGIN_FORM_KEY: 1,
'username': 'changeuser',
'password': 'secret'}
self.deleteuser_login = {
LOGIN_FORM_KEY: 1,
'username': 'deleteuser',
'password': 'secret'}
self.joepublic_login = {
LOGIN_FORM_KEY: 1,
'username': 'joepublic',
'password': 'secret'}
self.no_username_login = {
LOGIN_FORM_KEY: 1,
'password': 'secret'}
def testLogin(self):
"""
Make sure only staff members can log in.
        Successful posts to the login page will redirect to the original url.
        Unsuccessful attempts will continue to render the login page with
a 200 status code.
"""
# Super User
request = self.client.get('/test_admin/admin/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/', self.super_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Test if user enters e-mail address
request = self.client.get('/test_admin/admin/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/', self.super_email_login)
self.assertContains(login, "Your e-mail address is not your username")
# only correct passwords get a username hint
login = self.client.post('/test_admin/admin/', self.super_email_bad_login)
self.assertContains(login, "Please enter a correct username and password")
new_user = User(username='jondoe', password='secret', email='[email protected]')
new_user.save()
# check to ensure if there are multiple e-mail addresses a user doesn't get a 500
login = self.client.post('/test_admin/admin/', self.super_email_login)
self.assertContains(login, "Please enter a correct username and password")
# Add User
request = self.client.get('/test_admin/admin/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/', self.adduser_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Change User
request = self.client.get('/test_admin/admin/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/', self.changeuser_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Delete User
request = self.client.get('/test_admin/admin/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/', self.deleteuser_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Regular User should not be able to login.
request = self.client.get('/test_admin/admin/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/', self.joepublic_login)
self.assertEqual(login.status_code, 200)
self.assertContains(login, "Please enter a correct username and password.")
# Requests without username should not return 500 errors.
request = self.client.get('/test_admin/admin/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/', self.no_username_login)
self.assertEqual(login.status_code, 200)
form = login.context[0].get('form')
self.assert_(login.context[0].get('error_message'))
def testLoginSuccessfullyRedirectsToOriginalUrl(self):
request = self.client.get('/test_admin/admin/')
self.assertEqual(request.status_code, 200)
query_string = 'the-answer=42'
redirect_url = '/test_admin/admin/?%s' % query_string
new_next = {REDIRECT_FIELD_NAME: redirect_url}
login = self.client.post('/test_admin/admin/', dict(self.super_login, **new_next), QUERY_STRING=query_string)
self.assertRedirects(login, redirect_url)
def testAddView(self):
"""Test add view restricts access and actually adds items."""
add_dict = {'title' : 'Døm ikke',
'content': '<p>great article</p>',
'date_0': '2008-03-18', 'date_1': '10:54:39',
'section': 1}
# Change User should not have access to add articles
self.client.get('/test_admin/admin/')
self.client.post('/test_admin/admin/', self.changeuser_login)
# make sure the view removes test cookie
self.assertEqual(self.client.session.test_cookie_worked(), False)
request = self.client.get('/test_admin/admin/admin_views/article/add/')
self.assertEqual(request.status_code, 403)
# Try POST just to make sure
post = self.client.post('/test_admin/admin/admin_views/article/add/', add_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.all().count(), 3)
self.client.get('/test_admin/admin/logout/')
# Add user may login and POST to add view, then redirect to admin root
self.client.get('/test_admin/admin/')
self.client.post('/test_admin/admin/', self.adduser_login)
addpage = self.client.get('/test_admin/admin/admin_views/article/add/')
self.assertEqual(addpage.status_code, 200)
change_list_link = '<a href="../">Articles</a> ›'
self.assertFalse(change_list_link in addpage.content,
'User restricted to add permission is given link to change list view in breadcrumbs.')
post = self.client.post('/test_admin/admin/admin_views/article/add/', add_dict)
self.assertRedirects(post, '/test_admin/admin/')
self.assertEqual(Article.objects.all().count(), 4)
self.client.get('/test_admin/admin/logout/')
# Super can add too, but is redirected to the change list view
self.client.get('/test_admin/admin/')
self.client.post('/test_admin/admin/', self.super_login)
addpage = self.client.get('/test_admin/admin/admin_views/article/add/')
self.assertEqual(addpage.status_code, 200)
self.assertFalse(change_list_link not in addpage.content,
'Unrestricted user is not given link to change list view in breadcrumbs.')
post = self.client.post('/test_admin/admin/admin_views/article/add/', add_dict)
self.assertRedirects(post, '/test_admin/admin/admin_views/article/')
self.assertEqual(Article.objects.all().count(), 5)
self.client.get('/test_admin/admin/logout/')
# 8509 - if a normal user is already logged in, it is possible
# to change user into the superuser without error
login = self.client.login(username='joepublic', password='secret')
# Check and make sure that if user expires, data still persists
self.client.get('/test_admin/admin/')
self.client.post('/test_admin/admin/', self.super_login)
# make sure the view removes test cookie
self.assertEqual(self.client.session.test_cookie_worked(), False)
def testChangeView(self):
"""Change view should restrict access and allow users to edit items."""
change_dict = {'title' : 'Ikke fordømt',
'content': '<p>edited article</p>',
'date_0': '2008-03-18', 'date_1': '10:54:39',
'section': 1}
        # add user should not be able to view the list of articles or change any of them
self.client.get('/test_admin/admin/')
self.client.post('/test_admin/admin/', self.adduser_login)
request = self.client.get('/test_admin/admin/admin_views/article/')
self.assertEqual(request.status_code, 403)
request = self.client.get('/test_admin/admin/admin_views/article/1/')
self.assertEqual(request.status_code, 403)
post = self.client.post('/test_admin/admin/admin_views/article/1/', change_dict)
self.assertEqual(post.status_code, 403)
self.client.get('/test_admin/admin/logout/')
# change user can view all items and edit them
self.client.get('/test_admin/admin/')
self.client.post('/test_admin/admin/', self.changeuser_login)
request = self.client.get('/test_admin/admin/admin_views/article/')
self.assertEqual(request.status_code, 200)
request = self.client.get('/test_admin/admin/admin_views/article/1/')
self.assertEqual(request.status_code, 200)
post = self.client.post('/test_admin/admin/admin_views/article/1/', change_dict)
self.assertRedirects(post, '/test_admin/admin/admin_views/article/')
self.assertEqual(Article.objects.get(pk=1).content, '<p>edited article</p>')
# one error in form should produce singular error message, multiple errors plural
change_dict['title'] = ''
post = self.client.post('/test_admin/admin/admin_views/article/1/', change_dict)
self.assertEqual(request.status_code, 200)
self.assertTrue('Please correct the error below.' in post.content,
'Singular error message not found in response to post with one error.')
change_dict['content'] = ''
post = self.client.post('/test_admin/admin/admin_views/article/1/', change_dict)
self.assertEqual(request.status_code, 200)
self.assertTrue('Please correct the errors below.' in post.content,
'Plural error message not found in response to post with multiple errors.')
self.client.get('/test_admin/admin/logout/')
def testCustomModelAdminTemplates(self):
self.client.get('/test_admin/admin/')
self.client.post('/test_admin/admin/', self.super_login)
# Test custom change list template with custom extra context
request = self.client.get('/test_admin/admin/admin_views/customarticle/')
self.assertEqual(request.status_code, 200)
self.assert_("var hello = 'Hello!';" in request.content)
self.assertTemplateUsed(request, 'custom_admin/change_list.html')
# Test custom add form template
request = self.client.get('/test_admin/admin/admin_views/customarticle/add/')
self.assertTemplateUsed(request, 'custom_admin/add_form.html')
# Add an article so we can test delete, change, and history views
post = self.client.post('/test_admin/admin/admin_views/customarticle/add/', {
'content': '<p>great article</p>',
'date_0': '2008-03-18',
'date_1': '10:54:39'
})
self.assertRedirects(post, '/test_admin/admin/admin_views/customarticle/')
self.assertEqual(CustomArticle.objects.all().count(), 1)
# Test custom delete, change, and object history templates
# Test custom change form template
request = self.client.get('/test_admin/admin/admin_views/customarticle/1/')
self.assertTemplateUsed(request, 'custom_admin/change_form.html')
request = self.client.get('/test_admin/admin/admin_views/customarticle/1/delete/')
self.assertTemplateUsed(request, 'custom_admin/delete_confirmation.html')
request = self.client.post('/test_admin/admin/admin_views/customarticle/', data={
'index': 0,
'action': ['delete_selected'],
'_selected_action': ['1'],
})
self.assertTemplateUsed(request, 'custom_admin/delete_selected_confirmation.html')
request = self.client.get('/test_admin/admin/admin_views/customarticle/1/history/')
self.assertTemplateUsed(request, 'custom_admin/object_history.html')
self.client.get('/test_admin/admin/logout/')
def testDeleteView(self):
"""Delete view should restrict access and actually delete items."""
delete_dict = {'post': 'yes'}
        # add user should not be able to delete articles
self.client.get('/test_admin/admin/')
self.client.post('/test_admin/admin/', self.adduser_login)
request = self.client.get('/test_admin/admin/admin_views/article/1/delete/')
self.assertEqual(request.status_code, 403)
post = self.client.post('/test_admin/admin/admin_views/article/1/delete/', delete_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.all().count(), 3)
self.client.get('/test_admin/admin/logout/')
# Delete user can delete
self.client.get('/test_admin/admin/')
self.client.post('/test_admin/admin/', self.deleteuser_login)
response = self.client.get('/test_admin/admin/admin_views/section/1/delete/')
# test response contains link to related Article
self.assertContains(response, "admin_views/article/1/")
response = self.client.get('/test_admin/admin/admin_views/article/1/delete/')
self.assertEqual(response.status_code, 200)
post = self.client.post('/test_admin/admin/admin_views/article/1/delete/', delete_dict)
self.assertRedirects(post, '/test_admin/admin/')
self.assertEqual(Article.objects.all().count(), 2)
article_ct = ContentType.objects.get_for_model(Article)
logged = LogEntry.objects.get(content_type=article_ct, action_flag=DELETION)
self.assertEqual(logged.object_id, u'1')
self.client.get('/test_admin/admin/logout/')
def testDisabledPermissionsWhenLoggedIn(self):
self.client.login(username='super', password='secret')
superuser = User.objects.get(username='super')
superuser.is_active = False
superuser.save()
response = self.client.get('/test_admin/admin/')
self.assertContains(response, 'id="login-form"')
self.assertNotContains(response, 'Log out')
response = self.client.get('/test_admin/admin/secure-view/')
self.assertContains(response, 'id="login-form"')
class AdminViewDeletedObjectsTest(TestCase):
fixtures = ['admin-views-users.xml', 'deleted-objects.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_nesting(self):
"""
Objects should be nested to display the relationships that
cause them to be scheduled for deletion.
"""
pattern = re.compile(r"""<li>Plot: <a href=".+/admin_views/plot/1/">World Domination</a>\s*<ul>\s*<li>Plot details: <a href=".+/admin_views/plotdetails/1/">almost finished</a>""")
response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(1))
self.assertTrue(pattern.search(response.content))
def test_cyclic(self):
"""
Cyclic relationships should still cause each object to only be
listed once.
"""
one = """<li>Cyclic one: <a href="/test_admin/admin/admin_views/cyclicone/1/">I am recursive</a>"""
two = """<li>Cyclic two: <a href="/test_admin/admin/admin_views/cyclictwo/1/">I am recursive too</a>"""
response = self.client.get('/test_admin/admin/admin_views/cyclicone/%s/delete/' % quote(1))
self.assertContains(response, one, 1)
self.assertContains(response, two, 1)
def test_perms_needed(self):
self.client.logout()
delete_user = User.objects.get(username='deleteuser')
delete_user.user_permissions.add(get_perm(Plot,
Plot._meta.get_delete_permission()))
self.assertTrue(self.client.login(username='deleteuser',
password='secret'))
response = self.client.get('/test_admin/admin/admin_views/plot/%s/delete/' % quote(1))
self.assertContains(response, "your account doesn't have permission to delete the following types of objects")
self.assertContains(response, "<li>plot details</li>")
def test_not_registered(self):
should_contain = """<li>Secret hideout: underground bunker"""
response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(1))
self.assertContains(response, should_contain, 1)
def test_multiple_fkeys_to_same_model(self):
"""
If a deleted object has two relationships from another model,
both of those should be followed in looking for related
objects to delete.
"""
should_contain = """<li>Plot: <a href="/test_admin/admin/admin_views/plot/1/">World Domination</a>"""
response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(1))
self.assertContains(response, should_contain)
response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(2))
self.assertContains(response, should_contain)
def test_multiple_fkeys_to_same_instance(self):
"""
If a deleted object has two relationships pointing to it from
another object, the other object should still only be listed
once.
"""
should_contain = """<li>Plot: <a href="/test_admin/admin/admin_views/plot/2/">World Peace</a></li>"""
response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(2))
self.assertContains(response, should_contain, 1)
def test_inheritance(self):
"""
In the case of an inherited model, if either the child or
parent-model instance is deleted, both instances are listed
for deletion, as well as any relationships they have.
"""
should_contain = [
"""<li>Villain: <a href="/test_admin/admin/admin_views/villain/3/">Bob</a>""",
"""<li>Super villain: <a href="/test_admin/admin/admin_views/supervillain/3/">Bob</a>""",
"""<li>Secret hideout: floating castle""",
"""<li>Super secret hideout: super floating castle!"""
]
response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(3))
for should in should_contain:
self.assertContains(response, should, 1)
response = self.client.get('/test_admin/admin/admin_views/supervillain/%s/delete/' % quote(3))
for should in should_contain:
self.assertContains(response, should, 1)
def test_generic_relations(self):
"""
If a deleted object has GenericForeignKeys pointing to it,
those objects should be listed for deletion.
"""
plot = Plot.objects.get(pk=3)
tag = FunkyTag.objects.create(content_object=plot, name='hott')
should_contain = """<li>Funky tag: hott"""
response = self.client.get('/test_admin/admin/admin_views/plot/%s/delete/' % quote(3))
self.assertContains(response, should_contain)
class AdminViewStringPrimaryKeyTest(TestCase):
fixtures = ['admin-views-users.xml', 'string-primary-key.xml']
def __init__(self, *args):
super(AdminViewStringPrimaryKeyTest, self).__init__(*args)
self.pk = """abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 -_.!~*'() ;/?:@&=+$, <>#%" {}|\^[]`"""
def setUp(self):
self.client.login(username='super', password='secret')
content_type_pk = ContentType.objects.get_for_model(ModelWithStringPrimaryKey).pk
LogEntry.objects.log_action(100, content_type_pk, self.pk, self.pk, 2, change_message='')
def tearDown(self):
self.client.logout()
def test_get_history_view(self):
"Retrieving the history for the object using urlencoded form of primary key should work"
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/history/' % quote(self.pk))
self.assertContains(response, escape(self.pk))
self.assertEqual(response.status_code, 200)
def test_get_change_view(self):
"Retrieving the object using urlencoded form of primary key should work"
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(self.pk))
self.assertContains(response, escape(self.pk))
self.assertEqual(response.status_code, 200)
def test_changelist_to_changeform_link(self):
"The link from the changelist referring to the changeform of the object should be quoted"
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/')
should_contain = """<th><a href="%s/">%s</a></th></tr>""" % (quote(self.pk), escape(self.pk))
self.assertContains(response, should_contain)
def test_recentactions_link(self):
"The link from the recent actions list referring to the changeform of the object should be quoted"
response = self.client.get('/test_admin/admin/')
should_contain = """<a href="admin_views/modelwithstringprimarykey/%s/">%s</a>""" % (quote(self.pk), escape(self.pk))
self.assertContains(response, should_contain)
def test_recentactions_without_content_type(self):
"If a LogEntry is missing content_type it will not display it in span tag under the hyperlink."
response = self.client.get('/test_admin/admin/')
should_contain = """<a href="admin_views/modelwithstringprimarykey/%s/">%s</a>""" % (quote(self.pk), escape(self.pk))
self.assertContains(response, should_contain)
should_contain = "Model with string primary key" # capitalized in Recent Actions
self.assertContains(response, should_contain)
logentry = LogEntry.objects.get(content_type__name__iexact=should_contain)
# http://code.djangoproject.com/ticket/10275
# if the log entry doesn't have a content type it should still be
# possible to view the Recent Actions part
logentry.content_type = None
logentry.save()
counted_presence_before = response.content.count(should_contain)
response = self.client.get('/test_admin/admin/')
counted_presence_after = response.content.count(should_contain)
self.assertEquals(counted_presence_before - 1,
counted_presence_after)
def test_deleteconfirmation_link(self):
"The link from the delete confirmation page referring back to the changeform of the object should be quoted"
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/delete/' % quote(self.pk))
# this URL now comes through reverse(), thus iri_to_uri encoding
should_contain = """/%s/">%s</a>""" % (iri_to_uri(quote(self.pk)), escape(self.pk))
self.assertContains(response, should_contain)
def test_url_conflicts_with_add(self):
"A model with a primary key that ends with add should be visible"
add_model = ModelWithStringPrimaryKey(id="i have something to add")
add_model.save()
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(add_model.pk))
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_url_conflicts_with_delete(self):
"A model with a primary key that ends with delete should be visible"
delete_model = ModelWithStringPrimaryKey(id="delete")
delete_model.save()
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(delete_model.pk))
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
def test_url_conflicts_with_history(self):
"A model with a primary key that ends with history should be visible"
history_model = ModelWithStringPrimaryKey(id="history")
history_model.save()
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(history_model.pk))
should_contain = """<h1>Change model with string primary key</h1>"""
self.assertContains(response, should_contain)
class SecureViewTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
# login POST dicts
self.super_login = {
LOGIN_FORM_KEY: 1,
'username': 'super',
'password': 'secret'}
self.super_email_login = {
LOGIN_FORM_KEY: 1,
'username': '[email protected]',
'password': 'secret'}
self.super_email_bad_login = {
LOGIN_FORM_KEY: 1,
'username': '[email protected]',
'password': 'notsecret'}
self.adduser_login = {
LOGIN_FORM_KEY: 1,
'username': 'adduser',
'password': 'secret'}
self.changeuser_login = {
LOGIN_FORM_KEY: 1,
'username': 'changeuser',
'password': 'secret'}
self.deleteuser_login = {
LOGIN_FORM_KEY: 1,
'username': 'deleteuser',
'password': 'secret'}
self.joepublic_login = {
LOGIN_FORM_KEY: 1,
'username': 'joepublic',
'password': 'secret'}
def tearDown(self):
self.client.logout()
def test_secure_view_shows_login_if_not_logged_in(self):
"Ensure that we see the login form"
response = self.client.get('/test_admin/admin/secure-view/' )
self.assertTemplateUsed(response, 'admin/login.html')
def test_secure_view_login_successfully_redirects_to_original_url(self):
request = self.client.get('/test_admin/admin/secure-view/')
self.assertEqual(request.status_code, 200)
query_string = 'the-answer=42'
redirect_url = '/test_admin/admin/secure-view/?%s' % query_string
new_next = {REDIRECT_FIELD_NAME: redirect_url}
login = self.client.post('/test_admin/admin/secure-view/', dict(self.super_login, **new_next), QUERY_STRING=query_string)
self.assertRedirects(login, redirect_url)
def test_staff_member_required_decorator_works_as_per_admin_login(self):
"""
Make sure only staff members can log in.
        Successful posts to the login page will redirect to the original url.
        Unsuccessful attempts will continue to render the login page with
a 200 status code.
"""
# Super User
request = self.client.get('/test_admin/admin/secure-view/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/secure-view/', self.super_login)
self.assertRedirects(login, '/test_admin/admin/secure-view/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# make sure the view removes test cookie
self.assertEqual(self.client.session.test_cookie_worked(), False)
# Test if user enters e-mail address
request = self.client.get('/test_admin/admin/secure-view/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/secure-view/', self.super_email_login)
self.assertContains(login, "Your e-mail address is not your username")
# only correct passwords get a username hint
login = self.client.post('/test_admin/admin/secure-view/', self.super_email_bad_login)
self.assertContains(login, "Please enter a correct username and password")
new_user = User(username='jondoe', password='secret', email='[email protected]')
new_user.save()
# check to ensure if there are multiple e-mail addresses a user doesn't get a 500
login = self.client.post('/test_admin/admin/secure-view/', self.super_email_login)
self.assertContains(login, "Please enter a correct username and password")
# Add User
request = self.client.get('/test_admin/admin/secure-view/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/secure-view/', self.adduser_login)
self.assertRedirects(login, '/test_admin/admin/secure-view/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Change User
request = self.client.get('/test_admin/admin/secure-view/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/secure-view/', self.changeuser_login)
self.assertRedirects(login, '/test_admin/admin/secure-view/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Delete User
request = self.client.get('/test_admin/admin/secure-view/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/secure-view/', self.deleteuser_login)
self.assertRedirects(login, '/test_admin/admin/secure-view/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Regular User should not be able to login.
request = self.client.get('/test_admin/admin/secure-view/')
self.assertEqual(request.status_code, 200)
login = self.client.post('/test_admin/admin/secure-view/', self.joepublic_login)
self.assertEqual(login.status_code, 200)
# Login.context is a list of context dicts we just need to check the first one.
self.assert_(login.context[0].get('error_message'))
# 8509 - if a normal user is already logged in, it is possible
# to change user into the superuser without error
login = self.client.login(username='joepublic', password='secret')
# Check and make sure that if user expires, data still persists
self.client.get('/test_admin/admin/secure-view/')
self.client.post('/test_admin/admin/secure-view/', self.super_login)
# make sure the view removes test cookie
self.assertEqual(self.client.session.test_cookie_worked(), False)
class AdminViewUnicodeTest(TestCase):
fixtures = ['admin-views-unicode.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def testUnicodeEdit(self):
"""
A test to ensure that POST on edit_view handles non-ascii characters.
"""
post_data = {
"name": u"Test lærdommer",
# inline data
"chapter_set-TOTAL_FORMS": u"6",
"chapter_set-INITIAL_FORMS": u"3",
"chapter_set-MAX_NUM_FORMS": u"0",
"chapter_set-0-id": u"1",
"chapter_set-0-title": u"Norske bostaver æøå skaper problemer",
"chapter_set-0-content": u"<p>Svært frustrerende med UnicodeDecodeError</p>",
"chapter_set-1-id": u"2",
"chapter_set-1-title": u"Kjærlighet.",
"chapter_set-1-content": u"<p>La kjærligheten til de lidende seire.</p>",
"chapter_set-2-id": u"3",
"chapter_set-2-title": u"Need a title.",
"chapter_set-2-content": u"<p>Newest content</p>",
"chapter_set-3-id": u"",
"chapter_set-3-title": u"",
"chapter_set-3-content": u"",
"chapter_set-4-id": u"",
"chapter_set-4-title": u"",
"chapter_set-4-content": u"",
"chapter_set-5-id": u"",
"chapter_set-5-title": u"",
"chapter_set-5-content": u"",
}
response = self.client.post('/test_admin/admin/admin_views/book/1/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def testUnicodeDelete(self):
"""
Ensure that the delete_view handles non-ascii characters
"""
delete_dict = {'post': 'yes'}
response = self.client.get('/test_admin/admin/admin_views/book/1/delete/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/test_admin/admin/admin_views/book/1/delete/', delete_dict)
self.assertRedirects(response, '/test_admin/admin/admin_views/book/')
class AdminViewListEditable(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-person.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_inheritance(self):
Podcast.objects.create(name="This Week in Django",
release_date=datetime.date.today())
response = self.client.get('/test_admin/admin/admin_views/podcast/')
self.assertEqual(response.status_code, 200)
def test_inheritance_2(self):
Vodcast.objects.create(name="This Week in Django", released=True)
response = self.client.get('/test_admin/admin/admin_views/vodcast/')
self.assertEqual(response.status_code, 200)
def test_custom_pk(self):
Language.objects.create(iso='en', name='English', english_name='English')
response = self.client.get('/test_admin/admin/admin_views/language/')
self.assertEqual(response.status_code, 200)
def test_changelist_input_html(self):
response = self.client.get('/test_admin/admin/admin_views/person/')
        # 2 inputs per object (the field and the hidden id field) = 6
# 3 management hidden fields = 3
# 4 action inputs (3 regular checkboxes, 1 checkbox to select all)
# main form submit button = 1
# search field and search submit button = 2
# CSRF field = 1
# field to track 'select all' across paginated views = 1
# 6 + 3 + 4 + 1 + 2 + 1 + 1 = 18 inputs
self.assertEqual(response.content.count("<input"), 18)
# 1 select per object = 3 selects
self.assertEqual(response.content.count("<select"), 4)
def test_post_messages(self):
# Ticket 12707: Saving inline editable should not show admin
# action warnings
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/person/',
data, follow=True)
self.assertEqual(len(response.context['messages']), 1)
def test_post_submission(self):
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"_save": "Save",
}
self.client.post('/test_admin/admin/admin_views/person/', data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
# test a filtered page
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "2",
"form-MAX_NUM_FORMS": "0",
"form-0-id": "1",
"form-0-gender": "1",
"form-0-alive": "checked",
"form-1-id": "3",
"form-1-gender": "1",
"form-1-alive": "checked",
"_save": "Save",
}
self.client.post('/test_admin/admin/admin_views/person/?gender__exact=1', data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, True)
# test a searched page
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": "1",
"form-0-gender": "1",
"_save": "Save",
}
self.client.post('/test_admin/admin/admin_views/person/?q=mauchly', data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, False)
def test_non_form_errors(self):
# test if non-form errors are handled; ticket #12716
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": "2",
"form-0-alive": "1",
"form-0-gender": "2",
# Ensure that the form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/person/', data)
self.assertContains(response, "Grace is not a Zombie")
def test_non_form_errors_is_errorlist(self):
# test if non-form errors are correctly handled; ticket #12878
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "1",
"form-MAX_NUM_FORMS": "0",
"form-0-id": "2",
"form-0-alive": "1",
"form-0-gender": "2",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/person/', data)
non_form_errors = response.context['cl'].formset.non_form_errors()
self.assert_(isinstance(non_form_errors, ErrorList))
self.assertEqual(str(non_form_errors), str(ErrorList(["Grace is not a Zombie"])))
def test_list_editable_ordering(self):
collector = Collector.objects.create(id=1, name="Frederick Clegg")
Category.objects.create(id=1, order=1, collector=collector)
Category.objects.create(id=2, order=2, collector=collector)
Category.objects.create(id=3, order=0, collector=collector)
Category.objects.create(id=4, order=0, collector=collector)
# NB: The order values must be changed so that the items are reordered.
data = {
"form-TOTAL_FORMS": "4",
"form-INITIAL_FORMS": "4",
"form-MAX_NUM_FORMS": "0",
"form-0-order": "14",
"form-0-id": "1",
"form-0-collector": "1",
"form-1-order": "13",
"form-1-id": "2",
"form-1-collector": "1",
"form-2-order": "1",
"form-2-id": "3",
"form-2-collector": "1",
"form-3-order": "0",
"form-3-id": "4",
"form-3-collector": "1",
# Ensure that the form processing understands this as a list_editable "Save"
# and not an action "Go".
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/category/', data)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# Check that the order values have been applied to the right objects
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
def test_list_editable_action_submit(self):
# List editable changes should not be executed if the action "Go" button is
# used to submit the form.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"index": "0",
"_selected_action": [u'3'],
"action": [u'', u'delete_selected'],
}
self.client.post('/test_admin/admin/admin_views/person/', data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, True)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 1)
def test_list_editable_action_choices(self):
# List editable changes should be executed if the "Save" button is
# used to submit the form - any action choices should be ignored.
data = {
"form-TOTAL_FORMS": "3",
"form-INITIAL_FORMS": "3",
"form-MAX_NUM_FORMS": "0",
"form-0-gender": "1",
"form-0-id": "1",
"form-1-gender": "2",
"form-1-id": "2",
"form-2-alive": "checked",
"form-2-gender": "1",
"form-2-id": "3",
"_save": "Save",
"_selected_action": [u'1'],
"action": [u'', u'delete_selected'],
}
self.client.post('/test_admin/admin/admin_views/person/', data)
self.assertEqual(Person.objects.get(name="John Mauchly").alive, False)
self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
class AdminSearchTest(TestCase):
fixtures = ['admin-views-users','multiple-child-classes']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_search_on_sibling_models(self):
"Check that a search that mentions sibling models"
response = self.client.get('/test_admin/admin/admin_views/recommendation/?q=bar')
# confirm the search returned 1 object
self.assertContains(response, "\n1 recommendation\n")
class AdminInheritedInlinesTest(TestCase):
fixtures = ['admin-views-users.xml',]
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def testInline(self):
"Ensure that inline models which inherit from a common parent are correctly handled by admin."
foo_user = u"foo username"
bar_user = u"bar username"
name_re = re.compile('name="(.*?)"')
# test the add case
response = self.client.get('/test_admin/admin/admin_views/persona/add/')
names = name_re.findall(response.content)
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
# post data to the add view
post_data = {
"name": u"Test Name",
# inline data
"accounts-TOTAL_FORMS": u"1",
"accounts-INITIAL_FORMS": u"0",
"accounts-MAX_NUM_FORMS": u"0",
"accounts-0-username": foo_user,
"accounts-2-TOTAL_FORMS": u"1",
"accounts-2-INITIAL_FORMS": u"0",
"accounts-2-MAX_NUM_FORMS": u"0",
"accounts-2-0-username": bar_user,
}
response = self.client.post('/test_admin/admin/admin_views/persona/add/', post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
# test the edit case
response = self.client.get('/test_admin/admin/admin_views/persona/1/')
names = name_re.findall(response.content)
# make sure we have no duplicate HTML names
self.assertEqual(len(names), len(set(names)))
post_data = {
"name": u"Test Name",
"accounts-TOTAL_FORMS": "2",
"accounts-INITIAL_FORMS": u"1",
"accounts-MAX_NUM_FORMS": u"0",
"accounts-0-username": "%s-1" % foo_user,
"accounts-0-account_ptr": "1",
"accounts-0-persona": "1",
"accounts-2-TOTAL_FORMS": u"2",
"accounts-2-INITIAL_FORMS": u"1",
"accounts-2-MAX_NUM_FORMS": u"0",
"accounts-2-0-username": "%s-1" % bar_user,
"accounts-2-0-account_ptr": "2",
"accounts-2-0-persona": "1",
}
response = self.client.post('/test_admin/admin/admin_views/persona/1/', post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Persona.objects.count(), 1)
self.assertEqual(FooAccount.objects.count(), 1)
self.assertEqual(BarAccount.objects.count(), 1)
self.assertEqual(FooAccount.objects.all()[0].username, "%s-1" % foo_user)
self.assertEqual(BarAccount.objects.all()[0].username, "%s-1" % bar_user)
self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
from django.core import mail
class AdminActionsTest(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-actions.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_model_admin_custom_action(self):
"Tests a custom action defined in a ModelAdmin method"
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action' : 'mail_admin',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].subject, 'Greetings from a ModelAdmin action')
def test_model_admin_default_delete_action(self):
"Tests the default delete action defined as a ModelAdmin method"
action_data = {
ACTION_CHECKBOX_NAME: [1, 2],
'action' : 'delete_selected',
'index': 0,
}
delete_confirmation_data = {
ACTION_CHECKBOX_NAME: [1, 2],
'action' : 'delete_selected',
'post': 'yes',
}
confirmation = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
self.assertContains(confirmation, "Are you sure you want to delete the selected subscriber objects")
self.assertTrue(confirmation.content.count(ACTION_CHECKBOX_NAME) == 2)
response = self.client.post('/test_admin/admin/admin_views/subscriber/', delete_confirmation_data)
self.assertEqual(Subscriber.objects.count(), 0)
def test_custom_function_mail_action(self):
"Tests a custom action defined in a function"
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action' : 'external_mail',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/externalsubscriber/', action_data)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].subject, 'Greetings from a function action')
def test_custom_function_action_with_redirect(self):
"Tests a custom action defined in a function"
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action' : 'redirect_to',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/externalsubscriber/', action_data)
self.assertEqual(response.status_code, 302)
def test_default_redirect(self):
"""
Test that actions which don't return an HttpResponse are redirected to
the same page, retaining the querystring (which may contain changelist
information).
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
'action' : 'external_mail',
'index': 0,
}
url = '/test_admin/admin/admin_views/externalsubscriber/?ot=asc&o=1'
response = self.client.post(url, action_data)
self.assertRedirects(response, url)
def test_model_without_action(self):
"Tests a ModelAdmin without any action"
response = self.client.get('/test_admin/admin/admin_views/oldsubscriber/')
self.assertEquals(response.context["action_form"], None)
self.assert_(
'<input type="checkbox" class="action-select"' not in response.content,
"Found an unexpected action toggle checkboxbox in response"
)
self.assert_('action-checkbox-column' not in response.content,
"Found unexpected action-checkbox-column class in response")
def test_model_without_action_still_has_jquery(self):
"Tests that a ModelAdmin without any actions still gets jQuery included in page"
response = self.client.get('/test_admin/admin/admin_views/oldsubscriber/')
self.assertEquals(response.context["action_form"], None)
self.assert_('jquery.min.js' in response.content,
"jQuery missing from admin pages for model with no admin actions"
)
def test_action_column_class(self):
"Tests that the checkbox column class is present in the response"
response = self.client.get('/test_admin/admin/admin_views/subscriber/')
self.assertNotEqual(response.context["action_form"], None)
self.assert_('action-checkbox-column' in response.content,
"Expected an action-checkbox-column in response")
def test_multiple_actions_form(self):
"""
Test that actions come from the form whose submit button was pressed (#10618).
"""
action_data = {
ACTION_CHECKBOX_NAME: [1],
# Two different actions selected on the two forms...
'action': ['external_mail', 'delete_selected'],
# ...but we clicked "go" on the top form.
'index': 0
}
response = self.client.post('/test_admin/admin/admin_views/externalsubscriber/', action_data)
# Send mail, don't delete.
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].subject, 'Greetings from a function action')
def test_user_message_on_none_selected(self):
"""
User should see a warning when 'Go' is pressed and no items are selected.
"""
action_data = {
ACTION_CHECKBOX_NAME: [],
'action' : 'delete_selected',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
msg = """Items must be selected in order to perform actions on them. No items have been changed."""
self.assertContains(response, msg)
self.assertEqual(Subscriber.objects.count(), 2)
def test_user_message_on_no_action(self):
"""
User should see a warning when 'Go' is pressed and no action is selected.
"""
action_data = {
ACTION_CHECKBOX_NAME: [1, 2],
'action' : '',
'index': 0,
}
response = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
msg = """No action selected."""
self.assertContains(response, msg)
self.assertEqual(Subscriber.objects.count(), 2)
def test_selection_counter(self):
"""
Check if the selection counter is there.
"""
response = self.client.get('/test_admin/admin/admin_views/subscriber/')
self.assertContains(response, '0 of 2 selected')
class TestCustomChangeList(TestCase):
fixtures = ['admin-views-users.xml']
urlbit = 'admin'
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_custom_changelist(self):
"""
Validate that a custom ChangeList class can be used (#9749)
"""
# Insert some data
post_data = {"name": u"First Gadget"}
response = self.client.post('/test_admin/%s/admin_views/gadget/add/' % self.urlbit, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
# Hit the page once to get messages out of the queue message list
response = self.client.get('/test_admin/%s/admin_views/gadget/' % self.urlbit)
# Ensure that the data is still not visible on the page
response = self.client.get('/test_admin/%s/admin_views/gadget/' % self.urlbit)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'First Gadget')
class TestInlineNotEditable(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test(self):
"""
InlineModelAdmin broken?
"""
response = self.client.get('/test_admin/admin/admin_views/parent/add/')
self.assertEqual(response.status_code, 200)
class AdminCustomQuerysetTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
self.pks = [EmptyModel.objects.create().id for i in range(3)]
def test_changelist_view(self):
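# Only objects surviving the ModelAdmin's custom queryset (pk > 1) should
# be listed on the changelist page.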
response = self.client.get('/test_admin/admin/admin_views/emptymodel/')
for i in self.pks:
if i > 1:
self.assertContains(response, 'Primary key = %s' % i)
else:
self.assertNotContains(response, 'Primary key = %s' % i)
def test_change_view(self):
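# The change view should 404 for objects filtered out by the custom
# queryset and render normally for the rest.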
for i in self.pks:
response = self.client.get('/test_admin/admin/admin_views/emptymodel/%s/' % i)
if i > 1:
self.assertEqual(response.status_code, 200)
else:
self.assertEqual(response.status_code, 404)
class AdminInlineFileUploadTest(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-actions.xml']
urlbit = 'admin'
def setUp(self):
self.client.login(username='super', password='secret')
# Set up test Picture and Gallery.
# These must be set up here instead of in fixtures in order to allow Picture
# to use a NamedTemporaryFile.
tdir = tempfile.gettempdir()
file1 = tempfile.NamedTemporaryFile(suffix=".file1", dir=tdir)
file1.write('a' * (2 ** 21))
filename = file1.name
file1.close()
g = Gallery(name="Test Gallery")
g.save()
p = Picture(name="Test Picture", image=filename, gallery=g)
p.save()
def tearDown(self):
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""
Test that inline file uploads correctly display prior data (#10002).
"""
post_data = {
"name": u"Test Gallery",
"pictures-TOTAL_FORMS": u"2",
"pictures-INITIAL_FORMS": u"1",
"pictures-MAX_NUM_FORMS": u"0",
"pictures-0-id": u"1",
"pictures-0-gallery": u"1",
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": "1",
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/1/' % self.urlbit, post_data)
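# The re-rendered form should still show the previously uploaded file;
# check the raw response body for the FileInput's "Currently:" text.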
self.assertTrue(response._container[0].find("Currently:") > -1)
class AdminInlineTests(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": u"Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": u"0",
"widget_set-MAX_NUM_FORMS": u"0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
"widget_set-0-name": "",
"widget_set-1-id": "",
"widget_set-1-owner": "1",
"widget_set-1-name": "",
"widget_set-2-id": "",
"widget_set-2-owner": "1",
"widget_set-2-name": "",
"doohickey_set-TOTAL_FORMS": "3",
"doohickey_set-INITIAL_FORMS": u"0",
"doohickey_set-MAX_NUM_FORMS": u"0",
"doohickey_set-0-owner": "1",
"doohickey_set-0-code": "",
"doohickey_set-0-name": "",
"doohickey_set-1-owner": "1",
"doohickey_set-1-code": "",
"doohickey_set-1-name": "",
"doohickey_set-2-owner": "1",
"doohickey_set-2-code": "",
"doohickey_set-2-name": "",
"grommet_set-TOTAL_FORMS": "3",
"grommet_set-INITIAL_FORMS": u"0",
"grommet_set-MAX_NUM_FORMS": u"0",
"grommet_set-0-code": "",
"grommet_set-0-owner": "1",
"grommet_set-0-name": "",
"grommet_set-1-code": "",
"grommet_set-1-owner": "1",
"grommet_set-1-name": "",
"grommet_set-2-code": "",
"grommet_set-2-owner": "1",
"grommet_set-2-name": "",
"whatsit_set-TOTAL_FORMS": "3",
"whatsit_set-INITIAL_FORMS": u"0",
"whatsit_set-MAX_NUM_FORMS": u"0",
"whatsit_set-0-owner": "1",
"whatsit_set-0-index": "",
"whatsit_set-0-name": "",
"whatsit_set-1-owner": "1",
"whatsit_set-1-index": "",
"whatsit_set-1-name": "",
"whatsit_set-2-owner": "1",
"whatsit_set-2-index": "",
"whatsit_set-2-name": "",
"fancydoodad_set-TOTAL_FORMS": "3",
"fancydoodad_set-INITIAL_FORMS": u"0",
"fancydoodad_set-MAX_NUM_FORMS": u"0",
"fancydoodad_set-0-doodad_ptr": "",
"fancydoodad_set-0-owner": "1",
"fancydoodad_set-0-name": "",
"fancydoodad_set-0-expensive": "on",
"fancydoodad_set-1-doodad_ptr": "",
"fancydoodad_set-1-owner": "1",
"fancydoodad_set-1-name": "",
"fancydoodad_set-1-expensive": "on",
"fancydoodad_set-2-doodad_ptr": "",
"fancydoodad_set-2-owner": "1",
"fancydoodad_set-2-name": "",
"fancydoodad_set-2-expensive": "on",
"category_set-TOTAL_FORMS": "3",
"category_set-INITIAL_FORMS": "0",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "",
"category_set-0-id": "",
"category_set-0-collector": "1",
"category_set-1-order": "",
"category_set-1-id": "",
"category_set-1-collector": "1",
"category_set-2-order": "",
"category_set-2-id": "",
"category_set-2-collector": "1",
}
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
self.collector = Collector(pk=1,name='John Fowles')
self.collector.save()
def tearDown(self):
self.client.logout()
def test_simple_inline(self):
"A simple model can be saved as inlines"
# First add a new inline
self.post_data['widget_set-0-name'] = "Widget 1"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
# Check that the PK link exists on the rendered form
response = self.client.get('/test_admin/admin/admin_views/collector/1/')
self.assertContains(response, 'name="widget_set-0-id"')
# Now resave that inline
self.post_data['widget_set-INITIAL_FORMS'] = "1"
self.post_data['widget_set-0-id'] = "1"
self.post_data['widget_set-0-name'] = "Widget 1"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1")
# Now modify that inline
self.post_data['widget_set-INITIAL_FORMS'] = "1"
self.post_data['widget_set-0-id'] = "1"
self.post_data['widget_set-0-name'] = "Widget 1 Updated"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Widget.objects.count(), 1)
self.assertEqual(Widget.objects.all()[0].name, "Widget 1 Updated")
def test_explicit_autofield_inline(self):
"A model with an explicit autofield primary key can be saved as inlines. Regression for #8093"
# First add a new inline
self.post_data['grommet_set-0-name'] = "Grommet 1"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# Check that the PK link exists on the rendered form
response = self.client.get('/test_admin/admin/admin_views/collector/1/')
self.assertContains(response, 'name="grommet_set-0-code"')
# Now resave that inline
self.post_data['grommet_set-INITIAL_FORMS'] = "1"
self.post_data['grommet_set-0-code'] = "1"
self.post_data['grommet_set-0-name'] = "Grommet 1"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
# Now modify that inline
self.post_data['grommet_set-INITIAL_FORMS'] = "1"
self.post_data['grommet_set-0-code'] = "1"
self.post_data['grommet_set-0-name'] = "Grommet 1 Updated"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Grommet.objects.count(), 1)
self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1 Updated")
def test_char_pk_inline(self):
"A model with a character PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data['doohickey_set-0-code'] = "DH1"
self.post_data['doohickey_set-0-name'] = "Doohickey 1"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# Check that the PK link exists on the rendered form
response = self.client.get('/test_admin/admin/admin_views/collector/1/')
self.assertContains(response, 'name="doohickey_set-0-code"')
# Now resave that inline
self.post_data['doohickey_set-INITIAL_FORMS'] = "1"
self.post_data['doohickey_set-0-code'] = "DH1"
self.post_data['doohickey_set-0-name'] = "Doohickey 1"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
# Now modify that inline
self.post_data['doohickey_set-INITIAL_FORMS'] = "1"
self.post_data['doohickey_set-0-code'] = "DH1"
self.post_data['doohickey_set-0-name'] = "Doohickey 1 Updated"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(DooHickey.objects.count(), 1)
self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1 Updated")
def test_integer_pk_inline(self):
"A model with an integer PK can be saved as inlines. Regression for #10992"
# First add a new inline
self.post_data['whatsit_set-0-index'] = "42"
self.post_data['whatsit_set-0-name'] = "Whatsit 1"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# Check that the PK link exists on the rendered form
response = self.client.get('/test_admin/admin/admin_views/collector/1/')
self.assertContains(response, 'name="whatsit_set-0-index"')
# Now resave that inline
self.post_data['whatsit_set-INITIAL_FORMS'] = "1"
self.post_data['whatsit_set-0-index'] = "42"
self.post_data['whatsit_set-0-name'] = "Whatsit 1"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
# Now modify that inline
self.post_data['whatsit_set-INITIAL_FORMS'] = "1"
self.post_data['whatsit_set-0-index'] = "42"
self.post_data['whatsit_set-0-name'] = "Whatsit 1 Updated"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Whatsit.objects.count(), 1)
self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1 Updated")
def test_inherited_inline(self):
"An inherited model can be saved as inlines. Regression for #11042"
# First add a new inline
self.post_data['fancydoodad_set-0-name'] = "Fancy Doodad 1"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
# Check that the PK link exists on the rendered form
response = self.client.get('/test_admin/admin/admin_views/collector/1/')
self.assertContains(response, 'name="fancydoodad_set-0-doodad_ptr"')
# Now resave that inline
self.post_data['fancydoodad_set-INITIAL_FORMS'] = "1"
self.post_data['fancydoodad_set-0-doodad_ptr'] = "1"
self.post_data['fancydoodad_set-0-name'] = "Fancy Doodad 1"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
# Now modify that inline
self.post_data['fancydoodad_set-INITIAL_FORMS'] = "1"
self.post_data['fancydoodad_set-0-doodad_ptr'] = "1"
self.post_data['fancydoodad_set-0-name'] = "Fancy Doodad 1 Updated"
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(FancyDoodad.objects.count(), 1)
self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1 Updated")
def test_ordered_inline(self):
"""Check that an inline with an editable ordering fields is
updated correctly. Regression for #10922"""
# Create some objects with an initial ordering
Category.objects.create(id=1, order=1, collector=self.collector)
Category.objects.create(id=2, order=2, collector=self.collector)
Category.objects.create(id=3, order=0, collector=self.collector)
Category.objects.create(id=4, order=0, collector=self.collector)
# NB: The order values must be changed so that the items are reordered.
self.post_data.update({
"name": "Frederick Clegg",
"category_set-TOTAL_FORMS": "7",
"category_set-INITIAL_FORMS": "4",
"category_set-MAX_NUM_FORMS": "0",
"category_set-0-order": "14",
"category_set-0-id": "1",
"category_set-0-collector": "1",
"category_set-1-order": "13",
"category_set-1-id": "2",
"category_set-1-collector": "1",
"category_set-2-order": "1",
"category_set-2-id": "3",
"category_set-2-collector": "1",
"category_set-3-order": "0",
"category_set-3-id": "4",
"category_set-3-collector": "1",
"category_set-4-order": "",
"category_set-4-id": "",
"category_set-4-collector": "1",
"category_set-5-order": "",
"category_set-5-id": "",
"category_set-5-collector": "1",
"category_set-6-order": "",
"category_set-6-id": "",
"category_set-6-collector": "1",
})
response = self.client.post('/test_admin/admin/admin_views/collector/1/', self.post_data)
# Successful post will redirect
self.assertEqual(response.status_code, 302)
# Check that the order values have been applied to the right objects
self.assertEqual(self.collector.category_set.count(), 4)
self.assertEqual(Category.objects.get(id=1).order, 14)
self.assertEqual(Category.objects.get(id=2).order, 13)
self.assertEqual(Category.objects.get(id=3).order, 1)
self.assertEqual(Category.objects.get(id=4).order, 0)
class NeverCacheTests(TestCase):
fixtures = ['admin-views-users.xml', 'admin-views-colors.xml', 'admin-views-fabrics.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def testAdminIndex(self):
"Check the never-cache status of the main index"
response = self.client.get('/test_admin/admin/')
self.assertEqual(get_max_age(response), 0)
def testAppIndex(self):
"Check the never-cache status of an application index"
response = self.client.get('/test_admin/admin/admin_views/')
self.assertEqual(get_max_age(response), 0)
def testModelIndex(self):
"Check the never-cache status of a model index"
response = self.client.get('/test_admin/admin/admin_views/fabric/')
self.assertEqual(get_max_age(response), 0)
def testModelAdd(self):
"Check the never-cache status of a model add page"
response = self.client.get('/test_admin/admin/admin_views/fabric/add/')
self.assertEqual(get_max_age(response), 0)
def testModelView(self):
"Check the never-cache status of a model edit page"
response = self.client.get('/test_admin/admin/admin_views/section/1/')
self.assertEqual(get_max_age(response), 0)
def testModelHistory(self):
"Check the never-cache status of a model history page"
response = self.client.get('/test_admin/admin/admin_views/section/1/history/')
self.assertEqual(get_max_age(response), 0)
def testModelDelete(self):
"Check the never-cache status of a model delete page"
response = self.client.get('/test_admin/admin/admin_views/section/1/delete/')
self.assertEqual(get_max_age(response), 0)
def testLogin(self):
"Check the never-cache status of login views"
self.client.logout()
response = self.client.get('/test_admin/admin/')
self.assertEqual(get_max_age(response), 0)
def testLogout(self):
"Check the never-cache status of logout view"
response = self.client.get('/test_admin/admin/logout/')
self.assertEqual(get_max_age(response), 0)
def testPasswordChange(self):
"Check the never-cache status of the password change view"
self.client.logout()
response = self.client.get('/test_admin/password_change/')
self.assertEqual(get_max_age(response), None)
def testPasswordChangeDone(self):
"Check the never-cache status of the password change done view"
response = self.client.get('/test_admin/admin/password_change/done/')
self.assertEqual(get_max_age(response), None)
def testJsi18n(self):
"Check the never-cache status of the Javascript i18n view"
response = self.client.get('/test_admin/admin/jsi18n/')
self.assertEqual(get_max_age(response), None)
class ReadonlyTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_readonly_get(self):
response = self.client.get('/test_admin/admin/admin_views/post/add/')
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'name="posted"')
# 3 fields + 2 submit buttons + 4 inline management form fields + 2
# hidden fields for inlines + 1 field for the inline + 2 empty form fields = 14
self.assertEqual(response.content.count("<input"), 14)
self.assertContains(response, formats.localize(datetime.date.today()))
self.assertContains(response,
"<label>Awesomeness level:</label>")
self.assertContains(response, "Very awesome.")
self.assertContains(response, "Unkown coolness.")
self.assertContains(response, "foo")
self.assertContains(response,
formats.localize(datetime.date.today() - datetime.timedelta(days=7))
)
self.assertContains(response, '<div class="form-row coolness">')
self.assertContains(response, '<div class="form-row awesomeness_level">')
self.assertContains(response, '<div class="form-row posted">')
self.assertContains(response, '<div class="form-row value">')
self.assertContains(response, '<div class="form-row ">')
p = Post.objects.create(title="I worked on readonly_fields", content="Its good stuff")
response = self.client.get('/test_admin/admin/admin_views/post/%d/' % p.pk)
self.assertContains(response, "%d amount of cool" % p.pk)
def test_readonly_post(self):
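# 'posted' is a read-only field: it is absent from the form, and any
# submitted value must be ignored in favour of today's date.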
data = {
"title": "Django Got Readonly Fields",
"content": "This is an incredible development.",
"link_set-TOTAL_FORMS": "1",
"link_set-INITIAL_FORMS": "0",
"link_set-MAX_NUM_FORMS": "0",
}
response = self.client.post('/test_admin/admin/admin_views/post/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 1)
p = Post.objects.get()
self.assertEqual(p.posted, datetime.date.today())
data["posted"] = "10-8-1990" # some date that's not today
response = self.client.post('/test_admin/admin/admin_views/post/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Post.objects.count(), 2)
p = Post.objects.order_by('-id')[0]
self.assertEqual(p.posted, datetime.date.today())
def test_readonly_manytomany(self):
"Regression test for #13004"
response = self.client.get('/test_admin/admin/admin_views/pizza/add/')
self.assertEqual(response.status_code, 200)
class RawIdFieldsTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_limit_choices_to(self):
"""Regression test for 14880"""
# This includes tests integers, strings and booleans in the lookup query string
actor = Actor.objects.create(name="Palin", age=27)
inquisition1 = Inquisition.objects.create(expected=True,
leader=actor,
country="England")
inquisition2 = Inquisition.objects.create(expected=False,
leader=actor,
country="Spain")
response = self.client.get('/test_admin/admin/admin_views/sketch/add/')
# Find the link
m = re.search(r'<a href="([^"]*)"[^>]* id="lookup_id_inquisition"', response.content)
self.assertTrue(m) # Got a match
popup_url = m.groups()[0].replace("&amp;", "&")
# Handle relative links
popup_url = urlparse.urljoin(response.request['PATH_INFO'], popup_url)
# Get the popup
response2 = self.client.get(popup_url)
self.assertContains(response2, "Spain")
self.assertNotContains(response2, "England")
class UserAdminTest(TestCase):
"""
Tests user CRUD functionality.
"""
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_save_button(self):
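# Saving a new user with the default Save button should redirect to that
# user's change page and store a usable password.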
user_count = User.objects.count()
response = self.client.post('/test_admin/admin/auth/user/add/', {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'newpassword',
})
new_user = User.objects.order_by('-id')[0]
self.assertRedirects(response, '/test_admin/admin/auth/user/%s/' % new_user.pk)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertNotEqual(new_user.password, UNUSABLE_PASSWORD)
def test_save_continue_editing_button(self):
user_count = User.objects.count()
response = self.client.post('/test_admin/admin/auth/user/add/', {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'newpassword',
'_continue': '1',
})
new_user = User.objects.order_by('-id')[0]
self.assertRedirects(response, '/test_admin/admin/auth/user/%s/' % new_user.pk)
self.assertEqual(User.objects.count(), user_count + 1)
self.assertNotEqual(new_user.password, UNUSABLE_PASSWORD)
def test_password_mismatch(self):
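# Mismatched passwords should redisplay the form with the error attached
# to password2 only.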
response = self.client.post('/test_admin/admin/auth/user/add/', {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'mismatch',
})
self.assertEqual(response.status_code, 200)
adminform = response.context['adminform']
self.assertTrue('password' not in adminform.form.errors)
self.assertEqual(adminform.form.errors['password2'],
[u"The two password fields didn't match."])
def test_user_fk_popup(self):
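# The owner ForeignKey widget should offer an "add another" popup link,
# and the popup form itself should omit the _continue/_addanother buttons.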
response = self.client.get('/test_admin/admin/admin_views/album/add/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, '/test_admin/admin/auth/user/add')
self.assertContains(response, 'class="add-another" id="add_id_owner" onclick="return showAddAnotherPopup(this);"')
response = self.client.get('/test_admin/admin/auth/user/add/?_popup=1')
self.assertNotContains(response, 'name="_continue"')
self.assertNotContains(response, 'name="_addanother"')
def test_save_add_another_button(self):
user_count = User.objects.count()
response = self.client.post('/test_admin/admin/auth/user/add/', {
'username': 'newuser',
'password1': 'newpassword',
'password2': 'newpassword',
'_addanother': '1',
})
new_user = User.objects.order_by('-id')[0]
self.assertRedirects(response, '/test_admin/admin/auth/user/add/')
self.assertEqual(User.objects.count(), user_count + 1)
self.assertNotEqual(new_user.password, UNUSABLE_PASSWORD)
try:
# If docutils isn't installed, skip the AdminDocs tests.
import docutils
class AdminDocsTest(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_tags(self):
response = self.client.get('/test_admin/admin/doc/tags/')
# The builtin tag group exists
self.assertContains(response, "<h2>Built-in tags</h2>", count=2)
# A builtin tag exists in both the index and detail
self.assertContains(response, '<h3 id="built_in-autoescape">autoescape</h3>')
self.assertContains(response, '<li><a href="#built_in-autoescape">autoescape</a></li>')
# An app tag exists in both the index and detail
self.assertContains(response, '<h3 id="comments-get_comment_count">get_comment_count</h3>')
self.assertContains(response, '<li><a href="#comments-get_comment_count">get_comment_count</a></li>')
# The admin list tag group exists
self.assertContains(response, "<h2>admin_list</h2>", count=2)
# An admin list tag exists in both the index and detail
self.assertContains(response, '<h3 id="admin_list-admin_actions">admin_actions</h3>')
self.assertContains(response, '<li><a href="#admin_list-admin_actions">admin_actions</a></li>')
def test_filters(self):
response = self.client.get('/test_admin/admin/doc/filters/')
# The builtin filter group exists
self.assertContains(response, "<h2>Built-in filters</h2>", count=2)
# A builtin filter exists in both the index and detail
self.assertContains(response, '<h3 id="built_in-add">add</h3>')
self.assertContains(response, '<li><a href="#built_in-add">add</a></li>')
except ImportError:
pass
class ValidXHTMLTests(TestCase):
fixtures = ['admin-views-users.xml']
urlbit = 'admin'
def setUp(self):
self._context_processors = None
self._use_i18n, settings.USE_I18N = settings.USE_I18N, False
if 'django.core.context_processors.i18n' in settings.TEMPLATE_CONTEXT_PROCESSORS:
self._context_processors = settings.TEMPLATE_CONTEXT_PROCESSORS
cp = list(settings.TEMPLATE_CONTEXT_PROCESSORS)
cp.remove('django.core.context_processors.i18n')
settings.TEMPLATE_CONTEXT_PROCESSORS = tuple(cp)
# Force re-evaluation of the context processor list
django.template.context._standard_context_processors = None
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
if self._context_processors is not None:
settings.TEMPLATE_CONTEXT_PROCESSORS = self._context_processors
# Force re-evaluation of the context processor list
django.template.context._standard_context_processors = None
settings.USE_I18N = self._use_i18n
def testLangNamePresent(self):
response = self.client.get('/test_admin/%s/admin_views/' % self.urlbit)
self.assertFalse(' lang=""' in response.content)
self.assertFalse(' xml:lang=""' in response.content)
class DateHierarchyTests(TestCase):
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
self.old_USE_THOUSAND_SEPARATOR = settings.USE_THOUSAND_SEPARATOR
self.old_USE_L10N = settings.USE_L10N
settings.USE_THOUSAND_SEPARATOR = True
settings.USE_L10N = True
def tearDown(self):
settings.USE_THOUSAND_SEPARATOR = self.old_USE_THOUSAND_SEPARATOR
settings.USE_L10N = self.old_USE_L10N
formats.reset_format_cache()
def assert_non_localized_year(self, response, year):
"""Ensure that the year is not localized with
USE_THOUSAND_SEPARATOR. Refs #15234.
"""
self.assertNotContains(response, formats.number_format(year))
def assert_contains_year_link(self, response, date):
self.assertContains(response, '?release_date__year=%d"' % (date.year,))
def assert_contains_month_link(self, response, date):
self.assertContains(
response, '?release_date__year=%d&release_date__month=%d"' % (
date.year, date.month))
def assert_contains_day_link(self, response, date):
self.assertContains(
response, '?release_date__year=%d&'
'release_date__month=%d&release_date__day=%d"' % (
date.year, date.month, date.day))
def test_empty(self):
"""
Ensure that no date hierarchy links display with empty changelist.
"""
response = self.client.get(
reverse('admin:admin_views_podcast_changelist'))
self.assertNotContains(response, 'release_date__year=')
self.assertNotContains(response, 'release_date__month=')
self.assertNotContains(response, 'release_date__day=')
def test_single(self):
"""
Ensure that single day-level date hierarchy appears for single object.
"""
DATE = datetime.date(2000, 6, 30)
Podcast.objects.create(release_date=DATE)
url = reverse('admin:admin_views_podcast_changelist')
response = self.client.get(url)
self.assert_non_localized_year(response, 2000)
def test_within_month(self):
"""
Ensure that day-level links appear for changelist within single month.
"""
DATES = (datetime.date(2000, 6, 30),
datetime.date(2000, 6, 15),
datetime.date(2000, 6, 3))
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse('admin:admin_views_podcast_changelist')
response = self.client.get(url)
self.assert_non_localized_year(response, 2000)
def test_within_year(self):
"""
Ensure that month-level links appear for changelist within single year.
"""
DATES = (datetime.date(2000, 1, 30),
datetime.date(2000, 3, 15),
datetime.date(2000, 5, 3))
for date in DATES:
Podcast.objects.create(release_date=date)
url = reverse('admin:admin_views_podcast_changelist')
response = self.client.get(url)
# no day-level links
self.assertNotContains(response, 'release_date__day=')
self.assert_non_localized_year(response, 2000)
def test_multiple_years(self):
"""
Ensure that year-level links appear for year-spanning changelist.
"""
DATES = (datetime.date(2001, 1, 30),
datetime.date(2003, 3, 15),
datetime.date(2005, 5, 3))
for date in DATES:
Podcast.objects.create(release_date=date)
response = self.client.get(
reverse('admin:admin_views_podcast_changelist'))
# no day/month-level links
self.assertNotContains(response, 'release_date__day=')
self.assertNotContains(response, 'release_date__month=')
# and make sure GET parameters still behave correctly
for date in DATES:
url = '%s?release_date__year=%d' % (
reverse('admin:admin_views_podcast_changelist'),
date.year)
response = self.client.get(url)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005)
response = self.client.get(url)
self.assert_non_localized_year(response, 2000)
self.assert_non_localized_year(response, 2003)
self.assert_non_localized_year(response, 2005) | gpl-3.0 |
unreal666/outwiker | plugins/source/source/pygments/lexers/_openedge_builtins.py | 31 | 48362 | # -*- coding: utf-8 -*-
"""
pygments.lexers._openedge_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Builtin list for the OpenEdgeLexer.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
OPENEDGEKEYWORDS = (
'ABSOLUTE',
'ABS',
'ABSO',
'ABSOL',
'ABSOLU',
'ABSOLUT',
'ACCELERATOR',
'ACCUMULATE',
'ACCUM',
'ACCUMU',
'ACCUMUL',
'ACCUMULA',
'ACCUMULAT',
'ACTIVE-FORM',
'ACTIVE-WINDOW',
'ADD',
'ADD-BUFFER',
'ADD-CALC-COLUMN',
'ADD-COLUMNS-FROM',
'ADD-EVENTS-PROCEDURE',
'ADD-FIELDS-FROM',
'ADD-FIRST',
'ADD-INDEX-FIELD',
'ADD-LAST',
'ADD-LIKE-COLUMN',
'ADD-LIKE-FIELD',
'ADD-LIKE-INDEX',
'ADD-NEW-FIELD',
'ADD-NEW-INDEX',
'ADD-SCHEMA-LOCATION',
'ADD-SUPER-PROCEDURE',
'ADM-DATA',
'ADVISE',
'ALERT-BOX',
'ALIAS',
'ALL',
'ALLOW-COLUMN-SEARCHING',
'ALLOW-REPLICATION',
'ALTER',
'ALWAYS-ON-TOP',
'AMBIGUOUS',
'AMBIG',
'AMBIGU',
'AMBIGUO',
'AMBIGUOU',
'ANALYZE',
'ANALYZ',
'AND',
'ANSI-ONLY',
'ANY',
'ANYWHERE',
'APPEND',
'APPL-ALERT-BOXES',
'APPL-ALERT',
'APPL-ALERT-',
'APPL-ALERT-B',
'APPL-ALERT-BO',
'APPL-ALERT-BOX',
'APPL-ALERT-BOXE',
'APPL-CONTEXT-ID',
'APPLICATION',
'APPLY',
'APPSERVER-INFO',
'APPSERVER-PASSWORD',
'APPSERVER-USERID',
'ARRAY-MESSAGE',
'AS',
'ASC',
'ASCENDING',
'ASCE',
'ASCEN',
'ASCEND',
'ASCENDI',
'ASCENDIN',
'ASK-OVERWRITE',
'ASSEMBLY',
'ASSIGN',
'ASYNCHRONOUS',
'ASYNC-REQUEST-COUNT',
'ASYNC-REQUEST-HANDLE',
'AT',
'ATTACHED-PAIRLIST',
'ATTR-SPACE',
'ATTR',
'ATTRI',
'ATTRIB',
'ATTRIBU',
'ATTRIBUT',
'AUDIT-CONTROL',
'AUDIT-ENABLED',
'AUDIT-EVENT-CONTEXT',
'AUDIT-POLICY',
'AUTHENTICATION-FAILED',
'AUTHORIZATION',
'AUTO-COMPLETION',
'AUTO-COMP',
'AUTO-COMPL',
'AUTO-COMPLE',
'AUTO-COMPLET',
'AUTO-COMPLETI',
'AUTO-COMPLETIO',
'AUTO-ENDKEY',
'AUTO-END-KEY',
'AUTO-GO',
'AUTO-INDENT',
'AUTO-IND',
'AUTO-INDE',
'AUTO-INDEN',
'AUTOMATIC',
'AUTO-RESIZE',
'AUTO-RETURN',
'AUTO-RET',
'AUTO-RETU',
'AUTO-RETUR',
'AUTO-SYNCHRONIZE',
'AUTO-ZAP',
'AUTO-Z',
'AUTO-ZA',
'AVAILABLE',
'AVAIL',
'AVAILA',
'AVAILAB',
'AVAILABL',
'AVAILABLE-FORMATS',
'AVERAGE',
'AVE',
'AVER',
'AVERA',
'AVERAG',
'AVG',
'BACKGROUND',
'BACK',
'BACKG',
'BACKGR',
'BACKGRO',
'BACKGROU',
'BACKGROUN',
'BACKWARDS',
'BACKWARD',
'BASE64-DECODE',
'BASE64-ENCODE',
'BASE-ADE',
'BASE-KEY',
'BATCH-MODE',
'BATCH',
'BATCH-',
'BATCH-M',
'BATCH-MO',
'BATCH-MOD',
'BATCH-SIZE',
'BEFORE-HIDE',
'BEFORE-H',
'BEFORE-HI',
'BEFORE-HID',
'BEGIN-EVENT-GROUP',
'BEGINS',
'BELL',
'BETWEEN',
'BGCOLOR',
'BGC',
'BGCO',
'BGCOL',
'BGCOLO',
'BIG-ENDIAN',
'BINARY',
'BIND',
'BIND-WHERE',
'BLANK',
'BLOCK-ITERATION-DISPLAY',
'BORDER-BOTTOM-CHARS',
'BORDER-B',
'BORDER-BO',
'BORDER-BOT',
'BORDER-BOTT',
'BORDER-BOTTO',
'BORDER-BOTTOM-PIXELS',
'BORDER-BOTTOM-P',
'BORDER-BOTTOM-PI',
'BORDER-BOTTOM-PIX',
'BORDER-BOTTOM-PIXE',
'BORDER-BOTTOM-PIXEL',
'BORDER-LEFT-CHARS',
'BORDER-L',
'BORDER-LE',
'BORDER-LEF',
'BORDER-LEFT',
'BORDER-LEFT-',
'BORDER-LEFT-C',
'BORDER-LEFT-CH',
'BORDER-LEFT-CHA',
'BORDER-LEFT-CHAR',
'BORDER-LEFT-PIXELS',
'BORDER-LEFT-P',
'BORDER-LEFT-PI',
'BORDER-LEFT-PIX',
'BORDER-LEFT-PIXE',
'BORDER-LEFT-PIXEL',
'BORDER-RIGHT-CHARS',
'BORDER-R',
'BORDER-RI',
'BORDER-RIG',
'BORDER-RIGH',
'BORDER-RIGHT',
'BORDER-RIGHT-',
'BORDER-RIGHT-C',
'BORDER-RIGHT-CH',
'BORDER-RIGHT-CHA',
'BORDER-RIGHT-CHAR',
'BORDER-RIGHT-PIXELS',
'BORDER-RIGHT-P',
'BORDER-RIGHT-PI',
'BORDER-RIGHT-PIX',
'BORDER-RIGHT-PIXE',
'BORDER-RIGHT-PIXEL',
'BORDER-TOP-CHARS',
'BORDER-T',
'BORDER-TO',
'BORDER-TOP',
'BORDER-TOP-',
'BORDER-TOP-C',
'BORDER-TOP-CH',
'BORDER-TOP-CHA',
'BORDER-TOP-CHAR',
'BORDER-TOP-PIXELS',
'BORDER-TOP-P',
'BORDER-TOP-PI',
'BORDER-TOP-PIX',
'BORDER-TOP-PIXE',
'BORDER-TOP-PIXEL',
'BOX',
'BOX-SELECTABLE',
'BOX-SELECT',
'BOX-SELECTA',
'BOX-SELECTAB',
'BOX-SELECTABL',
'BREAK',
'BROWSE',
'BUFFER',
'BUFFER-CHARS',
'BUFFER-COMPARE',
'BUFFER-COPY',
'BUFFER-CREATE',
'BUFFER-DELETE',
'BUFFER-FIELD',
'BUFFER-HANDLE',
'BUFFER-LINES',
'BUFFER-NAME',
'BUFFER-RELEASE',
'BUFFER-VALUE',
'BUTTON',
'BUTTONS',
'BY',
'BY-POINTER',
'BY-VARIANT-POINTER',
'CACHE',
'CACHE-SIZE',
'CALL',
'CALL-NAME',
'CALL-TYPE',
'CANCEL-BREAK',
'CANCEL-BUTTON',
'CAN-CREATE',
'CAN-DELETE',
'CAN-DO',
'CAN-FIND',
'CAN-QUERY',
'CAN-READ',
'CAN-SET',
'CAN-WRITE',
'CAPS',
'CAREFUL-PAINT',
'CASE',
'CASE-SENSITIVE',
'CASE-SEN',
'CASE-SENS',
'CASE-SENSI',
'CASE-SENSIT',
'CASE-SENSITI',
'CASE-SENSITIV',
'CAST',
'CATCH',
'CDECL',
'CENTERED',
'CENTER',
'CENTERE',
'CHAINED',
'CHARACTER_LENGTH',
'CHARSET',
'CHECK',
'CHECKED',
'CHOOSE',
'CHR',
'CLASS',
'CLASS-TYPE',
'CLEAR',
'CLEAR-APPL-CONTEXT',
'CLEAR-LOG',
'CLEAR-SELECTION',
'CLEAR-SELECT',
'CLEAR-SELECTI',
'CLEAR-SELECTIO',
'CLEAR-SORT-ARROWS',
'CLEAR-SORT-ARROW',
'CLIENT-CONNECTION-ID',
'CLIENT-PRINCIPAL',
'CLIENT-TTY',
'CLIENT-TYPE',
'CLIENT-WORKSTATION',
'CLIPBOARD',
'CLOSE',
'CLOSE-LOG',
'CODE',
'CODEBASE-LOCATOR',
'CODEPAGE',
'CODEPAGE-CONVERT',
'COLLATE',
'COL-OF',
'COLON',
'COLON-ALIGNED',
'COLON-ALIGN',
'COLON-ALIGNE',
'COLOR',
'COLOR-TABLE',
'COLUMN',
'COL',
'COLU',
'COLUM',
'COLUMN-BGCOLOR',
'COLUMN-DCOLOR',
'COLUMN-FGCOLOR',
'COLUMN-FONT',
'COLUMN-LABEL',
'COLUMN-LAB',
'COLUMN-LABE',
'COLUMN-MOVABLE',
'COLUMN-OF',
'COLUMN-PFCOLOR',
'COLUMN-READ-ONLY',
'COLUMN-RESIZABLE',
'COLUMNS',
'COLUMN-SCROLLING',
'COMBO-BOX',
'COMMAND',
'COMPARES',
'COMPILE',
'COMPILER',
'COMPLETE',
'COM-SELF',
'CONFIG-NAME',
'CONNECT',
'CONNECTED',
'CONSTRUCTOR',
'CONTAINS',
'CONTENTS',
'CONTEXT',
'CONTEXT-HELP',
'CONTEXT-HELP-FILE',
'CONTEXT-HELP-ID',
'CONTEXT-POPUP',
'CONTROL',
'CONTROL-BOX',
'CONTROL-FRAME',
'CONVERT',
'CONVERT-3D-COLORS',
'CONVERT-TO-OFFSET',
'CONVERT-TO-OFFS',
'CONVERT-TO-OFFSE',
'COPY-DATASET',
'COPY-LOB',
'COPY-SAX-ATTRIBUTES',
'COPY-TEMP-TABLE',
'COUNT',
'COUNT-OF',
'CPCASE',
'CPCOLL',
'CPINTERNAL',
'CPLOG',
'CPPRINT',
'CPRCODEIN',
'CPRCODEOUT',
'CPSTREAM',
'CPTERM',
'CRC-VALUE',
'CREATE',
'CREATE-LIKE',
'CREATE-LIKE-SEQUENTIAL',
'CREATE-NODE-NAMESPACE',
'CREATE-RESULT-LIST-ENTRY',
'CREATE-TEST-FILE',
'CURRENT',
'CURRENT_DATE',
'CURRENT-CHANGED',
'CURRENT-COLUMN',
'CURRENT-ENVIRONMENT',
'CURRENT-ENV',
'CURRENT-ENVI',
'CURRENT-ENVIR',
'CURRENT-ENVIRO',
'CURRENT-ENVIRON',
'CURRENT-ENVIRONM',
'CURRENT-ENVIRONME',
'CURRENT-ENVIRONMEN',
'CURRENT-ITERATION',
'CURRENT-LANGUAGE',
'CURRENT-LANG',
'CURRENT-LANGU',
'CURRENT-LANGUA',
'CURRENT-LANGUAG',
'CURRENT-QUERY',
'CURRENT-RESULT-ROW',
'CURRENT-ROW-MODIFIED',
'CURRENT-VALUE',
'CURRENT-WINDOW',
'CURSOR',
'CURS',
'CURSO',
'CURSOR-CHAR',
'CURSOR-LINE',
'CURSOR-OFFSET',
'DATABASE',
'DATA-BIND',
'DATA-ENTRY-RETURN',
'DATA-ENTRY-RET',
'DATA-ENTRY-RETU',
'DATA-ENTRY-RETUR',
'DATA-RELATION',
'DATA-REL',
'DATA-RELA',
'DATA-RELAT',
'DATA-RELATI',
'DATA-RELATIO',
'DATASERVERS',
'DATASET',
'DATASET-HANDLE',
'DATA-SOURCE',
'DATA-SOURCE-COMPLETE-MAP',
'DATA-SOURCE-MODIFIED',
'DATA-SOURCE-ROWID',
'DATA-TYPE',
'DATA-T',
'DATA-TY',
'DATA-TYP',
'DATE-FORMAT',
'DATE-F',
'DATE-FO',
'DATE-FOR',
'DATE-FORM',
'DATE-FORMA',
'DAY',
'DBCODEPAGE',
'DBCOLLATION',
'DBNAME',
'DBPARAM',
'DB-REFERENCES',
'DBRESTRICTIONS',
'DBREST',
'DBRESTR',
'DBRESTRI',
'DBRESTRIC',
'DBRESTRICT',
'DBRESTRICTI',
'DBRESTRICTIO',
'DBRESTRICTION',
'DBTASKID',
'DBTYPE',
'DBVERSION',
'DBVERS',
'DBVERSI',
'DBVERSIO',
'DCOLOR',
'DDE',
'DDE-ERROR',
'DDE-ID',
'DDE-I',
'DDE-ITEM',
'DDE-NAME',
'DDE-TOPIC',
'DEBLANK',
'DEBUG',
'DEBU',
'DEBUG-ALERT',
'DEBUGGER',
'DEBUG-LIST',
'DECIMALS',
'DECLARE',
'DECLARE-NAMESPACE',
'DECRYPT',
'DEFAULT',
'DEFAULT-BUFFER-HANDLE',
'DEFAULT-BUTTON',
'DEFAUT-B',
'DEFAUT-BU',
'DEFAUT-BUT',
'DEFAUT-BUTT',
'DEFAUT-BUTTO',
'DEFAULT-COMMIT',
'DEFAULT-EXTENSION',
'DEFAULT-EX',
'DEFAULT-EXT',
'DEFAULT-EXTE',
'DEFAULT-EXTEN',
'DEFAULT-EXTENS',
'DEFAULT-EXTENSI',
'DEFAULT-EXTENSIO',
'DEFAULT-NOXLATE',
'DEFAULT-NOXL',
'DEFAULT-NOXLA',
'DEFAULT-NOXLAT',
'DEFAULT-VALUE',
'DEFAULT-WINDOW',
'DEFINED',
'DEFINE-USER-EVENT-MANAGER',
'DELETE',
'DEL',
'DELE',
'DELET',
'DELETE-CHARACTER',
'DELETE-CHAR',
'DELETE-CHARA',
'DELETE-CHARAC',
'DELETE-CHARACT',
'DELETE-CHARACTE',
'DELETE-CURRENT-ROW',
'DELETE-LINE',
'DELETE-RESULT-LIST-ENTRY',
'DELETE-SELECTED-ROW',
'DELETE-SELECTED-ROWS',
'DELIMITER',
'DESC',
'DESCENDING',
'DESCE',
'DESCEN',
'DESCEND',
'DESCENDI',
'DESCENDIN',
'DESELECT-FOCUSED-ROW',
'DESELECTION',
'DESELECT-ROWS',
'DESELECT-SELECTED-ROW',
'DESTRUCTOR',
'DIALOG-BOX',
'DICTIONARY',
'DICT',
'DICTI',
'DICTIO',
'DICTION',
'DICTIONA',
'DICTIONAR',
'DIR',
'DISABLE',
'DISABLE-AUTO-ZAP',
'DISABLED',
'DISABLE-DUMP-TRIGGERS',
'DISABLE-LOAD-TRIGGERS',
'DISCONNECT',
'DISCON',
'DISCONN',
'DISCONNE',
'DISCONNEC',
'DISP',
'DISPLAY',
'DISPL',
'DISPLA',
'DISPLAY-MESSAGE',
'DISPLAY-TYPE',
'DISPLAY-T',
'DISPLAY-TY',
'DISPLAY-TYP',
'DISTINCT',
'DO',
'DOMAIN-DESCRIPTION',
'DOMAIN-NAME',
'DOMAIN-TYPE',
'DOS',
'DOUBLE',
'DOWN',
'DRAG-ENABLED',
'DROP',
'DROP-DOWN',
'DROP-DOWN-LIST',
'DROP-FILE-NOTIFY',
'DROP-TARGET',
'DUMP',
'DYNAMIC',
'DYNAMIC-FUNCTION',
'EACH',
'ECHO',
'EDGE-CHARS',
'EDGE',
'EDGE-',
'EDGE-C',
'EDGE-CH',
'EDGE-CHA',
'EDGE-CHAR',
'EDGE-PIXELS',
'EDGE-P',
'EDGE-PI',
'EDGE-PIX',
'EDGE-PIXE',
'EDGE-PIXEL',
'EDIT-CAN-PASTE',
'EDIT-CAN-UNDO',
'EDIT-CLEAR',
'EDIT-COPY',
'EDIT-CUT',
'EDITING',
'EDITOR',
'EDIT-PASTE',
'EDIT-UNDO',
'ELSE',
'EMPTY',
'EMPTY-TEMP-TABLE',
'ENABLE',
'ENABLED-FIELDS',
'ENCODE',
'ENCRYPT',
'ENCRYPT-AUDIT-MAC-KEY',
'ENCRYPTION-SALT',
'END',
'END-DOCUMENT',
'END-ELEMENT',
'END-EVENT-GROUP',
'END-FILE-DROP',
'ENDKEY',
'END-KEY',
'END-MOVE',
'END-RESIZE',
'END-ROW-RESIZE',
'END-USER-PROMPT',
'ENTERED',
'ENTRY',
'EQ',
'ERROR',
'ERROR-COLUMN',
'ERROR-COL',
'ERROR-COLU',
'ERROR-COLUM',
'ERROR-ROW',
'ERROR-STACK-TRACE',
'ERROR-STATUS',
'ERROR-STAT',
'ERROR-STATU',
'ESCAPE',
'ETIME',
'EVENT-GROUP-ID',
'EVENT-PROCEDURE',
'EVENT-PROCEDURE-CONTEXT',
'EVENTS',
'EVENT',
'EVENT-TYPE',
'EVENT-T',
'EVENT-TY',
'EVENT-TYP',
'EXCEPT',
'EXCLUSIVE-ID',
'EXCLUSIVE-LOCK',
'EXCLUSIVE',
'EXCLUSIVE-',
'EXCLUSIVE-L',
'EXCLUSIVE-LO',
'EXCLUSIVE-LOC',
'EXCLUSIVE-WEB-USER',
'EXECUTE',
'EXISTS',
'EXP',
'EXPAND',
'EXPANDABLE',
'EXPLICIT',
'EXPORT',
'EXPORT-PRINCIPAL',
'EXTENDED',
'EXTENT',
'EXTERNAL',
'FALSE',
'FETCH',
'FETCH-SELECTED-ROW',
'FGCOLOR',
'FGC',
'FGCO',
'FGCOL',
'FGCOLO',
'FIELD',
'FIELDS',
'FILE',
'FILE-CREATE-DATE',
'FILE-CREATE-TIME',
'FILE-INFORMATION',
'FILE-INFO',
'FILE-INFOR',
'FILE-INFORM',
'FILE-INFORMA',
'FILE-INFORMAT',
'FILE-INFORMATI',
'FILE-INFORMATIO',
'FILE-MOD-DATE',
'FILE-MOD-TIME',
'FILENAME',
'FILE-NAME',
'FILE-OFFSET',
'FILE-OFF',
'FILE-OFFS',
'FILE-OFFSE',
'FILE-SIZE',
'FILE-TYPE',
'FILL',
'FILLED',
'FILL-IN',
'FILTERS',
'FINAL',
'FINALLY',
'FIND',
'FIND-BY-ROWID',
'FIND-CASE-SENSITIVE',
'FIND-CURRENT',
'FINDER',
'FIND-FIRST',
'FIND-GLOBAL',
'FIND-LAST',
'FIND-NEXT-OCCURRENCE',
'FIND-PREV-OCCURRENCE',
'FIND-SELECT',
'FIND-UNIQUE',
'FIND-WRAP-AROUND',
'FIRST',
'FIRST-ASYNCH-REQUEST',
'FIRST-CHILD',
'FIRST-COLUMN',
'FIRST-FORM',
'FIRST-OBJECT',
'FIRST-OF',
'FIRST-PROCEDURE',
'FIRST-PROC',
'FIRST-PROCE',
'FIRST-PROCED',
'FIRST-PROCEDU',
'FIRST-PROCEDUR',
'FIRST-SERVER',
'FIRST-TAB-ITEM',
'FIRST-TAB-I',
'FIRST-TAB-IT',
'FIRST-TAB-ITE',
'FIT-LAST-COLUMN',
'FIXED-ONLY',
'FLAT-BUTTON',
'FLOAT',
'FOCUS',
'FOCUSED-ROW',
'FOCUSED-ROW-SELECTED',
'FONT',
'FONT-TABLE',
'FOR',
'FORCE-FILE',
'FOREGROUND',
'FORE',
'FOREG',
'FOREGR',
'FOREGRO',
'FOREGROU',
'FOREGROUN',
'FORM',
'FORMAT',
'FORMA',
'FORMATTED',
'FORMATTE',
'FORM-LONG-INPUT',
'FORWARD',
'FORWARDS',
'FRAGMENT',
'FRAGMEN',
'FRAME',
'FRAM',
'FRAME-COL',
'FRAME-DB',
'FRAME-DOWN',
'FRAME-FIELD',
'FRAME-FILE',
'FRAME-INDEX',
'FRAME-INDE',
'FRAME-LINE',
'FRAME-NAME',
'FRAME-ROW',
'FRAME-SPACING',
'FRAME-SPA',
'FRAME-SPAC',
'FRAME-SPACI',
'FRAME-SPACIN',
'FRAME-VALUE',
'FRAME-VAL',
'FRAME-VALU',
'FRAME-X',
'FRAME-Y',
'FREQUENCY',
'FROM',
'FROM-CHARS',
'FROM-C',
'FROM-CH',
'FROM-CHA',
'FROM-CHAR',
'FROM-CURRENT',
'FROM-CUR',
'FROM-CURR',
'FROM-CURRE',
'FROM-CURREN',
'FROM-PIXELS',
'FROM-P',
'FROM-PI',
'FROM-PIX',
'FROM-PIXE',
'FROM-PIXEL',
'FULL-HEIGHT-CHARS',
'FULL-HEIGHT',
'FULL-HEIGHT-',
'FULL-HEIGHT-C',
'FULL-HEIGHT-CH',
'FULL-HEIGHT-CHA',
'FULL-HEIGHT-CHAR',
'FULL-HEIGHT-PIXELS',
'FULL-HEIGHT-P',
'FULL-HEIGHT-PI',
'FULL-HEIGHT-PIX',
'FULL-HEIGHT-PIXE',
'FULL-HEIGHT-PIXEL',
'FULL-PATHNAME',
'FULL-PATHN',
'FULL-PATHNA',
'FULL-PATHNAM',
'FULL-WIDTH-CHARS',
'FULL-WIDTH',
'FULL-WIDTH-',
'FULL-WIDTH-C',
'FULL-WIDTH-CH',
'FULL-WIDTH-CHA',
'FULL-WIDTH-CHAR',
'FULL-WIDTH-PIXELS',
'FULL-WIDTH-P',
'FULL-WIDTH-PI',
'FULL-WIDTH-PIX',
'FULL-WIDTH-PIXE',
'FULL-WIDTH-PIXEL',
'FUNCTION',
'FUNCTION-CALL-TYPE',
'GATEWAYS',
'GATEWAY',
'GE',
'GENERATE-MD5',
'GENERATE-PBE-KEY',
'GENERATE-PBE-SALT',
'GENERATE-RANDOM-KEY',
'GENERATE-UUID',
'GET',
'GET-ATTR-CALL-TYPE',
'GET-ATTRIBUTE-NODE',
'GET-BINARY-DATA',
'GET-BLUE-VALUE',
'GET-BLUE',
'GET-BLUE-',
'GET-BLUE-V',
'GET-BLUE-VA',
'GET-BLUE-VAL',
'GET-BLUE-VALU',
'GET-BROWSE-COLUMN',
'GET-BUFFER-HANDLEGETBYTE',
'GET-BYTE',
'GET-CALLBACK-PROC-CONTEXT',
'GET-CALLBACK-PROC-NAME',
'GET-CGI-LIST',
'GET-CGI-LONG-VALUE',
'GET-CGI-VALUE',
'GET-CODEPAGES',
'GET-COLLATIONS',
'GET-CONFIG-VALUE',
'GET-CURRENT',
'GET-DOUBLE',
'GET-DROPPED-FILE',
'GET-DYNAMIC',
'GET-ERROR-COLUMN',
'GET-ERROR-ROW',
'GET-FILE',
'GET-FILE-NAME',
'GET-FILE-OFFSET',
'GET-FILE-OFFSE',
'GET-FIRST',
'GET-FLOAT',
'GET-GREEN-VALUE',
'GET-GREEN',
'GET-GREEN-',
'GET-GREEN-V',
'GET-GREEN-VA',
'GET-GREEN-VAL',
'GET-GREEN-VALU',
'GET-INDEX-BY-NAMESPACE-NAME',
'GET-INDEX-BY-QNAME',
'GET-INT64',
'GET-ITERATION',
'GET-KEY-VALUE',
'GET-KEY-VAL',
'GET-KEY-VALU',
'GET-LAST',
'GET-LOCALNAME-BY-INDEX',
'GET-LONG',
'GET-MESSAGE',
'GET-NEXT',
'GET-NUMBER',
'GET-POINTER-VALUE',
'GET-PREV',
'GET-PRINTERS',
'GET-PROPERTY',
'GET-QNAME-BY-INDEX',
'GET-RED-VALUE',
'GET-RED',
'GET-RED-',
'GET-RED-V',
'GET-RED-VA',
'GET-RED-VAL',
'GET-RED-VALU',
'GET-REPOSITIONED-ROW',
'GET-RGB-VALUE',
'GET-SELECTED-WIDGET',
'GET-SELECTED',
'GET-SELECTED-',
'GET-SELECTED-W',
'GET-SELECTED-WI',
'GET-SELECTED-WID',
'GET-SELECTED-WIDG',
'GET-SELECTED-WIDGE',
'GET-SHORT',
'GET-SIGNATURE',
'GET-SIZE',
'GET-STRING',
'GET-TAB-ITEM',
'GET-TEXT-HEIGHT-CHARS',
'GET-TEXT-HEIGHT',
'GET-TEXT-HEIGHT-',
'GET-TEXT-HEIGHT-C',
'GET-TEXT-HEIGHT-CH',
'GET-TEXT-HEIGHT-CHA',
'GET-TEXT-HEIGHT-CHAR',
'GET-TEXT-HEIGHT-PIXELS',
'GET-TEXT-HEIGHT-P',
'GET-TEXT-HEIGHT-PI',
'GET-TEXT-HEIGHT-PIX',
'GET-TEXT-HEIGHT-PIXE',
'GET-TEXT-HEIGHT-PIXEL',
'GET-TEXT-WIDTH-CHARS',
'GET-TEXT-WIDTH',
'GET-TEXT-WIDTH-',
'GET-TEXT-WIDTH-C',
'GET-TEXT-WIDTH-CH',
'GET-TEXT-WIDTH-CHA',
'GET-TEXT-WIDTH-CHAR',
'GET-TEXT-WIDTH-PIXELS',
'GET-TEXT-WIDTH-P',
'GET-TEXT-WIDTH-PI',
'GET-TEXT-WIDTH-PIX',
'GET-TEXT-WIDTH-PIXE',
'GET-TEXT-WIDTH-PIXEL',
'GET-TYPE-BY-INDEX',
'GET-TYPE-BY-NAMESPACE-NAME',
'GET-TYPE-BY-QNAME',
'GET-UNSIGNED-LONG',
'GET-UNSIGNED-SHORT',
'GET-URI-BY-INDEX',
'GET-VALUE-BY-INDEX',
'GET-VALUE-BY-NAMESPACE-NAME',
'GET-VALUE-BY-QNAME',
'GET-WAIT-STATE',
'GLOBAL',
'GO-ON',
'GO-PENDING',
'GO-PEND',
'GO-PENDI',
'GO-PENDIN',
'GRANT',
'GRAPHIC-EDGE',
'GRAPHIC-E',
'GRAPHIC-ED',
'GRAPHIC-EDG',
'GRID-FACTOR-HORIZONTAL',
'GRID-FACTOR-H',
'GRID-FACTOR-HO',
'GRID-FACTOR-HOR',
'GRID-FACTOR-HORI',
'GRID-FACTOR-HORIZ',
'GRID-FACTOR-HORIZO',
'GRID-FACTOR-HORIZON',
'GRID-FACTOR-HORIZONT',
'GRID-FACTOR-HORIZONTA',
'GRID-FACTOR-VERTICAL',
'GRID-FACTOR-V',
'GRID-FACTOR-VE',
'GRID-FACTOR-VER',
'GRID-FACTOR-VERT',
'GRID-FACTOR-VERTI',
'GRID-FACTOR-VERTIC',
'GRID-FACTOR-VERTICA',
'GRID-SNAP',
'GRID-UNIT-HEIGHT-CHARS',
'GRID-UNIT-HEIGHT',
'GRID-UNIT-HEIGHT-',
'GRID-UNIT-HEIGHT-C',
'GRID-UNIT-HEIGHT-CH',
'GRID-UNIT-HEIGHT-CHA',
'GRID-UNIT-HEIGHT-PIXELS',
'GRID-UNIT-HEIGHT-P',
'GRID-UNIT-HEIGHT-PI',
'GRID-UNIT-HEIGHT-PIX',
'GRID-UNIT-HEIGHT-PIXE',
'GRID-UNIT-HEIGHT-PIXEL',
'GRID-UNIT-WIDTH-CHARS',
'GRID-UNIT-WIDTH',
'GRID-UNIT-WIDTH-',
'GRID-UNIT-WIDTH-C',
'GRID-UNIT-WIDTH-CH',
'GRID-UNIT-WIDTH-CHA',
'GRID-UNIT-WIDTH-CHAR',
'GRID-UNIT-WIDTH-PIXELS',
'GRID-UNIT-WIDTH-P',
'GRID-UNIT-WIDTH-PI',
'GRID-UNIT-WIDTH-PIX',
'GRID-UNIT-WIDTH-PIXE',
'GRID-UNIT-WIDTH-PIXEL',
'GRID-VISIBLE',
'GROUP',
'GT',
'GUID',
'HANDLER',
'HAS-RECORDS',
'HAVING',
'HEADER',
'HEIGHT-CHARS',
'HEIGHT',
'HEIGHT-',
'HEIGHT-C',
'HEIGHT-CH',
'HEIGHT-CHA',
'HEIGHT-CHAR',
'HEIGHT-PIXELS',
'HEIGHT-P',
'HEIGHT-PI',
'HEIGHT-PIX',
'HEIGHT-PIXE',
'HEIGHT-PIXEL',
'HELP',
'HEX-DECODE',
'HEX-ENCODE',
'HIDDEN',
'HIDE',
'HORIZONTAL',
'HORI',
'HORIZ',
'HORIZO',
'HORIZON',
'HORIZONT',
'HORIZONTA',
'HOST-BYTE-ORDER',
'HTML-CHARSET',
'HTML-END-OF-LINE',
'HTML-END-OF-PAGE',
'HTML-FRAME-BEGIN',
'HTML-FRAME-END',
'HTML-HEADER-BEGIN',
'HTML-HEADER-END',
'HTML-TITLE-BEGIN',
'HTML-TITLE-END',
'HWND',
'ICON',
'IF',
'IMAGE',
'IMAGE-DOWN',
'IMAGE-INSENSITIVE',
'IMAGE-SIZE',
'IMAGE-SIZE-CHARS',
'IMAGE-SIZE-C',
'IMAGE-SIZE-CH',
'IMAGE-SIZE-CHA',
'IMAGE-SIZE-CHAR',
'IMAGE-SIZE-PIXELS',
'IMAGE-SIZE-P',
'IMAGE-SIZE-PI',
'IMAGE-SIZE-PIX',
'IMAGE-SIZE-PIXE',
'IMAGE-SIZE-PIXEL',
'IMAGE-UP',
'IMMEDIATE-DISPLAY',
'IMPLEMENTS',
'IMPORT',
'IMPORT-PRINCIPAL',
'IN',
'INCREMENT-EXCLUSIVE-ID',
'INDEX',
'INDEXED-REPOSITION',
'INDEX-HINT',
'INDEX-INFORMATION',
'INDICATOR',
'INFORMATION',
'INFO',
'INFOR',
'INFORM',
'INFORMA',
'INFORMAT',
'INFORMATI',
'INFORMATIO',
'IN-HANDLE',
'INHERIT-BGCOLOR',
'INHERIT-BGC',
'INHERIT-BGCO',
'INHERIT-BGCOL',
'INHERIT-BGCOLO',
'INHERIT-FGCOLOR',
'INHERIT-FGC',
'INHERIT-FGCO',
'INHERIT-FGCOL',
'INHERIT-FGCOLO',
'INHERITS',
'INITIAL',
'INIT',
'INITI',
'INITIA',
'INITIAL-DIR',
'INITIAL-FILTER',
'INITIALIZE-DOCUMENT-TYPE',
'INITIATE',
'INNER-CHARS',
'INNER-LINES',
'INPUT',
'INPUT-OUTPUT',
'INPUT-O',
'INPUT-OU',
'INPUT-OUT',
'INPUT-OUTP',
'INPUT-OUTPU',
'INPUT-VALUE',
'INSERT',
'INSERT-ATTRIBUTE',
'INSERT-BACKTAB',
'INSERT-B',
'INSERT-BA',
'INSERT-BAC',
'INSERT-BACK',
'INSERT-BACKT',
'INSERT-BACKTA',
'INSERT-FILE',
'INSERT-ROW',
'INSERT-STRING',
'INSERT-TAB',
'INSERT-T',
'INSERT-TA',
'INTERFACE',
'INTERNAL-ENTRIES',
'INTO',
'INVOKE',
'IS',
'IS-ATTR-SPACE',
'IS-ATTR',
'IS-ATTR-',
'IS-ATTR-S',
'IS-ATTR-SP',
'IS-ATTR-SPA',
'IS-ATTR-SPAC',
'IS-CLASS',
'IS-CLAS',
'IS-LEAD-BYTE',
'IS-OPEN',
'IS-PARAMETER-SET',
'IS-ROW-SELECTED',
'IS-SELECTED',
'ITEM',
'ITEMS-PER-ROW',
'JOIN',
'JOIN-BY-SQLDB',
'KBLABEL',
'KEEP-CONNECTION-OPEN',
'KEEP-FRAME-Z-ORDER',
'KEEP-FRAME-Z',
'KEEP-FRAME-Z-',
'KEEP-FRAME-Z-O',
'KEEP-FRAME-Z-OR',
'KEEP-FRAME-Z-ORD',
'KEEP-FRAME-Z-ORDE',
'KEEP-MESSAGES',
'KEEP-SECURITY-CACHE',
'KEEP-TAB-ORDER',
'KEY',
'KEYCODE',
'KEY-CODE',
'KEYFUNCTION',
'KEYFUNC',
'KEYFUNCT',
'KEYFUNCTI',
'KEYFUNCTIO',
'KEY-FUNCTION',
'KEY-FUNC',
'KEY-FUNCT',
'KEY-FUNCTI',
'KEY-FUNCTIO',
'KEYLABEL',
'KEY-LABEL',
'KEYS',
'KEYWORD',
'KEYWORD-ALL',
'LABEL',
'LABEL-BGCOLOR',
'LABEL-BGC',
'LABEL-BGCO',
'LABEL-BGCOL',
'LABEL-BGCOLO',
'LABEL-DCOLOR',
'LABEL-DC',
'LABEL-DCO',
'LABEL-DCOL',
'LABEL-DCOLO',
'LABEL-FGCOLOR',
'LABEL-FGC',
'LABEL-FGCO',
'LABEL-FGCOL',
'LABEL-FGCOLO',
'LABEL-FONT',
'LABEL-PFCOLOR',
'LABEL-PFC',
'LABEL-PFCO',
'LABEL-PFCOL',
'LABEL-PFCOLO',
'LABELS',
'LANDSCAPE',
'LANGUAGES',
'LANGUAGE',
'LARGE',
'LARGE-TO-SMALL',
'LAST',
'LAST-ASYNCH-REQUEST',
'LAST-BATCH',
'LAST-CHILD',
'LAST-EVENT',
'LAST-EVEN',
'LAST-FORM',
'LASTKEY',
'LAST-KEY',
'LAST-OBJECT',
'LAST-OF',
'LAST-PROCEDURE',
'LAST-PROCE',
'LAST-PROCED',
'LAST-PROCEDU',
'LAST-PROCEDUR',
'LAST-SERVER',
'LAST-TAB-ITEM',
'LAST-TAB-I',
'LAST-TAB-IT',
'LAST-TAB-ITE',
'LC',
'LDBNAME',
'LE',
'LEAVE',
'LEFT-ALIGNED',
'LEFT-ALIGN',
'LEFT-ALIGNE',
'LEFT-TRIM',
'LENGTH',
'LIBRARY',
'LIKE',
'LIKE-SEQUENTIAL',
'LINE',
'LINE-COUNTER',
'LINE-COUNT',
'LINE-COUNTE',
'LIST-EVENTS',
'LISTING',
'LISTI',
'LISTIN',
'LIST-ITEM-PAIRS',
'LIST-ITEMS',
'LIST-PROPERTY-NAMES',
'LIST-QUERY-ATTRS',
'LIST-SET-ATTRS',
'LIST-WIDGETS',
'LITERAL-QUESTION',
'LITTLE-ENDIAN',
'LOAD',
'LOAD-DOMAINS',
'LOAD-ICON',
'LOAD-IMAGE',
'LOAD-IMAGE-DOWN',
'LOAD-IMAGE-INSENSITIVE',
'LOAD-IMAGE-UP',
'LOAD-MOUSE-POINTER',
'LOAD-MOUSE-P',
'LOAD-MOUSE-PO',
'LOAD-MOUSE-POI',
'LOAD-MOUSE-POIN',
'LOAD-MOUSE-POINT',
'LOAD-MOUSE-POINTE',
'LOAD-PICTURE',
'LOAD-SMALL-ICON',
'LOCAL-NAME',
'LOCATOR-COLUMN-NUMBER',
'LOCATOR-LINE-NUMBER',
'LOCATOR-PUBLIC-ID',
'LOCATOR-SYSTEM-ID',
'LOCATOR-TYPE',
'LOCKED',
'LOCK-REGISTRATION',
'LOG',
'LOG-AUDIT-EVENT',
'LOGIN-EXPIRATION-TIMESTAMP',
'LOGIN-HOST',
'LOGIN-STATE',
'LOG-MANAGER',
'LOGOUT',
'LOOKAHEAD',
'LOOKUP',
'LT',
'MACHINE-CLASS',
'MANDATORY',
'MANUAL-HIGHLIGHT',
'MAP',
'MARGIN-EXTRA',
'MARGIN-HEIGHT-CHARS',
'MARGIN-HEIGHT',
'MARGIN-HEIGHT-',
'MARGIN-HEIGHT-C',
'MARGIN-HEIGHT-CH',
'MARGIN-HEIGHT-CHA',
'MARGIN-HEIGHT-CHAR',
'MARGIN-HEIGHT-PIXELS',
'MARGIN-HEIGHT-P',
'MARGIN-HEIGHT-PI',
'MARGIN-HEIGHT-PIX',
'MARGIN-HEIGHT-PIXE',
'MARGIN-HEIGHT-PIXEL',
'MARGIN-WIDTH-CHARS',
'MARGIN-WIDTH',
'MARGIN-WIDTH-',
'MARGIN-WIDTH-C',
'MARGIN-WIDTH-CH',
'MARGIN-WIDTH-CHA',
'MARGIN-WIDTH-CHAR',
'MARGIN-WIDTH-PIXELS',
'MARGIN-WIDTH-P',
'MARGIN-WIDTH-PI',
'MARGIN-WIDTH-PIX',
'MARGIN-WIDTH-PIXE',
'MARGIN-WIDTH-PIXEL',
'MARK-NEW',
'MARK-ROW-STATE',
'MATCHES',
'MAX-BUTTON',
'MAX-CHARS',
'MAX-DATA-GUESS',
'MAX-HEIGHT',
'MAX-HEIGHT-CHARS',
'MAX-HEIGHT-C',
'MAX-HEIGHT-CH',
'MAX-HEIGHT-CHA',
'MAX-HEIGHT-CHAR',
'MAX-HEIGHT-PIXELS',
'MAX-HEIGHT-P',
'MAX-HEIGHT-PI',
'MAX-HEIGHT-PIX',
'MAX-HEIGHT-PIXE',
'MAX-HEIGHT-PIXEL',
'MAXIMIZE',
'MAXIMUM',
'MAX',
'MAXI',
'MAXIM',
'MAXIMU',
'MAXIMUM-LEVEL',
'MAX-ROWS',
'MAX-SIZE',
'MAX-VALUE',
'MAX-VAL',
'MAX-VALU',
'MAX-WIDTH-CHARS',
'MAX-WIDTH',
'MAX-WIDTH-',
'MAX-WIDTH-C',
'MAX-WIDTH-CH',
'MAX-WIDTH-CHA',
'MAX-WIDTH-CHAR',
'MAX-WIDTH-PIXELS',
'MAX-WIDTH-P',
'MAX-WIDTH-PI',
'MAX-WIDTH-PIX',
'MAX-WIDTH-PIXE',
'MAX-WIDTH-PIXEL',
'MD5-DIGEST',
'MEMBER',
'MEMPTR-TO-NODE-VALUE',
'MENU',
'MENUBAR',
'MENU-BAR',
'MENU-ITEM',
'MENU-KEY',
'MENU-K',
'MENU-KE',
'MENU-MOUSE',
'MENU-M',
'MENU-MO',
'MENU-MOU',
'MENU-MOUS',
'MERGE-BY-FIELD',
'MESSAGE',
'MESSAGE-AREA',
'MESSAGE-AREA-FONT',
'MESSAGE-LINES',
'METHOD',
'MIN-BUTTON',
'MIN-COLUMN-WIDTH-CHARS',
'MIN-COLUMN-WIDTH-C',
'MIN-COLUMN-WIDTH-CH',
'MIN-COLUMN-WIDTH-CHA',
'MIN-COLUMN-WIDTH-CHAR',
'MIN-COLUMN-WIDTH-PIXELS',
'MIN-COLUMN-WIDTH-P',
'MIN-COLUMN-WIDTH-PI',
'MIN-COLUMN-WIDTH-PIX',
'MIN-COLUMN-WIDTH-PIXE',
'MIN-COLUMN-WIDTH-PIXEL',
'MIN-HEIGHT-CHARS',
'MIN-HEIGHT',
'MIN-HEIGHT-',
'MIN-HEIGHT-C',
'MIN-HEIGHT-CH',
'MIN-HEIGHT-CHA',
'MIN-HEIGHT-CHAR',
'MIN-HEIGHT-PIXELS',
'MIN-HEIGHT-P',
'MIN-HEIGHT-PI',
'MIN-HEIGHT-PIX',
'MIN-HEIGHT-PIXE',
'MIN-HEIGHT-PIXEL',
'MINIMUM',
'MIN',
'MINI',
'MINIM',
'MINIMU',
'MIN-SIZE',
'MIN-VALUE',
'MIN-VAL',
'MIN-VALU',
'MIN-WIDTH-CHARS',
'MIN-WIDTH',
'MIN-WIDTH-',
'MIN-WIDTH-C',
'MIN-WIDTH-CH',
'MIN-WIDTH-CHA',
'MIN-WIDTH-CHAR',
'MIN-WIDTH-PIXELS',
'MIN-WIDTH-P',
'MIN-WIDTH-PI',
'MIN-WIDTH-PIX',
'MIN-WIDTH-PIXE',
'MIN-WIDTH-PIXEL',
'MODIFIED',
'MODULO',
'MOD',
'MODU',
'MODUL',
'MONTH',
'MOUSE',
'MOUSE-POINTER',
'MOUSE-P',
'MOUSE-PO',
'MOUSE-POI',
'MOUSE-POIN',
'MOUSE-POINT',
'MOUSE-POINTE',
'MOVABLE',
'MOVE-AFTER-TAB-ITEM',
'MOVE-AFTER',
'MOVE-AFTER-',
'MOVE-AFTER-T',
'MOVE-AFTER-TA',
'MOVE-AFTER-TAB',
'MOVE-AFTER-TAB-',
'MOVE-AFTER-TAB-I',
'MOVE-AFTER-TAB-IT',
'MOVE-AFTER-TAB-ITE',
'MOVE-BEFORE-TAB-ITEM',
'MOVE-BEFOR',
'MOVE-BEFORE',
'MOVE-BEFORE-',
'MOVE-BEFORE-T',
'MOVE-BEFORE-TA',
'MOVE-BEFORE-TAB',
'MOVE-BEFORE-TAB-',
'MOVE-BEFORE-TAB-I',
'MOVE-BEFORE-TAB-IT',
'MOVE-BEFORE-TAB-ITE',
'MOVE-COLUMN',
'MOVE-COL',
'MOVE-COLU',
'MOVE-COLUM',
'MOVE-TO-BOTTOM',
'MOVE-TO-B',
'MOVE-TO-BO',
'MOVE-TO-BOT',
'MOVE-TO-BOTT',
'MOVE-TO-BOTTO',
'MOVE-TO-EOF',
'MOVE-TO-TOP',
'MOVE-TO-T',
'MOVE-TO-TO',
'MPE',
'MULTI-COMPILE',
'MULTIPLE',
'MULTIPLE-KEY',
'MULTITASKING-INTERVAL',
'MUST-EXIST',
'NAME',
'NAMESPACE-PREFIX',
'NAMESPACE-URI',
'NATIVE',
'NE',
'NEEDS-APPSERVER-PROMPT',
'NEEDS-PROMPT',
'NEW',
'NEW-INSTANCE',
'NEW-ROW',
'NEXT',
'NEXT-COLUMN',
'NEXT-PROMPT',
'NEXT-ROWID',
'NEXT-SIBLING',
'NEXT-TAB-ITEM',
'NEXT-TAB-I',
'NEXT-TAB-IT',
'NEXT-TAB-ITE',
'NEXT-VALUE',
'NO',
'NO-APPLY',
'NO-ARRAY-MESSAGE',
'NO-ASSIGN',
'NO-ATTR-LIST',
'NO-ATTR',
'NO-ATTR-',
'NO-ATTR-L',
'NO-ATTR-LI',
'NO-ATTR-LIS',
'NO-ATTR-SPACE',
'NO-ATTR-S',
'NO-ATTR-SP',
'NO-ATTR-SPA',
'NO-ATTR-SPAC',
'NO-AUTO-VALIDATE',
'NO-BIND-WHERE',
'NO-BOX',
'NO-CONSOLE',
'NO-CONVERT',
'NO-CONVERT-3D-COLORS',
'NO-CURRENT-VALUE',
'NO-DEBUG',
'NODE-VALUE-TO-MEMPTR',
'NO-DRAG',
'NO-ECHO',
'NO-EMPTY-SPACE',
'NO-ERROR',
'NO-FILL',
'NO-F',
'NO-FI',
'NO-FIL',
'NO-FOCUS',
'NO-HELP',
'NO-HIDE',
'NO-INDEX-HINT',
'NO-INHERIT-BGCOLOR',
'NO-INHERIT-BGC',
'NO-INHERIT-BGCO',
'NO-INHERIT-FGCOLOR',
'NO-INHERIT-FGC',
'NO-INHERIT-FGCO',
'NO-INHERIT-FGCOL',
'NO-INHERIT-FGCOLO',
'NO-JOIN-BY-SQLDB',
'NO-LABELS',
'NO-LABE',
'NO-LOBS',
'NO-LOCK',
'NO-LOOKAHEAD',
'NO-MAP',
'NO-MESSAGE',
'NO-MES',
'NO-MESS',
'NO-MESSA',
'NO-MESSAG',
'NONAMESPACE-SCHEMA-LOCATION',
'NONE',
'NO-PAUSE',
'NO-PREFETCH',
'NO-PREFE',
'NO-PREFET',
'NO-PREFETC',
'NORMALIZE',
'NO-ROW-MARKERS',
'NO-SCROLLBAR-VERTICAL',
'NO-SEPARATE-CONNECTION',
'NO-SEPARATORS',
'NOT',
'NO-TAB-STOP',
'NOT-ACTIVE',
'NO-UNDERLINE',
'NO-UND',
'NO-UNDE',
'NO-UNDER',
'NO-UNDERL',
'NO-UNDERLI',
'NO-UNDERLIN',
'NO-UNDO',
'NO-VALIDATE',
'NO-VAL',
'NO-VALI',
'NO-VALID',
'NO-VALIDA',
'NO-VALIDAT',
'NOW',
'NO-WAIT',
'NO-WORD-WRAP',
'NULL',
'NUM-ALIASES',
'NUM-ALI',
'NUM-ALIA',
'NUM-ALIAS',
'NUM-ALIASE',
'NUM-BUFFERS',
'NUM-BUTTONS',
'NUM-BUT',
'NUM-BUTT',
'NUM-BUTTO',
'NUM-BUTTON',
'NUM-COLUMNS',
'NUM-COL',
'NUM-COLU',
'NUM-COLUM',
'NUM-COLUMN',
'NUM-COPIES',
'NUM-DBS',
'NUM-DROPPED-FILES',
'NUM-ENTRIES',
'NUMERIC',
'NUMERIC-FORMAT',
'NUMERIC-F',
'NUMERIC-FO',
'NUMERIC-FOR',
'NUMERIC-FORM',
'NUMERIC-FORMA',
'NUM-FIELDS',
'NUM-FORMATS',
'NUM-ITEMS',
'NUM-ITERATIONS',
'NUM-LINES',
'NUM-LOCKED-COLUMNS',
'NUM-LOCKED-COL',
'NUM-LOCKED-COLU',
'NUM-LOCKED-COLUM',
'NUM-LOCKED-COLUMN',
'NUM-MESSAGES',
'NUM-PARAMETERS',
'NUM-REFERENCES',
'NUM-REPLACED',
'NUM-RESULTS',
'NUM-SELECTED-ROWS',
'NUM-SELECTED-WIDGETS',
'NUM-SELECTED',
'NUM-SELECTED-',
'NUM-SELECTED-W',
'NUM-SELECTED-WI',
'NUM-SELECTED-WID',
'NUM-SELECTED-WIDG',
'NUM-SELECTED-WIDGE',
'NUM-SELECTED-WIDGET',
'NUM-TABS',
'NUM-TO-RETAIN',
'NUM-VISIBLE-COLUMNS',
'OCTET-LENGTH',
'OF',
'OFF',
'OK',
'OK-CANCEL',
'OLD',
'ON',
'ON-FRAME-BORDER',
'ON-FRAME',
'ON-FRAME-',
'ON-FRAME-B',
'ON-FRAME-BO',
'ON-FRAME-BOR',
'ON-FRAME-BORD',
'ON-FRAME-BORDE',
'OPEN',
'OPSYS',
'OPTION',
'OR',
'ORDERED-JOIN',
'ORDINAL',
'OS-APPEND',
'OS-COMMAND',
'OS-COPY',
'OS-CREATE-DIR',
'OS-DELETE',
'OS-DIR',
'OS-DRIVES',
'OS-DRIVE',
'OS-ERROR',
'OS-GETENV',
'OS-RENAME',
'OTHERWISE',
'OUTPUT',
'OVERLAY',
'OVERRIDE',
'OWNER',
'PAGE',
'PAGE-BOTTOM',
'PAGE-BOT',
'PAGE-BOTT',
'PAGE-BOTTO',
'PAGED',
'PAGE-NUMBER',
'PAGE-NUM',
'PAGE-NUMB',
'PAGE-NUMBE',
'PAGE-SIZE',
'PAGE-TOP',
'PAGE-WIDTH',
'PAGE-WID',
'PAGE-WIDT',
'PARAMETER',
'PARAM',
'PARAME',
'PARAMET',
'PARAMETE',
'PARENT',
'PARSE-STATUS',
'PARTIAL-KEY',
'PASCAL',
'PASSWORD-FIELD',
'PATHNAME',
'PAUSE',
'PBE-HASH-ALGORITHM',
'PBE-HASH-ALG',
'PBE-HASH-ALGO',
'PBE-HASH-ALGOR',
'PBE-HASH-ALGORI',
'PBE-HASH-ALGORIT',
'PBE-HASH-ALGORITH',
'PBE-KEY-ROUNDS',
'PDBNAME',
'PERSISTENT',
'PERSIST',
'PERSISTE',
'PERSISTEN',
'PERSISTENT-CACHE-DISABLED',
'PFCOLOR',
'PFC',
'PFCO',
'PFCOL',
'PFCOLO',
'PIXELS',
'PIXELS-PER-COLUMN',
'PIXELS-PER-COL',
'PIXELS-PER-COLU',
'PIXELS-PER-COLUM',
'PIXELS-PER-ROW',
'POPUP-MENU',
'POPUP-M',
'POPUP-ME',
'POPUP-MEN',
'POPUP-ONLY',
'POPUP-O',
'POPUP-ON',
'POPUP-ONL',
'PORTRAIT',
'POSITION',
'PRECISION',
'PREFER-DATASET',
'PREPARED',
'PREPARE-STRING',
'PREPROCESS',
'PREPROC',
'PREPROCE',
'PREPROCES',
'PRESELECT',
'PRESEL',
'PRESELE',
'PRESELEC',
'PREV',
'PREV-COLUMN',
'PREV-SIBLING',
'PREV-TAB-ITEM',
'PREV-TAB-I',
'PREV-TAB-IT',
'PREV-TAB-ITE',
'PRIMARY',
'PRINTER',
'PRINTER-CONTROL-HANDLE',
'PRINTER-HDC',
'PRINTER-NAME',
'PRINTER-PORT',
'PRINTER-SETUP',
'PRIVATE',
'PRIVATE-DATA',
'PRIVATE-D',
'PRIVATE-DA',
'PRIVATE-DAT',
'PRIVILEGES',
'PROCEDURE',
'PROCE',
'PROCED',
'PROCEDU',
'PROCEDUR',
'PROCEDURE-CALL-TYPE',
'PROCESS',
'PROC-HANDLE',
'PROC-HA',
'PROC-HAN',
'PROC-HAND',
'PROC-HANDL',
'PROC-STATUS',
'PROC-ST',
'PROC-STA',
'PROC-STAT',
'PROC-STATU',
'proc-text',
'proc-text-buffe',
'PROFILER',
'PROGRAM-NAME',
'PROGRESS',
'PROGRESS-SOURCE',
'PROGRESS-S',
'PROGRESS-SO',
'PROGRESS-SOU',
'PROGRESS-SOUR',
'PROGRESS-SOURC',
'PROMPT',
'PROMPT-FOR',
'PROMPT-F',
'PROMPT-FO',
'PROMSGS',
'PROPATH',
'PROPERTY',
'PROTECTED',
'PROVERSION',
'PROVERS',
'PROVERSI',
'PROVERSIO',
'PROXY',
'PROXY-PASSWORD',
'PROXY-USERID',
'PUBLIC',
'PUBLIC-ID',
'PUBLISH',
'PUBLISHED-EVENTS',
'PUT',
'PUTBYTE',
'PUT-BYTE',
'PUT-DOUBLE',
'PUT-FLOAT',
'PUT-INT64',
'PUT-KEY-VALUE',
'PUT-KEY-VAL',
'PUT-KEY-VALU',
'PUT-LONG',
'PUT-SHORT',
'PUT-STRING',
'PUT-UNSIGNED-LONG',
'QUERY',
'QUERY-CLOSE',
'QUERY-OFF-END',
'QUERY-OPEN',
'QUERY-PREPARE',
'QUERY-TUNING',
'QUESTION',
'QUIT',
'QUOTER',
'RADIO-BUTTONS',
'RADIO-SET',
'RANDOM',
'RAW-TRANSFER',
'RCODE-INFORMATION',
'RCODE-INFO',
'RCODE-INFOR',
'RCODE-INFORM',
'RCODE-INFORMA',
'RCODE-INFORMAT',
'RCODE-INFORMATI',
'RCODE-INFORMATIO',
'READ-AVAILABLE',
'READ-EXACT-NUM',
'READ-FILE',
'READKEY',
'READ-ONLY',
'READ-XML',
'READ-XMLSCHEMA',
'REAL',
'RECORD-LENGTH',
'RECTANGLE',
'RECT',
'RECTA',
'RECTAN',
'RECTANG',
'RECTANGL',
'RECURSIVE',
'REFERENCE-ONLY',
'REFRESH',
'REFRESHABLE',
'REFRESH-AUDIT-POLICY',
'REGISTER-DOMAIN',
'RELEASE',
'REMOTE',
'REMOVE-EVENTS-PROCEDURE',
'REMOVE-SUPER-PROCEDURE',
'REPEAT',
'REPLACE',
'REPLACE-SELECTION-TEXT',
'REPOSITION',
'REPOSITION-BACKWARD',
'REPOSITION-FORWARD',
'REPOSITION-MODE',
'REPOSITION-TO-ROW',
'REPOSITION-TO-ROWID',
'REQUEST',
'RESET',
'RESIZABLE',
'RESIZA',
'RESIZAB',
'RESIZABL',
'RESIZE',
'RESTART-ROW',
'RESTART-ROWID',
'RETAIN',
'RETAIN-SHAPE',
'RETRY',
'RETRY-CANCEL',
'RETURN',
'RETURN-INSERTED',
'RETURN-INS',
'RETURN-INSE',
'RETURN-INSER',
'RETURN-INSERT',
'RETURN-INSERTE',
'RETURNS',
'RETURN-TO-START-DIR',
'RETURN-TO-START-DI',
'RETURN-VALUE',
'RETURN-VAL',
'RETURN-VALU',
'RETURN-VALUE-DATA-TYPE',
'REVERSE-FROM',
'REVERT',
'REVOKE',
'RGB-VALUE',
'RIGHT-ALIGNED',
'RIGHT-ALIGN',
'RIGHT-ALIGNE',
'RIGHT-TRIM',
'R-INDEX',
'ROLES',
'ROUND',
'ROUTINE-LEVEL',
'ROW',
'ROW-HEIGHT-CHARS',
'ROW-HEIGHT-PIXELS',
'ROW-MARKERS',
'ROW-OF',
'ROW-RESIZABLE',
'RULE',
'RUN',
'RUN-PROCEDURE',
'SAVE',
'SAVE-AS',
'SAVE-FILE',
'SAX-COMPLETE',
'SAX-COMPLE',
'SAX-COMPLET',
'SAX-PARSE',
'SAX-PARSE-FIRST',
'SAX-PARSE-NEXT',
'SAX-PARSER-ERROR',
'SAX-RUNNING',
'SAX-UNINITIALIZED',
'SAX-WRITE-BEGIN',
'SAX-WRITE-COMPLETE',
'SAX-WRITE-CONTENT',
'SAX-WRITE-ELEMENT',
'SAX-WRITE-ERROR',
'SAX-WRITE-IDLE',
'SAX-WRITER',
'SAX-WRITE-TAG',
'SCHEMA',
'SCHEMA-LOCATION',
'SCHEMA-MARSHAL',
'SCHEMA-PATH',
'SCREEN',
'SCREEN-IO',
'SCREEN-LINES',
'SCREEN-VALUE',
'SCREEN-VAL',
'SCREEN-VALU',
'SCROLL',
'SCROLLABLE',
'SCROLLBAR-HORIZONTAL',
'SCROLLBAR-H',
'SCROLLBAR-HO',
'SCROLLBAR-HOR',
'SCROLLBAR-HORI',
'SCROLLBAR-HORIZ',
'SCROLLBAR-HORIZO',
'SCROLLBAR-HORIZON',
'SCROLLBAR-HORIZONT',
'SCROLLBAR-HORIZONTA',
'SCROLL-BARS',
'SCROLLBAR-VERTICAL',
'SCROLLBAR-V',
'SCROLLBAR-VE',
'SCROLLBAR-VER',
'SCROLLBAR-VERT',
'SCROLLBAR-VERTI',
'SCROLLBAR-VERTIC',
'SCROLLBAR-VERTICA',
'SCROLL-DELTA',
'SCROLLED-ROW-POSITION',
'SCROLLED-ROW-POS',
'SCROLLED-ROW-POSI',
'SCROLLED-ROW-POSIT',
'SCROLLED-ROW-POSITI',
'SCROLLED-ROW-POSITIO',
'SCROLLING',
'SCROLL-OFFSET',
'SCROLL-TO-CURRENT-ROW',
'SCROLL-TO-ITEM',
'SCROLL-TO-I',
'SCROLL-TO-IT',
'SCROLL-TO-ITE',
'SCROLL-TO-SELECTED-ROW',
'SDBNAME',
'SEAL',
'SEAL-TIMESTAMP',
'SEARCH',
'SEARCH-SELF',
'SEARCH-TARGET',
'SECTION',
'SECURITY-POLICY',
'SEEK',
'SELECT',
'SELECTABLE',
'SELECT-ALL',
'SELECTED',
'SELECT-FOCUSED-ROW',
'SELECTION',
'SELECTION-END',
'SELECTION-LIST',
'SELECTION-START',
'SELECTION-TEXT',
'SELECT-NEXT-ROW',
'SELECT-PREV-ROW',
'SELECT-ROW',
'SELF',
'SEND',
'send-sql-statement',
'send-sql',
'SENSITIVE',
'SEPARATE-CONNECTION',
'SEPARATOR-FGCOLOR',
'SEPARATORS',
'SERVER',
'SERVER-CONNECTION-BOUND',
'SERVER-CONNECTION-BOUND-REQUEST',
'SERVER-CONNECTION-CONTEXT',
'SERVER-CONNECTION-ID',
'SERVER-OPERATING-MODE',
'SESSION',
'SESSION-ID',
'SET',
'SET-APPL-CONTEXT',
'SET-ATTR-CALL-TYPE',
'SET-ATTRIBUTE-NODE',
'SET-BLUE-VALUE',
'SET-BLUE',
'SET-BLUE-',
'SET-BLUE-V',
'SET-BLUE-VA',
'SET-BLUE-VAL',
'SET-BLUE-VALU',
'SET-BREAK',
'SET-BUFFERS',
'SET-CALLBACK',
'SET-CLIENT',
'SET-COMMIT',
'SET-CONTENTS',
'SET-CURRENT-VALUE',
'SET-DB-CLIENT',
'SET-DYNAMIC',
'SET-EVENT-MANAGER-OPTION',
'SET-GREEN-VALUE',
'SET-GREEN',
'SET-GREEN-',
'SET-GREEN-V',
'SET-GREEN-VA',
'SET-GREEN-VAL',
'SET-GREEN-VALU',
'SET-INPUT-SOURCE',
'SET-OPTION',
'SET-OUTPUT-DESTINATION',
'SET-PARAMETER',
'SET-POINTER-VALUE',
'SET-PROPERTY',
'SET-RED-VALUE',
'SET-RED',
'SET-RED-',
'SET-RED-V',
'SET-RED-VA',
'SET-RED-VAL',
'SET-RED-VALU',
'SET-REPOSITIONED-ROW',
'SET-RGB-VALUE',
'SET-ROLLBACK',
'SET-SELECTION',
'SET-SIZE',
'SET-SORT-ARROW',
'SETUSERID',
'SETUSER',
'SETUSERI',
'SET-WAIT-STATE',
'SHA1-DIGEST',
'SHARED',
'SHARE-LOCK',
'SHARE',
'SHARE-',
'SHARE-L',
'SHARE-LO',
'SHARE-LOC',
'SHOW-IN-TASKBAR',
'SHOW-STATS',
'SHOW-STAT',
'SIDE-LABEL-HANDLE',
'SIDE-LABEL-H',
'SIDE-LABEL-HA',
'SIDE-LABEL-HAN',
'SIDE-LABEL-HAND',
'SIDE-LABEL-HANDL',
'SIDE-LABELS',
'SIDE-LAB',
'SIDE-LABE',
'SIDE-LABEL',
'SILENT',
'SIMPLE',
'SINGLE',
'SIZE',
'SIZE-CHARS',
'SIZE-C',
'SIZE-CH',
'SIZE-CHA',
'SIZE-CHAR',
'SIZE-PIXELS',
'SIZE-P',
'SIZE-PI',
'SIZE-PIX',
'SIZE-PIXE',
'SIZE-PIXEL',
'SKIP',
'SKIP-DELETED-RECORD',
'SLIDER',
'SMALL-ICON',
'SMALLINT',
'SMALL-TITLE',
'SOME',
'SORT',
'SORT-ASCENDING',
'SORT-NUMBER',
'SOURCE',
'SOURCE-PROCEDURE',
'SPACE',
'SQL',
'SQRT',
'SSL-SERVER-NAME',
'STANDALONE',
'START',
'START-DOCUMENT',
'START-ELEMENT',
'START-MOVE',
'START-RESIZE',
'START-ROW-RESIZE',
'STATE-DETAIL',
'STATIC',
'STATUS',
'STATUS-AREA',
'STATUS-AREA-FONT',
'STDCALL',
'STOP',
'STOP-PARSING',
'STOPPED',
'STOPPE',
'STORED-PROCEDURE',
'STORED-PROC',
'STORED-PROCE',
'STORED-PROCED',
'STORED-PROCEDU',
'STORED-PROCEDUR',
'STREAM',
'STREAM-HANDLE',
'STREAM-IO',
'STRETCH-TO-FIT',
'STRICT',
'STRING',
'STRING-VALUE',
'STRING-XREF',
'SUB-AVERAGE',
'SUB-AVE',
'SUB-AVER',
'SUB-AVERA',
'SUB-AVERAG',
'SUB-COUNT',
'SUB-MAXIMUM',
'SUB-MAX',
'SUB-MAXI',
'SUB-MAXIM',
'SUB-MAXIMU',
'SUB-MENU',
'SUBSUB-',
'SUB-MIN',
'SUBSCRIBE',
'SUBSTITUTE',
'SUBST',
'SUBSTI',
'SUBSTIT',
'SUBSTITU',
'SUBSTITUT',
'SUBSTRING',
'SUBSTR',
'SUBSTRI',
'SUBSTRIN',
'SUB-TOTAL',
'SUBTYPE',
'SUM',
'SUPER',
'SUPER-PROCEDURES',
'SUPPRESS-NAMESPACE-PROCESSING',
'SUPPRESS-WARNINGS',
'SUPPRESS-W',
'SUPPRESS-WA',
'SUPPRESS-WAR',
'SUPPRESS-WARN',
'SUPPRESS-WARNI',
'SUPPRESS-WARNIN',
'SUPPRESS-WARNING',
'SYMMETRIC-ENCRYPTION-ALGORITHM',
'SYMMETRIC-ENCRYPTION-IV',
'SYMMETRIC-ENCRYPTION-KEY',
'SYMMETRIC-SUPPORT',
'SYSTEM-ALERT-BOXES',
'SYSTEM-ALERT',
'SYSTEM-ALERT-',
'SYSTEM-ALERT-B',
'SYSTEM-ALERT-BO',
'SYSTEM-ALERT-BOX',
'SYSTEM-ALERT-BOXE',
'SYSTEM-DIALOG',
'SYSTEM-HELP',
'SYSTEM-ID',
'TABLE',
'TABLE-HANDLE',
'TABLE-NUMBER',
'TAB-POSITION',
'TAB-STOP',
'TARGET',
'TARGET-PROCEDURE',
'TEMP-DIRECTORY',
'TEMP-DIR',
'TEMP-DIRE',
'TEMP-DIREC',
'TEMP-DIRECT',
'TEMP-DIRECTO',
'TEMP-DIRECTOR',
'TEMP-TABLE',
'TEMP-TABLE-PREPARE',
'TERM',
'TERMINAL',
'TERMI',
'TERMIN',
'TERMINA',
'TERMINATE',
'TEXT',
'TEXT-CURSOR',
'TEXT-SEG-GROW',
'TEXT-SELECTED',
'THEN',
'THIS-OBJECT',
'THIS-PROCEDURE',
'THREE-D',
'THROW',
'THROUGH',
'THRU',
'TIC-MARKS',
'TIME',
'TIME-SOURCE',
'TITLE',
'TITLE-BGCOLOR',
'TITLE-BGC',
'TITLE-BGCO',
'TITLE-BGCOL',
'TITLE-BGCOLO',
'TITLE-DCOLOR',
'TITLE-DC',
'TITLE-DCO',
'TITLE-DCOL',
'TITLE-DCOLO',
'TITLE-FGCOLOR',
'TITLE-FGC',
'TITLE-FGCO',
'TITLE-FGCOL',
'TITLE-FGCOLO',
'TITLE-FONT',
'TITLE-FO',
'TITLE-FON',
'TO',
'TODAY',
'TOGGLE-BOX',
'TOOLTIP',
'TOOLTIPS',
'TOPIC',
'TOP-NAV-QUERY',
'TOP-ONLY',
'TO-ROWID',
'TOTAL',
'TRAILING',
'TRANS',
'TRANSACTION',
'TRANSACTION-MODE',
'TRANS-INIT-PROCEDURE',
'TRANSPARENT',
'TRIGGER',
'TRIGGERS',
'TRIM',
'TRUE',
'TRUNCATE',
'TRUNC',
'TRUNCA',
'TRUNCAT',
'TYPE',
'TYPE-OF',
'UNBOX',
'UNBUFFERED',
'UNBUFF',
'UNBUFFE',
'UNBUFFER',
'UNBUFFERE',
'UNDERLINE',
'UNDERL',
'UNDERLI',
'UNDERLIN',
'UNDO',
'UNFORMATTED',
'UNFORM',
'UNFORMA',
'UNFORMAT',
'UNFORMATT',
'UNFORMATTE',
'UNION',
'UNIQUE',
'UNIQUE-ID',
'UNIQUE-MATCH',
'UNIX',
'UNLESS-HIDDEN',
'UNLOAD',
'UNSIGNED-LONG',
'UNSUBSCRIBE',
'UP',
'UPDATE',
'UPDATE-ATTRIBUTE',
'URL',
'URL-DECODE',
'URL-ENCODE',
'URL-PASSWORD',
'URL-USERID',
'USE',
'USE-DICT-EXPS',
'USE-FILENAME',
'USE-INDEX',
'USER',
'USE-REVVIDEO',
'USERID',
'USER-ID',
'USE-TEXT',
'USE-UNDERLINE',
'USE-WIDGET-POOL',
'USING',
'V6DISPLAY',
'V6FRAME',
'VALIDATE',
'VALIDATE-EXPRESSION',
'VALIDATE-MESSAGE',
'VALIDATE-SEAL',
'VALIDATION-ENABLED',
'VALID-EVENT',
'VALID-HANDLE',
'VALID-OBJECT',
'VALUE',
'VALUE-CHANGED',
'VALUES',
'VARIABLE',
'VAR',
'VARI',
'VARIA',
'VARIAB',
'VARIABL',
'VERBOSE',
'VERSION',
'VERTICAL',
'VERT',
'VERTI',
'VERTIC',
'VERTICA',
'VIEW',
'VIEW-AS',
'VIEW-FIRST-COLUMN-ON-REOPEN',
'VIRTUAL-HEIGHT-CHARS',
'VIRTUAL-HEIGHT',
'VIRTUAL-HEIGHT-',
'VIRTUAL-HEIGHT-C',
'VIRTUAL-HEIGHT-CH',
'VIRTUAL-HEIGHT-CHA',
'VIRTUAL-HEIGHT-CHAR',
'VIRTUAL-HEIGHT-PIXELS',
'VIRTUAL-HEIGHT-P',
'VIRTUAL-HEIGHT-PI',
'VIRTUAL-HEIGHT-PIX',
'VIRTUAL-HEIGHT-PIXE',
'VIRTUAL-HEIGHT-PIXEL',
'VIRTUAL-WIDTH-CHARS',
'VIRTUAL-WIDTH',
'VIRTUAL-WIDTH-',
'VIRTUAL-WIDTH-C',
'VIRTUAL-WIDTH-CH',
'VIRTUAL-WIDTH-CHA',
'VIRTUAL-WIDTH-CHAR',
'VIRTUAL-WIDTH-PIXELS',
'VIRTUAL-WIDTH-P',
'VIRTUAL-WIDTH-PI',
'VIRTUAL-WIDTH-PIX',
'VIRTUAL-WIDTH-PIXE',
'VIRTUAL-WIDTH-PIXEL',
'VISIBLE',
'VOID',
'WAIT',
'WAIT-FOR',
'WARNING',
'WEB-CONTEXT',
'WEEKDAY',
'WHEN',
'WHERE',
'WHILE',
'WIDGET',
'WIDGET-ENTER',
'WIDGET-E',
'WIDGET-EN',
'WIDGET-ENT',
'WIDGET-ENTE',
'WIDGET-ID',
'WIDGET-LEAVE',
'WIDGET-L',
'WIDGET-LE',
'WIDGET-LEA',
'WIDGET-LEAV',
'WIDGET-POOL',
'WIDTH-CHARS',
'WIDTH',
'WIDTH-',
'WIDTH-C',
'WIDTH-CH',
'WIDTH-CHA',
'WIDTH-CHAR',
'WIDTH-PIXELS',
'WIDTH-P',
'WIDTH-PI',
'WIDTH-PIX',
'WIDTH-PIXE',
'WIDTH-PIXEL',
'WINDOW',
'WINDOW-MAXIMIZED',
'WINDOW-MAXIM',
'WINDOW-MAXIMI',
'WINDOW-MAXIMIZ',
'WINDOW-MAXIMIZE',
'WINDOW-MINIMIZED',
'WINDOW-MINIM',
'WINDOW-MINIMI',
'WINDOW-MINIMIZ',
'WINDOW-MINIMIZE',
'WINDOW-NAME',
'WINDOW-NORMAL',
'WINDOW-STATE',
'WINDOW-STA',
'WINDOW-STAT',
'WINDOW-SYSTEM',
'WITH',
'WORD-INDEX',
'WORD-WRAP',
'WORK-AREA-HEIGHT-PIXELS',
'WORK-AREA-WIDTH-PIXELS',
'WORK-AREA-X',
'WORK-AREA-Y',
'WORKFILE',
'WORK-TABLE',
'WORK-TAB',
'WORK-TABL',
'WRITE',
'WRITE-CDATA',
'WRITE-CHARACTERS',
'WRITE-COMMENT',
'WRITE-DATA-ELEMENT',
'WRITE-EMPTY-ELEMENT',
'WRITE-ENTITY-REF',
'WRITE-EXTERNAL-DTD',
'WRITE-FRAGMENT',
'WRITE-MESSAGE',
'WRITE-PROCESSING-INSTRUCTION',
'WRITE-STATUS',
'WRITE-XML',
'WRITE-XMLSCHEMA',
'X',
'XCODE',
'XML-DATA-TYPE',
'XML-NODE-TYPE',
'XML-SCHEMA-PATH',
'XML-SUPPRESS-NAMESPACE-PROCESSING',
'X-OF',
'XREF',
'XREF-XML',
'Y',
'YEAR',
'YEAR-OFFSET',
'YES',
'YES-NO',
'YES-NO-CANCEL',
'Y-OF'
)
| gpl-3.0 |
sergiocorato/odoomrp-wip | quality_control_tolerance/models/qc_test.py | 11 | 4113 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class QcTestQuestion(models.Model):
_inherit = 'qc.test.question'
@api.one
@api.depends('min_value', 'max_value',
'tolerance_below', 'tolerance_above')
def _tolerable_values(self):
self.min_value_below = self.min_value - self.tolerance_below
self.max_value_above = self.max_value + self.tolerance_above
tolerance_below = fields.Float(string='Tolerance (below)')
tolerance_above = fields.Float(string='Tolerance (above)')
tolerance_percent_below = fields.Float(string='% tolerance (below)',
digits=(3, 2))
tolerance_percent_above = fields.Float(string='% tolerance (above)',
digits=(3, 2))
min_value_below = fields.Float(
string='Min. tolerable', compute='_tolerable_values')
max_value_above = fields.Float(
string='Max. tolerable', compute='_tolerable_values')
same_tolerance = fields.Boolean('Same tolerance above/below', default=True)
@api.one
@api.onchange('min_value', 'max_value')
def onchange_values(self):
self.onchange_tolerance_below()
self.onchange_tolerance_above()
@api.one
@api.onchange('same_tolerance')
def onchange_same_tolerance(self):
self.tolerance_percent_above = self.tolerance_percent_below
self.tolerance_above = self.tolerance_below
@api.one
@api.onchange('tolerance_below')
def onchange_tolerance_below(self):
diff = self.max_value - self.min_value
if diff:
self.tolerance_percent_below = 100 * self.tolerance_below / diff
if self.same_tolerance:
self.onchange_same_tolerance()
@api.one
@api.onchange('tolerance_percent_below')
def onchange_tolerance_percent_below(self):
diff = self.max_value - self.min_value
if diff:
self.tolerance_below = self.tolerance_percent_below * diff / 100
if self.same_tolerance:
self.onchange_same_tolerance()
@api.one
@api.onchange('tolerance_above')
def onchange_tolerance_above(self):
diff = self.max_value - self.min_value
if diff:
self.tolerance_percent_above = 100 * self.tolerance_above / diff
@api.one
@api.onchange('tolerance_percent_above')
def onchange_tolerance_percent_above(self):
diff = self.max_value - self.min_value
if diff:
self.tolerance_above = self.tolerance_percent_above * diff / 100
def check_same_tolerance(self, vals):
vals = vals.copy()
if (('tolerance_below' in vals or
'tolerance_percent_below' in vals) and
vals.get('same_tolerance', self.same_tolerance)):
vals['tolerance_above'] = vals.get('tolerance_below')
vals['tolerance_percent_above'] = (
vals.get('tolerance_percent_below'))
return vals
@api.model
def create(self, default):
# This is due to a bug in readonly treatment on views
default = self.check_same_tolerance(default)
return super(QcTestQuestion, self).create(default)
@api.multi
def write(self, vals):
# This is due to a bug in readonly treatment on views
vals = self.check_same_tolerance(vals)
return super(QcTestQuestion, self).write(vals)
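# Editor's note: the sketch below is illustrative only and is not part of the original module;
# the helper name is invented. It restates the conversion used by the onchange handlers above:
# tolerance_percent = 100 * tolerance / (max_value - min_value), guarded against a zero-width range.
def _example_tolerance_percent(min_value=10.0, max_value=20.0, tolerance=0.5):
    """Return an absolute tolerance expressed as a percentage of the allowed range."""
    diff = max_value - min_value
    # Mirrors the `if diff:` guards above: a zero-width range yields 0.0 instead of dividing by zero.
    # With the default arguments this returns 100 * 0.5 / 10 == 5.0 (%).
    return 100 * tolerance / diff if diff else 0.0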
class QcTestQuestionValue(models.Model):
_inherit = 'qc.test.question.value'
@api.one
@api.onchange('ok')
def onchange_ok(self):
self.tolerance_status = 'optimal' if self.ok else 'not_tolerable'
tolerance_status = fields.Selection(
[('optimal', 'Optimal'),
('tolerable', 'Tolerable'),
('not_tolerable', 'Not tolerable')],
string='Tolerance status', default='not_tolerable')
| agpl-3.0 |
liorvh/golismero | golismero/api/data/vulnerability/authentication/broken_captcha.py | 8 | 1453 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: [email protected]
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = ["BrokenCaptcha"]
from .. import WebVulnerability
class BrokenCaptcha(WebVulnerability):
"""
Broken CAPTCHA.
A CAPTCHA (Completely Automated Public Turing test to tell Computers and
Humans Apart) was broken by automated means.
"""
DEFAULTS = WebVulnerability.DEFAULTS.copy()
DEFAULTS["level"] = "low"
DEFAULTS["cwe"] = "CWE-804"
DEFAULTS["cvss_base"] = "4.9"
DEFAULTS["references"] = (
"https://www.owasp.org/index.php/Testing_for_Captcha_(OWASP-AT-012)",
)
| gpl-2.0 |
miguelinux/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/AutoGen/GenMake.py | 1 | 52497 | ## @file
# Create makefile for MS nmake and GNU make
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
## Import Modules
#
import Common.LongFilePathOs as os
import sys
import string
import re
import os.path as path
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.BuildToolError import *
from Common.Misc import *
from Common.String import *
from BuildEngine import *
import Common.GlobalData as GlobalData
## Regular expression for finding header file inclusions
gIncludePattern = re.compile(r"^[ \t]*#?[ \t]*include(?:[ \t]*(?:\\(?:\r\n|\r|\n))*[ \t]*)*(?:\(?[\"<]?[ \t]*)([-\w.\\/() \t]+)(?:[ \t]*[\">]?\)?)", re.MULTILINE|re.UNICODE|re.IGNORECASE)
## Regular expression for matching macro used in header file inclusion
gMacroPattern = re.compile("([_A-Z][_A-Z0-9]*)[ \t]*\((.+)\)", re.UNICODE)
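# Editor's note (illustrative, not part of the original tool): for a line such as
#   #include <IndustryStandard/Acpi.h>
# gIncludePattern.findall() is expected to capture 'IndustryStandard/Acpi.h', while an Edk.x style
#   #include EFI_PROTOCOL_DEFINITION (DiskIo)
# captures 'EFI_PROTOCOL_DEFINITION (DiskIo)', which gMacroPattern then splits into the macro name
# and its argument so that it can be expanded through gIncludeMacroConversion below.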
gIsFileMap = {}
## pattern for include style in Edk.x code
gProtocolDefinition = "Protocol/%(HeaderKey)s/%(HeaderKey)s.h"
gGuidDefinition = "Guid/%(HeaderKey)s/%(HeaderKey)s.h"
gArchProtocolDefinition = "ArchProtocol/%(HeaderKey)s/%(HeaderKey)s.h"
gPpiDefinition = "Ppi/%(HeaderKey)s/%(HeaderKey)s.h"
gIncludeMacroConversion = {
"EFI_PROTOCOL_DEFINITION" : gProtocolDefinition,
"EFI_GUID_DEFINITION" : gGuidDefinition,
"EFI_ARCH_PROTOCOL_DEFINITION" : gArchProtocolDefinition,
"EFI_PROTOCOL_PRODUCER" : gProtocolDefinition,
"EFI_PROTOCOL_CONSUMER" : gProtocolDefinition,
"EFI_PROTOCOL_DEPENDENCY" : gProtocolDefinition,
"EFI_ARCH_PROTOCOL_PRODUCER" : gArchProtocolDefinition,
"EFI_ARCH_PROTOCOL_CONSUMER" : gArchProtocolDefinition,
"EFI_ARCH_PROTOCOL_DEPENDENCY" : gArchProtocolDefinition,
"EFI_PPI_DEFINITION" : gPpiDefinition,
"EFI_PPI_PRODUCER" : gPpiDefinition,
"EFI_PPI_CONSUMER" : gPpiDefinition,
"EFI_PPI_DEPENDENCY" : gPpiDefinition,
}
## default makefile type
gMakeType = ""
if sys.platform == "win32":
gMakeType = "nmake"
else:
gMakeType = "gmake"
## BuildFile class
#
# This base class encapsulates a build file and its generation. It uses a template to generate
# the content of the build file, which is obtained from AutoGen objects.
#
class BuildFile(object):
## template used to generate the build file (i.e. makefile if using make)
_TEMPLATE_ = TemplateString('')
_DEFAULT_FILE_NAME_ = "Makefile"
## default file name for each type of build file
_FILE_NAME_ = {
"nmake" : "Makefile",
"gmake" : "GNUmakefile"
}
## Fixed header string for makefile
_MAKEFILE_HEADER = '''#
# DO NOT EDIT
# This file is auto-generated by build utility
#
# Module Name:
#
# %s
#
# Abstract:
#
# Auto-generated makefile for building modules, libraries or platform
#
'''
## Header string for each type of build file
_FILE_HEADER_ = {
"nmake" : _MAKEFILE_HEADER % _FILE_NAME_["nmake"],
"gmake" : _MAKEFILE_HEADER % _FILE_NAME_["gmake"]
}
## shell commands which can be used in build file in the form of macro
# $(CP) copy file command
# $(MV) move file command
# $(RM) remove file command
# $(MD) create dir command
# $(RD) remove dir command
#
_SHELL_CMD_ = {
"nmake" : {
"CP" : "copy /y",
"MV" : "move /y",
"RM" : "del /f /q",
"MD" : "mkdir",
"RD" : "rmdir /s /q",
},
"gmake" : {
"CP" : "cp -f",
"MV" : "mv -f",
"RM" : "rm -f",
"MD" : "mkdir -p",
"RD" : "rm -r -f",
}
}
## directory separator
_SEP_ = {
"nmake" : "\\",
"gmake" : "/"
}
## directory creation template
_MD_TEMPLATE_ = {
"nmake" : 'if not exist %(dir)s $(MD) %(dir)s',
"gmake" : "$(MD) %(dir)s"
}
## directory removal template
_RD_TEMPLATE_ = {
"nmake" : 'if exist %(dir)s $(RD) %(dir)s',
"gmake" : "$(RD) %(dir)s"
}
_CD_TEMPLATE_ = {
"nmake" : 'if exist %(dir)s cd %(dir)s',
"gmake" : "test -e %(dir)s && cd %(dir)s"
}
_MAKE_TEMPLATE_ = {
"nmake" : 'if exist %(file)s "$(MAKE)" $(MAKE_FLAGS) -f %(file)s',
"gmake" : 'test -e %(file)s && "$(MAKE)" $(MAKE_FLAGS) -f %(file)s'
}
_INCLUDE_CMD_ = {
"nmake" : '!INCLUDE',
"gmake" : "include"
}
_INC_FLAG_ = {"MSFT" : "/I", "GCC" : "-I", "INTEL" : "-I", "RVCT" : "-I"}
## Constructor of BuildFile
#
# @param AutoGenObject Object of AutoGen class
#
def __init__(self, AutoGenObject):
self._AutoGenObject = AutoGenObject
self._FileType = gMakeType
## Create build file
#
# @param FileType Type of build file. Only nmake and gmake are supported now.
#
# @retval TRUE The build file is created or re-created successfully
# @retval FALSE The build file exists and is the same as the one to be generated
#
def Generate(self, FileType=gMakeType):
if FileType not in self._FILE_NAME_:
EdkLogger.error("build", PARAMETER_INVALID, "Invalid build type [%s]" % FileType,
ExtraData="[%s]" % str(self._AutoGenObject))
self._FileType = FileType
FileContent = self._TEMPLATE_.Replace(self._TemplateDict)
FileName = self._FILE_NAME_[FileType]
return SaveFileOnChange(os.path.join(self._AutoGenObject.MakeFileDir, FileName), FileContent, False)
## Return a list of directory creation command string
#
# @param DirList The list of directories to be created
#
# @retval list The directory creation command list
#
def GetCreateDirectoryCommand(self, DirList):
return [self._MD_TEMPLATE_[self._FileType] % {'dir':Dir} for Dir in DirList]
## Return a list of directory removal command string
#
# @param DirList The list of directories to be removed
#
# @retval list The directory removal command list
#
def GetRemoveDirectoryCommand(self, DirList):
return [self._RD_TEMPLATE_[self._FileType] % {'dir':Dir} for Dir in DirList]
def PlaceMacro(self, Path, MacroDefinitions={}):
if Path.startswith("$("):
return Path
else:
PathLength = len(Path)
for MacroName in MacroDefinitions:
MacroValue = MacroDefinitions[MacroName]
MacroValueLength = len(MacroValue)
if MacroValueLength <= PathLength and Path.startswith(MacroValue):
Path = "$(%s)%s" % (MacroName, Path[MacroValueLength:])
break
return Path
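# Editor's note (illustrative, not from the original source): the directory command helpers above
# are plain '%' substitutions on the per-build-file-type templates, e.g.
#   GetCreateDirectoryCommand(["$(DEBUG_DIR)"]) -> ['$(MD) $(DEBUG_DIR)']                              (gmake)
#   GetCreateDirectoryCommand(["$(DEBUG_DIR)"]) -> ['if not exist $(DEBUG_DIR) $(MD) $(DEBUG_DIR)']    (nmake)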
## ModuleMakefile class
#
# This class encapsulates the makefile and its generation for a module. It uses a template to
# generate the content of the makefile, which is obtained from the ModuleAutoGen object.
#
class ModuleMakefile(BuildFile):
## template used to generate the makefile for module
_TEMPLATE_ = TemplateString('''\
${makefile_header}
#
# Platform Macro Definition
#
PLATFORM_NAME = ${platform_name}
PLATFORM_GUID = ${platform_guid}
PLATFORM_VERSION = ${platform_version}
PLATFORM_RELATIVE_DIR = ${platform_relative_directory}
PLATFORM_DIR = $(WORKSPACE)${separator}${platform_relative_directory}
PLATFORM_OUTPUT_DIR = ${platform_output_directory}
#
# Module Macro Definition
#
MODULE_NAME = ${module_name}
MODULE_GUID = ${module_guid}
MODULE_VERSION = ${module_version}
MODULE_TYPE = ${module_type}
MODULE_FILE = ${module_file}
MODULE_FILE_BASE_NAME = ${module_file_base_name}
BASE_NAME = $(MODULE_NAME)
MODULE_RELATIVE_DIR = ${module_relative_directory}
PACKAGE_RELATIVE_DIR = ${package_relative_directory}
MODULE_DIR = $(WORKSPACE)${separator}${module_relative_directory}
MODULE_ENTRY_POINT = ${module_entry_point}
ARCH_ENTRY_POINT = ${arch_entry_point}
IMAGE_ENTRY_POINT = ${image_entry_point}
${BEGIN}${module_extra_defines}
${END}
#
# Build Configuration Macro Definition
#
ARCH = ${architecture}
TOOLCHAIN = ${toolchain_tag}
TOOLCHAIN_TAG = ${toolchain_tag}
TARGET = ${build_target}
#
# Build Directory Macro Definition
#
# PLATFORM_BUILD_DIR = ${platform_build_directory}
BUILD_DIR = ${platform_build_directory}
BIN_DIR = $(BUILD_DIR)${separator}${architecture}
LIB_DIR = $(BIN_DIR)
MODULE_BUILD_DIR = ${module_build_directory}
OUTPUT_DIR = ${module_output_directory}
DEBUG_DIR = ${module_debug_directory}
DEST_DIR_OUTPUT = $(OUTPUT_DIR)
DEST_DIR_DEBUG = $(DEBUG_DIR)
#
# Shell Command Macro
#
${BEGIN}${shell_command_code} = ${shell_command}
${END}
#
# Tools definitions specific to this module
#
${BEGIN}${module_tool_definitions}
${END}
MAKE_FILE = ${makefile_path}
#
# Build Macro
#
${BEGIN}${file_macro}
${END}
COMMON_DEPS = ${BEGIN}${common_dependency_file} \\
${END}
#
# Overridable Target Macro Definitions
#
FORCE_REBUILD = force_build
INIT_TARGET = init
PCH_TARGET =
BC_TARGET = ${BEGIN}${backward_compatible_target} ${END}
CODA_TARGET = ${BEGIN}${remaining_build_target} \\
${END}
#
# Default target, which will build dependent libraries in addition to source files
#
all: mbuild
#
# Target used when called from platform makefile, which will bypass the build of dependent libraries
#
pbuild: $(INIT_TARGET) $(BC_TARGET) $(PCH_TARGET) $(CODA_TARGET)
#
# ModuleTarget
#
mbuild: $(INIT_TARGET) $(BC_TARGET) gen_libs $(PCH_TARGET) $(CODA_TARGET)
#
# Build Target used in multi-thread build mode, which will bypass the init and gen_libs targets
#
tbuild: $(BC_TARGET) $(PCH_TARGET) $(CODA_TARGET)
#
# Phony target which is used to force executing commands for a target
#
force_build:
\t-@
#
# Target to update the FD
#
fds: mbuild gen_fds
#
# Initialization target: print build information and create necessary directories
#
init: info dirs
info:
\t-@echo Building ... $(MODULE_DIR)${separator}$(MODULE_FILE) [$(ARCH)]
dirs:
${BEGIN}\t-@${create_directory_command}\n${END}
strdefs:
\t-@$(CP) $(DEBUG_DIR)${separator}AutoGen.h $(DEBUG_DIR)${separator}$(MODULE_NAME)StrDefs.h
#
# GenLibsTarget
#
gen_libs:
\t${BEGIN}@"$(MAKE)" $(MAKE_FLAGS) -f ${dependent_library_build_directory}${separator}${makefile_name}
\t${END}@cd $(MODULE_BUILD_DIR)
#
# Build Flash Device Image
#
gen_fds:
\t@"$(MAKE)" $(MAKE_FLAGS) -f $(BUILD_DIR)${separator}${makefile_name} fds
\t@cd $(MODULE_BUILD_DIR)
#
# Individual Object Build Targets
#
${BEGIN}${file_build_target}
${END}
#
# clean all intermediate files
#
clean:
\t${BEGIN}${clean_command}
\t${END}
#
# clean all generated files
#
cleanall:
${BEGIN}\t${cleanall_command}
${END}\t$(RM) *.pdb *.idb > NUL 2>&1
\t$(RM) $(BIN_DIR)${separator}$(MODULE_NAME).efi
#
# clean all dependent libraries built
#
cleanlib:
\t${BEGIN}-@${library_build_command} cleanall
\t${END}@cd $(MODULE_BUILD_DIR)\n\n''')
_FILE_MACRO_TEMPLATE = TemplateString("${macro_name} = ${BEGIN} \\\n ${source_file}${END}\n")
_BUILD_TARGET_TEMPLATE = TemplateString("${BEGIN}${target} : ${deps}\n${END}\t${cmd}\n")
## Constructor of ModuleMakefile
#
# @param ModuleAutoGen Object of ModuleAutoGen class
#
def __init__(self, ModuleAutoGen):
BuildFile.__init__(self, ModuleAutoGen)
self.PlatformInfo = self._AutoGenObject.PlatformInfo
self.ResultFileList = []
self.IntermediateDirectoryList = ["$(DEBUG_DIR)", "$(OUTPUT_DIR)"]
self.SourceFileDatabase = {} # {file type : file path}
self.DestFileDatabase = {} # {file type : file path}
self.FileBuildTargetList = [] # [(src, target string)]
self.BuildTargetList = [] # [target string]
self.PendingBuildTargetList = [] # [FileBuildRule objects]
self.CommonFileDependency = []
self.FileListMacros = {}
self.ListFileMacros = {}
self.FileCache = {}
self.FileDependency = []
self.LibraryBuildCommandList = []
self.LibraryFileList = []
self.LibraryMakefileList = []
self.LibraryBuildDirectoryList = []
self.SystemLibraryList = []
self.Macros = sdict()
self.Macros["OUTPUT_DIR" ] = self._AutoGenObject.Macros["OUTPUT_DIR"]
self.Macros["DEBUG_DIR" ] = self._AutoGenObject.Macros["DEBUG_DIR"]
self.Macros["MODULE_BUILD_DIR"] = self._AutoGenObject.Macros["MODULE_BUILD_DIR"]
self.Macros["BIN_DIR" ] = self._AutoGenObject.Macros["BIN_DIR"]
self.Macros["BUILD_DIR" ] = self._AutoGenObject.Macros["BUILD_DIR"]
self.Macros["WORKSPACE" ] = self._AutoGenObject.Macros["WORKSPACE"]
# Compose a dict object containing information used to do replacement in template
def _CreateTemplateDict(self):
if self._FileType not in self._SEP_:
EdkLogger.error("build", PARAMETER_INVALID, "Invalid Makefile type [%s]" % self._FileType,
ExtraData="[%s]" % str(self._AutoGenObject))
Separator = self._SEP_[self._FileType]
# break the build if neither source files nor binary files are found
if len(self._AutoGenObject.SourceFileList) == 0 and len(self._AutoGenObject.BinaryFileList) == 0:
EdkLogger.error("build", AUTOGEN_ERROR, "No files to be built in module [%s, %s, %s]"
% (self._AutoGenObject.BuildTarget, self._AutoGenObject.ToolChain, self._AutoGenObject.Arch),
ExtraData="[%s]" % str(self._AutoGenObject))
# convert dependent libraries to build command
self.ProcessDependentLibrary()
if len(self._AutoGenObject.Module.ModuleEntryPointList) > 0:
ModuleEntryPoint = self._AutoGenObject.Module.ModuleEntryPointList[0]
else:
ModuleEntryPoint = "_ModuleEntryPoint"
# Intel EBC compiler enforces EfiMain
if self._AutoGenObject.AutoGenVersion < 0x00010005 and self._AutoGenObject.Arch == "EBC":
ArchEntryPoint = "EfiMain"
else:
ArchEntryPoint = ModuleEntryPoint
if self._AutoGenObject.Arch == "EBC":
# The EBC compiler always uses "EfiStart" as the entry point. Only applies to EdkII modules
ImageEntryPoint = "EfiStart"
elif self._AutoGenObject.AutoGenVersion < 0x00010005:
# Edk modules use entry point specified in INF file
ImageEntryPoint = ModuleEntryPoint
else:
# EdkII modules always use "_ModuleEntryPoint" as entry point
ImageEntryPoint = "_ModuleEntryPoint"
# tools definitions
ToolsDef = []
IncPrefix = self._INC_FLAG_[self._AutoGenObject.ToolChainFamily]
for Tool in self._AutoGenObject.BuildOption:
for Attr in self._AutoGenObject.BuildOption[Tool]:
Value = self._AutoGenObject.BuildOption[Tool][Attr]
if Attr == "FAMILY":
continue
elif Attr == "PATH":
ToolsDef.append("%s = %s" % (Tool, Value))
else:
# Don't generate MAKE_FLAGS in makefile. It's put in environment variable.
if Tool == "MAKE":
continue
# Remove duplicated include path, if any
if Attr == "FLAGS":
Value = RemoveDupOption(Value, IncPrefix, self._AutoGenObject.IncludePathList)
ToolsDef.append("%s_%s = %s" % (Tool, Attr, Value))
ToolsDef.append("")
# convert source files and binary files to build targets
self.ResultFileList = [str(T.Target) for T in self._AutoGenObject.CodaTargetList]
if len(self.ResultFileList) == 0 and len(self._AutoGenObject.SourceFileList) <> 0:
EdkLogger.error("build", AUTOGEN_ERROR, "Nothing to build",
ExtraData="[%s]" % str(self._AutoGenObject))
self.ProcessBuildTargetList()
# Generate macros used to represent input files
FileMacroList = [] # macro name = file list
for FileListMacro in self.FileListMacros:
FileMacro = self._FILE_MACRO_TEMPLATE.Replace(
{
"macro_name" : FileListMacro,
"source_file" : self.FileListMacros[FileListMacro]
}
)
FileMacroList.append(FileMacro)
# INC_LIST is special
FileMacro = ""
IncludePathList = []
for P in self._AutoGenObject.IncludePathList:
IncludePathList.append(IncPrefix+self.PlaceMacro(P, self.Macros))
if FileBuildRule.INC_LIST_MACRO in self.ListFileMacros:
self.ListFileMacros[FileBuildRule.INC_LIST_MACRO].append(IncPrefix+P)
FileMacro += self._FILE_MACRO_TEMPLATE.Replace(
{
"macro_name" : "INC",
"source_file" : IncludePathList
}
)
FileMacroList.append(FileMacro)
# Generate macros used to represent files containing list of input files
for ListFileMacro in self.ListFileMacros:
ListFileName = os.path.join(self._AutoGenObject.OutputDir, "%s.lst" % ListFileMacro.lower()[:len(ListFileMacro)-5])
FileMacroList.append("%s = %s" % (ListFileMacro, ListFileName))
SaveFileOnChange(
ListFileName,
"\n".join(self.ListFileMacros[ListFileMacro]),
False
)
# Edk modules need <BaseName>StrDefs.h for string ID
#if self._AutoGenObject.AutoGenVersion < 0x00010005 and len(self._AutoGenObject.UnicodeFileList) > 0:
# BcTargetList = ['strdefs']
#else:
# BcTargetList = []
BcTargetList = []
MakefileName = self._FILE_NAME_[self._FileType]
LibraryMakeCommandList = []
for D in self.LibraryBuildDirectoryList:
Command = self._MAKE_TEMPLATE_[self._FileType] % {"file":os.path.join(D, MakefileName)}
LibraryMakeCommandList.append(Command)
package_rel_dir = self._AutoGenObject.SourceDir
if os.sep in package_rel_dir:
package_rel_dir = package_rel_dir[package_rel_dir.index(os.sep) + 1:]
MakefileTemplateDict = {
"makefile_header" : self._FILE_HEADER_[self._FileType],
"makefile_path" : os.path.join("$(MODULE_BUILD_DIR)", MakefileName),
"makefile_name" : MakefileName,
"platform_name" : self.PlatformInfo.Name,
"platform_guid" : self.PlatformInfo.Guid,
"platform_version" : self.PlatformInfo.Version,
"platform_relative_directory": self.PlatformInfo.SourceDir,
"platform_output_directory" : self.PlatformInfo.OutputDir,
"module_name" : self._AutoGenObject.Name,
"module_guid" : self._AutoGenObject.Guid,
"module_version" : self._AutoGenObject.Version,
"module_type" : self._AutoGenObject.ModuleType,
"module_file" : self._AutoGenObject.MetaFile.Name,
"module_file_base_name" : self._AutoGenObject.MetaFile.BaseName,
"module_relative_directory" : self._AutoGenObject.SourceDir,
"package_relative_directory": package_rel_dir,
"module_extra_defines" : ["%s = %s" % (k, v) for k, v in self._AutoGenObject.Module.Defines.iteritems()],
"architecture" : self._AutoGenObject.Arch,
"toolchain_tag" : self._AutoGenObject.ToolChain,
"build_target" : self._AutoGenObject.BuildTarget,
"platform_build_directory" : self.PlatformInfo.BuildDir,
"module_build_directory" : self._AutoGenObject.BuildDir,
"module_output_directory" : self._AutoGenObject.OutputDir,
"module_debug_directory" : self._AutoGenObject.DebugDir,
"separator" : Separator,
"module_tool_definitions" : ToolsDef,
"shell_command_code" : self._SHELL_CMD_[self._FileType].keys(),
"shell_command" : self._SHELL_CMD_[self._FileType].values(),
"module_entry_point" : ModuleEntryPoint,
"image_entry_point" : ImageEntryPoint,
"arch_entry_point" : ArchEntryPoint,
"remaining_build_target" : self.ResultFileList,
"common_dependency_file" : self.CommonFileDependency,
"create_directory_command" : self.GetCreateDirectoryCommand(self.IntermediateDirectoryList),
"clean_command" : self.GetRemoveDirectoryCommand(["$(OUTPUT_DIR)"]),
"cleanall_command" : self.GetRemoveDirectoryCommand(["$(DEBUG_DIR)", "$(OUTPUT_DIR)"]),
"dependent_library_build_directory" : self.LibraryBuildDirectoryList,
"library_build_command" : LibraryMakeCommandList,
"file_macro" : FileMacroList,
"file_build_target" : self.BuildTargetList,
"backward_compatible_target": BcTargetList,
}
return MakefileTemplateDict
def ProcessBuildTargetList(self):
#
# Search dependency file list for each source file
#
ForceIncludedFile = []
for File in self._AutoGenObject.AutoGenFileList:
if File.Ext == '.h':
ForceIncludedFile.append(File)
SourceFileList = []
for Target in self._AutoGenObject.IntroTargetList:
SourceFileList.extend(Target.Inputs)
self.FileDependency = self.GetFileDependency(
SourceFileList,
ForceIncludedFile,
self._AutoGenObject.IncludePathList + self._AutoGenObject.BuildOptionIncPathList
)
DepSet = None
for File in self.FileDependency:
if not self.FileDependency[File]:
self.FileDependency[File] = ['$(FORCE_REBUILD)']
continue
# skip non-C files
if File.Ext not in [".c", ".C"] or File.Name == "AutoGen.c":
continue
elif DepSet == None:
DepSet = set(self.FileDependency[File])
else:
DepSet &= set(self.FileDependency[File])
# in case nothing in SourceFileList
if DepSet == None:
DepSet = set()
#
# Extract common files list in the dependency files
#
for File in DepSet:
self.CommonFileDependency.append(self.PlaceMacro(File.Path, self.Macros))
for File in self.FileDependency:
# skip non-C files
if File.Ext not in [".c", ".C"] or File.Name == "AutoGen.c":
continue
NewDepSet = set(self.FileDependency[File])
NewDepSet -= DepSet
self.FileDependency[File] = ["$(COMMON_DEPS)"] + list(NewDepSet)
# Convert target description object to target string in makefile
for Type in self._AutoGenObject.Targets:
for T in self._AutoGenObject.Targets[Type]:
# Generate related macros if needed
if T.GenFileListMacro and T.FileListMacro not in self.FileListMacros:
self.FileListMacros[T.FileListMacro] = []
if T.GenListFile and T.ListFileMacro not in self.ListFileMacros:
self.ListFileMacros[T.ListFileMacro] = []
if T.GenIncListFile and T.IncListFileMacro not in self.ListFileMacros:
self.ListFileMacros[T.IncListFileMacro] = []
Deps = []
# Add force-dependencies
for Dep in T.Dependencies:
Deps.append(self.PlaceMacro(str(Dep), self.Macros))
# Add inclusion-dependencies
if len(T.Inputs) == 1 and T.Inputs[0] in self.FileDependency:
for F in self.FileDependency[T.Inputs[0]]:
Deps.append(self.PlaceMacro(str(F), self.Macros))
# Add source-dependencies
for F in T.Inputs:
NewFile = self.PlaceMacro(str(F), self.Macros)
# In order to use file list macro as dependency
if T.GenListFile:
# GNU tools need a forward-slash path separator, even on Windows
self.ListFileMacros[T.ListFileMacro].append(str(F).replace ('\\', '/'))
self.FileListMacros[T.FileListMacro].append(NewFile)
elif T.GenFileListMacro:
self.FileListMacros[T.FileListMacro].append(NewFile)
else:
Deps.append(NewFile)
# Use file list macro as dependency
if T.GenFileListMacro:
Deps.append("$(%s)" % T.FileListMacro)
# VBox - begin: Add $(QUIET)
sAllCmds = None;
for sCmd in T.Commands:
sCmd = sCmd.strip();
if len(sCmd) > 0:
if sCmd[0] == '-' and self._FileType == 'nmake':
sCmd = '-$(EFI_QUIET)' + sCmd[1:];
else:
sCmd = '$(EFI_QUIET)' + sCmd;
if sAllCmds is None:
sAllCmds = sCmd;
else:
sAllCmds += '\n\t' + sCmd;
# VBox - end.
TargetDict = {
"target" : self.PlaceMacro(T.Target.Path, self.Macros),
"cmd" : sAllCmds, # VBox: Original: "\n\t".join(T.Commands)),
"deps" : Deps
}
self.BuildTargetList.append(self._BUILD_TARGET_TEMPLATE.Replace(TargetDict))
## For creating makefile targets for dependent libraries
def ProcessDependentLibrary(self):
for LibraryAutoGen in self._AutoGenObject.LibraryAutoGenList:
self.LibraryBuildDirectoryList.append(self.PlaceMacro(LibraryAutoGen.BuildDir, self.Macros))
## Return a list containing source file's dependencies
#
# @param FileList The list of source files
# @param ForceInculeList The list of files which will be forcibly included
# @param SearchPathList The list of search paths
#
# @retval dict The mapping between source file path and its dependencies
#
def GetFileDependency(self, FileList, ForceInculeList, SearchPathList):
Dependency = {}
for F in FileList:
Dependency[F] = self.GetDependencyList(F, ForceInculeList, SearchPathList)
return Dependency
## Find dependencies for one source file
#
# By recursively searching for "#include" directives in the file, find all the
# files needed by the given source file. Dependencies are searched for only
# in the given search path list.
#
# @param File The source file
# @param ForceList The list of files which will be forcibly included
# @param SearchPathList The list of search paths
#
# @retval list The list of files the given source file depends on
#
def GetDependencyList(self, File, ForceList, SearchPathList):
EdkLogger.debug(EdkLogger.DEBUG_1, "Try to get dependency files for %s" % File)
FileStack = [File] + ForceList
DependencySet = set()
if self._AutoGenObject.Arch not in gDependencyDatabase:
gDependencyDatabase[self._AutoGenObject.Arch] = {}
DepDb = gDependencyDatabase[self._AutoGenObject.Arch]
while len(FileStack) > 0:
F = FileStack.pop()
FullPathDependList = []
if F in self.FileCache:
for CacheFile in self.FileCache[F]:
FullPathDependList.append(CacheFile)
if CacheFile not in DependencySet:
FileStack.append(CacheFile)
DependencySet.update(FullPathDependList)
continue
CurrentFileDependencyList = []
if F in DepDb:
CurrentFileDependencyList = DepDb[F]
else:
try:
Fd = open(F.Path, 'r')
except BaseException, X:
EdkLogger.error("build", FILE_OPEN_FAILURE, ExtraData=F.Path+"\n\t"+str(X))
FileContent = Fd.read()
Fd.close()
if len(FileContent) == 0:
continue
if FileContent[0] == 0xff or FileContent[0] == 0xfe:
FileContent = unicode(FileContent, "utf-16")
IncludedFileList = gIncludePattern.findall(FileContent)
for Inc in IncludedFileList:
Inc = Inc.strip()
# if there's macro used to reference header file, expand it
HeaderList = gMacroPattern.findall(Inc)
if len(HeaderList) == 1 and len(HeaderList[0]) == 2:
HeaderType = HeaderList[0][0]
HeaderKey = HeaderList[0][1]
if HeaderType in gIncludeMacroConversion:
Inc = gIncludeMacroConversion[HeaderType] % {"HeaderKey" : HeaderKey}
else:
# unknown macro used in #include; always build the file by
# returning an empty dependency
self.FileCache[File] = []
return []
Inc = os.path.normpath(Inc)
CurrentFileDependencyList.append(Inc)
DepDb[F] = CurrentFileDependencyList
CurrentFilePath = F.Dir
PathList = [CurrentFilePath] + SearchPathList
for Inc in CurrentFileDependencyList:
for SearchPath in PathList:
FilePath = os.path.join(SearchPath, Inc)
if FilePath in gIsFileMap:
if not gIsFileMap[FilePath]:
continue
# If isfile is called too many times, performance slows down.
elif not os.path.isfile(FilePath):
gIsFileMap[FilePath] = False
continue
else:
gIsFileMap[FilePath] = True
FilePath = PathClass(FilePath)
FullPathDependList.append(FilePath)
if FilePath not in DependencySet:
FileStack.append(FilePath)
break
else:
EdkLogger.debug(EdkLogger.DEBUG_9, "%s included by %s was not found "\
"in any given path:\n\t%s" % (Inc, F, "\n\t".join(SearchPathList)))
self.FileCache[F] = FullPathDependList
DependencySet.update(FullPathDependList)
DependencySet.update(ForceList)
if File in DependencySet:
DependencySet.remove(File)
DependencyList = list(DependencySet) # remove duplicate ones
return DependencyList
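    # Editor's note (illustrative): because included headers are pushed back onto FileStack, the
    # returned list is expected to be transitive (if A.c includes B.h and B.h includes C.h, both
    # B.h and C.h are reported for A.c), and files already present in self.FileCache are not re-read.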
_TemplateDict = property(_CreateTemplateDict)
## CustomMakefile class
#
# This class encapsulates the makefile and its generation for a module with a custom makefile. It
# uses a template to generate the content of the makefile, which is obtained from the ModuleAutoGen object.
#
class CustomMakefile(BuildFile):
## template used to generate the makefile for module with custom makefile
_TEMPLATE_ = TemplateString('''\
${makefile_header}
#
# Platform Macro Definition
#
PLATFORM_NAME = ${platform_name}
PLATFORM_GUID = ${platform_guid}
PLATFORM_VERSION = ${platform_version}
PLATFORM_RELATIVE_DIR = ${platform_relative_directory}
PLATFORM_DIR = $(WORKSPACE)${separator}${platform_relative_directory}
PLATFORM_OUTPUT_DIR = ${platform_output_directory}
#
# Module Macro Definition
#
MODULE_NAME = ${module_name}
MODULE_GUID = ${module_guid}
MODULE_VERSION = ${module_version}
MODULE_TYPE = ${module_type}
MODULE_FILE = ${module_file}
MODULE_FILE_BASE_NAME = ${module_file_base_name}
BASE_NAME = $(MODULE_NAME)
MODULE_RELATIVE_DIR = ${module_relative_directory}
MODULE_DIR = $(WORKSPACE)${separator}${module_relative_directory}
#
# Build Configuration Macro Definition
#
ARCH = ${architecture}
TOOLCHAIN = ${toolchain_tag}
TOOLCHAIN_TAG = ${toolchain_tag}
TARGET = ${build_target}
#
# Build Directory Macro Definition
#
# PLATFORM_BUILD_DIR = ${platform_build_directory}
BUILD_DIR = ${platform_build_directory}
BIN_DIR = $(BUILD_DIR)${separator}${architecture}
LIB_DIR = $(BIN_DIR)
MODULE_BUILD_DIR = ${module_build_directory}
OUTPUT_DIR = ${module_output_directory}
DEBUG_DIR = ${module_debug_directory}
DEST_DIR_OUTPUT = $(OUTPUT_DIR)
DEST_DIR_DEBUG = $(DEBUG_DIR)
#
# Tools definitions specific to this module
#
${BEGIN}${module_tool_definitions}
${END}
MAKE_FILE = ${makefile_path}
#
# Shell Command Macro
#
${BEGIN}${shell_command_code} = ${shell_command}
${END}
${custom_makefile_content}
#
# Target used when called from platform makefile, which will bypass the build of dependent libraries
#
pbuild: init all
#
# ModuleTarget
#
mbuild: init all
#
# Build Target used in multi-thread build mode, where no init target is needed
#
tbuild: all
#
# Initialization target: print build information and create necessary directories
#
init:
\t-@echo Building ... $(MODULE_DIR)${separator}$(MODULE_FILE) [$(ARCH)]
${BEGIN}\t-@${create_directory_command}\n${END}\
''')
## Constructor of CustomMakefile
#
# @param ModuleAutoGen Object of ModuleAutoGen class
#
def __init__(self, ModuleAutoGen):
BuildFile.__init__(self, ModuleAutoGen)
self.PlatformInfo = self._AutoGenObject.PlatformInfo
self.IntermediateDirectoryList = ["$(DEBUG_DIR)", "$(OUTPUT_DIR)"]
# Compose a dict object containing information used to do replacement in template
def _CreateTemplateDict(self):
Separator = self._SEP_[self._FileType]
if self._FileType not in self._AutoGenObject.CustomMakefile:
EdkLogger.error('build', OPTION_NOT_SUPPORTED, "No custom makefile for %s" % self._FileType,
ExtraData="[%s]" % str(self._AutoGenObject))
MakefilePath = os.path.join(
self._AutoGenObject.WorkspaceDir,
self._AutoGenObject.CustomMakefile[self._FileType]
)
try:
CustomMakefile = open(MakefilePath, 'r').read()
except:
EdkLogger.error('build', FILE_OPEN_FAILURE, File=str(self._AutoGenObject),
ExtraData=self._AutoGenObject.CustomMakefile[self._FileType])
# tools definitions
ToolsDef = []
for Tool in self._AutoGenObject.BuildOption:
# Don't generate MAKE_FLAGS in makefile. It's put in environment variable.
if Tool == "MAKE":
continue
for Attr in self._AutoGenObject.BuildOption[Tool]:
if Attr == "FAMILY":
continue
elif Attr == "PATH":
ToolsDef.append("%s = %s" % (Tool, self._AutoGenObject.BuildOption[Tool][Attr]))
else:
ToolsDef.append("%s_%s = %s" % (Tool, Attr, self._AutoGenObject.BuildOption[Tool][Attr]))
ToolsDef.append("")
MakefileName = self._FILE_NAME_[self._FileType]
MakefileTemplateDict = {
"makefile_header" : self._FILE_HEADER_[self._FileType],
"makefile_path" : os.path.join("$(MODULE_BUILD_DIR)", MakefileName),
"platform_name" : self.PlatformInfo.Name,
"platform_guid" : self.PlatformInfo.Guid,
"platform_version" : self.PlatformInfo.Version,
"platform_relative_directory": self.PlatformInfo.SourceDir,
"platform_output_directory" : self.PlatformInfo.OutputDir,
"module_name" : self._AutoGenObject.Name,
"module_guid" : self._AutoGenObject.Guid,
"module_version" : self._AutoGenObject.Version,
"module_type" : self._AutoGenObject.ModuleType,
"module_file" : self._AutoGenObject.MetaFile,
"module_file_base_name" : self._AutoGenObject.MetaFile.BaseName,
"module_relative_directory" : self._AutoGenObject.SourceDir,
"architecture" : self._AutoGenObject.Arch,
"toolchain_tag" : self._AutoGenObject.ToolChain,
"build_target" : self._AutoGenObject.BuildTarget,
"platform_build_directory" : self.PlatformInfo.BuildDir,
"module_build_directory" : self._AutoGenObject.BuildDir,
"module_output_directory" : self._AutoGenObject.OutputDir,
"module_debug_directory" : self._AutoGenObject.DebugDir,
"separator" : Separator,
"module_tool_definitions" : ToolsDef,
"shell_command_code" : self._SHELL_CMD_[self._FileType].keys(),
"shell_command" : self._SHELL_CMD_[self._FileType].values(),
"create_directory_command" : self.GetCreateDirectoryCommand(self.IntermediateDirectoryList),
"custom_makefile_content" : CustomMakefile
}
return MakefileTemplateDict
_TemplateDict = property(_CreateTemplateDict)
## PlatformMakefile class
#
# This class encapsulates the makefile and its generation for a platform. It uses
# a template to generate the content of the makefile. The content of the makefile
# is obtained from the PlatformAutoGen object.
#
class PlatformMakefile(BuildFile):
## template used to generate the makefile for platform
_TEMPLATE_ = TemplateString('''\
${makefile_header}
#
# Platform Macro Definition
#
PLATFORM_NAME = ${platform_name}
PLATFORM_GUID = ${platform_guid}
PLATFORM_VERSION = ${platform_version}
PLATFORM_FILE = ${platform_file}
PLATFORM_DIR = $(WORKSPACE)${separator}${platform_relative_directory}
PLATFORM_OUTPUT_DIR = ${platform_output_directory}
#
# Build Configuration Macro Definition
#
TOOLCHAIN = ${toolchain_tag}
TOOLCHAIN_TAG = ${toolchain_tag}
TARGET = ${build_target}
#
# Build Directory Macro Definition
#
BUILD_DIR = ${platform_build_directory}
FV_DIR = ${platform_build_directory}${separator}FV
#
# Shell Command Macro
#
${BEGIN}${shell_command_code} = ${shell_command}
${END}
MAKE = ${make_path}
MAKE_FILE = ${makefile_path}
#
# Default target
#
all: init build_libraries build_modules
#
# Initialization target: print build information and create necessary directories
#
init:
\t-@echo Building ... $(PLATFORM_FILE) [${build_architecture_list}]
\t${BEGIN}-@${create_directory_command}
\t${END}
#
# library build target
#
libraries: init build_libraries
#
# module build target
#
modules: init build_libraries build_modules
#
# Build all libraries:
#
build_libraries:
${BEGIN}\t@"$(MAKE)" $(MAKE_FLAGS) -f ${library_makefile_list} pbuild
${END}\t@cd $(BUILD_DIR)
#
# Build all modules:
#
build_modules:
${BEGIN}\t@"$(MAKE)" $(MAKE_FLAGS) -f ${module_makefile_list} pbuild
${END}\t@cd $(BUILD_DIR)
#
# Clean intermediate files
#
clean:
\t${BEGIN}-@${library_build_command} clean
\t${END}${BEGIN}-@${module_build_command} clean
\t${END}@cd $(BUILD_DIR)
#
# Clean all generated files except the makefile
#
cleanall:
${BEGIN}\t${cleanall_command}
${END}
#
# Clean all library files
#
cleanlib:
\t${BEGIN}-@${library_build_command} cleanall
\t${END}@cd $(BUILD_DIR)\n
''')
## Constructor of PlatformMakefile
#
# @param ModuleAutoGen Object of PlatformAutoGen class
#
def __init__(self, PlatformAutoGen):
BuildFile.__init__(self, PlatformAutoGen)
self.ModuleBuildCommandList = []
self.ModuleMakefileList = []
self.IntermediateDirectoryList = []
self.ModuleBuildDirectoryList = []
self.LibraryBuildDirectoryList = []
self.LibraryMakeCommandList = []
# Compose a dict object containing information used to do replacement in template
def _CreateTemplateDict(self):
Separator = self._SEP_[self._FileType]
PlatformInfo = self._AutoGenObject
if "MAKE" not in PlatformInfo.ToolDefinition or "PATH" not in PlatformInfo.ToolDefinition["MAKE"]:
EdkLogger.error("build", OPTION_MISSING, "No MAKE command defined. Please check your tools_def.txt!",
ExtraData="[%s]" % str(self._AutoGenObject))
self.IntermediateDirectoryList = ["$(BUILD_DIR)"]
self.ModuleBuildDirectoryList = self.GetModuleBuildDirectoryList()
self.LibraryBuildDirectoryList = self.GetLibraryBuildDirectoryList()
MakefileName = self._FILE_NAME_[self._FileType]
LibraryMakefileList = []
LibraryMakeCommandList = []
for D in self.LibraryBuildDirectoryList:
D = self.PlaceMacro(D, {"BUILD_DIR":PlatformInfo.BuildDir})
Makefile = os.path.join(D, MakefileName)
Command = self._MAKE_TEMPLATE_[self._FileType] % {"file":Makefile}
LibraryMakefileList.append(Makefile)
LibraryMakeCommandList.append(Command)
self.LibraryMakeCommandList = LibraryMakeCommandList
ModuleMakefileList = []
ModuleMakeCommandList = []
for D in self.ModuleBuildDirectoryList:
D = self.PlaceMacro(D, {"BUILD_DIR":PlatformInfo.BuildDir})
Makefile = os.path.join(D, MakefileName)
Command = self._MAKE_TEMPLATE_[self._FileType] % {"file":Makefile}
ModuleMakefileList.append(Makefile)
ModuleMakeCommandList.append(Command)
MakefileTemplateDict = {
"makefile_header" : self._FILE_HEADER_[self._FileType],
"makefile_path" : os.path.join("$(BUILD_DIR)", MakefileName),
"make_path" : PlatformInfo.ToolDefinition["MAKE"]["PATH"],
"makefile_name" : MakefileName,
"platform_name" : PlatformInfo.Name,
"platform_guid" : PlatformInfo.Guid,
"platform_version" : PlatformInfo.Version,
"platform_file" : self._AutoGenObject.MetaFile,
"platform_relative_directory": PlatformInfo.SourceDir,
"platform_output_directory" : PlatformInfo.OutputDir,
"platform_build_directory" : PlatformInfo.BuildDir,
"toolchain_tag" : PlatformInfo.ToolChain,
"build_target" : PlatformInfo.BuildTarget,
"shell_command_code" : self._SHELL_CMD_[self._FileType].keys(),
"shell_command" : self._SHELL_CMD_[self._FileType].values(),
"build_architecture_list" : self._AutoGenObject.Arch,
"architecture" : self._AutoGenObject.Arch,
"separator" : Separator,
"create_directory_command" : self.GetCreateDirectoryCommand(self.IntermediateDirectoryList),
"cleanall_command" : self.GetRemoveDirectoryCommand(self.IntermediateDirectoryList),
"library_makefile_list" : LibraryMakefileList,
"module_makefile_list" : ModuleMakefileList,
"library_build_command" : LibraryMakeCommandList,
"module_build_command" : ModuleMakeCommandList,
}
return MakefileTemplateDict
## Get the root directory list for intermediate files of all module builds
#
# @retval list The list of directories
#
def GetModuleBuildDirectoryList(self):
DirList = []
for ModuleAutoGen in self._AutoGenObject.ModuleAutoGenList:
if not ModuleAutoGen.IsBinaryModule:
DirList.append(os.path.join(self._AutoGenObject.BuildDir, ModuleAutoGen.BuildDir))
return DirList
## Get the root directory list for intermediate files of all library builds
#
# @retval list The list of directories
#
def GetLibraryBuildDirectoryList(self):
DirList = []
for LibraryAutoGen in self._AutoGenObject.LibraryAutoGenList:
if not LibraryAutoGen.IsBinaryModule:
DirList.append(os.path.join(self._AutoGenObject.BuildDir, LibraryAutoGen.BuildDir))
return DirList
_TemplateDict = property(_CreateTemplateDict)
## TopLevelMakefile class
#
# This class encapsulates the makefile and its generation for the entrance makefile.
# It uses a template to generate the content of the makefile. The content of the
# makefile is obtained from the WorkspaceAutoGen object.
#
class TopLevelMakefile(BuildFile):
## template used to generate toplevel makefile
_TEMPLATE_ = TemplateString('''${BEGIN}\tGenFds -f ${fdf_file} --conf=${conf_directory} -o ${platform_build_directory} -t ${toolchain_tag} -b ${build_target} -p ${active_platform} -a ${build_architecture_list} ${extra_options}${END}${BEGIN} -r ${fd} ${END}${BEGIN} -i ${fv} ${END}${BEGIN} -C ${cap} ${END}${BEGIN} -D ${macro} ${END}''')
## Constructor of TopLevelMakefile
#
# @param Workspace Object of WorkspaceAutoGen class
#
def __init__(self, Workspace):
BuildFile.__init__(self, Workspace)
self.IntermediateDirectoryList = []
# Compose a dict object containing information used to do replacement in template
def _CreateTemplateDict(self):
Separator = self._SEP_[self._FileType]
# any platform autogen object is ok because we just need common information
PlatformInfo = self._AutoGenObject
if "MAKE" not in PlatformInfo.ToolDefinition or "PATH" not in PlatformInfo.ToolDefinition["MAKE"]:
EdkLogger.error("build", OPTION_MISSING, "No MAKE command defined. Please check your tools_def.txt!",
ExtraData="[%s]" % str(self._AutoGenObject))
for Arch in PlatformInfo.ArchList:
self.IntermediateDirectoryList.append(Separator.join(["$(BUILD_DIR)", Arch]))
self.IntermediateDirectoryList.append("$(FV_DIR)")
# TRICK: for not generating GenFds call in makefile if no FDF file
MacroList = []
if PlatformInfo.FdfFile != None and PlatformInfo.FdfFile != "":
FdfFileList = [PlatformInfo.FdfFile]
# macros passed to GenFds
MacroList.append('"%s=%s"' % ("EFI_SOURCE", GlobalData.gEfiSource.replace('\\', '\\\\')))
MacroList.append('"%s=%s"' % ("EDK_SOURCE", GlobalData.gEdkSource.replace('\\', '\\\\')))
MacroDict = {}
MacroDict.update(GlobalData.gGlobalDefines)
MacroDict.update(GlobalData.gCommandLineDefines)
MacroDict.pop("EFI_SOURCE", "dummy")
MacroDict.pop("EDK_SOURCE", "dummy")
for MacroName in MacroDict:
if MacroDict[MacroName] != "":
MacroList.append('"%s=%s"' % (MacroName, MacroDict[MacroName].replace('\\', '\\\\')))
else:
MacroList.append('"%s"' % MacroName)
else:
FdfFileList = []
# pass extra common options to external program called in makefile, currently GenFds.exe
ExtraOption = ''
LogLevel = EdkLogger.GetLevel()
if LogLevel == EdkLogger.VERBOSE:
ExtraOption += " -v"
elif LogLevel <= EdkLogger.DEBUG_9:
ExtraOption += " -d %d" % (LogLevel - 1)
elif LogLevel == EdkLogger.QUIET:
ExtraOption += " -q"
if GlobalData.gCaseInsensitive:
ExtraOption += " -c"
if GlobalData.gIgnoreSource:
ExtraOption += " --ignore-sources"
MakefileName = self._FILE_NAME_[self._FileType]
SubBuildCommandList = []
for A in PlatformInfo.ArchList:
Command = self._MAKE_TEMPLATE_[self._FileType] % {"file":os.path.join("$(BUILD_DIR)", A, MakefileName)}
SubBuildCommandList.append(Command)
MakefileTemplateDict = {
"makefile_header" : self._FILE_HEADER_[self._FileType],
"makefile_path" : os.path.join("$(BUILD_DIR)", MakefileName),
"make_path" : PlatformInfo.ToolDefinition["MAKE"]["PATH"],
"platform_name" : PlatformInfo.Name,
"platform_guid" : PlatformInfo.Guid,
"platform_version" : PlatformInfo.Version,
"platform_build_directory" : PlatformInfo.BuildDir,
"conf_directory" : GlobalData.gConfDirectory,
"toolchain_tag" : PlatformInfo.ToolChain,
"build_target" : PlatformInfo.BuildTarget,
"shell_command_code" : self._SHELL_CMD_[self._FileType].keys(),
"shell_command" : self._SHELL_CMD_[self._FileType].values(),
'arch' : list(PlatformInfo.ArchList),
"build_architecture_list" : ','.join(PlatformInfo.ArchList),
"separator" : Separator,
"create_directory_command" : self.GetCreateDirectoryCommand(self.IntermediateDirectoryList),
"cleanall_command" : self.GetRemoveDirectoryCommand(self.IntermediateDirectoryList),
"sub_build_command" : SubBuildCommandList,
"fdf_file" : FdfFileList,
"active_platform" : str(PlatformInfo),
"fd" : PlatformInfo.FdTargetList,
"fv" : PlatformInfo.FvTargetList,
"cap" : PlatformInfo.CapTargetList,
"extra_options" : ExtraOption,
"macro" : MacroList,
}
return MakefileTemplateDict
## Get the root directory list for intermediate files of all module builds
#
# @retval list The list of directories
#
def GetModuleBuildDirectoryList(self):
DirList = []
for ModuleAutoGen in self._AutoGenObject.ModuleAutoGenList:
if not ModuleAutoGen.IsBinaryModule:
DirList.append(os.path.join(self._AutoGenObject.BuildDir, ModuleAutoGen.BuildDir))
return DirList
## Get the root directory list for intermediate files of all library builds
#
# @retval list The list of directories
#
def GetLibraryBuildDirectoryList(self):
DirList = []
for LibraryAutoGen in self._AutoGenObject.LibraryAutoGenList:
if not LibraryAutoGen.IsBinaryModule:
DirList.append(os.path.join(self._AutoGenObject.BuildDir, LibraryAutoGen.BuildDir))
return DirList
_TemplateDict = property(_CreateTemplateDict)
# This acts like the main() function for the script, unless it is 'import'ed into another script.
if __name__ == '__main__':
pass
| gpl-2.0 |
boonchu/pykickstart | tests/commands/ignoredisk.py | 8 | 3720 | #
# Martin Gracik <[email protected]>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest
from pykickstart.errors import KickstartParseError, KickstartValueError
class FC3_TestCase(CommandTest):
command = "ignoredisk"
def runTest(self):
# pass
self.assert_parse("ignoredisk --drives=sda", "ignoredisk --drives=sda\n")
self.assert_parse("ignoredisk --drives=sda,sdb", "ignoredisk --drives=sda,sdb\n")
# fail
# wrong option name
self.assert_parse_error("ignoredisk --devices=sda", KickstartParseError)
# missing arguments
self.assert_parse_error("ignoredisk --drives", KickstartParseError)
# empty
self.assert_parse_error("ignoredisk", KickstartValueError)
class F8_TestCase(FC3_TestCase):
def runTest(self):
# Run parents class tests
FC3_TestCase.runTest(self)
# pass
self.assert_parse("ignoredisk --drives=sda", "ignoredisk --drives=sda\n")
self.assert_parse("ignoredisk --drives=sda,sdb", "ignoredisk --drives=sda,sdb\n")
self.assert_parse("ignoredisk --only-use=sda", "ignoredisk --only-use=sda\n")
self.assert_parse("ignoredisk --only-use=sda,sdb", "ignoredisk --only-use=sda,sdb\n")
# fail
# missing arguments
self.assert_parse_error("ignoredisk --only-use", KickstartParseError)
# wrong option name
self.assert_parse_error("ignoredisk --devices=sda", KickstartParseError)
# missing arguments
self.assert_parse_error("ignoredisk --drives", KickstartParseError)
# empty
self.assert_parse_error("ignoredisk", KickstartValueError)
# both options provided
self.assert_parse_error("ignoredisk --drives=sda --only-use=sdb", KickstartValueError)
self.assert_parse_error("ignoredisk --only-use=sda --drives=sdb", KickstartValueError)
class RHEL6_TestCase(F8_TestCase):
def runTest(self):
# Run parents class tests
F8_TestCase.runTest(self)
# pass
self.assert_parse("ignoredisk --interactive", "ignoredisk --interactive\n")
# fail
# both options provided
self.assert_parse_error("ignoredisk --drives=sda --interactive", KickstartValueError)
self.assert_parse_error("ignoredisk --interactive --drives=sda", KickstartValueError)
self.assert_parse_error("ignoredisk --only-use=sda --interactive", KickstartValueError)
self.assert_parse_error("ignoredisk --interactive --only-use=sda", KickstartValueError)
self.assert_parse_error("ignoredisk --interactive --drives=sda --only-use=sdb", KickstartValueError)
self.assert_parse_error("ignoredisk --only-use=sda --drives=sdb --interactive", KickstartValueError)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
goliate/sarakha63-persomov | couchpotato/core/notifications/plex/__init__.py | 48 | 1889 | from .main import Plex
def autoload():
return Plex()
config = [{
'name': 'plex',
'groups': [
{
'tab': 'notifications',
'list': 'notification_providers',
'name': 'plex',
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
},
{
'name': 'media_server',
'label': 'Media Server',
'default': 'localhost',
'description': 'Hostname/IP, default localhost'
},
{
'name': 'username',
'label': 'Username',
'default': '',
'description': 'Required for myPlex'
},
{
'name': 'password',
'label': 'Password',
'default': '',
'type': 'password',
'description': 'Required for myPlex'
},
{
'name': 'auth_token',
'label': 'Auth Token',
'default': '',
'advanced': True,
'description': 'Required for myPlex'
},
{
'name': 'clients',
'default': '',
'description': 'Comma separated list of client names (computer names). Shown at the top right when you start Plex'
},
{
'name': 'on_snatch',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Also send a message when a movie is snatched.',
},
],
}
],
}]
| gpl-3.0 |
bouk/redshift_sqlalchemy | tests/test_default_ssl.py | 5 | 1725 | import sqlalchemy as sa
CERT = b"""-----BEGIN CERTIFICATE-----
MIIDeDCCAuGgAwIBAgIJALPHPDcjk979MA0GCSqGSIb3DQEBBQUAMIGFMQswCQYD
VQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHU2VhdHRsZTET
MBEGA1UEChMKQW1hem9uLmNvbTELMAkGA1UECxMCQ00xLTArBgkqhkiG9w0BCQEW
HmNvb2tpZS1tb25zdGVyLWNvcmVAYW1hem9uLmNvbTAeFw0xMjExMDIyMzI0NDda
Fw0xNzExMDEyMzI0NDdaMIGFMQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGlu
Z3RvbjEQMA4GA1UEBxMHU2VhdHRsZTETMBEGA1UEChMKQW1hem9uLmNvbTELMAkG
A1UECxMCQ00xLTArBgkqhkiG9w0BCQEWHmNvb2tpZS1tb25zdGVyLWNvcmVAYW1h
em9uLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAw949t4UZ+9n1K8vj
PVkyehoV2kWepDmJ8YKl358nkmNwrSAGkslVttdpZS+FrgIcb44UbfVbB4bOSq0J
qd39GYVRzSazCwr2tpibFvH87PyAX4VVUBDlCizJToEYsXkAKecs+IRqCDWG2ht/
pibO2+T5Wp8jaxUBvDmoHY3BSgkCAwEAAaOB7TCB6jAdBgNVHQ4EFgQUE5KUaWSM
Uml+6MZQia7DjmfjvLgwgboGA1UdIwSBsjCBr4AUE5KUaWSMUml+6MZQia7Djmfj
vLihgYukgYgwgYUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAw
DgYDVQQHEwdTZWF0dGxlMRMwEQYDVQQKEwpBbWF6b24uY29tMQswCQYDVQQLEwJD
TTEtMCsGCSqGSIb3DQEJARYeY29va2llLW1vbnN0ZXItY29yZUBhbWF6b24uY29t
ggkAs8c8NyOT3v0wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQCYZSRQ
zJNHXyKACrqMB5j1baUGf5NA0cZ/8s5iWeC9Gkwi7cXyiq9OrBaUtJBzAJTzfWbH
dfVaBL5FWuQsbkJWHe0mV+l4Kzl5bh/FSDSkhYR1duYRmdCXckQk6mAF6xG+1mpn
8YlJmbEhkDmBgJ8C8p0LCMNaO2xFLlNU0O+0ng==
-----END CERTIFICATE-----
"""
def test_ssl_args():
engine = sa.create_engine('redshift+psycopg2://test')
dialect = engine.dialect
url = engine.url
cargs, cparams = dialect.create_connect_args(url)
assert cargs == []
assert cparams.pop('host') == 'test'
assert cparams.pop('sslmode') == 'verify-full'
with open(cparams.pop('sslrootcert'), 'rb') as cert:
assert cert.read() == CERT
assert cparams == {}
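# A minimal usage sketch (illustrative, not part of the test): the dialect fills
# in sslmode='verify-full' and the bundled root certificate by default, and a
# caller can still override those through connect_args. The host below is hypothetical.
#
# engine = sa.create_engine(
#     'redshift+psycopg2://user:secret@example-cluster:5439/dev',
#     connect_args={'sslmode': 'prefer'},  # replaces the 'verify-full' default
# )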
| mit |
Jgarcia-IAS/SAT | openerp/addons-extra/odoo-pruebas/odoo-server/openerp/addons/base/ir/ir_ui_menu.py | 316 | 20548 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import operator
import re
import threading
import openerp.modules
from openerp.osv import fields, osv
from openerp import api, tools
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
MENU_ITEM_SEPARATOR = "/"
class ir_ui_menu(osv.osv):
_name = 'ir.ui.menu'
def __init__(self, *args, **kwargs):
cls = type(self)
# by design, self._menu_cache is specific to the database
cls._menu_cache_lock = threading.RLock()
cls._menu_cache = {}
super(ir_ui_menu, self).__init__(*args, **kwargs)
self.pool.get('ir.model.access').register_cache_clearing_method(self._name, 'clear_cache')
def clear_cache(self):
with self._menu_cache_lock:
# radical but this doesn't frequently happen
if self._menu_cache:
# Normally this is done by openerp.tools.ormcache
# but since we do not use it, set it by ourself.
self.pool._any_cache_cleared = True
self._menu_cache.clear()
self.load_menus_root._orig.clear_cache(self)
self.load_menus._orig.clear_cache(self)
@api.multi
@api.returns('self')
def _filter_visible_menus(self):
""" Filter `self` to only keep the menu items that should be visible in
the menu hierarchy of the current user.
Uses a cache for speeding up the computation.
"""
with self._menu_cache_lock:
groups = self.env.user.groups_id
# visibility is entirely based on the user's groups;
# self._menu_cache[key] gives the ids of all visible menus
key = frozenset(groups._ids)
if key in self._menu_cache:
visible = self.browse(self._menu_cache[key])
else:
# retrieve all menus, and determine which ones are visible
context = {'ir.ui.menu.full_list': True}
menus = self.with_context(context).search([])
# first discard all menus with groups the user does not have
menus = menus.filtered(
lambda menu: not menu.groups_id or menu.groups_id & groups)
# take apart menus that have an action
action_menus = menus.filtered('action')
folder_menus = menus - action_menus
visible = self.browse()
# process action menus, check whether their action is allowed
access = self.env['ir.model.access']
model_fname = {
'ir.actions.act_window': 'res_model',
'ir.actions.report.xml': 'model',
'ir.actions.wizard': 'model',
'ir.actions.server': 'model_id',
}
for menu in action_menus:
fname = model_fname.get(menu.action._name)
if not fname or not menu.action[fname] or \
access.check(menu.action[fname], 'read', False):
# make menu visible, and its folder ancestors, too
visible += menu
menu = menu.parent_id
while menu and menu in folder_menus and menu not in visible:
visible += menu
menu = menu.parent_id
self._menu_cache[key] = visible._ids
return self.filtered(lambda menu: menu in visible)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
ids = super(ir_ui_menu, self).search(cr, uid, args, offset=0,
limit=None, order=order, context=context, count=False)
if not ids:
if count:
return 0
return []
# menu filtering is done only on main menu tree, not other menu lists
if context.get('ir.ui.menu.full_list'):
result = ids
else:
result = self._filter_visible_menus(cr, uid, ids, context=context)
if offset:
result = result[long(offset):]
if limit:
result = result[:long(limit)]
if count:
return len(result)
return result
def name_get(self, cr, uid, ids, context=None):
res = []
for id in ids:
elmt = self.browse(cr, uid, id, context=context)
res.append((id, self._get_one_full_name(elmt)))
return res
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
if context is None:
context = {}
res = {}
for elmt in self.browse(cr, uid, ids, context=context):
res[elmt.id] = self._get_one_full_name(elmt)
return res
def _get_one_full_name(self, elmt, level=6):
if level<=0:
return '...'
if elmt.parent_id:
parent_path = self._get_one_full_name(elmt.parent_id, level-1) + MENU_ITEM_SEPARATOR
else:
parent_path = ''
return parent_path + elmt.name
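# Illustrative note (added comment): for a menu nested as Settings > Users > Users,
# _get_one_full_name returns "Settings/Users/Users" (joined with MENU_ITEM_SEPARATOR);
# ancestors beyond the `level` limit collapse into a leading "...".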
def create(self, cr, uid, values, context=None):
self.clear_cache()
return super(ir_ui_menu, self).create(cr, uid, values, context=context)
def write(self, cr, uid, ids, values, context=None):
self.clear_cache()
return super(ir_ui_menu, self).write(cr, uid, ids, values, context=context)
def unlink(self, cr, uid, ids, context=None):
# Detach children and promote them to top-level, because it would be unwise to
# cascade-delete submenus blindly. We also can't use ondelete=set null because
# that is not supported when _parent_store is used (would silently corrupt it).
# TODO: ideally we should move them under a generic "Orphans" menu somewhere?
if isinstance(ids, (int, long)):
ids = [ids]
local_context = dict(context or {})
local_context['ir.ui.menu.full_list'] = True
direct_children_ids = self.search(cr, uid, [('parent_id', 'in', ids)], context=local_context)
if direct_children_ids:
self.write(cr, uid, direct_children_ids, {'parent_id': False})
result = super(ir_ui_menu, self).unlink(cr, uid, ids, context=context)
self.clear_cache()
return result
def copy(self, cr, uid, id, default=None, context=None):
ir_values_obj = self.pool.get('ir.values')
res = super(ir_ui_menu, self).copy(cr, uid, id, default=default, context=context)
datas=self.read(cr,uid,[res],['name'])[0]
rex=re.compile('\([0-9]+\)')
concat=rex.findall(datas['name'])
if concat:
next_num=int(concat[0])+1
datas['name']=rex.sub(('(%d)'%next_num),datas['name'])
else:
datas['name'] += '(1)'
self.write(cr,uid,[res],{'name':datas['name']})
ids = ir_values_obj.search(cr, uid, [
('model', '=', 'ir.ui.menu'),
('res_id', '=', id),
])
for iv in ir_values_obj.browse(cr, uid, ids):
ir_values_obj.copy(cr, uid, iv.id, default={'res_id': res},
context=context)
return res
def _action(self, cursor, user, ids, name, arg, context=None):
res = {}
ir_values_obj = self.pool.get('ir.values')
value_ids = ir_values_obj.search(cursor, user, [
('model', '=', self._name), ('key', '=', 'action'),
('key2', '=', 'tree_but_open'), ('res_id', 'in', ids)],
context=context)
values_action = {}
for value in ir_values_obj.browse(cursor, user, value_ids, context=context):
values_action[value.res_id] = value.value
for menu_id in ids:
res[menu_id] = values_action.get(menu_id, False)
return res
def _action_inv(self, cursor, user, menu_id, name, value, arg, context=None):
if context is None:
context = {}
ctx = context.copy()
if self.CONCURRENCY_CHECK_FIELD in ctx:
del ctx[self.CONCURRENCY_CHECK_FIELD]
ir_values_obj = self.pool.get('ir.values')
values_ids = ir_values_obj.search(cursor, user, [
('model', '=', self._name), ('key', '=', 'action'),
('key2', '=', 'tree_but_open'), ('res_id', '=', menu_id)],
context=context)
if value and values_ids:
ir_values_obj.write(cursor, user, values_ids, {'value': value}, context=ctx)
elif value:
# no values_ids, create binding
ir_values_obj.create(cursor, user, {
'name': 'Menuitem',
'model': self._name,
'value': value,
'key': 'action',
'key2': 'tree_but_open',
'res_id': menu_id,
}, context=ctx)
elif values_ids:
# value is False, remove existing binding
ir_values_obj.unlink(cursor, user, values_ids, context=ctx)
def _get_icon_pict(self, cr, uid, ids, name, args, context):
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = ('stock', (m.icon,'ICON_SIZE_MENU'))
return res
def onchange_icon(self, cr, uid, ids, icon):
if not icon:
return {}
return {'type': {'icon_pict': 'picture'}, 'value': {'icon_pict': ('stock', (icon,'ICON_SIZE_MENU'))}}
def read_image(self, path):
if not path:
return False
path_info = path.split(',')
icon_path = openerp.modules.get_module_resource(path_info[0],path_info[1])
icon_image = False
if icon_path:
try:
icon_file = tools.file_open(icon_path,'rb')
icon_image = base64.encodestring(icon_file.read())
finally:
icon_file.close()
return icon_image
def _get_image_icon(self, cr, uid, ids, names, args, context=None):
res = {}
for menu in self.browse(cr, uid, ids, context=context):
res[menu.id] = r = {}
for fn in names:
fn_src = fn[:-5] # remove _data
r[fn] = self.read_image(menu[fn_src])
return res
def _get_needaction_enabled(self, cr, uid, ids, field_names, args, context=None):
""" needaction_enabled: tell whether the menu has a related action
that uses the needaction mechanism. """
res = dict.fromkeys(ids, False)
for menu in self.browse(cr, uid, ids, context=context):
if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.res_model:
if menu.action.res_model in self.pool and self.pool[menu.action.res_model]._needaction:
res[menu.id] = True
return res
def get_needaction_data(self, cr, uid, ids, context=None):
""" Return for each menu entry of ids :
- if it uses the needaction mechanism (needaction_enabled)
- the needaction counter of the related action, taking into account
the action domain
"""
if context is None:
context = {}
res = {}
menu_ids = set()
for menu in self.browse(cr, uid, ids, context=context):
menu_ids.add(menu.id)
ctx = None
if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.context:
try:
# use magical UnquoteEvalContext to ignore undefined client-side variables such as `active_id`
eval_ctx = tools.UnquoteEvalContext(**context)
ctx = eval(menu.action.context, locals_dict=eval_ctx, nocopy=True) or None
except Exception:
# if the eval still fails for some reason, we'll simply skip this menu
pass
menu_ref = ctx and ctx.get('needaction_menu_ref')
if menu_ref:
if not isinstance(menu_ref, list):
menu_ref = [menu_ref]
model_data_obj = self.pool.get('ir.model.data')
for menu_data in menu_ref:
try:
model, id = model_data_obj.get_object_reference(cr, uid, menu_data.split('.')[0], menu_data.split('.')[1])
if (model == 'ir.ui.menu'):
menu_ids.add(id)
except Exception:
pass
menu_ids = list(menu_ids)
for menu in self.browse(cr, uid, menu_ids, context=context):
res[menu.id] = {
'needaction_enabled': False,
'needaction_counter': False,
}
if menu.action and menu.action.type in ('ir.actions.act_window', 'ir.actions.client') and menu.action.res_model:
if menu.action.res_model in self.pool:
obj = self.pool[menu.action.res_model]
if obj._needaction:
if menu.action.type == 'ir.actions.act_window':
dom = menu.action.domain and eval(menu.action.domain, {'uid': uid}) or []
else:
dom = eval(menu.action.params_store or '{}', {'uid': uid}).get('domain')
res[menu.id]['needaction_enabled'] = obj._needaction
res[menu.id]['needaction_counter'] = obj._needaction_count(cr, uid, dom, context=context)
return res
def get_user_roots(self, cr, uid, context=None):
""" Return all root menu ids visible for the user.
:return: the root menu ids
:rtype: list(int)
"""
menu_domain = [('parent_id', '=', False)]
return self.search(cr, uid, menu_domain, context=context)
@api.cr_uid_context
@tools.ormcache_context(accepted_keys=('lang',))
def load_menus_root(self, cr, uid, context=None):
fields = ['name', 'sequence', 'parent_id', 'action']
menu_root_ids = self.get_user_roots(cr, uid, context=context)
menu_roots = self.read(cr, uid, menu_root_ids, fields, context=context) if menu_root_ids else []
return {
'id': False,
'name': 'root',
'parent_id': [-1, ''],
'children': menu_roots,
'all_menu_ids': menu_root_ids,
}
@api.cr_uid_context
@tools.ormcache_context(accepted_keys=('lang',))
def load_menus(self, cr, uid, context=None):
""" Loads all menu items (all applications and their sub-menus).
:return: the menu root
:rtype: dict('children': menu_nodes)
"""
fields = ['name', 'sequence', 'parent_id', 'action']
menu_root_ids = self.get_user_roots(cr, uid, context=context)
menu_roots = self.read(cr, uid, menu_root_ids, fields, context=context) if menu_root_ids else []
menu_root = {
'id': False,
'name': 'root',
'parent_id': [-1, ''],
'children': menu_roots,
'all_menu_ids': menu_root_ids,
}
if not menu_roots:
return menu_root
# menus are loaded fully, unlike a regular tree view, because there are a
# limited number of items (752 when all 6.1 addons are installed)
menu_ids = self.search(cr, uid, [('id', 'child_of', menu_root_ids)], 0, False, False, context=context)
menu_items = self.read(cr, uid, menu_ids, fields, context=context)
# adds roots at the end of the sequence, so that they will overwrite
# equivalent menu items from full menu read when put into id:item
# mapping, resulting in children being correctly set on the roots.
menu_items.extend(menu_roots)
menu_root['all_menu_ids'] = menu_ids # includes menu_root_ids!
# make a tree using parent_id
menu_items_map = dict(
(menu_item["id"], menu_item) for menu_item in menu_items)
for menu_item in menu_items:
if menu_item['parent_id']:
parent = menu_item['parent_id'][0]
else:
parent = False
if parent in menu_items_map:
menu_items_map[parent].setdefault(
'children', []).append(menu_item)
# sort each node's children by sequence
for menu_item in menu_items:
menu_item.setdefault('children', []).sort(
key=operator.itemgetter('sequence'))
return menu_root
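# A rough sketch of what load_menus returns (ids and menu names are hypothetical):
# {'id': False, 'name': 'root', 'parent_id': [-1, ''],
#  'all_menu_ids': [1, 2, 5, ...],
#  'children': [
#      {'id': 1, 'name': 'Sales', 'sequence': 10, 'parent_id': False,
#       'children': [{'id': 2, 'name': 'Quotations', 'parent_id': [1, 'Sales'],
#                     'children': []}, ...]},
#      ...]}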
_columns = {
'name': fields.char('Menu', required=True, translate=True),
'sequence': fields.integer('Sequence'),
'child_id': fields.one2many('ir.ui.menu', 'parent_id', 'Child IDs'),
'parent_id': fields.many2one('ir.ui.menu', 'Parent Menu', select=True, ondelete="restrict"),
'parent_left': fields.integer('Parent Left', select=True),
'parent_right': fields.integer('Parent Right', select=True),
'groups_id': fields.many2many('res.groups', 'ir_ui_menu_group_rel',
'menu_id', 'gid', 'Groups', help="If you have groups, the visibility of this menu will be based on these groups. "\
"If this field is empty, Odoo will compute visibility based on the related object's read access."),
'complete_name': fields.function(_get_full_name,
string='Full Path', type='char', size=128),
'icon': fields.selection(tools.icons, 'Icon', size=64),
'icon_pict': fields.function(_get_icon_pict, type='char', size=32),
'web_icon': fields.char('Web Icon File'),
'web_icon_hover': fields.char('Web Icon File (hover)'),
'web_icon_data': fields.function(_get_image_icon, string='Web Icon Image', type='binary', readonly=True, store=True, multi='icon'),
'web_icon_hover_data': fields.function(_get_image_icon, string='Web Icon Image (hover)', type='binary', readonly=True, store=True, multi='icon'),
'needaction_enabled': fields.function(_get_needaction_enabled,
type='boolean',
store=True,
string='Target model uses the need action mechanism',
help='If the menu entry action is an act_window action, and if this action is related to a model that uses the need_action mechanism, this field is set to true. Otherwise, it is false.'),
'action': fields.function(_action, fnct_inv=_action_inv,
type='reference', string='Action', size=21,
selection=[
('ir.actions.report.xml', 'ir.actions.report.xml'),
('ir.actions.act_window', 'ir.actions.act_window'),
('ir.actions.wizard', 'ir.actions.wizard'),
('ir.actions.act_url', 'ir.actions.act_url'),
('ir.actions.server', 'ir.actions.server'),
('ir.actions.client', 'ir.actions.client'),
]),
}
def _rec_message(self, cr, uid, ids, context=None):
return _('Error ! You can not create recursive Menu.')
_constraints = [
(osv.osv._check_recursion, _rec_message, ['parent_id'])
]
_defaults = {
'icon': 'STOCK_OPEN',
'icon_pict': ('stock', ('STOCK_OPEN', 'ICON_SIZE_MENU')),
'sequence': 10,
}
_order = "sequence,id"
_parent_store = True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aifil/odoo | openerp/addons/test_new_api/tests/test_related.py | 39 | 8025 | #
# test cases for related fields, etc.
#
import unittest
from openerp.osv import fields
from openerp.tests import common
class TestRelatedField(common.TransactionCase):
def setUp(self):
super(TestRelatedField, self).setUp()
self.alpha = self.registry('test_new_api.alpha')
self.bravo = self.registry('test_new_api.bravo')
self.alpha_id = self.alpha.create(self.cr, self.uid, {'name': 'Alpha'})
self.alpha.create(self.cr, self.uid, {'name': 'Beta'})
self.bravo.create(self.cr, self.uid, {'alpha_id': self.alpha_id})
def test_0_related(self):
""" test an usual related field """
# find bravos that satisfy [('alpha_id.name', '=', 'Alpha')]
alpha_ids = self.alpha.search(self.cr, self.uid, [('name', '=', 'Alpha')])
bravo_ids1 = self.bravo.search(self.cr, self.uid, [('alpha_id', 'in', alpha_ids)])
bravo_ids2 = self.bravo.search(self.cr, self.uid, [('alpha_name', '=', 'Alpha')])
self.assertEqual(bravo_ids1, bravo_ids2)
def do_test_company_field(self, field):
# get a bravo with a non-null alpha_id
ids = self.bravo.search(self.cr, self.uid, [('alpha_id', '!=', False)])
bravo = self.bravo.browse(self.cr, self.uid, ids[0])
# check reading related field
self.assertEqual(bravo[field], bravo.alpha_id)
# check that search on related field is equivalent to original field
ids1 = self.bravo.search(self.cr, self.uid, [('alpha_id', '=', bravo.alpha_id.id)])
ids2 = self.bravo.search(self.cr, self.uid, [(field, '=', bravo.alpha_id.id)])
self.assertEqual(ids1, ids2)
def test_1_single_related(self):
""" test a related field with a single indirection like fields.related('foo') """
self.do_test_company_field('related_alpha_id')
def test_2_double_related(self):
""" test a related field referring to a related field """
self.do_test_company_field('related_related_alpha_id')
def test_3_read_write(self):
""" write on a related field """
# find an alpha with a non-null name
alpha = self.alpha.browse(self.cr, self.uid, self.alpha_id)
self.assertTrue(alpha.name)
# find bravos that satisfy [('alpha_name', '=', alpha.name)]
bravo_ids = self.bravo.search(self.cr, self.uid, [('alpha_name', '=', alpha.name)])
self.assertTrue(bravo_ids)
bravo = self.bravo.browse(self.cr, self.uid, bravo_ids[0])
# change the name of alpha through the related field, and check result
NAME = 'Monthy Pythons'
bravo.write({'alpha_name': NAME})
self.assertEqual(bravo.alpha_id.name, NAME)
self.assertEqual(bravo.alpha_name, NAME)
class TestPropertyField(common.TransactionCase):
def setUp(self):
super(TestPropertyField, self).setUp()
self.user = self.registry('res.users')
self.partner = self.registry('res.partner')
self.company = self.registry('res.company')
self.country = self.registry('res.country')
self.property = self.registry('ir.property')
self.imd = self.registry('ir.model.data')
@unittest.skip("invalid monkey-patching")
def test_1_property_multicompany(self):
cr, uid = self.cr, self.uid
parent_company_id = self.imd.get_object_reference(cr, uid, 'base', 'main_company')[1]
country_be = self.imd.get_object_reference(cr, uid, 'base', 'be')[1]
country_fr = self.imd.get_object_reference(cr, uid, 'base', 'fr')[1]
group_partner_manager = self.imd.get_object_reference(cr, uid, 'base', 'group_partner_manager')[1]
group_multi_company = self.imd.get_object_reference(cr, uid, 'base', 'group_multi_company')[1]
sub_company = self.company.create(cr, uid, {'name': 'MegaCorp', 'parent_id': parent_company_id})
alice = self.user.create(cr, uid, {'name': 'Alice',
'login':'alice',
'email':'[email protected]',
'company_id':parent_company_id,
'company_ids':[(6, 0, [parent_company_id, sub_company])],
'country_id':country_be,
'groups_id': [(6, 0, [group_partner_manager, group_multi_company])]
})
bob = self.user.create(cr, uid, {'name': 'Bob',
'login':'bob',
'email':'[email protected]',
'company_id':sub_company,
'company_ids':[(6, 0, [parent_company_id, sub_company])],
'country_id':country_fr,
'groups_id': [(6, 0, [group_partner_manager, group_multi_company])]
})
self.partner._columns = dict(self.partner._columns)
self.partner._columns.update({
'property_country': fields.property(type='many2one', relation="res.country", string="Country by company"),
})
self.partner._field_create(cr)
partner_id = self.partner.create(cr, alice, {
'name': 'An International Partner',
'email': '[email protected]',
'company_id': parent_company_id,
})
self.partner.write(cr, bob, [partner_id], {'property_country': country_fr})
self.assertEqual(self.partner.browse(cr, bob, partner_id).property_country.id, country_fr, "Bob does not see the value he has set on the property field")
self.partner.write(cr, alice, [partner_id], {'property_country': country_be})
self.assertEqual(self.partner.browse(cr, alice, partner_id).property_country.id, country_be, "Alice does not see the value she has set on the property field")
self.assertEqual(self.partner.browse(cr, bob, partner_id).property_country.id, country_fr, "Changes made by Alice have overwritten Bob's value")
class TestHtmlField(common.TransactionCase):
def setUp(self):
super(TestHtmlField, self).setUp()
self.partner = self.registry('res.partner')
def test_00_sanitize(self):
cr, uid, context = self.cr, self.uid, {}
old_columns = self.partner._columns
self.partner._columns = dict(old_columns)
self.partner._columns.update({
'comment': fields.html('Secure Html', sanitize=False),
})
some_ugly_html = """<p>Oops this should maybe be sanitized
% if object.some_field and not object.oriented:
<table>
% if object.other_field:
<tr style="border: 10px solid black;">
${object.mako_thing}
<td>
</tr>
% endif
<tr>
%if object.dummy_field:
<p>Youpie</p>
%endif"""
pid = self.partner.create(cr, uid, {
'name': 'Raoul Poilvache',
'comment': some_ugly_html,
}, context=context)
partner = self.partner.browse(cr, uid, pid, context=context)
self.assertEqual(partner.comment, some_ugly_html, 'Error in HTML field: content was sanitized but field has sanitize=False')
self.partner._columns.update({
'comment': fields.html('Unsecure Html', sanitize=True),
})
self.partner.write(cr, uid, [pid], {
'comment': some_ugly_html,
}, context=context)
partner = self.partner.browse(cr, uid, pid, context=context)
# sanitize should have closed tags left open in the original html
self.assertIn('</table>', partner.comment, 'Error in HTML field: content does not seem to have been sanitized despite sanitize=True')
self.assertIn('</td>', partner.comment, 'Error in HTML field: content does not seem to have been sanitized despite sanitize=True')
self.assertIn('<tr style="', partner.comment, 'Style attr should not have been stripped')
self.partner._columns['comment'] = fields.html('Stripped Html', sanitize=True, strip_style=True)
self.partner.write(cr, uid, [pid], {'comment': some_ugly_html}, context=context)
partner = self.partner.browse(cr, uid, pid, context=context)
self.assertNotIn('<tr style="', partner.comment, 'Style attr should have been stripped')
self.partner._columns = old_columns
| gpl-3.0 |
JonathanStein/odoo | addons/sale_margin/__openerp__.py | 261 | 1592 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Margins in Sales Orders',
'version':'1.0',
'category' : 'Sales Management',
'description': """
This module adds the 'Margin' to sales orders.
=============================================
This gives the profitability by calculating the difference between the Unit
Price and Cost Price.
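For example, a sale order line with a unit price of 120.0 and a cost price of
100.0 is reported with a margin of 20.0.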
""",
'author':'OpenERP SA',
'depends':['sale'],
'demo':['sale_margin_demo.xml'],
'test': ['test/sale_margin.yml'],
'data':['security/ir.model.access.csv','sale_margin_view.xml'],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gomsec/pysecdump | wpc/users.py | 6 | 1807 | from wpc.user import user
import win32net
import wpc.conf
class users():
def __init__(self):
self.users = []
def get_filtered(self, ):
if self.users == []:
#try:
level = 1
resume = 0
while True:
userlist, total, resume = win32net.NetUserEnum(wpc.conf.remote_server, level, 0, resume, 999999)
#print u
for u in userlist:
# self.users.append(user['name'])
#try:
sid, name, type = wpc.conf.cache.LookupAccountName(wpc.conf.remote_server, u['name'])
self.users.append(user(sid))
#except:
# print "[E] failed to lookup sid of %s" % user['name']
if resume == 0:
break
return self.users
def get_all(self):
if self.users == []:
#try:
level = 0
resume = 0
while True:
userlist, total, resume = win32net.NetUserEnum(wpc.conf.remote_server, level, 0, resume, 999999)
#print u
for u in userlist:
# self.users.append(user['name'])
#try:
sid, name, type = wpc.conf.cache.LookupAccountName(wpc.conf.remote_server, u['name'])
self.users.append(user(sid))
#except:
# print "[E] failed to lookup sid of %s" % user['name']
if resume == 0:
break
#except:
# print "[E] NetUserEnum failed"
return self.users
| gpl-3.0 |
yining0417/zookeeper | build/zookeeper-3.4.6/contrib/zkpython/src/test/zktestbase.py | 98 | 3572 | #!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest, threading, zookeeper
ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"}
class TestBase(unittest.TestCase):
SERVER_PORT = 22182
def __init__(self,methodName='runTest'):
unittest.TestCase.__init__(self,methodName)
self.host = "localhost:%d" % self.SERVER_PORT
self.connected = False
self.handle = -1
logdir = os.environ.get("ZKPY_LOG_DIR")
logfile = os.path.join(logdir, self.__class__.__name__ + ".log")
try:
f = open(logfile,"w")
zookeeper.set_log_stream(f)
except IOError:
print("Couldn't open " + logfile + " for writing")
def setUp(self):
self.callback_flag = False
self.cv = threading.Condition()
self.connected = False
def connection_watcher(handle, type, state, path):
self.cv.acquire()
self.connected = True
self.cv.notify()
self.cv.release()
self.cv.acquire()
self.handle = zookeeper.init(self.host, connection_watcher)
self.cv.wait(15.0)
self.cv.release()
if not self.connected:
raise Exception("Couldn't connect to host -", self.host)
def newConnection(self):
cv = threading.Condition()
self.pending_connection = False
def connection_watcher(handle, type, state, path):
cv.acquire()
self.pending_connection = True
cv.notify()
cv.release()
cv.acquire()
handle = zookeeper.init(self.host, connection_watcher)
cv.wait(15.0)
cv.release()
if not self.pending_connection:
raise Exception("Couldn't connect to host -", self.host)
return handle
def ensureDeleted(self,path):
self.assertEqual(zookeeper.CONNECTED_STATE, zookeeper.state(self.handle), "Not connected!")
try:
self.assertEqual(zookeeper.OK, zookeeper.delete(self.handle, path))
except zookeeper.NoNodeException:
pass
def ensureCreated(self,path,data="",flags=zookeeper.EPHEMERAL):
"""
It's possible not to get the flags you want here if the node already exists
"""
self.assertEqual(zookeeper.CONNECTED_STATE, zookeeper.state(self.handle), "Not connected!")
try:
self.assertEqual(path, zookeeper.create(self.handle, path, data, [ZOO_OPEN_ACL_UNSAFE], flags))
except zookeeper.NodeExistsException:
pass
def tearDown(self):
if self.connected:
zookeeper.close(self.handle)
def all(self, iterable):
for element in iterable:
if not element:
return False
return True
| apache-2.0 |
Gabriel439/pip | pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py | 2040 | 8935 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
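# As an illustration (added comment): after inserting the keys 'a' then 'b',
# the sentinel root and the two links form the circle
#     root <-> [root, link_b, 'a'] <-> [link_a, root, 'b'] <-> root
# so following NEXT pointers from the sentinel visits keys in insertion order.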
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| mit |
cmdunkers/DeeperMind | PythonEnv/lib/python2.7/site-packages/theano/scalar/tests/test_basic.py | 4 | 16173 | """
These routines are not well-tested. They are also old.
OB says that it is not important to test them well because Scalar Ops
are rarely used by themselves, instead they are the basis for Tensor Ops
(which should be checked thoroughly). Moreover, Scalar will be changed
to use numpy's scalar routines.
If you do want to rewrite these tests, bear in mind:
* You don't need to use Composite.
* FunctionGraph and DualLinker are old, use compile.function instead.
"""
import unittest
import numpy as np
import theano
from theano.gof import FunctionGraph
from theano import gof
from theano.tests import unittest_tools as utt
from theano.scalar.basic import (floats, float32, float64,
ints, int8, int32, complex64,
ComplexError, IntDiv, TrueDiv,
Composite, add, div_proxy,
and_, eq, neq, invert, mul, Scalar, InRange)
from theano.scalar.basic import (
true_div, inv, log, log2, log10, log1p, exp, exp2, expm1, sqrt, deg2rad,
rad2deg, cos, arccos, sin, arcsin, tan, arctan, arctan2, cosh, arccosh,
sinh, arcsinh, tanh, arctanh)
def inputs():
return floats('xyz')
class test_ScalarOps(unittest.TestCase):
def test_straightforward(self):
x, y, z = inputs()
e = mul(add(x, y), div_proxy(x, y))
g = FunctionGraph([x, y], [e])
fn = gof.DualLinker().accept(g).make_function()
assert fn(1.0, 2.0) == 1.5
# This test is moved to theano.tensor.tests.test_basic.py:test_mod
# We move it there because, under Ubuntu, the c_extract call of theano.scalar
# calls PyInt_check and fails on some OSes while working on others. Since we
# normally use theano.tensor.scalar rather than theano.scalar directly, that
# is not important. Also, this makes the theano function fail at call time,
# so it is not a silent bug.
# --> This is why it is purposely named 'tes_mod' instead of 'test_mod'.
def tes_mod(self):
"""
We add this test because not all languages and C implementations give the
same sign for the result. This checks that the c_code of `Mod` is implemented
the same way as in Python. That is what we want.
"""
x, y = ints('xy')
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x%y])).make_function()
for a, b in ((0, 1), (1, 1), (0, -1), (1, -1), (-1, -1),
(1, 2), (-1, 2), (1, -2), (-1, -2),
(5, 3), (-5, 3), (5, -3), (-5, -3)
):
self.assertTrue(fn(a, b) == a%b, (a,))
class test_composite(unittest.TestCase):
def test_straightforward(self):
x, y, z = inputs()
e = mul(add(x, y), div_proxy(x, y))
C = Composite([x, y], [e])
c = C.make_node(x, y)
# print c.c_code(['x', 'y'], ['z'], dict(id = 0))
g = FunctionGraph([x, y], [c.out])
fn = gof.DualLinker().accept(g).make_function()
assert fn(1.0, 2.0) == 1.5
def test_flatten(self):
# Test that we flatten multiple Composite.
x, y, z = inputs()
C = Composite([x, y], [x + y])
CC = Composite([x, y], [C(x * y, y)])
assert not isinstance(CC.outputs[0].owner.op, Composite)
# Test with multiple outputs
CC = Composite([x, y, z], [C(x * y, y), C(x * z, y)])
# We don't flatten that case.
assert isinstance(CC.outputs[0].owner.op, Composite)
def test_with_constants(self):
x, y, z = inputs()
e = mul(add(70.0, y), div_proxy(x, y))
C = Composite([x, y], [e])
c = C.make_node(x, y)
assert "70.0" in c.op.c_code(c, 'dummy', ['x', 'y'], ['z'], dict(id=0))
# print c.c_code(['x', 'y'], ['z'], dict(id = 0))
g = FunctionGraph([x, y], [c.out])
fn = gof.DualLinker().accept(g).make_function()
assert fn(1.0, 2.0) == 36.0
def test_many_outputs(self):
x, y, z = inputs()
e0 = x + y + z
e1 = x + y * z
e2 = x / y
C = Composite([x, y, z], [e0, e1, e2])
c = C.make_node(x, y, z)
# print c.c_code(['x', 'y', 'z'], ['out0', 'out1', 'out2'], dict(id = 0))
g = FunctionGraph([x, y, z], c.outputs)
fn = gof.DualLinker().accept(g).make_function()
assert fn(1.0, 2.0, 3.0) == [6.0, 7.0, 0.5]
def test_composite_printing(self):
x, y, z = floats('xyz')
e0 = x + y + z
e1 = x + y * z
e2 = x / y
e3 = x // 5
e4 = -x
e5 = x - y
e6 = x ** y + (-z)
e7 = x % 3
C = Composite([x, y, z], [e0, e1, e2, e3, e4, e5, e6, e7])
c = C.make_node(x, y, z)
g = FunctionGraph([x, y, z], c.outputs)
fn = gof.DualLinker().accept(g).make_function()
assert str(g) == ('[*1 -> Composite{((i0 + i1) + i2),'
' (i0 + (i1 * i2)), (i0 / i1), '
'(i0 // Constant{5}), '
'(-i0), (i0 - i1), ((i0 ** i1) + (-i2)),'
' (i0 % Constant{3})}(x, y, z), '
'*1::1, *1::2, *1::3, *1::4, *1::5, *1::6, *1::7]')
def test_make_node_continue_graph(self):
# This is a test for a bug (now fixed) that disabled the
# local_gpu_elemwise_0 optimization and printed an
# optimization warning on the terminal.
# We test that Composite.make_node accept as inputs Variable
# some that represent existing computation.
si0 = theano.scalar.int8()
si1 = theano.scalar.int8()
si2 = theano.scalar.float32()
sout = (si0 * si1) / si2
sop = theano.scalar.Composite([si0, si1, si2],
[sout])
si0 = theano.scalar.int8()
si1 = theano.scalar.int8()
si2 = theano.scalar.float32()
si3 = theano.scalar.float32()
sop.make_node(si0 * si3, si1, si2)
class test_logical(unittest.TestCase):
def test_gt(self):
x, y, z = inputs()
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x > y])).make_function()
for a, b in ((3., 9), (3, 0.9), (3, 3)):
self.assertTrue(fn(a, b) == (a > b))
def test_lt(self):
x, y, z = inputs()
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x < y])).make_function()
for a, b in ((3., 9), (3, 0.9), (3, 3)):
self.assertTrue(fn(a, b) == (a < b))
def test_le(self):
x, y, z = inputs()
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x <= y])).make_function()
for a, b in ((3., 9), (3, 0.9), (3, 3)):
self.assertTrue(fn(a, b) == (a <= b))
def test_ge(self):
x, y, z = inputs()
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x >= y])).make_function()
for a, b in ((3., 9), (3, 0.9), (3, 3)):
self.assertTrue(fn(a, b) == (a >= b))
def test_eq(self):
x, y, z = inputs()
fn = gof.DualLinker().accept(FunctionGraph([x, y], [eq(x, y)])).make_function()
for a, b in ((3., 9), (3, 0.9), (3, 3)):
self.assertTrue(fn(a, b) == (a == b))
def test_neq(self):
x, y, z = inputs()
fn = gof.DualLinker().accept(FunctionGraph([x, y], [neq(x, y)])).make_function()
for a, b in ((3., 9), (3, 0.9), (3, 3)):
self.assertTrue(fn(a, b) == (a != b))
def test_or(self):
x, y, z = ints('xyz')
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x|y])).make_function()
for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
self.assertTrue(fn(a, b) == (a|b), (a, b))
def test_xor(self):
x, y, z = ints('xyz')
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x^y])).make_function()
for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
self.assertTrue(fn(a, b) == (a ^ b), (a, b))
def test_and(self):
x, y, z = ints('xyz')
fn = gof.DualLinker().accept(FunctionGraph([x, y], [and_(x, y)])).make_function()
for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
self.assertTrue(fn(a, b) == (a & b), (a, b))
x, y, z = ints('xyz')
fn = gof.DualLinker().accept(FunctionGraph([x, y], [x & y])).make_function()
for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
self.assertTrue(fn(a, b) == (a & b), (a, b))
def test_not(self):
x, y, z = ints('xyz')
fn = gof.DualLinker().accept(FunctionGraph([x, y], [invert(x)])).make_function()
for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
self.assertTrue(fn(a, b) == ~a, (a,))
x, y, z = ints('xyz')
fn = gof.DualLinker().accept(FunctionGraph([x, y], [~x])).make_function()
for a, b in ((0, 1), (0, 0), (1, 0), (1, 1)):
self.assertTrue(fn(a, b) == ~a, (a,))
# This class does not inherit from unittest.TestCase, because it would
# interfere with the "yield" mechanism that automatically generates test, see
# http://stackoverflow.com/questions/6689537/nose-test-generators-inside-class
# Therefore, it needs to be named "test_..." or "Test_...", so nose can pick
# it up by name, otherwise the tests would not be executed.
class test_upgrade_to_float(object):
# Test for Ops whose output has to be floating point, even when all
# inputs are ints.
# In particular, when the inputs are int8, the output should be
# at least float32, not float16.
unary_ops_vals = [
(inv, list(range(-127, 0)) + list(range(1, 127))),
(sqrt, list(range(0, 128))),
(log, list(range(1, 128))),
(log2, list(range(1, 128))),
(log10, list(range(1, 128))),
(log1p, list(range(0, 128))),
(exp, list(range(-127, 89))),
(exp2, list(range(-127, 89))),
(expm1, list(range(-127, 89))),
(deg2rad, list(range(-127, 128))),
(rad2deg, list(range(-127, 128))),
(cos, list(range(-127, 128))),
(arccos, list(range(-1, 2))),
(cosh, list(range(-89, 90))),
(arccosh, list(range(1, 128))),
(sin, list(range(-127, 128))),
(arcsin, list(range(-1, 2))),
(sinh, list(range(-89, 90))),
(arcsinh, list(range(-127, 128))),
(tan, list(range(-3, 4))),
(arctan, list(range(-127, 128))),
(tanh, list(range(-127, 128))),
(arctanh, [0])]
binary_ops_vals = [
(arctan2, list(range(-127, 128)), list(range(-127, 128)))]
@staticmethod
def _test_unary(unary_op, x_range):
xi = int8('xi')
xf = float32('xf')
ei = unary_op(xi)
fi = theano.function([xi], ei)
ef = unary_op(xf)
ff = theano.function([xf], ef)
for x_val in x_range:
outi = fi(x_val)
outf = ff(x_val)
assert outi.dtype == outf.dtype, 'incorrect dtype'
assert np.allclose(outi, outf), 'insufficient precision'
@staticmethod
def _test_binary(binary_op, x_range, y_range):
xi = int8('xi')
yi = int8('yi')
xf = float32('xf')
yf = float32('yf')
ei = binary_op(xi, yi)
fi = theano.function([xi, yi], ei)
ef = binary_op(xf, yf)
ff = theano.function([xf, yf], ef)
for x_val in x_range:
for y_val in y_range:
outi = fi(x_val, y_val)
outf = ff(x_val, y_val)
assert outi.dtype == outf.dtype, 'incorrect dtype'
assert np.allclose(outi, outf), 'insufficient precision'
def test_true_div(self):
# true_div's upcast policy is not exactly "upgrade_to_float",
# so the test is a little bit different
x_range = list(range(-127, 128))
y_range = list(range(-127, 0)) + list(range(1, 127))
xi = int8('xi')
yi = int8('yi')
xf = Scalar(theano.config.floatX)('xf')
yf = Scalar(theano.config.floatX)('yf')
ei = true_div(xi, yi)
fi = theano.function([xi, yi], ei)
ef = true_div(xf, yf)
ff = theano.function([xf, yf], ef)
for x_val in x_range:
for y_val in y_range:
outi = fi(x_val, y_val)
outf = ff(x_val, y_val)
assert outi.dtype == outf.dtype, 'incorrect dtype'
assert np.allclose(outi, outf), 'insufficient precision'
def test_unary(self):
# Automatically define all individual unary tests
for unary_op, x_range in self.unary_ops_vals:
test_name = 'test_%s' % unary_op.name
# Make a lambda function so we can name the test
test = lambda: self._test_unary(unary_op, x_range)
test.description = test_name
yield test
def test_binary(self):
# Automatically define all individual binary tests
for binary_op, x_range, y_range in self.binary_ops_vals:
test_name = 'test_%s' % binary_op.name
# Make a lambda function so we can name the test
test = lambda: self._test_binary(binary_op, x_range, y_range)
test.description = test_name
yield test
class test_complex_mod(unittest.TestCase):
"""Make sure % fails on complex numbers."""
def test_fail(self):
x = complex64()
y = int32()
try:
x % y
assert False
except ComplexError:
pass
class test_div(unittest.TestCase):
def test_0(self):
a = int8()
b = int32()
c = complex64()
d = float64()
f = float32()
#print (a//b).owner.op
assert isinstance((a//b).owner.op, IntDiv)
assert isinstance((b//a).owner.op, IntDiv)
assert isinstance((b/d).owner.op, TrueDiv)
assert isinstance((b/f).owner.op, TrueDiv)
assert isinstance((f/a).owner.op, TrueDiv)
assert isinstance((d/b).owner.op, TrueDiv)
assert isinstance((d/f).owner.op, TrueDiv)
assert isinstance((f/c).owner.op, TrueDiv)
assert isinstance((a/c).owner.op, TrueDiv)
def test_grad_gt():
x = float32(name='x')
y = float32(name='y')
z = x > y
g = theano.gradient.grad(z, y)
assert g.eval({ y : 1. }) == 0.
def test_grad_switch():
# This is a code snippet from the mailing list
# It caused an assert to be raised due to the
# switch op's grad method not handling integer
# inputs correctly
x = theano.tensor.matrix()
c = theano.tensor.matrix()
s = theano.tensor.switch(c, x, 0)
l = s.sum()
theano.gradient.grad(l, x)
def test_grad_identity():
    # Check that the grad method of Identity correctly handles int dtypes
x = theano.tensor.imatrix('x')
# tensor_copy is Elemwise{Identity}
y = theano.tensor.tensor_copy(x)
l = y.sum(dtype=theano.config.floatX)
theano.gradient.grad(l, x)
def test_grad_inrange():
for bound_definition in [(True, True), (False, False)]:
# Instantiate op, and then take the gradient
op = InRange(*bound_definition)
x = theano.tensor.fscalar('x')
low = theano.tensor.fscalar('low')
high = theano.tensor.fscalar('high')
out = op(x, low, high)
gx, glow, ghigh = theano.tensor.grad(out, [x, low, high])
        # We check that the gradients are equal to zero when x is below the
        # lower bound, equal to the lower bound, between the two bounds,
        # equal to the upper bound, and above the upper bound.
        # Mathematically we should have an infinite gradient when x is equal
        # to the lower or upper bound, but in that case Theano defines the
        # gradient to be zero for stability.
f = theano.function([x, low, high], [gx, glow, ghigh])
utt.assert_allclose(f(0, 1, 5), [0, 0, 0])
utt.assert_allclose(f(1, 1, 5), [0, 0, 0])
utt.assert_allclose(f(2, 1, 5), [0, 0, 0])
utt.assert_allclose(f(5, 1, 5), [0, 0, 0])
utt.assert_allclose(f(7, 1, 5), [0, 0, 0])
# Testing of Composite is done in tensor/tests/test_opt.py
# in test_fusion, TestCompositeCodegen
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
NL66278/odoo | addons/account_payment/account_invoice.py | 382 | 2377 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import osv
class Invoice(osv.osv):
_inherit = 'account.invoice'
    # Forbid cancelling an invoice if the related move lines have already been
    # used in a payment order. Otherwise, importing the payment line in the
    # bank statement would crash because no move would be found for the
    # payment line.
def action_cancel(self, cr, uid, ids, context=None):
payment_line_obj = self.pool.get('payment.line')
for inv in self.browse(cr, uid, ids, context=context):
pl_line_ids = []
if inv.move_id and inv.move_id.line_id:
inv_mv_lines = [x.id for x in inv.move_id.line_id]
pl_line_ids = payment_line_obj.search(cr, uid, [('move_line_id','in',inv_mv_lines)], context=context)
if pl_line_ids:
pay_line = payment_line_obj.browse(cr, uid, pl_line_ids, context=context)
payment_order_name = ','.join(map(lambda x: x.order_id.reference, pay_line))
                raise osv.except_osv(_('Error!'), _("You cannot cancel an invoice which has already been imported in a payment order. Remove it from the following payment order: %s.") % payment_order_name)
return super(Invoice, self).action_cancel(cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hsfzxjy/wisecitymbc | site_packages/django_filters/filterset.py | 2 | 13862 | from __future__ import absolute_import
from __future__ import unicode_literals
from copy import deepcopy
from django import forms
from django.core.validators import EMPTY_VALUES
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.db.models.related import RelatedObject
from django.utils import six
from django.utils.datastructures import SortedDict
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError: # pragma: nocover
# Django < 1.5 fallback
from django.db.models.sql.constants import LOOKUP_SEP # noqa
from .filters import (Filter, CharFilter, BooleanFilter,
ChoiceFilter, DateFilter, DateTimeFilter, TimeFilter, ModelChoiceFilter,
ModelMultipleChoiceFilter, NumberFilter)
ORDER_BY_FIELD = 'o'
def get_declared_filters(bases, attrs, with_base_filters=True):
filters = []
for filter_name, obj in list(attrs.items()):
if isinstance(obj, Filter):
obj = attrs.pop(filter_name)
if getattr(obj, 'name', None) is None:
obj.name = filter_name
filters.append((filter_name, obj))
filters.sort(key=lambda x: x[1].creation_counter)
if with_base_filters:
for base in bases[::-1]:
if hasattr(base, 'base_filters'):
filters = list(base.base_filters.items()) + filters
else:
for base in bases[::-1]:
if hasattr(base, 'declared_filters'):
filters = list(base.declared_filters.items()) + filters
return SortedDict(filters)
def get_model_field(model, f):
parts = f.split(LOOKUP_SEP)
opts = model._meta
for name in parts[:-1]:
try:
rel = opts.get_field_by_name(name)[0]
except FieldDoesNotExist:
return None
if isinstance(rel, RelatedObject):
model = rel.model
opts = rel.opts
else:
model = rel.rel.to
opts = model._meta
try:
rel, model, direct, m2m = opts.get_field_by_name(parts[-1])
except FieldDoesNotExist:
return None
return rel
def filters_for_model(model, fields=None, exclude=None, filter_for_field=None,
filter_for_reverse_field=None):
field_dict = SortedDict()
opts = model._meta
if fields is None:
fields = [f.name for f in sorted(opts.fields + opts.many_to_many)
if not isinstance(f, models.AutoField)]
for f in fields:
if exclude is not None and f in exclude:
continue
field = get_model_field(model, f)
if field is None:
field_dict[f] = None
continue
if isinstance(field, RelatedObject):
filter_ = filter_for_reverse_field(field, f)
else:
filter_ = filter_for_field(field, f)
if filter_:
field_dict[f] = filter_
return field_dict
class FilterSetOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.order_by = getattr(options, 'order_by', False)
self.form = getattr(options, 'form', forms.Form)
class FilterSetMetaclass(type):
def __new__(cls, name, bases, attrs):
try:
parents = [b for b in bases if issubclass(b, FilterSet)]
except NameError:
# We are defining FilterSet itself here
parents = None
declared_filters = get_declared_filters(bases, attrs, False)
new_class = super(
FilterSetMetaclass, cls).__new__(cls, name, bases, attrs)
if not parents:
return new_class
opts = new_class._meta = FilterSetOptions(
getattr(new_class, 'Meta', None))
if opts.model:
filters = filters_for_model(opts.model, opts.fields, opts.exclude,
new_class.filter_for_field,
new_class.filter_for_reverse_field)
filters.update(declared_filters)
else:
filters = declared_filters
if None in filters.values():
raise TypeError("Meta.fields contains a field that isn't defined "
"on this FilterSet")
new_class.declared_filters = declared_filters
new_class.base_filters = filters
return new_class
FILTER_FOR_DBFIELD_DEFAULTS = {
models.AutoField: {
'filter_class': NumberFilter
},
models.CharField: {
'filter_class': CharFilter
},
models.TextField: {
'filter_class': CharFilter
},
models.BooleanField: {
'filter_class': BooleanFilter
},
models.DateField: {
'filter_class': DateFilter
},
models.DateTimeField: {
'filter_class': DateTimeFilter
},
models.TimeField: {
'filter_class': TimeFilter
},
models.OneToOneField: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
'to_field_name': f.rel.field_name,
}
},
models.ForeignKey: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
'to_field_name': f.rel.field_name
}
},
models.ManyToManyField: {
'filter_class': ModelMultipleChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
}
},
models.DecimalField: {
'filter_class': NumberFilter,
},
models.SmallIntegerField: {
'filter_class': NumberFilter,
},
models.IntegerField: {
'filter_class': NumberFilter,
},
models.PositiveIntegerField: {
'filter_class': NumberFilter,
},
models.PositiveSmallIntegerField: {
'filter_class': NumberFilter,
},
models.FloatField: {
'filter_class': NumberFilter,
},
models.NullBooleanField: {
'filter_class': BooleanFilter,
},
models.SlugField: {
'filter_class': CharFilter,
},
models.EmailField: {
'filter_class': CharFilter,
},
models.FilePathField: {
'filter_class': CharFilter,
},
models.URLField: {
'filter_class': CharFilter,
},
models.IPAddressField: {
'filter_class': CharFilter,
},
models.CommaSeparatedIntegerField: {
'filter_class': CharFilter,
},
}
class BaseFilterSet(object):
filter_overrides = {}
order_by_field = ORDER_BY_FIELD
strict = True
def __init__(self, data=None, queryset=None, prefix=None, strict=None):
self.is_bound = data is not None
self.data = data or {}
if queryset is None:
queryset = self._meta.model._default_manager.all()
self.queryset = queryset
self.form_prefix = prefix
if strict is not None:
self.strict = strict
self.filters = deepcopy(self.base_filters)
# propagate the model being used through the filters
for filter_ in self.filters.values():
filter_.model = self._meta.model
def __iter__(self):
for obj in self.qs:
yield obj
def __len__(self):
return len(self.qs)
def __getitem__(self, key):
return self.qs[key]
@property
def qs(self):
if not hasattr(self, '_qs'):
valid = self.is_bound and self.form.is_valid()
if self.strict and self.is_bound and not valid:
self._qs = self.queryset.none()
return self._qs
# start with all the results and filter from there
qs = self.queryset.all()
for name, filter_ in six.iteritems(self.filters):
value = None
if valid:
value = self.form.cleaned_data[name]
else:
raw_value = self.form[name].value()
try:
value = self.form.fields[name].clean(raw_value)
except forms.ValidationError:
# for invalid values either:
# strictly "apply" filter yielding no results and get outta here
if self.strict:
self._qs = self.queryset.none()
return self._qs
else: # or ignore this filter altogether
pass
if value is not None: # valid & clean data
qs = filter_.filter(qs, value)
if self._meta.order_by:
order_field = self.form.fields[self.order_by_field]
data = self.form[self.order_by_field].data
ordered_value = None
try:
ordered_value = order_field.clean(data)
except forms.ValidationError:
pass
if ordered_value in EMPTY_VALUES and self.strict:
ordered_value = self.form.fields[self.order_by_field].choices[0][0]
if ordered_value:
qs = qs.order_by(*self.get_order_by(ordered_value))
self._qs = qs
return self._qs
def count(self):
return self.qs.count()
@property
def form(self):
if not hasattr(self, '_form'):
fields = SortedDict([
(name, filter_.field)
for name, filter_ in six.iteritems(self.filters)])
fields[self.order_by_field] = self.ordering_field
Form = type(str('%sForm' % self.__class__.__name__),
(self._meta.form,), fields)
if self.is_bound:
self._form = Form(self.data, prefix=self.form_prefix)
else:
self._form = Form(prefix=self.form_prefix)
return self._form
def get_ordering_field(self):
if self._meta.order_by:
if isinstance(self._meta.order_by, (list, tuple)):
if isinstance(self._meta.order_by[0], (list, tuple)):
# e.g. (('field', 'Display name'), ...)
choices = [(f[0], f[1]) for f in self._meta.order_by]
else:
choices = [(f, _('%s (descending)' % capfirst(f[1:])) if f[0] == '-' else capfirst(f))
for f in self._meta.order_by]
else:
# add asc and desc field names
# use the filter's label if provided
choices = []
for f, fltr in self.filters.items():
choices.extend([
(fltr.name or f, fltr.label or capfirst(f)),
("-%s" % (fltr.name or f), _('%s (descending)' % (fltr.label or capfirst(f))))
])
return forms.ChoiceField(label="Ordering", required=False,
choices=choices)
@property
def ordering_field(self):
if not hasattr(self, '_ordering_field'):
self._ordering_field = self.get_ordering_field()
return self._ordering_field
def get_order_by(self, order_choice):
return [order_choice]
@classmethod
def filter_for_field(cls, f, name):
filter_for_field = dict(FILTER_FOR_DBFIELD_DEFAULTS)
filter_for_field.update(cls.filter_overrides)
default = {
'name': name,
'label': capfirst(f.verbose_name)
}
if f.choices:
default['choices'] = f.choices
return ChoiceFilter(**default)
data = filter_for_field.get(f.__class__)
if data is None:
# could be a derived field, inspect parents
for class_ in f.__class__.mro():
# skip if class_ is models.Field or object
# 1st item in mro() is original class
if class_ in (f.__class__, models.Field, object):
continue
data = filter_for_field.get(class_)
if data:
break
if data is None:
return
filter_class = data.get('filter_class')
default.update(data.get('extra', lambda f: {})(f))
if filter_class is not None:
return filter_class(**default)
@classmethod
def filter_for_reverse_field(cls, f, name):
rel = f.field.rel
queryset = f.model._default_manager.all()
default = {
'name': name,
'label': capfirst(rel.related_name),
'queryset': queryset,
}
if rel.multiple:
return ModelMultipleChoiceFilter(**default)
else:
return ModelChoiceFilter(**default)
class FilterSet(six.with_metaclass(FilterSetMetaclass, BaseFilterSet)):
pass
def filterset_factory(model):
meta = type(str('Meta'), (object,), {'model': model})
filterset = type(str('%sFilterSet' % model._meta.object_name),
(FilterSet,), {'Meta': meta})
return filterset
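# Editor-added sketch (not part of the original module): how a class produced
# by filterset_factory might be used in a view.  ``model`` and ``querydict``
# are assumptions standing in for a real model class and request.GET.
def _example_filterset_usage(model, querydict):
    filterset_cls = filterset_factory(model)
    fs = filterset_cls(querydict, queryset=model._default_manager.all())
    # fs.qs is the filtered queryset, fs.form the auto-generated filter form.
    return fs.qs, fs.form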
| gpl-2.0 |
aquilesIIIMB/Analisis_Espectral_LFP | analysis-tools/OpenEphys.py | 1 | 27029 | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 3 15:18:38 2014
@author: Dan Denman and Josh Siegle
Loads .continuous, .events, and .spikes files saved from the Open Ephys GUI
Usage:
import OpenEphys
data = OpenEphys.load(pathToFile) # returns a dict with data, timestamps, etc.
"""
import os
import numpy as np
import scipy.signal
import scipy.io
import time
import struct
import json
from copy import deepcopy
import re
# constants for pre-allocating matrices:
MAX_NUMBER_OF_SPIKES = 1e6
MAX_NUMBER_OF_EVENTS = 1e6
def load(filepath):
# redirects to code for individual file types
if 'continuous' in filepath:
data = loadContinuous(filepath)
elif 'spikes' in filepath:
data = loadSpikes(filepath)
elif 'events' in filepath:
data = loadEvents(filepath)
else:
raise Exception("Not a recognized file type. Please input a .continuous, .spikes, or .events file")
return data
def loadFolder(folderpath,**kwargs):
# load all continuous files in a folder
data = { }
# load all continuous files in a folder
if 'channels' in kwargs.keys():
filelist = ['100_CH'+x+'.continuous' for x in map(str,kwargs['channels'])]
else:
filelist = os.listdir(folderpath)
t0 = time.time()
numFiles = 0
for i, f in enumerate(filelist):
if '.continuous' in f:
data[f.replace('.continuous','')] = loadContinuous(os.path.join(folderpath, f))
numFiles += 1
print ''.join(('Avg. Load Time: ', str((time.time() - t0)/numFiles),' sec'))
print ''.join(('Total Load Time: ', str((time.time() - t0)),' sec'))
return data
def loadFolderToArray(folderpath, channels='all', dtype=float,
source='100', recording=None, start_record=None, stop_record=None,
verbose=True):
"""Load the neural data files in a folder to a single array.
By default, all channels in the folder are loaded in numerical order.
Args:
folderpath : string, path to folder containing OpenEphys files
channels : list of channel numbers to read
If 'all', then all channels are loaded in numerical order
dtype : float or np.int16
If float, then the data will be multiplied by bitVolts to convert
to microvolts. This increases the memory required by 4 times.
        source : string prefix of the continuous filenames, typically '100'
recording : int, or None
Multiple recordings in the same folder are suffixed with an
incrementing label. For the first or only recording, leave this as
None. Otherwise, specify an integer.
start_record, stop_record : the first and last record to read from
each file. This is converted into an appropriate number of samples
and passed to loadContinuous. Python indexing is used, so
`stop_record` is not inclusive. If `start_record` is None,
start at the beginning; if `stop_record` is None, read to the end.
        verbose : print status updates
Returns: numpy array of shape (n_samples, n_channels)
"""
# Get list of files
filelist = get_filelist(folderpath, source, channels, recording=None)
# Keep track of the time taken
t0 = time.time()
# Get the header info and use this to set start_record and stop_record
header = get_header_from_folder(folderpath, filelist)
if start_record is None:
start_record = 0
if stop_record is None:
stop_record = header['n_records']
# Extract each channel in order
arr_l = []
for filename in filelist:
arr = loadContinuous(os.path.join(folderpath, filename), dtype,
start_record=start_record, stop_record=stop_record,
verbose=verbose)['data']
arr_l.append(arr)
# Concatenate into an array of shape (n_samples, n_channels)
data_array = np.transpose(arr_l)
if verbose:
time_taken = time.time() - t0
print 'Avg. Load Time: %0.3f sec' % (time_taken / len(filelist))
print 'Total Load Time: %0.3f sec' % time_taken
return data_array
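# Editor-added sketch (not part of the original module): a typical call of
# loadFolderToArray.  The channel list and record range are example values.
def _example_load_folder_to_array(folderpath):
    # Read channels 1-4 as raw int16 (skipping the bitVolts scaling) and only
    # the first 100 records of each file.
    return loadFolderToArray(folderpath, channels=[1, 2, 3, 4],
        dtype=np.int16, start_record=0, stop_record=100, verbose=False)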
def loadContinuous(filepath, dtype=float, verbose=True,
start_record=None, stop_record=None, ignore_last_record=True):
"""Load continuous data from a single channel in the file `filepath`.
This is intended to be mostly compatible with the previous version.
The differences are:
- Ability to specify start and stop records
- Converts numeric data in the header from string to numeric data types
- Does not rely on a predefined maximum data size
- Does not necessarily drop the last record, which is usually incomplete
- Uses the block length that is specified in the header, instead of
hardcoding it.
- Returns timestamps and recordNumbers as int instead of float
- Tests the record metadata (N and record marker) for internal consistency
The OpenEphys file format breaks the data stream into "records",
typically of length 1024 samples. There is only one timestamp per record.
Args:
filepath : string, path to file to load
dtype : float or np.int16
If float, then the data will be multiplied by bitVolts to convert
to microvolts. This increases the memory required by 4 times.
verbose : whether to print debugging messages
start_record, stop_record : indices that control how much data
is read and returned. Pythonic indexing is used,
so `stop_record` is not inclusive. If `start` is None, reading
begins at the beginning; if `stop` is None, reading continues
until the end.
ignore_last_record : The last record in the file is almost always
incomplete (padded with zeros). By default it is ignored, for
compatibility with the old version of this function.
Returns: dict, with following keys
data : array of samples of data
header : the header info, as returned by readHeader
timestamps : the timestamps of each record of data that was read
recordingNumber : the recording number of each record of data that
was read. The length is the same as `timestamps`.
"""
if dtype not in [float, np.int16]:
raise ValueError("Invalid data type. Must be float or np.int16")
if verbose:
print "Loading continuous data from " + filepath
"""Here is the OpenEphys file format:
'each record contains one 64-bit timestamp, one 16-bit sample
count (N), 1 uint16 recordingNumber, N 16-bit samples, and
one 10-byte record marker (0 1 2 3 4 5 6 7 8 255)'
Thus each record has size 2*N + 22 bytes.
"""
# This is what the record marker should look like
spec_record_marker = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 255])
# Lists for data that's read
timestamps = []
recordingNumbers = []
samples = []
samples_read = 0
records_read = 0
# Open the file
with file(filepath, 'rb') as f:
# Read header info, file length, and number of records
header = readHeader(f)
record_length_bytes = 2 * header['blockLength'] + 22
fileLength = os.fstat(f.fileno()).st_size
n_records = get_number_of_records(filepath)
# Use this to set start and stop records if not specified
if start_record is None:
start_record = 0
if stop_record is None:
stop_record = n_records
# We'll stop reading after this many records are read
n_records_to_read = stop_record - start_record
# Seek to the start location, relative to the current position
# right after the header.
f.seek(record_length_bytes * start_record, 1)
# Keep reading till the file is finished
while f.tell() < fileLength and records_read < n_records_to_read:
# Skip the last record if requested, which usually contains
# incomplete data
if ignore_last_record and f.tell() == (
fileLength - record_length_bytes):
break
# Read the timestamp for this record
# litte-endian 64-bit signed integer
timestamps.append(np.fromfile(f, np.dtype('<i8'), 1))
# Read the number of samples in this record
# little-endian 16-bit unsigned integer
N = np.fromfile(f, np.dtype('<u2'), 1).item()
if N != header['blockLength']:
raise IOError('Found corrupted record in block ' +
                    str(records_read))
# Read and store the recording numbers
# big-endian 16-bit unsigned integer
recordingNumbers.append(np.fromfile(f, np.dtype('>u2'), 1))
# Read the data
# big-endian 16-bit signed integer
data = np.fromfile(f, np.dtype('>i2'), N)
if len(data) != N:
raise IOError("could not load the right number of samples")
# Optionally convert dtype
if dtype == float:
data = data * header['bitVolts']
# Store the data
samples.append(data)
# Extract and test the record marker
record_marker = np.fromfile(f, np.dtype('<u1'), 10)
if np.any(record_marker != spec_record_marker):
raise IOError("corrupted record marker at record %d" %
records_read)
# Update the count
            samples_read += len(data)
records_read += 1
# Concatenate results, or empty arrays if no data read (which happens
# if start_sample is after the end of the data stream)
res = {'header': header}
if samples_read > 0:
res['timestamps'] = np.concatenate(timestamps)
res['data'] = np.concatenate(samples)
res['recordingNumber'] = np.concatenate(recordingNumbers)
else:
res['timestamps'] = np.array([], dtype=np.int)
res['data'] = np.array([], dtype=dtype)
res['recordingNumber'] = np.array([], dtype=np.int)
return res
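# Editor-added sketch: the record-size arithmetic described in the
# loadContinuous docstring.  With the usual blockLength of 1024 samples a
# record occupies 8 (timestamp) + 2 (N) + 2 (recordingNumber)
# + 2 * 1024 (samples) + 10 (marker) = 2070 bytes, i.e. 2 * N + 22.
def _record_size_bytes(block_length=1024):
    return 2 * block_length + 22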
def loadSpikes(filepath):
data = { }
print 'loading spikes...'
f = open(filepath,'rb')
header = readHeader(f)
    if float(header['version']) < 0.4:
raise Exception('Loader is only compatible with .spikes files with version 0.4 or higher')
data['header'] = header
numChannels = int(header['num_channels'])
numSamples = 40 # **NOT CURRENTLY WRITTEN TO HEADER**
spikes = np.zeros((MAX_NUMBER_OF_SPIKES, numSamples, numChannels))
timestamps = np.zeros(MAX_NUMBER_OF_SPIKES)
source = np.zeros(MAX_NUMBER_OF_SPIKES)
gain = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
thresh = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
sortedId = np.zeros((MAX_NUMBER_OF_SPIKES, numChannels))
recNum = np.zeros(MAX_NUMBER_OF_SPIKES)
currentSpike = 0
while f.tell() < os.fstat(f.fileno()).st_size:
eventType = np.fromfile(f, np.dtype('<u1'),1) #always equal to 4, discard
timestamps[currentSpike] = np.fromfile(f, np.dtype('<i8'), 1)
software_timestamp = np.fromfile(f, np.dtype('<i8'), 1)
source[currentSpike] = np.fromfile(f, np.dtype('<u2'), 1)
numChannels = np.fromfile(f, np.dtype('<u2'), 1)
numSamples = np.fromfile(f, np.dtype('<u2'), 1)
sortedId[currentSpike] = np.fromfile(f, np.dtype('<u2'),1)
electrodeId = np.fromfile(f, np.dtype('<u2'),1)
channel = np.fromfile(f, np.dtype('<u2'),1)
color = np.fromfile(f, np.dtype('<u1'), 3)
pcProj = np.fromfile(f, np.float32, 2)
sampleFreq = np.fromfile(f, np.dtype('<u2'),1)
waveforms = np.fromfile(f, np.dtype('<u2'), numChannels*numSamples)
wv = np.reshape(waveforms, (numChannels, numSamples))
gain[currentSpike,:] = np.fromfile(f, np.float32, numChannels)
thresh[currentSpike,:] = np.fromfile(f, np.dtype('<u2'), numChannels)
recNum[currentSpike] = np.fromfile(f, np.dtype('<u2'), 1)
for ch in range(numChannels):
spikes[currentSpike,:,ch] = (np.float64(wv[ch])-32768)/(gain[currentSpike,ch]/1000)
currentSpike += 1
data['spikes'] = spikes[:currentSpike,:,:]
data['timestamps'] = timestamps[:currentSpike]
data['source'] = source[:currentSpike]
data['gain'] = gain[:currentSpike,:]
data['thresh'] = thresh[:currentSpike,:]
data['recordingNumber'] = recNum[:currentSpike]
data['sortedId'] = sortedId[:currentSpike]
return data
def loadEvents(filepath):
data = { }
print 'loading events...'
f = open(filepath,'rb')
header = readHeader(f)
if float(header['version']) < 0.4:
raise Exception('Loader is only compatible with .events files with version 0.4 or higher')
data['header'] = header
index = -1
channel = np.zeros(MAX_NUMBER_OF_EVENTS)
timestamps = np.zeros(MAX_NUMBER_OF_EVENTS)
sampleNum = np.zeros(MAX_NUMBER_OF_EVENTS)
nodeId = np.zeros(MAX_NUMBER_OF_EVENTS)
eventType = np.zeros(MAX_NUMBER_OF_EVENTS)
eventId = np.zeros(MAX_NUMBER_OF_EVENTS)
recordingNumber = np.zeros(MAX_NUMBER_OF_EVENTS)
while f.tell() < os.fstat(f.fileno()).st_size:
index += 1
timestamps[index] = np.fromfile(f, np.dtype('<i8'), 1)
sampleNum[index] = np.fromfile(f, np.dtype('<i2'), 1)
eventType[index] = np.fromfile(f, np.dtype('<u1'), 1)
nodeId[index] = np.fromfile(f, np.dtype('<u1'), 1)
eventId[index] = np.fromfile(f, np.dtype('<u1'), 1)
channel[index] = np.fromfile(f, np.dtype('<u1'), 1)
recordingNumber[index] = np.fromfile(f, np.dtype('<u2'), 1)
data['channel'] = channel[:index]
data['timestamps'] = timestamps[:index]
data['eventType'] = eventType[:index]
data['nodeId'] = nodeId[:index]
data['eventId'] = eventId[:index]
data['recordingNumber'] = recordingNumber[:index]
data['sampleNum'] = sampleNum[:index]
return data
def readHeader(f):
"""Read header information from the first 1024 bytes of an OpenEphys file.
Args:
f: An open file handle to an OpenEphys file
Returns: dict with the following keys.
- bitVolts : float, scaling factor, microvolts per bit
- blockLength : int, e.g. 1024, length of each record (see
loadContinuous)
- bufferSize : int, e.g. 1024
- channel : the channel, eg "'CH1'"
- channelType : eg "'Continuous'"
- date_created : eg "'15-Jun-2016 21212'" (What are these numbers?)
- description : description of the file format
- format : "'Open Ephys Data Format'"
- header_bytes : int, e.g. 1024
- sampleRate : float, e.g. 30000.
- version: eg '0.4'
Note that every value is a string, even numeric data like bitVolts.
Some strings have extra, redundant single apostrophes.
"""
header = {}
# Read the data as a string
# Remove newlines and redundant "header." prefixes
# The result should be a series of "key = value" strings, separated
# by semicolons.
header_string = f.read(1024).replace('\n','').replace('header.','')
# Parse each key = value string separately
for pair in header_string.split(';'):
if '=' in pair:
key, value = pair.split(' = ')
key = key.strip()
value = value.strip()
# Convert some values to numeric
if key in ['bitVolts', 'sampleRate']:
header[key] = float(value)
elif key in ['blockLength', 'bufferSize', 'header_bytes']:
header[key] = int(value)
else:
# Keep as string
header[key] = value
return header
def downsample(trace,down):
downsampled = scipy.signal.resample(trace,np.shape(trace)[0]/down)
return downsampled
def writeChannelMapFile(mapping, filename='mapping.prb'):
with open(filename, 'w') as outfile:
json.dump( \
{'0': { \
'mapping' : mapping.tolist(), \
'reference' : [-1] * mapping.size, \
'enabled' : [True] * mapping.size \
}, \
'refs' : {\
'channels' : [-1] * mapping.size \
}, \
'recording' : { \
'channels': [False] * mapping.size \
}, \
}, \
outfile, \
indent = 4, separators = (',', ': ') \
)
def pack(folderpath, filename='openephys.dat', dref=None,
chunk_size=4000, start_record=None, stop_record=None, verbose=True,
**kwargs):
"""Read OpenEphys formatted data in chunks and write to a flat binary file.
The data will be written in a fairly standard binary format:
ch0_sample0, ch1_sample0, ..., chN_sample0,
ch0_sample1, ch1_sample1, ..., chN_sample1,
and so on. Each sample is a 2-byte signed integer.
Because the data are read from the OpenEphys files in chunks, it
is not necessary to hold the entire dataset in memory at once. It is
also possible to specify starting and stopping locations to write out
a subset of the data.
Args:
folderpath : string, path to folder containing all channels
filename : name of file to store packed binary data
If this file exists, it will be overwritten
dref: Digital referencing - either supply a channel number or
'ave' to reference to the average of packed channels.
chunk_size : the number of records (not bytes or samples!) to read at
once. 4000 records of 64-channel data requires ~500 MB of memory.
The record size is usually 1024 samples.
start_record, stop_record : the first record to process and the
last record to process. If start_record is None, start at the
beginning; if stop_record is None, go until the end.
verbose : print out status info
**kwargs : This is passed to loadFolderToArray for each chunk.
See documentation there for the keywords `source`, `channels`,
`recording`, and `ignore_last_record`.
"""
# Get header info to determine how many records we have to pack
header = get_header_from_folder(folderpath, **kwargs)
if start_record is None:
start_record = 0
if stop_record is None:
stop_record = header['n_records']
# Manually remove the output file if it exists (later we append)
if os.path.exists(filename):
if verbose:
print "overwriting %s" % filename
os.remove(filename)
# Iterate over chunks
for chunk_start in range(start_record, stop_record, chunk_size):
# Determine where the chunk stops
chunk_stop = np.min([stop_record, chunk_start + chunk_size])
if verbose:
print "loading chunk from %d to %d" % (chunk_start, chunk_stop)
# Load the chunk
data_array = loadFolderToArray(folderpath, dtype=np.int16,
start_record=chunk_start, stop_record=chunk_stop,
verbose=False, **kwargs)
# This only happens if we happen to be loading a chunk consisting
# of only the last record, and also ignore_last_record is True
if len(data_array) == 0:
break
# Digital referencing
if dref:
# Choose a reference
if dref == 'ave':
reference = np.mean(data_array, 1)
else:
# Figure out which channels are included
if 'channels' in kwargs and kwargs['channels'] != 'all':
channels = kwargs['channels']
else:
channels = _get_sorted_channels(folderpath)
# Find the reference channel
dref_idx = channels.index(dref)
reference = data_array[:, dref_idx].copy()
# Subtract the reference
for i in range(data_array.shape[1]):
data_array[:,i] = data_array[:,i] - reference
# Explicity open in append mode so we don't just overwrite
with file(os.path.join(folderpath, filename), 'ab') as fi:
data_array.tofile(fi)
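# Editor-added sketch (not part of the original module): reading back the flat
# binary file written by pack().  ``n_channels`` must match the number of
# channels that were packed; the reshape follows the interleaved
# "ch0_sample0, ch1_sample0, ..." layout described in the pack() docstring.
def _example_read_packed(filename, n_channels):
    flat = np.fromfile(filename, dtype=np.int16)
    return flat.reshape(-1, n_channels)  # shape (n_samples, n_channels)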
def regex_capture(pattern, list_of_strings, take_index=0):
"""Apply regex `pattern` to each string and return a captured group.
pattern : string, regex pattern
list_of_strings : list of strings to apply the pattern to
Strings that do not match the pattern are ignored.
take_index : The index of the captured group to return
Returns: a list of strings. Each element is the captured group from
one of the input strings.
"""
res_l = []
for s in list_of_strings:
m = re.match(pattern, s)
# Append the capture, if any
if m is not None:
res_l.append(m.groups()[take_index])
return res_l
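# Editor-added sketch: a small, self-contained use of regex_capture with
# hypothetical filenames.
def _example_regex_capture():
    files = ['100_CH1.continuous', '100_CH12.continuous', 'notes.txt']
    assert regex_capture(r'100_CH(\d+).continuous', files) == ['1', '12']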
def _get_sorted_channels(folderpath, recording=None):
"""Return a sorted list of the continuous channels in folderpath.
folderpath : string, path to location of continuous files on disk
recording : None, or int
If there is only one recording in the folder, leave as None.
Otherwise, specify the number of the recording as an integer.
"""
if recording is None:
return sorted([int(f.split('_CH')[1].split('.')[0]) for f in os.listdir(folderpath)
if '.continuous' in f and '_CH' in f])
else:
# Form a string from the recording number
if recording == 1:
# The first recording has no suffix
recording_s = ''
else:
recording_s = '_%d' % recording
# Form a regex pattern to be applied to each filename
# We will capture the channel number: (\d+)
regex_pattern = '%s_CH(\d+)%s.continuous' % ('100', recording_s)
# Apply the pattern to each filename and return the captured channels
channel_numbers_s = regex_capture(regex_pattern, os.listdir(folderpath))
channel_numbers_int = map(int, channel_numbers_s)
return sorted(channel_numbers_int)
def get_number_of_records(filepath):
# Open the file
with file(filepath, 'rb') as f:
# Read header info
header = readHeader(f)
# Get file length
fileLength = os.fstat(f.fileno()).st_size
# Determine the number of records
record_length_bytes = 2 * header['blockLength'] + 22
n_records = int((fileLength - 1024) / record_length_bytes)
if (n_records * record_length_bytes + 1024) != fileLength:
raise IOError("file does not divide evenly into full records")
return n_records
def get_filelist(folderpath, source='100', channels='all', recording=None):
"""Given a folder of data files and a list of channels, get filenames.
folderpath : string, folder containing OpenEphys data files
source : string, typically '100'
channels : list of numeric channel numbers to acquire
If 'all', then _get_sorted_channels is used to get all channels
from that folder in sorted order
recording : the recording number, or None if there is only one recording
Returns: a list of filenames corresponding one-to-one to the channels
in `channels`. The filenames must be joined with `folderpath` to
construct a full filename.
"""
# Get all channels if requested
if channels == 'all':
channels = _get_sorted_channels(folderpath, recording=recording)
# Get the list of continuous filenames
if recording is None or recording == 1:
# The first recording has no suffix
filelist = ['%s_CH%d.continuous' % (source, chan)
for chan in channels]
else:
filelist = ['%s_CH%d_%d.continuous' % (source, chan, recording)
for chan in channels]
return filelist
def get_header_from_folder(folderpath, filelist=None, **kwargs):
"""Return the header info for all files in `folderpath`.
The header for each file is loaded individually. The following keys
are supposed to be the same for every file:
['bitVolts', 'blockLength', 'bufferSize', 'date_created',
'description', 'format', 'header_bytes', 'sampleRate', 'version']
They are checked for consistency and returned in a single dict.
Finally the number of records is also checked for each file, checked
for consistency, and returned as the key 'n_records'.
folderpath : folder containing OpenEphys data files
filelist : list of filenames within `folderpath` to load
If None, then provide optional keyword arguments `source`,
`channels`, and/or `recording`. They are passed to `get_filelist`
to get the filenames in this folder.
Returns: dict
"""
included_keys = ['blockLength', 'bufferSize', 'date_created',
'description', 'format', 'header_bytes', 'version', 'n_records']
included_float_keys = ['bitVolts', 'sampleRate']
# Get filelist if it was not provided
if filelist is None:
filelist = get_filelist(folderpath, **kwargs)
# Get header for each file, as well as number of records
header_l = []
for filename in filelist:
full_filename = os.path.join(folderpath, filename)
with file(full_filename) as fi:
header = readHeader(fi)
header['n_records'] = get_number_of_records(full_filename)
header_l.append(header)
if len(header_l) == 0:
raise IOError("no headers could be loaded")
# Form a single header based on all of them, starting with the first one
unique_header = {}
for key in included_keys + included_float_keys:
unique_header[key] = header_l[0][key]
# Check every header
for header in header_l:
# Check the regular keys
for key in included_keys:
if unique_header[key] != header[key]:
raise ValueError("inconsistent header info in key %s" % key)
# Check the floating point keys
for key in included_float_keys:
if not np.isclose(unique_header[key], header[key]):
raise ValueError("inconsistent header info in key %s" % key)
return unique_header
| gpl-3.0 |
jchevin/MissionPlanner-master | Lib/site-packages/scipy/signal/tests/test_waveforms.py | 57 | 10585 |
import numpy as np
from numpy.testing import TestCase, assert_almost_equal, assert_equal, assert_, \
assert_raises, run_module_suite
import scipy.signal.waveforms as waveforms
# These chirp_* functions are the instantaneous frequencies of the signals
# returned by chirp().
def chirp_linear(t, f0, f1, t1):
f = f0 + (f1 - f0) * t / t1
return f
def chirp_quadratic(t, f0, f1, t1, vertex_zero=True):
if vertex_zero:
f = f0 + (f1 - f0) * t**2 / t1**2
else:
f = f1 - (f1 - f0) * (t1 - t)**2 / t1**2
return f
def chirp_geometric(t, f0, f1, t1):
f = f0 * (f1/f0)**(t/t1)
return f
def chirp_hyperbolic(t, f0, f1, t1):
f = f0*f1*t1 / ((f0 - f1)*t + f1*t1)
return f
def compute_frequency(t, theta):
"""Compute theta'(t)/(2*pi), where theta'(t) is the derivative of theta(t)."""
# Assume theta and t are 1D numpy arrays.
# Assume that t is uniformly spaced.
dt = t[1] - t[0]
f = np.diff(theta)/(2*np.pi) / dt
tf = 0.5*(t[1:] + t[:-1])
return tf, f
class TestChirp(TestCase):
def test_linear_at_zero(self):
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='linear')
assert_almost_equal(w, 1.0)
def test_linear_freq_01(self):
method = 'linear'
f0 = 1.0
f1 = 2.0
t1 = 1.0
t = np.linspace(0, t1, 100)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_linear_freq_02(self):
method = 'linear'
f0 = 200.0
f1 = 100.0
t1 = 10.0
t = np.linspace(0, t1, 100)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_linear(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_quadratic_at_zero(self):
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic')
assert_almost_equal(w, 1.0)
def test_quadratic_at_zero2(self):
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='quadratic',
vertex_zero=False)
assert_almost_equal(w, 1.0)
def test_quadratic_freq_01(self):
method = 'quadratic'
f0 = 1.0
f1 = 2.0
t1 = 1.0
t = np.linspace(0, t1, 2000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_quadratic_freq_02(self):
method = 'quadratic'
f0 = 20.0
f1 = 10.0
t1 = 10.0
t = np.linspace(0, t1, 2000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_quadratic(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_logarithmic_at_zero(self):
w = waveforms.chirp(t=0, f0=1.0, f1=2.0, t1=1.0, method='logarithmic')
assert_almost_equal(w, 1.0)
def test_logarithmic_freq_01(self):
method = 'logarithmic'
f0 = 1.0
f1 = 2.0
t1 = 1.0
t = np.linspace(0, t1, 10000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_logarithmic_freq_02(self):
method = 'logarithmic'
f0 = 200.0
f1 = 100.0
t1 = 10.0
t = np.linspace(0, t1, 10000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_logarithmic_freq_03(self):
method = 'logarithmic'
f0 = 100.0
f1 = 100.0
t1 = 10.0
t = np.linspace(0, t1, 10000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_geometric(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_hyperbolic_at_zero(self):
w = waveforms.chirp(t=0, f0=10.0, f1=1.0, t1=1.0, method='hyperbolic')
assert_almost_equal(w, 1.0)
def test_hyperbolic_freq_01(self):
method = 'hyperbolic'
f0 = 10.0
f1 = 1.0
t1 = 1.0
t = np.linspace(0, t1, 10000)
phase = waveforms._chirp_phase(t, f0, t1, f1, method)
tf, f = compute_frequency(t, phase)
abserr = np.max(np.abs(f - chirp_hyperbolic(tf, f0, f1, t1)))
assert_(abserr < 1e-6)
def test_hyperbolic_freq_02(self):
method = 'hyperbolic'
f0 = 10.0
f1 = 100.0
t1 = 1.0
t = np.linspace(0, t1, 10)
assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method)
def test_hyperbolic_freq_03(self):
method = 'hyperbolic'
f0 = -10.0
f1 = 0.0
t1 = 1.0
t = np.linspace(0, t1, 10)
assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method)
def test_unknown_method(self):
method = "foo"
f0 = 10.0
f1 = 20.0
t1 = 1.0
t = np.linspace(0, t1, 10)
assert_raises(ValueError, waveforms.chirp, t, f0, t1, f1, method)
def test_integer_t1(self):
f0 = 10.0
f1 = 20.0
t = np.linspace(-1, 1, 11)
t1 = 3.0
float_result = waveforms.chirp(t, f0, t1, f1)
t1 = 3
int_result = waveforms.chirp(t, f0, t1, f1)
err_msg = "Integer input 't1=3' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_f0(self):
f1 = 20.0
t1 = 3.0
t = np.linspace(-1, 1, 11)
f0 = 10.0
float_result = waveforms.chirp(t, f0, t1, f1)
f0 = 10
int_result = waveforms.chirp(t, f0, t1, f1)
err_msg = "Integer input 'f0=10' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_f1(self):
f0 = 10.0
t1 = 3.0
t = np.linspace(-1, 1, 11)
f1 = 20.0
float_result = waveforms.chirp(t, f0, t1, f1)
f1 = 20
int_result = waveforms.chirp(t, f0, t1, f1)
err_msg = "Integer input 'f1=20' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_all(self):
f0 = 10
t1 = 3
f1 = 20
t = np.linspace(-1, 1, 11)
float_result = waveforms.chirp(t, float(f0), float(t1), float(f1))
int_result = waveforms.chirp(t, f0, t1, f1)
err_msg = "Integer input 'f0=10, t1=3, f1=20' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
class TestSweepPoly(TestCase):
def test_sweep_poly_quad1(self):
p = np.poly1d([1.0, 0.0, 1.0])
t = np.linspace(0, 3.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_const(self):
p = np.poly1d(2.0)
t = np.linspace(0, 3.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_linear(self):
p = np.poly1d([-1.0, 10.0])
t = np.linspace(0, 3.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_quad2(self):
p = np.poly1d([1.0, 0.0, -2.0])
t = np.linspace(0, 3.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_cubic(self):
p = np.poly1d([2.0, 1.0, 0.0, -2.0])
t = np.linspace(0, 2.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = p(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_cubic2(self):
"""Use an array of coefficients instead of a poly1d."""
p = np.array([2.0, 1.0, 0.0, -2.0])
t = np.linspace(0, 2.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = np.poly1d(p)(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
def test_sweep_poly_cubic3(self):
"""Use a list of coefficients instead of a poly1d."""
p = [2.0, 1.0, 0.0, -2.0]
t = np.linspace(0, 2.0, 10000)
phase = waveforms._sweep_poly_phase(t, p)
tf, f = compute_frequency(t, phase)
expected = np.poly1d(p)(tf)
abserr = np.max(np.abs(f - expected))
assert_(abserr < 1e-6)
class TestGaussPulse(TestCase):
def test_integer_fc(self):
float_result = waveforms.gausspulse('cutoff', fc=1000.0)
int_result = waveforms.gausspulse('cutoff', fc=1000)
err_msg = "Integer input 'fc=1000' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_bw(self):
float_result = waveforms.gausspulse('cutoff', bw=1.0)
int_result = waveforms.gausspulse('cutoff', bw=1)
err_msg = "Integer input 'bw=1' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_bwr(self):
float_result = waveforms.gausspulse('cutoff', bwr=-6.0)
int_result = waveforms.gausspulse('cutoff', bwr=-6)
err_msg = "Integer input 'bwr=-6' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
def test_integer_tpr(self):
float_result = waveforms.gausspulse('cutoff', tpr=-60.0)
int_result = waveforms.gausspulse('cutoff', tpr=-60)
err_msg = "Integer input 'tpr=-60' gives wrong result"
assert_equal(int_result, float_result, err_msg=err_msg)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
nicky-ji/edx-nicky | lms/djangoapps/bulk_email/migrations/0009_force_unique_course_ids.py | 53 | 6184 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'CourseAuthorization', fields ['course_id']
db.create_unique('bulk_email_courseauthorization', ['course_id'])
def backwards(self, orm):
# Removing unique constraint on 'CourseAuthorization', fields ['course_id']
db.delete_unique('bulk_email_courseauthorization', ['course_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'bulk_email.courseauthorization': {
'Meta': {'object_name': 'CourseAuthorization'},
'course_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'email_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'bulk_email.courseemail': {
'Meta': {'object_name': 'CourseEmail'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'html_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'text_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'to_option': ('django.db.models.fields.CharField', [], {'default': "'myself'", 'max_length': '64'})
},
'bulk_email.courseemailtemplate': {
'Meta': {'object_name': 'CourseEmailTemplate'},
'html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plain_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'bulk_email.optout': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'Optout'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['bulk_email'] | agpl-3.0 |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py | 2 | 8617 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import contextlib
import numpy as np
import six
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import unique_name
from test_imperative_base import new_program_scope
from jit_load_rename_var import rename_var_with_generator
import paddle.fluid.transpiler.details.program_utils as pu
LOADED_VAR_SUFFIX = ".load_0"
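# Network under test: one softmax fc layer applied to the input image,
# followed by five more softmax fc layers applied inside fluid.layers.while_loop.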
def while_softmax_regression(img):
def cond(i, times, pred):
return i < times
def body(i, times, pred):
pred = fluid.layers.fc(input=pred, size=10, act='softmax')
i = i + 1
return [i, times, pred]
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
times = fluid.layers.fill_constant(shape=[1], dtype='int64', value=5)
pred = fluid.layers.fc(input=img, size=10, act='softmax')
i, times, pred = fluid.layers.while_loop(
cond=cond, body=body, loop_vars=[i, times, pred])
return pred
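# The test class below trains and saves this while-loop model as a static-graph
# inference model, reloads it both through dygraph StaticModelRunner and through
# the static graph, and checks that the resulting losses and parameters match.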
class TestImperativeStaticModelRunnerWhile(unittest.TestCase):
def setUp(self):
self.seed = 90
self.batch_size = 32
self.batch_num = 50
self.save_dirname = "while.inference.model"
self.model_filename = None
self.params_filename = None
def _random_batch_reader(self):
def _get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
return image, label
def __reader__():
for _ in range(self.batch_num):
batch_image, batch_label = _get_random_images_and_labels(
[self.batch_size, 784], [self.batch_size, 1])
yield batch_image, batch_label
return __reader__
def train_and_save_model(self):
startup_program = fluid.default_startup_program()
main_program = fluid.default_main_program()
img = fluid.data(name='img', shape=[None, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
pred = while_softmax_regression(img)
loss = fluid.layers.cross_entropy(input=pred, label=label)
avg_loss = fluid.layers.mean(loss)
optimizer = fluid.optimizer.SGD(learning_rate=0.001)
optimizer.minimize(avg_loss)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
loader = fluid.io.DataLoader.from_generator(
feed_list=[img, label], capacity=5, iterable=True)
loader.set_batch_generator(self._random_batch_reader(), places=place)
for data in loader():
exe.run(main_program, feed=data, fetch_list=[avg_loss])
fluid.io.save_inference_model(
self.save_dirname, ["img"], [pred],
exe,
model_filename=self.model_filename,
params_filename=self.params_filename)
def load_and_train_dygraph(self):
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.dygraph.guard(place):
fluid.default_startup_program().random_seed = self.seed
fluid.default_main_program().random_seed = self.seed
np.random.seed(self.seed)
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
while_net = fluid.dygraph.static_runner.StaticModelRunner(
self.save_dirname)
dy_param_init_value = {}
for param in while_net.parameters():
dy_param_init_value[param.name] = param.numpy()
sgd = fluid.optimizer.SGD(learning_rate=0.001,
parameter_list=while_net.parameters())
train_loader = fluid.io.DataLoader.from_generator(capacity=10)
train_loader.set_batch_generator(
self._random_batch_reader(), places=place)
while_net.train()
for data in train_loader():
img = data[0]
label = data[1]
label.stop_gradient = True
cost = while_net(img)
loss = fluid.layers.cross_entropy(cost, label)
avg_loss = fluid.layers.mean(loss)
avg_loss.backward()
sgd.minimize(avg_loss)
while_net.clear_gradients()
dy_out = avg_loss.numpy()
dy_param_value = {}
for param in while_net.parameters():
dy_param_value[param.name] = param.numpy()
return dy_out, dy_param_init_value, dy_param_value
def load_and_train_static(self):
with new_program_scope():
fluid.default_startup_program().random_seed = self.seed
fluid.default_main_program().random_seed = self.seed
np.random.seed(self.seed)
img = fluid.data(name='img', shape=[None, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
pred = while_softmax_regression(img)
loss = fluid.layers.cross_entropy(input=pred, label=label)
avg_loss = fluid.layers.mean(loss)
optimizer = fluid.optimizer.SGD(learning_rate=0.001)
optimizer.minimize(avg_loss)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
fluid.io.load_params(
exe,
self.save_dirname,
main_program=fluid.default_main_program(),
filename=self.params_filename)
static_param_init_value = {}
static_param_name_list = []
for param in fluid.default_main_program().all_parameters():
static_param_name_list.append(param.name)
static_param_init_value[param.name] = fluid.executor._fetch_var(
param.name)
loader = fluid.io.DataLoader.from_generator(
feed_list=[img, label], capacity=5, iterable=True)
loader.set_batch_generator(
self._random_batch_reader(), places=place)
for data in loader():
fetch_list = [avg_loss.name]
fetch_list.extend(static_param_name_list)
out = exe.run(fluid.default_main_program(),
feed=data,
fetch_list=[avg_loss])
static_param_value = {}
static_out = out[0]
for i in range(1, len(out)):
static_param_value[static_param_name_list[i - 1]] = out[i]
return static_out, static_param_init_value, static_param_value
def test_while_no_params_filename(self):
# Phase 1. run and save static model
self.train_and_save_model()
# Phase 2. load model & train dygraph
with unique_name.guard():
dy_out, dy_param_init_value, dy_param_value = \
self.load_and_train_dygraph()
with unique_name.guard():
static_out, static_param_init_value, static_param_value = \
self.load_and_train_static()
# Phase 3. compare
with unique_name.guard():
dict_old_new_init = rename_var_with_generator(
static_param_init_value.keys())
for key, value in six.iteritems(static_param_init_value):
key = dict_old_new_init[key]
self.assertTrue(np.array_equal(value, dy_param_init_value[key]))
self.assertTrue(np.allclose(static_out, dy_out))
for key, value in six.iteritems(static_param_value):
key += LOADED_VAR_SUFFIX
self.assertTrue(np.allclose(value, dy_param_value[key], atol=1e-5))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
kawamon/hue | desktop/core/ext-py/gunicorn-19.9.0/gunicorn/six.py | 320 | 27344 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.8.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
# This is a bit ugly, but it avoids running this again.
delattr(obj.__class__, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
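# Single importer instance shared by this module: the six.moves modules defined
# below are registered with it, and it is appended to sys.meta_path at the end
# of the file so that six.moves submodules can be imported lazily.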
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
def iterbytes(buf):
return (ord(byte) for byte in buf)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| apache-2.0 |
bieschke/nuffle | lib/python/formencode/context.py | 5 | 5404 | """
A dynamic-scope-like system, aka fluid variables.
The idea behind dynamic scoped variables is for when, at one level,
you want to change the behavior of something you call. Except you
can't pass in any new arguments (e.g., there's some function or object
inbetween you and the thing you want to change), or you can't predict
exactly what you will want to change.
You should use it like::
context = Context()
def do_stuff():
state = context.set(inside='do_stuff')
try:
do stuff...
finally:
state.restore()
Then ``context.inside`` will be set to ``'do_stuff'`` inside that try
block. If a value isn't set, you'll get an attribute error.
Note that all values are thread local; this means you cannot use a
context object to pass information to another thread. In a
single-thread environment it doesn't really matter.
Typically you will create ``Context`` instances for your application,
environment, etc. These should be global module-level variables, that
may be imported by any interested module; each instance is a namespace
of its own.
Sometimes it's nice to have default values, instead of getting
attribute errors. This makes it easier to put in new variables that
are intended to be used elsewhere, without having to use
``getattr(context, 'var', default)`` to avoid AttributeErrors. There
are two ways (that can be used together) to do this.
First, when instantiating a ``Context`` object, you can give it a
``default`` value. If given, then all variables will default to that
value. ``None`` is a typical value for that.
Another is ``context.set_default(**vars)``, which will set only those
variables to default values. This will not affect the stack of
scopes, but will only add defaults.
When Python 2.5 comes out, this syntax will certainly be useful::
with context(page='view'):
do stuff...
And ``page`` will be set to ``'view'`` only inside that ``with``
block.
"""
from formencode.util import threadinglocal
from itertools import count
__all__ = ['Context', 'ContextRestoreError']
_restore_ids = count()
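# Monotonically increasing ids handed out by Context.set(); _restore() compares
# them so that saved states can only be restored in reverse (LIFO) order.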
class _NoDefault:
pass
class ContextRestoreError(Exception):
"""
Raised when something is restored out-of-order.
"""
class Context(object):
def __init__(self, default=_NoDefault):
self.__dict__['_local'] = threadinglocal.local()
self.__dict__['_default'] = default
def __getattr__(self, attr):
if attr.startswith('_'):
raise AttributeError
try:
stack = self._local.stack
except AttributeError:
stack = []
for i in range(len(stack)-1, -1, -1):
if attr in stack[i][0]:
return stack[i][0][attr]
if self._default is _NoDefault:
raise AttributeError(
"The attribute %s has not been set on %r"
% (attr, self))
return self._default
def __setattr__(self, attr, value):
raise AttributeError(
"You can only write attribute on context object with the .set() method")
def set(self, **kw):
state_id = _restore_ids.next()
try:
stack = self._local.stack
except AttributeError:
stack = self._local.stack = [({}, -1)]
restorer = RestoreState(self, state_id)
stack.append((kw, state_id))
return restorer
def _restore(self, state_id):
try:
stack = self._local.stack
except AttributeError:
raise ContextRestoreError(
"Tried to restore context %r (to state ID %s) but no variables have been set in context"
% (self, state_id))
if stack[-1][1] == -1:
raise ContextRestoreError(
"Out of order restoration of context %r (to state ID %s); the stack state is empty"
% (self, state_id))
if stack[-1][1] != state_id:
raise ContextRestoreError(
"Out of order restoration of context %r (to state ID %s) when last state is %s"
% (self, state_id, stack[-1][1]))
stack.pop()
def set_default(self, **kw):
try:
stack = self._local.stack
except AttributeError:
stack = self._local.stack = [({}, -1)]
stack[0][0].update(kw)
def __repr__(self):
try:
stack = self._local.stack
except AttributeError:
stack = []
myid = hex(abs(id(self)))[2:]
if not stack:
return '<%s %s (empty)>' % (self.__class__.__name__, myid)
cur = {}
for vars, state_id in stack:
cur.update(vars)
keys = cur.keys()
keys.sort()
varlist = []
for key in keys:
rep = repr(cur[key])
if len(rep) > 10:
rep = rep[:9]+'...'+rep[-1]
varlist.append('%s=%s' % (key, rep))
return '<%s %s %s>' % (
self.__class__.__name__, myid, ' '.join(varlist))
class RestoreState(object):
def __init__(self, context, state_id):
self.state_id = state_id
self.context = context
self.restored = False
def restore(self):
if self.restored:
# @@: Should this really be allowed?
return
self.context._restore(self.state_id)
self.restored = True
| gpl-2.0 |
chenjiafan/pjsip | tests/pjsua/scripts-sendto/999_asterisk_err.py | 59 | 1312 | # $Id: 999_asterisk_err.py 2081 2008-06-27 21:59:15Z bennylp $
import inc_sip as sip
import inc_sdp as sdp
# http://lists.pjsip.org/pipermail/pjsip_lists.pjsip.org/2008-June/003426.html:
#
# Report in pjsip mailing list on 27/6/2008 that this message will
# cause pjsip to respond with 500 and then second request will cause
# segfault.
complete_msg = \
"""INVITE sip:[email protected]:5060;transport=UDP SIP/2.0
Via: SIP/2.0/UDP 192.168.1.11:5060;branch=z9hG4bK74a60ee5;rport
From: \"A user\" <sip:[email protected]>;tag=as2858a32c
To: <sip:[email protected]:5060;transport=UDP>
Contact: <sip:[email protected]>
Call-ID: [email protected]
CSeq: 102 INVITE
User-Agent: Asterisk PBX
Max-Forwards: 70
Date: Fri, 27 Jun 2008 08:46:47 GMT
Allow: INVITE, ACK, CANCEL, OPTIONS, BYE, REFER, SUBSCRIBE, NOTIFY
Supported: replaces
Content-Type: application/sdp
Content-Length: 285
v=0
o=root 4236 4236 IN IP4 192.168.1.11
s=session
c=IN IP4 192.168.1.11
t=0 0
m=audio 14390 RTP/AVP 0 3 8 101
a=rtpmap:0 PCMU/8000
a=rtpmap:3 GSM/8000
a=rtpmap:8 PCMA/8000
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-16
a=silenceSupp:off - - - -
a=ptime:20
a=sendrecv
"""
sendto_cfg = sip.SendtoCfg( "Asterisk 500", "--null-audio --auto-answer 200",
"", 200, complete_msg=complete_msg)
| gpl-2.0 |
infoelliex/addons-yelizariev | import_framework/mapper.py | 16 | 13482 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
import re
import math
class mapper(object):
"""
Superclass for all mapper classes.
They are called before importing data,
to transform the mapping into the real values
that will be imported.
The __call__ method receives a dictionary with the external data:
'external_field' : value
"""
def __call__(self, external_values):
raise NotImplementedError()
class dbmapper(mapper):
"""
Superclass for mappers that need access to
the database or any function of the import_framework.
self.parent contains a reference to the instance of
the import framework.
"""
def set_parent(self, parent):
self.parent = parent
def res2xmlid(self, model, res_id):
data = self.parent.pool['ir.model.data'].search(self.parent.cr,
self.parent.uid,
[('res_id', '=', res_id),
('model', '=', model),
])
if not data:
return []
return self.parent.pool['ir.model.data'].browse(self.parent.cr, self.parent.uid, data)
class concat(mapper):
"""
Use : concat('field_name1', 'field_name2', delimiter='_')
Concatenates the values of the fields using the delimiter; the delimiter
is optional and defaults to a space.
"""
def __init__(self, *arg, **delimiter):
self.arg = arg
self.delimiter = delimiter and delimiter.get('delimiter', ' ') or ' '
def __call__(self, external_values):
return self.delimiter.join(map(lambda x : tools.ustr(external_values.get(x,'')or ''), self.arg))
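# tags_from_fields: splits the comma-separated content of each listed field,
# cleans every value and generates an xml_id for it under '<table><field>',
# returning the ids joined by commas.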
class tags_from_fields(dbmapper):
def __init__(self, table, field_list):
self.table = table
self.field_list = field_list
def __call__(self, external_values):
res = []
for f in self.field_list:
value = external_values.get(f)
value = value or ''
if not isinstance(value, basestring):
value = str(value)
for v in value.split(','):
v = do_clean_sugar(v)
v = do_clean_xml_id(v)
if v:
id = self.parent._generate_xml_id(v, self.table + f)
res.append(id)
return ','.join(res)
class ppconcat(mapper):
"""
Use : ppconcat('field_name1', 'field_name2', delimiter='_')
Concatenates each external field name with its value using the delimiter;
the delimiter is optional and defaults to two line feeds.
"""
def __init__(self, *arg, **kwargs):
self.arg = arg
self.delimiter = kwargs and kwargs.get('delimiter', ' ') or '\n\n'
self.skip_value = kwargs and kwargs.get('skip_value')
if not type(self.skip_value) == str:
self.skip_value = '^^'
def __call__(self, external_values):
return self.delimiter.join(map(lambda x : x + ": " + tools.ustr(external_values.get(x,'')), filter(lambda x: external_values.get(x) and (self.skip_value != external_values.get(x)), self.arg)))
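# first('f1', 'f2', ...): returns the value of the first listed field that is
# non-empty, optionally lower-cased when lower=True is passed.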
class first(mapper):
def __init__(self, *arg, **kwargs):
self.arg = arg
self.lower = kwargs and kwargs.get('lower') or False
def __call__(self, external_values):
v = ''
for a in self.arg:
v = external_values.get(a, '')
if v:
break
if v and self.lower:
v = v.lower()
return v
class fixdate(mapper):
"""
convert '2010-02-12 13:26:25' to '2010-02-12'
"""
def __init__(self, field_name):
self.field_name = field_name
def __call__(self, external_values):
s = external_values.get(self.field_name)
if not s:
return ''
return str(s).split(' ')[0]
class const(mapper):
"""
Use : const(arg)
always returns arg
"""
def __init__(self, val):
self.val = val
def __call__(self, external_values):
return self.val
def do_clean_xml_id(value):
return re.sub('[\'", ^]','_', (value and unicode(value) or ''))
class value(mapper):
"""
Use : value(external_field_name)
Returns the value of the external field name.
This is equivalent to passing a single string;
useful with call() if you want your call to get the value
and don't care about the name of the field:
call(self.method, value('field1'))
"""
def __init__(self, val, default='', fallback=False, lower=False):
self.val = val
self.default = default
self.fallback = fallback
self.lower = lower
def __call__(self, external_values):
val = external_values.get(self.val)
if self.fallback and not val:
val = external_values.get(self.fallback)
val = val or self.default
if self.lower:
val = (str(val) or '').lower()
return val
class mapper_int(mapper):
def __init__(self, val, default=0):
self.val = val
self.default = default
def __call__(self, external_values):
val = external_values.get(self.val, self.default)
return val and int(val) or 0
def do_clean_sugar(v):
return (v or '').replace('^','').strip()
class clean_sugar(mapper):
def __init__(self, val, default=0):
self.val = val
self.default = default
def __call__(self, external_values):
val = external_values.get(self.val, self.default)
return do_clean_sugar(val)
class map_val(mapper):
"""
Use : map_val(external_field, val_mapping)
where val_mapping is a dictionary
with external_val : openerp_val
useful for selection fields (like state)
to map values
"""
def __init__(self, val, map, default=''):
self.val = value(val)
self.map = map
self.default = default
def __call__(self, external_values):
return self.map.get(self.val(external_values), self.default)
class ref(dbmapper):
"""
Use : ref(table_name, external_id)
returns the xml_id of the resource
to associate an already imported object with the current object
"""
def __init__(self, table, field_name):
self.table = table
self.field_name = field_name
def __call__(self, external_values):
return self.parent.xml_id_exist(self.table, external_values.get(self.field_name))
class refbyname(dbmapper):
"""
Use : refbyname(table_name, external_name, res.model)
same as ref but uses the name of the resource to find it
"""
def __init__(self, table, field_name, model):
self.table = table
self.field_name = field_name
self.model = model
def __call__(self, external_values):
v = external_values.get(self.field_name, '')
return self.parent.name_exist(self.table, v , self.model)
class xml_id(dbmapper):
def __init__(self, table, field_name='id'):
self.table = table
self.field_name = field_name
def __call__(self, external_values):
field_value = external_values.get(self.field_name)
if isinstance(field_value, float) and math.isnan(field_value):
return ''
field_value = do_clean_xml_id(field_value)
if not field_value:
return ''
return self.parent._generate_xml_id(field_value, self.table)
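# user2partner resolves an external user id to the xml_id of the matching
# res.partner, which by convention is the user's generated xml_id plus the
# '_res_partner' suffix.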
class user2partner(dbmapper):
def __init__(self, table_user, field_name='id'):
self.table_user = table_user
#self.table_partner = table_partner
self.field_name = field_name
def __call__(self, external_values):
id = xml_id(self.table_user, self.field_name)
id.set_parent(self.parent)
user_xml_id = id(external_values)
return user_xml_id+'_res_partner'
class user_by_login(dbmapper):
def __init__(self, field_name):
self.field_name = field_name
def __call__(self, external_values):
login = external_values.get(self.field_name)
if not login:
return ''
id = self.parent.pool['res.users'].search(self.parent.cr, self.parent.uid, [('login', '=', login)], context=self.parent.context)
if id:
return id[0]
else:
return ''
FIX_COUNTRY = {
'UK': 'United Kingdom'
}
class country_by_name(dbmapper):
def __init__(self, field_name):
self.field_name = field_name
def __call__(self, external_values):
value = external_values.get(self.field_name)
if not value:
return ''
value = FIX_COUNTRY.get(value, value)
id = self.parent.pool['res.country'].search(self.parent.cr, self.parent.uid,
[('name', '=', value)], context=self.parent.context)
if id:
return id[0]
else:
return ''
class res_id(dbmapper):
def __init__(self, get_table, field_name, default='0'):
self.get_table = get_table
self.field_name = field_name
self.default = default
def __call__(self, external_values):
id = xml_id(self.get_table(external_values), self.field_name)
id.set_parent(self.parent)
xmlid = id(external_values)
res_id = self.parent.pool['ir.model.data'].xmlid_to_res_id(self.parent.cr,
self.parent.uid,
'.'+xmlid)
return res_id and str(res_id) or self.default
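# emails2partners extracts every e-mail address found in the field and resolves
# each one to a res.partner id: addresses on the catchall alias domain are
# matched against res.users aliases, the others against res.partner.email;
# the ids found are returned as a comma-separated string.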
class emails2partners(dbmapper):
def __init__(self, field_name):
self.field_name = field_name
def __call__(self, external_values):
alias_domain = self.parent.cache.get('alias_domain', None)
if alias_domain is None:
ir_config_parameter = self.parent.pool.get("ir.config_parameter")
alias_domain = ir_config_parameter.get_param(self.parent.cr, self.parent.uid, "mail.catchall.domain")
print 'alias_domain', alias_domain
alias_domain = alias_domain or ''
self.parent.cache['alias_domain'] = alias_domain
s = external_values.get(self.field_name, '')
s = s.lower()
res = []
for email in re.findall('[^<>, ]*@[^<>, ]*', s):
if alias_domain and alias_domain == email.split('@')[1]:
res_users = self.parent.pool.get("res.users")
user_id = res_users.search(self.parent.cr, self.parent.uid,
[('alias_name','=', email.split('@')[0])])
if user_id:
user_id = user_id[0]
partner_id = res_users.browse(self.parent.cr, self.parent.uid,
user_id).partner_id.id
#tmp
res.append(str(partner_id))
continue
else:
#print 'alias not found', email
pass
#
partner_id = self.parent.pool['res.partner'].search(self.parent.cr,
self.parent.uid,
[('email', '=', email),
])
if partner_id:
partner_id = partner_id[0]
#tmp
res.append(str(partner_id))
continue
else:
#print 'partner not found', email
pass
res = ','.join(res)
#print 'emails2partners', s, res
return res
class call(mapper):
"""
Use : call(function, arg1, arg2)
calls the function with the external values followed by the specified args
"""
def __init__(self, fun, *arg):
self.fun = fun
self.arg = arg
def __call__(self, external_values):
args = []
for arg in self.arg:
if isinstance(arg, mapper):
args.append(arg(external_values))
else:
args.append(arg)
return self.fun(external_values, *args)
| lgpl-3.0 |
vvps/pyBackup | bkup.py | 1 | 1258 | import os, sys
import zipfile
from time import *
import datetime
import balloontip as bt
startTime = datetime.datetime.now()
lf = open('C:/pyBackup.log', 'w')
lf.write("Starting backup.. " + strftime("%a, %d %b %Y %H:%M:%S", localtime()) + "\n");
sourceLocation = 'E:'
destinationLocation = 'V:/backupdata'
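# Recursively zips everything under src into '<dst>.zip', storing each file
# under its path relative to src.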
def zip(src, dst):
zf = zipfile.ZipFile("%s.zip" % (dst), "w", zipfile.ZIP_DEFLATED)
for dirname, subdirs, files in os.walk(src):
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(os.path.abspath(src)):]
lf.write("Zipping %s \n" % arcname)
zf.write(absname, arcname) #Comment this line during testing!
zf.close()
try:
zip(sourceLocation, destinationLocation)
elapsedTime = datetime.datetime.now() - startTime
lf.write("End backup " + strftime("%a, %d %b %Y %H:%M:%S", localtime()) + "\n");
lf.write("Total time elapsed %s" % str(elapsedTime))
btMessage = ("Backup successful!\nTotal time elapsed %s" % str(elapsedTime))
except:
e = sys.exc_info()[0]
lf.write( "Error: %s" % e )
btMessage = ("Backup failed!\nError: %s" % e )
lf.close()
bt.balloon_tip('Backup script', btMessage)
| unlicense |
xcasper/python_koans | python3/koans/about_dictionaries.py | 91 | 1948 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutHashes in the Ruby Koans
#
from runner.koan import *
class AboutDictionaries(Koan):
def test_creating_dictionaries(self):
empty_dict = dict()
self.assertEqual(dict, type(empty_dict))
self.assertDictEqual({}, empty_dict)
self.assertEqual(__, len(empty_dict))
def test_dictionary_literals(self):
empty_dict = {}
self.assertEqual(dict, type(empty_dict))
babel_fish = { 'one': 'uno', 'two': 'dos' }
self.assertEqual(__, len(babel_fish))
def test_accessing_dictionaries(self):
babel_fish = { 'one': 'uno', 'two': 'dos' }
self.assertEqual(__, babel_fish['one'])
self.assertEqual(__, babel_fish['two'])
def test_changing_dictionaries(self):
babel_fish = { 'one': 'uno', 'two': 'dos' }
babel_fish['one'] = 'eins'
expected = { 'two': 'dos', 'one': __ }
self.assertDictEqual(expected, babel_fish)
def test_dictionary_is_unordered(self):
dict1 = { 'one': 'uno', 'two': 'dos' }
dict2 = { 'two': 'dos', 'one': 'uno' }
self.assertEqual(__, dict1 == dict2)
def test_dictionary_keys_and_values(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(__, len(babel_fish.keys()))
self.assertEqual(__, len(babel_fish.values()))
self.assertEqual(__, 'one' in babel_fish.keys())
self.assertEqual(__, 'two' in babel_fish.values())
self.assertEqual(__, 'uno' in babel_fish.keys())
self.assertEqual(__, 'dos' in babel_fish.values())
def test_making_a_dictionary_from_a_sequence_of_keys(self):
cards = {}.fromkeys(('red warrior', 'green elf', 'blue valkyrie', 'yellow dwarf', 'confused looking zebra'), 42)
self.assertEqual(__, len(cards))
self.assertEqual(__, cards['green elf'])
self.assertEqual(__, cards['yellow dwarf'])
| mit |
Trust-Code/PySPED | pysped/tabela/servico.py | 9 | 1249 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals, absolute_import
import sys
import os
CURDIR = os.path.dirname(os.path.abspath(__file__))
class _Servico(object):
def __init__(self, codigo='', descricao=''):
self.codigo = codigo
self.descricao = descricao
def __str__(self):
return unicode.encode(self.__unicode__(), 'utf-8')
def __unicode__(self):
return self.codigo + ' - ' + self.descricao
def __repr__(self):
return str(self)
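# Reads servico.txt (pipe-delimited 'codigo|descricao' rows, header skipped)
# into a dict keyed by service code; codes shorter than four digits are also
# registered with a leading zero.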
def _monta_dicionario_codigo():
dicionario = {}
arquivo = open(os.path.join(CURDIR, 'servico.txt'), 'r')
#
# Skip the header line
#
arquivo.readline()
for linha in arquivo:
linha = linha.decode('utf-8').replace('\n', '').replace('\r', '')
campos = linha.split('|')
s = _Servico(codigo=campos[0], descricao=campos[1])
dicionario[s.codigo] = s
#
# Normalize codes to four digits when they are shorter
#
if len(s.codigo) < 4:
dicionario['0' + s.codigo] = s
return dicionario
if not hasattr(sys.modules[__name__], 'SERVICO_CODIGO'):
SERVICO_CODIGO = _monta_dicionario_codigo()
| lgpl-2.1 |
tensorflow/tpu | models/experimental/resnet50_keras/resnet50_ctl_tf1.py | 1 | 8815 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""ResNet-50 implemented with Keras running on Cloud TPUs.
This file shows how you can run ResNet-50 on a Cloud TPU using the TensorFlow
Keras support. This is configured for ImageNet (e.g. 1000 classes), but you can
easily adapt to your own datasets by changing the code appropriately.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import imagenet_input
import resnet_model
# Common flags for TPU models.
flags.DEFINE_string('tpu', None, 'Name of the TPU to use.')
flags.DEFINE_string('data', None, 'Path to training and testing data.')
flags.DEFINE_string(
'model_dir', None,
('The directory where the model weights and training/evaluation summaries '
'are stored. If not specified, save to /tmp/resnet50.'))
flags.DEFINE_integer('num_cores', 8, 'Number of TPU cores.')
FLAGS = flags.FLAGS
# Imagenet training and test data sets.
APPROX_IMAGENET_TRAINING_IMAGES = 1281167 # Number of images in ImageNet-1k train dataset.
IMAGENET_VALIDATION_IMAGES = 50000 # Number of eval images.
PER_CORE_BATCH_SIZE = 128
NUM_CLASSES = 1000
# Training hyperparameters.
_EPOCHS = 90
_USE_BFLOAT16 = False
_BASE_LEARNING_RATE = 0.4
DEFAULT_MODEL_DIR = '/tmp/resnet50'
_WEIGHTS_TXT = 'resnet50_weights'
# Allow overriding epochs, steps_per_epoch for testing
flags.DEFINE_integer('num_epochs', _EPOCHS, '')
flags.DEFINE_integer(
'steps_per_epoch', None,
'Steps for epoch during training. If unspecified, use default value.')
# Learning rate schedule
_LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
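# Together with compute_learning_rate() below, this schedule gives a linear
# warmup to the base rate over the first 5 epochs and then scales the base
# rate by 0.1, 0.01 and 0.001 from epochs 30, 60 and 80 onwards.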
def compute_learning_rate(lr_epoch):
"""Learning rate for each step."""
warmup_lr_multiplier, warmup_end_epoch = _LR_SCHEDULE[0]
if lr_epoch < warmup_end_epoch:
# Learning rate increases linearly per step.
return (_BASE_LEARNING_RATE * warmup_lr_multiplier *
lr_epoch / warmup_end_epoch)
for mult, start_epoch in _LR_SCHEDULE:
if lr_epoch >= start_epoch:
learning_rate = _BASE_LEARNING_RATE * mult
else:
break
return learning_rate
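# Example of the schedule above (a sketch, not part of the original script):
# with _BASE_LEARNING_RATE = 0.4, halfway through warmup (lr_epoch = 2.5) the
# warmup branch returns 0.4 * 1.0 * 2.5 / 5 = 0.2, while at lr_epoch = 35 the
# loop picks the (0.1, 30) entry and returns 0.4 * 0.1 = 0.04.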
def main(unused_argv):
model_dir = FLAGS.model_dir if FLAGS.model_dir else DEFAULT_MODEL_DIR
batch_size = PER_CORE_BATCH_SIZE * FLAGS.num_cores
steps_per_epoch = FLAGS.steps_per_epoch or (int(
APPROX_IMAGENET_TRAINING_IMAGES // batch_size))
steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size
logging.info('Saving checkpoints at %s', model_dir)
logging.info('Use TPU at %s', FLAGS.tpu if FLAGS.tpu is not None else 'local')
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
imagenet_train = imagenet_input.ImageNetInput(
is_training=True,
data_dir=FLAGS.data,
batch_size=batch_size,
use_bfloat16=_USE_BFLOAT16)
imagenet_eval = imagenet_input.ImageNetInput(
is_training=False,
data_dir=FLAGS.data,
batch_size=batch_size,
use_bfloat16=_USE_BFLOAT16)
train_iterator = strategy.experimental_distribute_dataset(
imagenet_train.input_fn()).make_initializable_iterator()
test_iterator = strategy.experimental_distribute_dataset(
imagenet_eval.input_fn()).make_initializable_iterator()
with strategy.scope():
logging.info('Building Keras ResNet-50 model')
model = resnet_model.ResNet50(num_classes=NUM_CLASSES)
optimizer = tf.keras.optimizers.SGD(
learning_rate=_BASE_LEARNING_RATE, momentum=0.9, nesterov=True)
training_loss = tf.keras.metrics.Mean('training_loss', dtype=tf.float32)
training_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
'training_accuracy', dtype=tf.float32)
test_loss = tf.keras.metrics.Mean('test_loss', dtype=tf.float32)
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
'test_accuracy', dtype=tf.float32)
logging.info('Finished building Keras ResNet-50 model')
def train_step(inputs):
"""Training StepFn."""
images, labels = inputs
with tf.GradientTape() as tape:
predictions = model(images, training=True)
# Loss calculations.
#
      # Part 1: Prediction loss.
prediction_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, predictions)
loss1 = tf.reduce_mean(prediction_loss)
# Part 2: Model weights regularization
loss2 = tf.reduce_sum(model.losses)
# Scale the loss given the TPUStrategy will reduce sum all gradients.
loss = loss1 + loss2
scaled_loss = loss / strategy.num_replicas_in_sync
grads = tape.gradient(scaled_loss, model.trainable_variables)
update_vars = optimizer.apply_gradients(
zip(grads, model.trainable_variables))
update_loss = training_loss.update_state(loss)
update_accuracy = training_accuracy.update_state(labels, predictions)
with tf.control_dependencies([update_vars, update_loss, update_accuracy]):
return tf.identity(loss)
def test_step(inputs):
"""Evaluation StepFn."""
images, labels = inputs
predictions = model(images, training=False)
loss = tf.keras.losses.sparse_categorical_crossentropy(labels, predictions)
loss = tf.reduce_mean(loss)
update_loss = test_loss.update_state(loss)
update_accuracy = test_accuracy.update_state(labels, predictions)
with tf.control_dependencies([update_loss, update_accuracy]):
return tf.identity(loss)
dist_train = strategy.experimental_local_results(
strategy.run(train_step, args=(next(train_iterator),)))
dist_test = strategy.experimental_local_results(
strategy.run(test_step, args=(next(test_iterator),)))
training_loss_result = training_loss.result()
training_accuracy_result = training_accuracy.result()
test_loss_result = test_loss.result()
test_accuracy_result = test_accuracy.result()
train_iterator_init = train_iterator.initialize()
test_iterator_init = test_iterator.initialize()
config = tf.ConfigProto()
config.allow_soft_placement = True
cluster_spec = resolver.cluster_spec()
if cluster_spec:
config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
with tf.Session(target=resolver.master(), config=config) as sess:
all_variables = (
tf.global_variables() +
training_loss.variables + training_accuracy.variables +
test_loss.variables + test_accuracy.variables)
sess.run([v.initializer for v in all_variables])
sess.run(train_iterator_init)
for epoch in range(0, FLAGS.num_epochs):
logging.info('Starting to run epoch: %s', epoch)
for step in range(steps_per_epoch):
learning_rate = compute_learning_rate(epoch + 1 +
(float(step) / steps_per_epoch))
sess.run(optimizer.lr.assign(learning_rate))
if step % 20 == 0:
logging.info('Learning rate at step %s in epoch %s is %s', step,
epoch, learning_rate)
sess.run(dist_train)
if step % 20 == 0:
logging.info('Training loss: %s, accuracy: %s%%',
round(sess.run(training_loss_result), 4),
round(sess.run(training_accuracy_result) * 100, 2))
training_loss.reset_states()
training_accuracy.reset_states()
sess.run(test_iterator_init)
for step in range(steps_per_eval):
if step % 20 == 0:
logging.info('Starting to run eval step %s of epoch: %s', step,
epoch)
sess.run(dist_test)
if step % 20 == 0:
logging.info('Test loss: %s, accuracy: %s%%',
round(sess.run(test_loss_result), 4),
round(sess.run(test_accuracy_result) * 100, 2))
test_loss.reset_states()
test_accuracy.reset_states()
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
app.run(main)
| apache-2.0 |
kronoscode/Booktype | lib/booktype/apps/edit/utils.py | 7 | 3015 | # -*- coding: utf-8 -*-
"""
Utility functions related with editor app
"""
import sputnik
from lxml import etree
from booktype.utils.plugins import icejs
def clean_chapter_html(content, text_only=False, **kwargs):
"""
Removes icejs contents for now. We could later add more functionality to
this function to clean other stuff
Args:
- content: html string
- text_only: Boolean
Returns:
- cleaned either html or text content :)
"""
ice_params = icejs.IceCleanPlugin.OPTIONS
cleaned = icejs.ice_cleanup(content, **ice_params)
if kwargs.get('clean_comments_trail', False):
for comment_bubble in cleaned.xpath(".//a[@class='comment-link']"):
comment_bubble.drop_tree()
if text_only:
return ' '.join(cleaned.itertext())
cnt = etree.tostring(cleaned, pretty_print=True)
return cnt[6:-8]
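# Usage sketch (the HTML snippet and variable below are made-up examples, not
# project data):
#
#   clean_chapter_html('<p>Hello</p>', text_only=True)      # roughly -> 'Hello'
#   clean_chapter_html(content, clean_comments_trail=True)  # drop comment bubbles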
def color_me(l, rgb, pos):
    """Wrap the text content of ``l`` in ``<span class="<rgb>">`` spans.
    HTML tags in ``l`` are preserved: the span is closed before each tag and
    reopened after it, so only text nodes are highlighted. If ``pos`` is given
    and does not fall inside a tag, only ``l[pos[0]:pos[1]]`` is highlighted,
    wrapped in an extra ``<span class="diff changed">``.
    """
if pos:
t1 = l.find('>', pos[0])
t2 = l.find('<', pos[0])
if (t1 == t2) or (t1 > t2 and t2 != -1):
out = l[:pos[0]]
out += '<span class="diff changed">'+color_me(l[pos[0]:pos[1]], rgb, None)+'</span>'
out += l[pos[1]:]
else:
out = l
return out
out = '<span class="%s">' % rgb
n = 0
m = 0
while True:
n = l.find('<', n)
if n == -1: # no more tags
out += l[m:n]
break
else:
if l[n+1] == '/': # tag ending
# closed tag
out += l[m:n]
j = l.find('>', n)+1
tag = l[n:j]
out += '</span>'+tag
n = j
else: # tag start
out += l[m:n]
j = l.find('>', n)+1
if j == 0:
out = l[n:]
n = len(l)
else:
tag = l[n:j]
if not tag.replace(' ','').replace('/','').lower() in ['<br>', '<hr>']:
if n != 0:
out += '</span>'
out += tag+'<span class="%s">' % rgb
else:
out += tag
n = j
m = n
out += l[n:]+'</span>'
return out
def send_notification(request, bookid, version, message, *message_args):
"""Send notification.
Add notification message to channel
Args:
      request: Client Request object
bookid: Unique Book id
version: Book version
message: Notification message key
message_args: positional arguments for message format
"""
channel_name = '/booktype/book/%s/%s/' % (bookid, version)
user = request.user
sputnik.addMessageToChannel(request, channel_name, {
'command': 'notification',
'message': message,
'username': user.username,
'message_args': message_args
}, myself=False)
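# Hypothetical call (the message key and arguments below are made up purely
# for illustration):
#
#   send_notification(request, book.id, '1.0',
#                     'notification_chapter_saved', chapter.title)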
| agpl-3.0 |
firerszd/kbengine | kbe/src/lib/python/Lib/test/test_runpy.py | 84 | 29328 | # Test the runpy module
import unittest
import os
import os.path
import sys
import re
import tempfile
import importlib, importlib.machinery, importlib.util
import py_compile
from test.support import (
forget, make_legacy_pyc, run_unittest, unload, verbose, no_tracing,
create_empty_file)
from test.script_helper import (
make_pkg, make_script, make_zip_pkg, make_zip_script, temp_dir)
import runpy
from runpy import _run_code, _run_module_code, run_module, run_path
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
example_source = """\
# Check basic code execution
result = ['Top level assignment']
def f():
result.append('Lower level reference')
f()
del f
# Check the sys module
import sys
run_argv0 = sys.argv[0]
run_name_in_sys_modules = __name__ in sys.modules
module_in_sys_modules = (run_name_in_sys_modules and
globals() is sys.modules[__name__].__dict__)
# Check nested operation
import runpy
nested = runpy._run_module_code('x=1\\n', mod_name='<run>')
"""
implicit_namespace = {
"__name__": None,
"__file__": None,
"__cached__": None,
"__package__": None,
"__doc__": None,
"__spec__": None
}
example_namespace = {
"sys": sys,
"runpy": runpy,
"result": ["Top level assignment", "Lower level reference"],
"run_argv0": sys.argv[0],
"run_name_in_sys_modules": False,
"module_in_sys_modules": False,
"nested": dict(implicit_namespace,
x=1, __name__="<run>", __loader__=None),
}
example_namespace.update(implicit_namespace)
class CodeExecutionMixin:
# Issue #15230 (run_path not handling run_name correctly) highlighted a
# problem with the way arguments were being passed from higher level APIs
# down to lower level code. This mixin makes it easier to ensure full
# testing occurs at those upper layers as well, not just at the utility
# layer
# Figuring out the loader details in advance is hard to do, so we skip
# checking the full details of loader and loader_state
CHECKED_SPEC_ATTRIBUTES = ["name", "parent", "origin", "cached",
"has_location", "submodule_search_locations"]
def assertNamespaceMatches(self, result_ns, expected_ns):
"""Check two namespaces match.
Ignores any unspecified interpreter created names
"""
# Avoid side effects
result_ns = result_ns.copy()
expected_ns = expected_ns.copy()
# Impls are permitted to add extra names, so filter them out
for k in list(result_ns):
if k.startswith("__") and k.endswith("__"):
if k not in expected_ns:
result_ns.pop(k)
if k not in expected_ns["nested"]:
result_ns["nested"].pop(k)
# Spec equality includes the loader, so we take the spec out of the
# result namespace and check that separately
result_spec = result_ns.pop("__spec__")
expected_spec = expected_ns.pop("__spec__")
if expected_spec is None:
self.assertIsNone(result_spec)
else:
# If an expected loader is set, we just check we got the right
# type, rather than checking for full equality
if expected_spec.loader is not None:
self.assertEqual(type(result_spec.loader),
type(expected_spec.loader))
for attr in self.CHECKED_SPEC_ATTRIBUTES:
k = "__spec__." + attr
actual = (k, getattr(result_spec, attr))
expected = (k, getattr(expected_spec, attr))
self.assertEqual(actual, expected)
# For the rest, we still don't use direct dict comparison on the
# namespace, as the diffs are too hard to debug if anything breaks
self.assertEqual(set(result_ns), set(expected_ns))
for k in result_ns:
actual = (k, result_ns[k])
expected = (k, expected_ns[k])
self.assertEqual(actual, expected)
def check_code_execution(self, create_namespace, expected_namespace):
"""Check that an interface runs the example code correctly
First argument is a callable accepting the initial globals and
using them to create the actual namespace
Second argument is the expected result
"""
sentinel = object()
expected_ns = expected_namespace.copy()
run_name = expected_ns["__name__"]
saved_argv0 = sys.argv[0]
saved_mod = sys.modules.get(run_name, sentinel)
# Check without initial globals
result_ns = create_namespace(None)
self.assertNamespaceMatches(result_ns, expected_ns)
self.assertIs(sys.argv[0], saved_argv0)
self.assertIs(sys.modules.get(run_name, sentinel), saved_mod)
# And then with initial globals
initial_ns = {"sentinel": sentinel}
expected_ns["sentinel"] = sentinel
result_ns = create_namespace(initial_ns)
self.assertIsNot(result_ns, initial_ns)
self.assertNamespaceMatches(result_ns, expected_ns)
self.assertIs(sys.argv[0], saved_argv0)
self.assertIs(sys.modules.get(run_name, sentinel), saved_mod)
class ExecutionLayerTestCase(unittest.TestCase, CodeExecutionMixin):
"""Unit tests for runpy._run_code and runpy._run_module_code"""
def test_run_code(self):
expected_ns = example_namespace.copy()
expected_ns.update({
"__loader__": None,
})
def create_ns(init_globals):
return _run_code(example_source, {}, init_globals)
self.check_code_execution(create_ns, expected_ns)
def test_run_module_code(self):
mod_name = "<Nonsense>"
mod_fname = "Some other nonsense"
mod_loader = "Now you're just being silly"
mod_package = '' # Treat as a top level module
mod_spec = importlib.machinery.ModuleSpec(mod_name,
origin=mod_fname,
loader=mod_loader)
expected_ns = example_namespace.copy()
expected_ns.update({
"__name__": mod_name,
"__file__": mod_fname,
"__loader__": mod_loader,
"__package__": mod_package,
"__spec__": mod_spec,
"run_argv0": mod_fname,
"run_name_in_sys_modules": True,
"module_in_sys_modules": True,
})
def create_ns(init_globals):
return _run_module_code(example_source,
init_globals,
mod_name,
mod_spec)
self.check_code_execution(create_ns, expected_ns)
# TODO: Use self.addCleanup to get rid of a lot of try-finally blocks
class RunModuleTestCase(unittest.TestCase, CodeExecutionMixin):
"""Unit tests for runpy.run_module"""
def expect_import_error(self, mod_name):
try:
run_module(mod_name)
except ImportError:
pass
else:
self.fail("Expected import error for " + mod_name)
def test_invalid_names(self):
# Builtin module
self.expect_import_error("sys")
# Non-existent modules
self.expect_import_error("sys.imp.eric")
self.expect_import_error("os.path.half")
self.expect_import_error("a.bee")
self.expect_import_error(".howard")
self.expect_import_error("..eaten")
# Package without __main__.py
self.expect_import_error("multiprocessing")
def test_library_module(self):
self.assertEqual(run_module("runpy")["__name__"], "runpy")
def _add_pkg_dir(self, pkg_dir, namespace=False):
os.mkdir(pkg_dir)
if namespace:
return None
pkg_fname = os.path.join(pkg_dir, "__init__.py")
create_empty_file(pkg_fname)
return pkg_fname
def _make_pkg(self, source, depth, mod_base="runpy_test",
*, namespace=False, parent_namespaces=False):
# Enforce a couple of internal sanity checks on test cases
if (namespace or parent_namespaces) and not depth:
raise RuntimeError("Can't mark top level module as a "
"namespace package")
pkg_name = "__runpy_pkg__"
test_fname = mod_base+os.extsep+"py"
pkg_dir = sub_dir = os.path.realpath(tempfile.mkdtemp())
if verbose > 1: print(" Package tree in:", sub_dir)
sys.path.insert(0, pkg_dir)
if verbose > 1: print(" Updated sys.path:", sys.path[0])
if depth:
namespace_flags = [parent_namespaces] * depth
namespace_flags[-1] = namespace
for namespace_flag in namespace_flags:
sub_dir = os.path.join(sub_dir, pkg_name)
pkg_fname = self._add_pkg_dir(sub_dir, namespace_flag)
if verbose > 1: print(" Next level in:", sub_dir)
if verbose > 1: print(" Created:", pkg_fname)
mod_fname = os.path.join(sub_dir, test_fname)
mod_file = open(mod_fname, "w")
mod_file.write(source)
mod_file.close()
if verbose > 1: print(" Created:", mod_fname)
mod_name = (pkg_name+".")*depth + mod_base
mod_spec = importlib.util.spec_from_file_location(mod_name,
mod_fname)
return pkg_dir, mod_fname, mod_name, mod_spec
def _del_pkg(self, top, depth, mod_name):
for entry in list(sys.modules):
if entry.startswith("__runpy_pkg__"):
del sys.modules[entry]
if verbose > 1: print(" Removed sys.modules entries")
del sys.path[0]
if verbose > 1: print(" Removed sys.path entry")
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except OSError as ex:
if verbose > 1: print(ex) # Persist with cleaning up
for name in dirs:
fullname = os.path.join(root, name)
try:
os.rmdir(fullname)
except OSError as ex:
if verbose > 1: print(ex) # Persist with cleaning up
try:
os.rmdir(top)
if verbose > 1: print(" Removed package tree")
except OSError as ex:
if verbose > 1: print(ex) # Persist with cleaning up
def _fix_ns_for_legacy_pyc(self, ns, alter_sys):
char_to_add = "c" if __debug__ else "o"
ns["__file__"] += char_to_add
ns["__cached__"] = ns["__file__"]
spec = ns["__spec__"]
new_spec = importlib.util.spec_from_file_location(spec.name,
ns["__file__"])
ns["__spec__"] = new_spec
if alter_sys:
ns["run_argv0"] += char_to_add
def _check_module(self, depth, alter_sys=False,
*, namespace=False, parent_namespaces=False):
pkg_dir, mod_fname, mod_name, mod_spec = (
self._make_pkg(example_source, depth,
namespace=namespace,
parent_namespaces=parent_namespaces))
forget(mod_name)
expected_ns = example_namespace.copy()
expected_ns.update({
"__name__": mod_name,
"__file__": mod_fname,
"__cached__": mod_spec.cached,
"__package__": mod_name.rpartition(".")[0],
"__spec__": mod_spec,
})
if alter_sys:
expected_ns.update({
"run_argv0": mod_fname,
"run_name_in_sys_modules": True,
"module_in_sys_modules": True,
})
def create_ns(init_globals):
return run_module(mod_name, init_globals, alter_sys=alter_sys)
try:
if verbose > 1: print("Running from source:", mod_name)
self.check_code_execution(create_ns, expected_ns)
importlib.invalidate_caches()
__import__(mod_name)
os.remove(mod_fname)
if not sys.dont_write_bytecode:
make_legacy_pyc(mod_fname)
unload(mod_name) # In case loader caches paths
importlib.invalidate_caches()
if verbose > 1: print("Running from compiled:", mod_name)
self._fix_ns_for_legacy_pyc(expected_ns, alter_sys)
self.check_code_execution(create_ns, expected_ns)
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose > 1: print("Module executed successfully")
def _check_package(self, depth, alter_sys=False,
*, namespace=False, parent_namespaces=False):
pkg_dir, mod_fname, mod_name, mod_spec = (
self._make_pkg(example_source, depth, "__main__",
namespace=namespace,
parent_namespaces=parent_namespaces))
pkg_name = mod_name.rpartition(".")[0]
forget(mod_name)
expected_ns = example_namespace.copy()
expected_ns.update({
"__name__": mod_name,
"__file__": mod_fname,
"__cached__": importlib.util.cache_from_source(mod_fname),
"__package__": pkg_name,
"__spec__": mod_spec,
})
if alter_sys:
expected_ns.update({
"run_argv0": mod_fname,
"run_name_in_sys_modules": True,
"module_in_sys_modules": True,
})
def create_ns(init_globals):
return run_module(pkg_name, init_globals, alter_sys=alter_sys)
try:
if verbose > 1: print("Running from source:", pkg_name)
self.check_code_execution(create_ns, expected_ns)
importlib.invalidate_caches()
__import__(mod_name)
os.remove(mod_fname)
if not sys.dont_write_bytecode:
make_legacy_pyc(mod_fname)
unload(mod_name) # In case loader caches paths
if verbose > 1: print("Running from compiled:", pkg_name)
importlib.invalidate_caches()
self._fix_ns_for_legacy_pyc(expected_ns, alter_sys)
self.check_code_execution(create_ns, expected_ns)
finally:
self._del_pkg(pkg_dir, depth, pkg_name)
if verbose > 1: print("Package executed successfully")
def _add_relative_modules(self, base_dir, source, depth):
if depth <= 1:
raise ValueError("Relative module test needs depth > 1")
pkg_name = "__runpy_pkg__"
module_dir = base_dir
for i in range(depth):
parent_dir = module_dir
module_dir = os.path.join(module_dir, pkg_name)
# Add sibling module
sibling_fname = os.path.join(module_dir, "sibling.py")
create_empty_file(sibling_fname)
if verbose > 1: print(" Added sibling module:", sibling_fname)
# Add nephew module
uncle_dir = os.path.join(parent_dir, "uncle")
self._add_pkg_dir(uncle_dir)
if verbose > 1: print(" Added uncle package:", uncle_dir)
cousin_dir = os.path.join(uncle_dir, "cousin")
self._add_pkg_dir(cousin_dir)
if verbose > 1: print(" Added cousin package:", cousin_dir)
nephew_fname = os.path.join(cousin_dir, "nephew.py")
create_empty_file(nephew_fname)
if verbose > 1: print(" Added nephew module:", nephew_fname)
def _check_relative_imports(self, depth, run_name=None):
contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
pkg_dir, mod_fname, mod_name, mod_spec = (
self._make_pkg(contents, depth))
if run_name is None:
expected_name = mod_name
else:
expected_name = run_name
try:
self._add_relative_modules(pkg_dir, contents, depth)
pkg_name = mod_name.rpartition('.')[0]
if verbose > 1: print("Running from source:", mod_name)
d1 = run_module(mod_name, run_name=run_name) # Read from source
self.assertEqual(d1["__name__"], expected_name)
self.assertEqual(d1["__package__"], pkg_name)
self.assertIn("sibling", d1)
self.assertIn("nephew", d1)
del d1 # Ensure __loader__ entry doesn't keep file open
importlib.invalidate_caches()
__import__(mod_name)
os.remove(mod_fname)
if not sys.dont_write_bytecode:
make_legacy_pyc(mod_fname)
unload(mod_name) # In case the loader caches paths
if verbose > 1: print("Running from compiled:", mod_name)
importlib.invalidate_caches()
d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
self.assertEqual(d2["__name__"], expected_name)
self.assertEqual(d2["__package__"], pkg_name)
self.assertIn("sibling", d2)
self.assertIn("nephew", d2)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose > 1: print("Module executed successfully")
def test_run_module(self):
for depth in range(4):
if verbose > 1: print("Testing package depth:", depth)
self._check_module(depth)
def test_run_module_in_namespace_package(self):
for depth in range(1, 4):
if verbose > 1: print("Testing package depth:", depth)
self._check_module(depth, namespace=True, parent_namespaces=True)
def test_run_package(self):
for depth in range(1, 4):
if verbose > 1: print("Testing package depth:", depth)
self._check_package(depth)
def test_run_package_in_namespace_package(self):
for depth in range(1, 4):
if verbose > 1: print("Testing package depth:", depth)
self._check_package(depth, parent_namespaces=True)
def test_run_namespace_package(self):
for depth in range(1, 4):
if verbose > 1: print("Testing package depth:", depth)
self._check_package(depth, namespace=True)
def test_run_namespace_package_in_namespace_package(self):
for depth in range(1, 4):
if verbose > 1: print("Testing package depth:", depth)
self._check_package(depth, namespace=True, parent_namespaces=True)
def test_run_module_alter_sys(self):
for depth in range(4):
if verbose > 1: print("Testing package depth:", depth)
self._check_module(depth, alter_sys=True)
def test_run_package_alter_sys(self):
for depth in range(1, 4):
if verbose > 1: print("Testing package depth:", depth)
self._check_package(depth, alter_sys=True)
def test_explicit_relative_import(self):
for depth in range(2, 5):
if verbose > 1: print("Testing relative imports at depth:", depth)
self._check_relative_imports(depth)
def test_main_relative_import(self):
for depth in range(2, 5):
if verbose > 1: print("Testing main relative imports at depth:", depth)
self._check_relative_imports(depth, "__main__")
def test_run_name(self):
depth = 1
run_name = "And now for something completely different"
pkg_dir, mod_fname, mod_name, mod_spec = (
self._make_pkg(example_source, depth))
forget(mod_name)
expected_ns = example_namespace.copy()
expected_ns.update({
"__name__": run_name,
"__file__": mod_fname,
"__cached__": importlib.util.cache_from_source(mod_fname),
"__package__": mod_name.rpartition(".")[0],
"__spec__": mod_spec,
})
def create_ns(init_globals):
return run_module(mod_name, init_globals, run_name)
try:
self.check_code_execution(create_ns, expected_ns)
finally:
self._del_pkg(pkg_dir, depth, mod_name)
def test_pkgutil_walk_packages(self):
# This is a dodgy hack to use the test_runpy infrastructure to test
# issue #15343. Issue #15348 declares this is indeed a dodgy hack ;)
import pkgutil
max_depth = 4
base_name = "__runpy_pkg__"
package_suffixes = ["uncle", "uncle.cousin"]
module_suffixes = ["uncle.cousin.nephew", base_name + ".sibling"]
expected_packages = set()
expected_modules = set()
for depth in range(1, max_depth):
pkg_name = ".".join([base_name] * depth)
expected_packages.add(pkg_name)
for name in package_suffixes:
expected_packages.add(pkg_name + "." + name)
for name in module_suffixes:
expected_modules.add(pkg_name + "." + name)
pkg_name = ".".join([base_name] * max_depth)
expected_packages.add(pkg_name)
expected_modules.add(pkg_name + ".runpy_test")
pkg_dir, mod_fname, mod_name, mod_spec = (
self._make_pkg("", max_depth))
self.addCleanup(self._del_pkg, pkg_dir, max_depth, mod_name)
for depth in range(2, max_depth+1):
self._add_relative_modules(pkg_dir, "", depth)
for finder, mod_name, ispkg in pkgutil.walk_packages([pkg_dir]):
self.assertIsInstance(finder,
importlib.machinery.FileFinder)
if ispkg:
expected_packages.remove(mod_name)
else:
expected_modules.remove(mod_name)
self.assertEqual(len(expected_packages), 0, expected_packages)
self.assertEqual(len(expected_modules), 0, expected_modules)
class RunPathTestCase(unittest.TestCase, CodeExecutionMixin):
"""Unit tests for runpy.run_path"""
def _make_test_script(self, script_dir, script_basename,
source=None, omit_suffix=False):
if source is None:
source = example_source
return make_script(script_dir, script_basename,
source, omit_suffix)
def _check_script(self, script_name, expected_name, expected_file,
expected_argv0, mod_name=None,
expect_spec=True, check_loader=True):
# First check is without run_name
def create_ns(init_globals):
return run_path(script_name, init_globals)
expected_ns = example_namespace.copy()
if mod_name is None:
spec_name = expected_name
else:
spec_name = mod_name
if expect_spec:
mod_spec = importlib.util.spec_from_file_location(spec_name,
expected_file)
mod_cached = mod_spec.cached
if not check_loader:
mod_spec.loader = None
else:
mod_spec = mod_cached = None
expected_ns.update({
"__name__": expected_name,
"__file__": expected_file,
"__cached__": mod_cached,
"__package__": "",
"__spec__": mod_spec,
"run_argv0": expected_argv0,
"run_name_in_sys_modules": True,
"module_in_sys_modules": True,
})
self.check_code_execution(create_ns, expected_ns)
# Second check makes sure run_name works in all cases
run_name = "prove.issue15230.is.fixed"
def create_ns(init_globals):
return run_path(script_name, init_globals, run_name)
if expect_spec and mod_name is None:
mod_spec = importlib.util.spec_from_file_location(run_name,
expected_file)
if not check_loader:
mod_spec.loader = None
expected_ns["__spec__"] = mod_spec
expected_ns["__name__"] = run_name
expected_ns["__package__"] = run_name.rpartition(".")[0]
self.check_code_execution(create_ns, expected_ns)
def _check_import_error(self, script_name, msg):
msg = re.escape(msg)
self.assertRaisesRegex(ImportError, msg, run_path, script_name)
def test_basic_script(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name)
self._check_script(script_name, "<run_path>", script_name,
script_name, expect_spec=False)
def test_basic_script_no_suffix(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name,
omit_suffix=True)
self._check_script(script_name, "<run_path>", script_name,
script_name, expect_spec=False)
def test_script_compiled(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = py_compile.compile(script_name, doraise=True)
os.remove(script_name)
self._check_script(compiled_name, "<run_path>", compiled_name,
compiled_name, expect_spec=False)
def test_directory(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
self._check_script(script_dir, "<run_path>", script_name,
script_dir, mod_name=mod_name)
def test_directory_compiled(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = py_compile.compile(script_name, doraise=True)
os.remove(script_name)
if not sys.dont_write_bytecode:
legacy_pyc = make_legacy_pyc(script_name)
self._check_script(script_dir, "<run_path>", legacy_pyc,
script_dir, mod_name=mod_name)
def test_directory_error(self):
with temp_dir() as script_dir:
mod_name = 'not_main'
script_name = self._make_test_script(script_dir, mod_name)
msg = "can't find '__main__' module in %r" % script_dir
self._check_import_error(script_dir, msg)
def test_zipfile(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
self._check_script(zip_name, "<run_path>", fname, zip_name,
mod_name=mod_name, check_loader=False)
def test_zipfile_compiled(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = py_compile.compile(script_name, doraise=True)
zip_name, fname = make_zip_script(script_dir, 'test_zip',
compiled_name)
self._check_script(zip_name, "<run_path>", fname, zip_name,
mod_name=mod_name, check_loader=False)
def test_zipfile_error(self):
with temp_dir() as script_dir:
mod_name = 'not_main'
script_name = self._make_test_script(script_dir, mod_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
msg = "can't find '__main__' module in %r" % zip_name
self._check_import_error(zip_name, msg)
@no_tracing
def test_main_recursion_error(self):
with temp_dir() as script_dir, temp_dir() as dummy_dir:
mod_name = '__main__'
source = ("import runpy\n"
"runpy.run_path(%r)\n") % dummy_dir
script_name = self._make_test_script(script_dir, mod_name, source)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
msg = "recursion depth exceeded"
self.assertRaisesRegex(RuntimeError, msg, run_path, zip_name)
def test_encoding(self):
with temp_dir() as script_dir:
filename = os.path.join(script_dir, 'script.py')
with open(filename, 'w', encoding='latin1') as f:
f.write("""
#coding:latin1
s = "non-ASCII: h\xe9"
""")
result = run_path(filename)
self.assertEqual(result['s'], "non-ASCII: h\xe9")
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
fredericlepied/ansible | lib/ansible/module_utils/gcdns.py | 187 | 2531 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Franck Cuny <[email protected]>, 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
try:
from libcloud.dns.types import Provider
from libcloud.dns.providers import get_driver
HAS_LIBCLOUD_BASE = True
except ImportError:
HAS_LIBCLOUD_BASE = False
from ansible.module_utils.gcp import gcp_connect
from ansible.module_utils.gcp import unexpected_error_msg as gcp_error
USER_AGENT_PRODUCT = "Ansible-gcdns"
USER_AGENT_VERSION = "v1"
def gcdns_connect(module, provider=None):
"""Return a GCP connection for Google Cloud DNS."""
if not HAS_LIBCLOUD_BASE:
module.fail_json(msg='libcloud must be installed to use this module')
provider = provider or Provider.GOOGLE
return gcp_connect(module, provider, get_driver, USER_AGENT_PRODUCT, USER_AGENT_VERSION)
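# Minimal usage sketch (illustrative; ``module`` would be an AnsibleModule
# built by the calling Ansible module, which is not shown here):
#
#   driver = gcdns_connect(module)
#   zones = driver.list_zones()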
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
return gcp_error(error)
| gpl-3.0 |
zasdfgbnm/tensorflow | tensorflow/compiler/tests/variable_ops_test.py | 15 | 10467 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reading and writing variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
class VariableOpsTest(XLATestCase):
"""Test cases for resource variable operators."""
def testOneWriteOneOutput(self):
# Regression test for a bug where computations with one non-constant
# output and one variable update were mishandled.
for dtype in self.numeric_types:
init = np.array([[1, 2j], [3, 4]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
p = array_ops.placeholder(dtype)
x = v.assign_add(p)
with ops.control_dependencies([x]):
y = v.read_value()
self.assertAllClose(
np.array([[2, 1 + 2j], [4, 5]]).astype(dtype), sess.run(y, {
p: 1
}))
def testSparseRead0DIndices(self):
for dtype in self.numeric_types:
init = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8j, 9, 10,
11]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read(2)
self.assertAllClose(
np.array([8j, 9, 10, 11]).astype(dtype), sess.run(x))
def testSparseRead1DIndices(self):
for dtype in self.numeric_types:
init = np.array([[0, 1, 2, 3], [4, 5, 6j, 7], [8, 9, 10,
11]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read([2, 1])
self.assertAllClose(
np.array([[8, 9, 10, 11], [4, 5, 6j, 7]]).astype(dtype),
sess.run(x))
def testSparseRead2DIndices(self):
for dtype in self.numeric_types:
init = np.array([[0, 1, 2j, 3], [4, 5, 6, 7], [8, 9, 10,
11]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read([[2, 1], [0, 2]])
self.assertAllClose(
np.array([[[8, 9, 10, 11], [4, 5, 6, 7]],
[[0, 1, 2j, 3], [8, 9, 10, 11]]]).astype(dtype),
sess.run(x))
def testSparseRead2DIndices3DTensor(self):
for dtype in self.numeric_types:
init = np.array([[[0, 1, 2], [3, 4, 5]], [[10, 11, 12], [13, 14, 15]],
[[20, 21, 22], [23, 24j, 25]],
[[30, 31, 32], [33, 34, 35]]]).astype(dtype)
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
sess.run(variables.variables_initializer([v]))
x = v.sparse_read([[2, 1], [3, 0]])
self.assertAllClose(
np.array(
[[[[20, 21, 22], [23, 24j, 25]], [[10, 11, 12], [13, 14, 15]]],
[[[30, 31, 32], [33, 34, 35]], [[0, 1, 2], [3, 4, 5]]]],
).astype(dtype), sess.run(x))
def testShape(self):
for dtype in self.numeric_types:
init = np.ones([2, 3]).astype(dtype)
with self.test_session() as session, self.test_scope():
v = resource_variable_ops.ResourceVariable(init)
session.run(variables.variables_initializer([v]))
h = v.handle
s32, s64 = session.run([
resource_variable_ops.variable_shape(h),
resource_variable_ops.variable_shape(h, out_type=dtypes.int64)
])
self.assertEqual(s32.dtype, np.int32)
self.assertEqual(s64.dtype, np.int64)
self.assertAllEqual(s32, [2, 3])
self.assertAllEqual(s64, [2, 3])
def testReadWrite(self):
"""Tests initialization, reading, and writing a resource variable."""
for dtype in self.numeric_types:
with self.test_session() as session:
with self.test_scope():
with variable_scope.variable_scope("ascope", use_resource=True):
x = variable_scope.get_variable(
"x",
shape=[],
dtype=dtype,
initializer=init_ops.constant_initializer(2))
a = x.read_value()
with ops.control_dependencies([a]):
b = state_ops.assign(x, dtype(47))
with ops.control_dependencies([b]):
c = x.read_value()
with ops.control_dependencies([c]):
d = state_ops.assign_add(x, np.array(6 + 2j).astype(dtype))
with ops.control_dependencies([d]):
e = state_ops.assign_sub(x, dtype(3))
with ops.control_dependencies([e]):
f = x.read_value()
session.run(variables.global_variables_initializer())
v1, v2, v3 = session.run([a, c, f])
self.assertAllClose(dtype(2), v1)
self.assertAllClose(dtype(47), v2)
self.assertAllClose(np.array(50 + 2j).astype(dtype), v3)
def testTraining(self):
"""Tests a gradient descent step for a simple model."""
with self.test_session() as session:
with self.test_scope():
with variable_scope.variable_scope("ascope", use_resource=True):
w = variable_scope.get_variable(
"w",
shape=[4, 2],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.float32)))
b = variable_scope.get_variable(
"b",
shape=[2],
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(
np.array([2, 3], dtype=np.float32)))
x = array_ops.placeholder(dtypes.float32, shape=[1, 4])
y = math_ops.matmul(x, w) + b
loss = math_ops.reduce_sum(y)
optimizer = GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)
session.run(variables.global_variables_initializer())
session.run(train, {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
vw, vb = session.run([w, b])
self.assertAllClose(
np.array(
[[0.3, 1.3], [2.7, 3.7], [4.5, 5.5], [6.1, 7.1]],
dtype=np.float32),
vw,
rtol=1e-4)
self.assertAllClose(np.array([1.9, 2.9], dtype=np.float32), vb, rtol=1e-4)
class StridedSliceAssignChecker(object):
"""Compares the results of a slice assignment using Tensorflow and numpy."""
def __init__(self, test, x, dtype):
self.dtype = dtype
self.test = test
self.x_np = np.array(x).astype(dtype)
# Randomly start on mode 0 or 1.
self.which_mode = np.random.randint(2, size=1)[0]
def __setitem__(self, index, value):
self.which_mode = 1 - self.which_mode
value = np.array(value).astype(self.dtype)
with self.test.test_session() as sess, self.test.test_scope():
x = constant_op.constant(self.x_np, dtype=self.dtype)
var = resource_variable_ops.ResourceVariable(x)
sess.run(variables.variables_initializer([var]))
if self.which_mode == 0:
val = sess.run(var[index].assign(value))
else:
assert self.which_mode == 1
val = sess.run(state_ops.assign(var[index], value))
valnp = np.copy(self.x_np)
valnp[index] = np.array(value)
self.test.assertAllEqual(val, valnp)
class SliceAssignTest(XLATestCase):
def testSliceAssign(self):
for dtype in self.numeric_types:
checker = StridedSliceAssignChecker(self, [[1, 2, 3], [4, 5, 6]],
dtype=dtype)
# No-op assignment
checker[:] = [[10, 20, 30], [40, 50, 60]]
# Checks trivial (1,1) shape tensor
checker[1:2, 1:2] = [[66]]
# shrink shape changes
checker[1:2, 1] = [66]
checker[1, 1:2] = [66]
checker[1, 1] = 66
# newaxis shape changes
checker[:, None, :] = [[[10, 20, 30]], [[40, 50, 50]]]
# shrink and newaxis
checker[None, None, 0, 0:1] = [[[99]]]
# Non unit strides
checker[::1, 1::-1] = [[3, 33], [4, 44]]
# degenerate interval
checker[8:10, 0] = []
checker[8:10, 8:10] = [[]]
# Assign vector to scalar (rank-0) using newaxis
checker2 = StridedSliceAssignChecker(self, 222, dtype=dtype)
checker2[()] = 6 # no indices
checker2[...] = 6 # ellipsis
checker2[None] = [6] # new axis
def testUninitialized(self):
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"uninitialized variable"):
with self.test_session() as sess, self.test_scope():
v = resource_variable_ops.ResourceVariable([1, 2])
sess.run(v[:].assign([1, 2]))
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
tealover/nova | nova/tests/unit/api/openstack/compute/test_api.py | 26 | 5850 | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import six
import webob
import webob.dec
import webob.exc
from nova.api import openstack as openstack_api
from nova.api.openstack import wsgi
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
class APITest(test.NoDBTestCase):
def _wsgi_app(self, inner_app):
# simpler version of the app than fakes.wsgi_app
return openstack_api.FaultWrapper(inner_app)
def test_malformed_json(self):
req = webob.Request.blank('/')
req.method = 'POST'
req.body = '{'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_malformed_xml(self):
req = webob.Request.blank('/')
req.method = 'POST'
req.body = '<hi im not xml>'
req.headers["content-type"] = "application/xml"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_vendor_content_type_json(self):
ctype = 'application/vnd.openstack.compute+json'
req = webob.Request.blank('/')
req.headers['Accept'] = ctype
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, ctype)
jsonutils.loads(res.body)
def test_exceptions_are_converted_to_faults_webob_exc(self):
@webob.dec.wsgify
def raise_webob_exc(req):
raise webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
# api.application = raise_webob_exc
api = self._wsgi_app(raise_webob_exc)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(resp.status_int, 404, resp.body)
def test_exceptions_are_converted_to_faults_api_fault(self):
@webob.dec.wsgify
def raise_api_fault(req):
exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
return wsgi.Fault(exc)
# api.application = raise_api_fault
api = self._wsgi_app(raise_api_fault)
resp = webob.Request.blank('/').get_response(api)
self.assertIn('itemNotFound', resp.body)
self.assertEqual(resp.status_int, 404, resp.body)
def test_exceptions_are_converted_to_faults_exception(self):
@webob.dec.wsgify
def fail(req):
raise Exception("Threw an exception")
# api.application = fail
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn('{"computeFault', resp.body)
self.assertEqual(resp.status_int, 500, resp.body)
def _do_test_exception_safety_reflected_in_faults(self, expose):
class ExceptionWithSafety(exception.NovaException):
safe = expose
@webob.dec.wsgify
def fail(req):
raise ExceptionWithSafety('some explanation')
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn('{"computeFault', resp.body)
expected = ('ExceptionWithSafety: some explanation' if expose else
'The server has either erred or is incapable '
'of performing the requested operation.')
self.assertIn(expected, resp.body)
self.assertEqual(resp.status_int, 500, resp.body)
def test_safe_exceptions_are_described_in_faults(self):
self._do_test_exception_safety_reflected_in_faults(True)
def test_unsafe_exceptions_are_not_described_in_faults(self):
self._do_test_exception_safety_reflected_in_faults(False)
def _do_test_exception_mapping(self, exception_type, msg):
@webob.dec.wsgify
def fail(req):
raise exception_type(msg)
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertIn(msg, resp.body)
self.assertEqual(resp.status_int, exception_type.code, resp.body)
if hasattr(exception_type, 'headers'):
for (key, value) in six.iteritems(exception_type.headers):
self.assertIn(key, resp.headers)
self.assertEqual(resp.headers[key], str(value))
def test_quota_error_mapping(self):
self._do_test_exception_mapping(exception.QuotaError, 'too many used')
def test_non_nova_notfound_exception_mapping(self):
class ExceptionWithCode(Exception):
code = 404
self._do_test_exception_mapping(ExceptionWithCode,
'NotFound')
def test_non_nova_exception_mapping(self):
class ExceptionWithCode(Exception):
code = 417
self._do_test_exception_mapping(ExceptionWithCode,
'Expectation failed')
def test_exception_with_none_code_throws_500(self):
class ExceptionWithNoneCode(Exception):
code = None
@webob.dec.wsgify
def fail(req):
raise ExceptionWithNoneCode()
api = self._wsgi_app(fail)
resp = webob.Request.blank('/').get_response(api)
self.assertEqual(500, resp.status_int)
| apache-2.0 |
hrjn/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
thanhacun/odoo | addons/report/controllers/main.py | 210 | 6943 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.web.http import Controller, route, request
from openerp.addons.web.controllers.main import _serialize_exception
from openerp.osv import osv
from openerp.tools import html_escape
import simplejson
from werkzeug import exceptions, url_decode
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
from werkzeug.datastructures import Headers
from reportlab.graphics.barcode import createBarcodeDrawing
class ReportController(Controller):
#------------------------------------------------------
# Report controllers
#------------------------------------------------------
@route([
'/report/<path:converter>/<reportname>',
'/report/<path:converter>/<reportname>/<docids>',
], type='http', auth='user', website=True)
def report_routes(self, reportname, docids=None, converter=None, **data):
report_obj = request.registry['report']
cr, uid, context = request.cr, request.uid, request.context
if docids:
docids = [int(i) for i in docids.split(',')]
options_data = None
if data.get('options'):
options_data = simplejson.loads(data['options'])
if data.get('context'):
# Ignore 'lang' here, because the context in data is the one from the webclient *but* if
            # the user explicitly wants to change the lang, this mechanism overwrites it.
data_context = simplejson.loads(data['context'])
if data_context.get('lang'):
del data_context['lang']
context.update(data_context)
if converter == 'html':
html = report_obj.get_html(cr, uid, docids, reportname, data=options_data, context=context)
return request.make_response(html)
elif converter == 'pdf':
pdf = report_obj.get_pdf(cr, uid, docids, reportname, data=options_data, context=context)
pdfhttpheaders = [('Content-Type', 'application/pdf'), ('Content-Length', len(pdf))]
return request.make_response(pdf, headers=pdfhttpheaders)
else:
raise exceptions.HTTPException(description='Converter %s not implemented.' % converter)
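    # Example calls handled by report_routes above (report name and ids are
    # purely illustrative): /report/html/<some.report.name>/3 renders the QWeb
    # HTML, while /report/pdf/<some.report.name>/3,4 returns the rendered PDF.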
#------------------------------------------------------
# Misc. route utils
#------------------------------------------------------
@route(['/report/barcode', '/report/barcode/<type>/<path:value>'], type='http', auth="user")
def report_barcode(self, type, value, width=600, height=100, humanreadable=0):
"""Contoller able to render barcode images thanks to reportlab.
Samples:
<img t-att-src="'/report/barcode/QR/%s' % o.name"/>
<img t-att-src="'/report/barcode/?type=%s&value=%s&width=%s&height=%s' %
('QR', o.name, 200, 200)"/>
:param type: Accepted types: 'Codabar', 'Code11', 'Code128', 'EAN13', 'EAN8', 'Extended39',
'Extended93', 'FIM', 'I2of5', 'MSI', 'POSTNET', 'QR', 'Standard39', 'Standard93',
'UPCA', 'USPS_4State'
:param humanreadable: Accepted values: 0 (default) or 1. 1 will insert the readable value
at the bottom of the output image
"""
try:
width, height, humanreadable = int(width), int(height), bool(humanreadable)
barcode = createBarcodeDrawing(
type, value=value, format='png', width=width, height=height,
humanReadable = humanreadable
)
barcode = barcode.asString('png')
except (ValueError, AttributeError):
raise exceptions.HTTPException(description='Cannot convert into barcode.')
return request.make_response(barcode, headers=[('Content-Type', 'image/png')])
@route(['/report/download'], type='http', auth="user")
def report_download(self, data, token):
"""This function is used by 'qwebactionmanager.js' in order to trigger the download of
a pdf/controller report.
        :param data: a JSON.stringified javascript array containing the report internal url ([0]) and
type [1]
:returns: Response with a filetoken cookie and an attachment header
"""
requestcontent = simplejson.loads(data)
url, type = requestcontent[0], requestcontent[1]
try:
if type == 'qweb-pdf':
reportname = url.split('/report/pdf/')[1].split('?')[0]
docids = None
if '/' in reportname:
reportname, docids = reportname.split('/')
if docids:
# Generic report:
response = self.report_routes(reportname, docids=docids, converter='pdf')
else:
# Particular report:
data = url_decode(url.split('?')[1]).items() # decoding the args represented in JSON
response = self.report_routes(reportname, converter='pdf', **dict(data))
response.headers.add('Content-Disposition', 'attachment; filename=%s.pdf;' % reportname)
response.set_cookie('fileToken', token)
return response
            elif type == 'controller':
reqheaders = Headers(request.httprequest.headers)
response = Client(request.httprequest.app, BaseResponse).get(url, headers=reqheaders, follow_redirects=True)
response.set_cookie('fileToken', token)
return response
else:
return
except Exception, e:
se = _serialize_exception(e)
error = {
'code': 200,
'message': "Odoo Server Error",
'data': se
}
return request.make_response(html_escape(simplejson.dumps(error)))
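    # The ``data`` argument of report_download is expected to look like the
    # following (values illustrative): '["/report/pdf/<some.report.name>/7", "qweb-pdf"]'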
@route(['/report/check_wkhtmltopdf'], type='json', auth="user")
def check_wkhtmltopdf(self):
return request.registry['report']._check_wkhtmltopdf()
| agpl-3.0 |
grupoprog3/proyecto_final | Entrega Final/flask/Lib/encodings/mac_latin2.py | 219 | 14118 | """ Python Character Mapping Codec mac_latin2 generated from 'MAPPINGS/VENDORS/MICSFT/MAC/LATIN2.TXT' with gencodec.py.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-latin2',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
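# Illustrative round trip (assuming this codec is registered on the normal
# encodings search path, e.g. under the alias 'mac_latin2'):
#
#   b'\x87'.decode('mac_latin2')   # -> 'á' (LATIN SMALL LETTER A WITH ACUTE)
#   'á'.encode('mac_latin2')       # -> b'\x87', per the tables below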
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
'\u2020' # 0xA0 -> DAGGER
'\xb0' # 0xA1 -> DEGREE SIGN
'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
'\xa3' # 0xA3 -> POUND SIGN
'\xa7' # 0xA4 -> SECTION SIGN
'\u2022' # 0xA5 -> BULLET
'\xb6' # 0xA6 -> PILCROW SIGN
'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
'\xae' # 0xA8 -> REGISTERED SIGN
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u2122' # 0xAA -> TRADE MARK SIGN
'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
'\xa8' # 0xAC -> DIAERESIS
'\u2260' # 0xAD -> NOT EQUAL TO
'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
'\u2211' # 0xB7 -> N-ARY SUMMATION
'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\xac' # 0xC2 -> NOT SIGN
'\u221a' # 0xC3 -> SQUARE ROOT
'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
'\u2206' # 0xC6 -> INCREMENT
'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
'\xa0' # 0xCA -> NO-BREAK SPACE
'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
'\u2013' # 0xD0 -> EN DASH
'\u2014' # 0xD1 -> EM DASH
'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
'\xf7' # 0xD6 -> DIVISION SIGN
'\u25ca' # 0xD7 -> LOZENGE
'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
bluewish/tiny4412-linux-3.5 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
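#
# A hypothetical invocation (exact paths depend on the local perf install):
#
#     perf script -s sctop.py firefox 5
#
# would refresh the per-syscall totals for 'firefox' every 5 seconds.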
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
surtrstudios/gtest | scripts/pump.py | 2471 | 23673 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
"""Represents a position (line and column) in a text file."""
def __init__(self, line=-1, column=-1):
self.line = line
self.column = column
def __eq__(self, rhs):
return self.line == rhs.line and self.column == rhs.column
def __ne__(self, rhs):
return not self == rhs
def __lt__(self, rhs):
return self.line < rhs.line or (
self.line == rhs.line and self.column < rhs.column)
def __le__(self, rhs):
return self < rhs or self == rhs
def __gt__(self, rhs):
return rhs < self
def __ge__(self, rhs):
return rhs <= self
def __str__(self):
if self == Eof():
return 'EOF'
else:
return '%s(%s)' % (self.line + 1, self.column)
def __add__(self, offset):
return Cursor(self.line, self.column + offset)
def __sub__(self, offset):
return Cursor(self.line, self.column - offset)
def Clone(self):
"""Returns a copy of self."""
return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
"""Returns the special cursor to denote the end-of-file."""
return Cursor(-1, -1)
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
"""Returns True iff the given position in lines starts with 'string'."""
return lines[pos.line][pos.column:].startswith(string)
def FindFirstInLine(line, token_table):
best_match_start = -1
for (regex, token_type) in token_table:
m = regex.search(line)
if m:
# We found regex in lines
if best_match_start < 0 or m.start() < best_match_start:
best_match_start = m.start()
best_match_length = m.end() - m.start()
best_match_token_type = token_type
if best_match_start < 0:
return None
return (best_match_start, best_match_length, best_match_token_type)
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
# We failed to find str in lines
return None
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def StripMetaComments(str):
"""Strip meta comments from each line in the given string."""
# First, completely remove lines containing nothing but a meta
# comment, including the trailing \n.
str = re.sub(r'^\s*\$\$.*\n', '', str)
# Then, remove meta comments from contentful lines.
return re.sub(r'\s*\$\$.*', '', str)
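# Illustrative behaviour (assuming the regexes above): stripping
#     'int x;  $$ temp variable\n$$ whole-line note\nint y;\n'
# yields
#     'int x;\nint y;\n'
# i.e. both the trailing and the whole-line meta comments are dropped.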
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
"""A generator that yields the tokens in the given string."""
if s != '':
lines = s.splitlines(True)
for token in TokenizeLines(lines, Cursor(0, 0)):
yield token
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
head = a_list[0]
a_list[:1] = []
return head
def PushFront(a_list, elem):
a_list[:0] = [elem]
def PopToken(a_list, token_type=None):
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
if not a_list:
return None
return a_list[0]
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
"""Convert the given Pump source text into an AST."""
tokens = list(Tokenize(pump_src_text))
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
class Output:
def __init__(self):
self.string = ''
def GetLastLine(self):
index = self.string.rfind('\n')
if index < 0:
return ''
return self.string[index + 1:]
def Append(self, s):
self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
print 'BAD'
print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsSingleLineComment(cur_line):
return '//' in cur_line
def IsInPreprocessorDirective(prev_lines, cur_line):
if cur_line.lstrip().startswith('#'):
return True
return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
return re.search(r'/\* IWYU pragma: ', line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line) or
# Don't break IWYU pragmas, either; that causes iwyu.py problems.
re.search(r'// IWYU pragma: ', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsSingleLineComment(line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapComment(line, output)
elif IsInPreprocessorDirective(output, line):
if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
# The style guide made an exception to allow long header guard lines,
# includes and IWYU pragmas.
output.append(line)
else:
WrapPreprocessorDirective(line, output)
elif IsMultiLineIWYUPragma(line):
output.append(line)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
def ConvertFromPumpSource(src_text):
"""Return the text generated from the given Pump source text."""
ast = ParseToAST(StripMetaComments(src_text))
output = Output()
RunCode(Env(), ast, output)
return BeautifyCode(output.string)
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
output_str = ConvertFromPumpSource(file(file_path, 'r').read())
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
| bsd-3-clause |
liukaijv/flask | flask/testing.py | 150 | 5081 | # -*- coding: utf-8 -*-
"""
flask.testing
~~~~~~~~~~~~~
Implements test support helpers. This module is lazily imported
and usually not used in production environments.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from contextlib import contextmanager
from werkzeug.test import Client, EnvironBuilder
from flask import _request_ctx_stack
try:
from werkzeug.urls import url_parse
except ImportError:
from urlparse import urlsplit as url_parse
def make_test_environ_builder(app, path='/', base_url=None, *args, **kwargs):
"""Creates a new test builder with some application defaults thrown in."""
http_host = app.config.get('SERVER_NAME')
app_root = app.config.get('APPLICATION_ROOT')
if base_url is None:
url = url_parse(path)
base_url = 'http://%s/' % (url.netloc or http_host or 'localhost')
if app_root:
base_url += app_root.lstrip('/')
if url.netloc:
path = url.path
if url.query:
path += '?' + url.query
return EnvironBuilder(path, base_url, *args, **kwargs)
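# Illustrative call (hypothetical app and path): with SERVER_NAME and
# APPLICATION_ROOT unset, make_test_environ_builder(app, '/login?next=/')
# builds an EnvironBuilder for path '/login?next=/' against base_url
# 'http://localhost/'.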
class FlaskClient(Client):
"""Works like a regular Werkzeug test client but has some knowledge about
how Flask works to defer the cleanup of the request context stack to the
end of a with body when used in a with statement. For general information
about how to use this class refer to :class:`werkzeug.test.Client`.
Basic usage is outlined in the :ref:`testing` chapter.
"""
preserve_context = False
@contextmanager
def session_transaction(self, *args, **kwargs):
"""When used in combination with a with statement this opens a
session transaction. This can be used to modify the session that
the test client uses. Once the with block is left the session is
stored back.
::
with client.session_transaction() as session:
session['value'] = 42
Internally this is implemented by going through a temporary test
request context and since session handling could depend on
request variables this function accepts the same arguments as
:meth:`~flask.Flask.test_request_context` which are directly
passed through.
"""
if self.cookie_jar is None:
raise RuntimeError('Session transactions only make sense '
'with cookies enabled.')
app = self.application
environ_overrides = kwargs.setdefault('environ_overrides', {})
self.cookie_jar.inject_wsgi(environ_overrides)
outer_reqctx = _request_ctx_stack.top
with app.test_request_context(*args, **kwargs) as c:
sess = app.open_session(c.request)
if sess is None:
raise RuntimeError('Session backend did not open a session. '
'Check the configuration')
# Since we have to open a new request context for the session
# handling we want to make sure that we hide out own context
# from the caller. By pushing the original request context
# (or None) on top of this and popping it we get exactly that
# behavior. It's important to not use the push and pop
# methods of the actual request context object since that would
# mean that cleanup handlers are called
_request_ctx_stack.push(outer_reqctx)
try:
yield sess
finally:
_request_ctx_stack.pop()
resp = app.response_class()
if not app.session_interface.is_null_session(sess):
app.save_session(sess, resp)
headers = resp.get_wsgi_headers(c.request.environ)
self.cookie_jar.extract_wsgi(c.request.environ, headers)
def open(self, *args, **kwargs):
kwargs.setdefault('environ_overrides', {}) \
['flask._preserve_context'] = self.preserve_context
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
builder = make_test_environ_builder(self.application, *args, **kwargs)
return Client.open(self, builder,
as_tuple=as_tuple,
buffered=buffered,
follow_redirects=follow_redirects)
def __enter__(self):
if self.preserve_context:
raise RuntimeError('Cannot nest client invocations')
self.preserve_context = True
return self
def __exit__(self, exc_type, exc_value, tb):
self.preserve_context = False
# on exit we want to clean up earlier. Normally the request context
# stays preserved until the next request in the same thread comes
# in. See RequestGlobals.push() for the general behavior.
top = _request_ctx_stack.top
if top is not None and top.preserved:
top.pop()
| bsd-3-clause |