repo_name | path | copies | size | content | license
---|---|---|---|---|---|
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/future/utils/surrogateescape.py | 59 | 6084 | """
This is Victor Stinner's pure-Python implementation of PEP 383: the "surrogateescape" error
handler of Python 3.
Source: misc/python/surrogateescape.py in https://bitbucket.org/haypo/misc
"""
# This code is released under the Python license and the BSD 2-clause license
import codecs
import sys
from future import utils
FS_ERRORS = 'surrogateescape'
# # -- Python 2/3 compatibility -------------------------------------
# FS_ERRORS = 'my_surrogateescape'
def u(text):
if utils.PY3:
return text
else:
return text.decode('unicode_escape')
def b(data):
if utils.PY3:
return data.encode('latin1')
else:
return data
if utils.PY3:
_unichr = chr
bytes_chr = lambda code: bytes((code,))
else:
_unichr = unichr
bytes_chr = chr
def surrogateescape_handler(exc):
"""
Pure Python implementation of the PEP 383: the "surrogateescape" error
handler of Python 3. Undecodable bytes will be replaced by a Unicode
character U+DCxx on decoding, and these are translated into the
original bytes on encoding.
"""
mystring = exc.object[exc.start:exc.end]
try:
if isinstance(exc, UnicodeDecodeError):
# mystring is a byte-string in this case
decoded = replace_surrogate_decode(mystring)
elif isinstance(exc, UnicodeEncodeError):
# In the case of u'\udcc3'.encode('ascii',
# 'this_surrogateescape_handler'), both Python 2.x and 3.x raise an
# exception anyway after this function is called, even though I think
# it's doing what it should. It seems that the strict encoder is called
# to encode the unicode string that this function returns ...
decoded = replace_surrogate_encode(mystring)
else:
raise exc
except NotASurrogateError:
raise exc
return (decoded, exc.end)
class NotASurrogateError(Exception):
pass
def replace_surrogate_encode(mystring):
"""
Returns a (unicode) string, not the more logical bytes, because the codecs
register_error functionality expects this.
"""
decoded = []
for ch in mystring:
# if utils.PY3:
# code = ch
# else:
code = ord(ch)
# The following magic comes from Py3.3's Python/codecs.c file:
if not 0xD800 <= code <= 0xDCFF:
# Not a surrogate. Fail with the original exception (the handler above
# re-raises it when it catches NotASurrogateError).
raise NotASurrogateError
# mybytes = [0xe0 | (code >> 12),
# 0x80 | ((code >> 6) & 0x3f),
# 0x80 | (code & 0x3f)]
# Is this a good idea?
if 0xDC00 <= code <= 0xDC7F:
decoded.append(_unichr(code - 0xDC00))
elif code <= 0xDCFF:
decoded.append(_unichr(code - 0xDC00))
else:
raise NotASurrogateError
return str().join(decoded)
def replace_surrogate_decode(mybytes):
"""
Returns a (unicode) string
"""
decoded = []
for ch in mybytes:
# We may be parsing newbytes (in which case ch is an int) or a native
# str on Py2
if isinstance(ch, int):
code = ch
else:
code = ord(ch)
if 0x80 <= code <= 0xFF:
decoded.append(_unichr(0xDC00 + code))
elif code <= 0x7F:
decoded.append(_unichr(code))
else:
# # It may be a bad byte
# # Try swallowing it.
# continue
# print("RAISE!")
raise NotASurrogateError
return str().join(decoded)
def encodefilename(fn):
if FS_ENCODING == 'ascii':
# ASCII encoder of Python 2 expects that the error handler returns a
# Unicode string encodable to ASCII, whereas our surrogateescape error
# handler has to return bytes in 0x80-0xFF range.
encoded = []
for index, ch in enumerate(fn):
code = ord(ch)
if code < 128:
ch = bytes_chr(code)
elif 0xDC80 <= code <= 0xDCFF:
ch = bytes_chr(code - 0xDC00)
else:
raise UnicodeEncodeError(FS_ENCODING,
fn, index, index+1,
'ordinal not in range(128)')
encoded.append(ch)
return bytes().join(encoded)
elif FS_ENCODING == 'utf-8':
# UTF-8 encoder of Python 2 encodes surrogates, so U+DC80-U+DCFF
# doesn't go through our error handler
encoded = []
for index, ch in enumerate(fn):
code = ord(ch)
if 0xD800 <= code <= 0xDFFF:
if 0xDC80 <= code <= 0xDCFF:
ch = bytes_chr(code - 0xDC00)
encoded.append(ch)
else:
raise UnicodeEncodeError(
FS_ENCODING,
fn, index, index+1, 'surrogates not allowed')
else:
ch_utf8 = ch.encode('utf-8')
encoded.append(ch_utf8)
return bytes().join(encoded)
else:
return fn.encode(FS_ENCODING, FS_ERRORS)
def decodefilename(fn):
return fn.decode(FS_ENCODING, FS_ERRORS)
FS_ENCODING = 'ascii'; fn = b('[abc\xff]'); encoded = u('[abc\udcff]')
# FS_ENCODING = 'cp932'; fn = b('[abc\x81\x00]'); encoded = u('[abc\udc81\x00]')
# FS_ENCODING = 'UTF-8'; fn = b('[abc\xff]'); encoded = u('[abc\udcff]')
# normalize the filesystem encoding name.
# For example, we expect "utf-8", not "UTF8".
FS_ENCODING = codecs.lookup(FS_ENCODING).name
def register_surrogateescape():
"""
Registers the surrogateescape error handler on Python 2 (only)
"""
if utils.PY3:
return
try:
codecs.lookup_error(FS_ERRORS)
except LookupError:
codecs.register_error(FS_ERRORS, surrogateescape_handler)
if __name__ == '__main__':
pass
# # Tests:
# register_surrogateescape()
# b = decodefilename(fn)
# assert b == encoded, "%r != %r" % (b, encoded)
# c = encodefilename(b)
# assert c == fn, '%r != %r' % (c, fn)
# # print("ok")
| mit |
peterbe/grymt | grymt.py | 1 | 13942 | #!/usr/bin/env python
import codecs
import datetime
import hashlib
import os
import re
import shutil
import subprocess
import cssmin
import jsmin
build_inline_regex = re.compile(
'(<\!--\s*build:(include)\s+([\w\$\-\./]*)\s*-->)',
re.MULTILINE | re.DOTALL
)
build_regex = re.compile(
'(<\!--\s*build:(\w+)\s+([\w\$\-\./]*)\s*-->(.*?)<\!--\s*endbuild\s-->)',
re.MULTILINE | re.DOTALL
)
src_regex = re.compile('src=["\']([^"\']+)["\']')
href_regex = re.compile('href=["\']([^"\']+)["\']')
html_comment_regex = re.compile('<\!--.*?-->', re.MULTILINE | re.DOTALL)
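# Illustrative only (markup inferred from the regexes above, not taken from the
# original repository): these are the kinds of HTML build blocks that
# build_inline_regex and build_regex are meant to match.
_EXAMPLE_BUILD_MARKUP = """
<!-- build:include /partials/head.html -->
<!-- build:js /js/app-$hash.js -->
<script src="/js/vendor.js"></script>
<script src="/js/app.js"></script>
<!-- endbuild -->
"""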
def _find_html_pages(source):
paths = []
for each in os.listdir(source):
path = os.path.join(source, each)
if os.path.isdir(path) and not each.startswith('.'):
paths.extend(_find_html_pages(path))
elif os.path.isfile(path) and path.endswith('.html'):
paths.append(path)
return paths
def read(path):
with codecs.open(path, 'r', 'utf-8') as f:
return f.read()
def write(path, content):
mkdir(os.path.dirname(path))
with codecs.open(path, 'w', 'utf-8') as f:
f.write(content)
def mkdir(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
"""
if os.path.isdir(newdir):
return
if os.path.isfile(newdir):
raise OSError("a file with the same name as the desired "
"dir, '%s', already exists." % newdir)
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
mkdir(head)
if tail:
os.mkdir(newdir)
def get_git_revision(short=False):
sha = subprocess.check_output('git rev-parse HEAD'.split()).strip()
if short:
sha = sha[:10]
return sha
def already_minified(filename):
for part in ('-min-', '-min.', '.min.', '.minified.', '.pack.', '-jsmin.'):
if part in filename:
return True
return False
def hash_all_css_images(css_code, rel_dir, source_dir, dest_dir):
def replacer(match):
filename = match.groups()[0]
if (filename.startswith('"') and filename.endswith('"')) or \
(filename.startswith("'") and filename.endswith("'")):
filename = filename[1:-1]
if 'data:image' in filename or filename.startswith('http://'):
return 'url("%s")' % filename
if filename == '.':
# this is a known IE hack in CSS
return 'url(".")'
# It's really quite common that the CSS file refers to a file
# that doesn't exist, because if you refer to an image in CSS for
# a selector you never use, you simply don't suffer.
# That's why we choose not to warn on nonexistent files.
new_filename = filename
full_path = os.path.abspath(os.path.join(rel_dir, filename))
if os.path.isfile(full_path):
hash = hashlib.md5(open(full_path, 'rb').read()).hexdigest()[:10]
a, b = os.path.splitext(filename)
new_filename = '%s-%s%s' % (a, hash, b)
new_filename = os.path.basename(new_filename)
new_filepath = os.path.abspath(os.path.join(dest_dir, new_filename))
mkdir(os.path.dirname(new_filepath))
shutil.copyfile(full_path, new_filepath)
return match.group().replace(filename, new_filename)
_regex = re.compile('url\(([^\)]+)\)')
css_code = _regex.sub(replacer, css_code)
return css_code
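# For illustration (assumed file layout, not from the original source): with a
# stylesheet containing ``background: url(img/logo.png)`` and an existing
# ``img/logo.png``, the replacer above copies the image into the destination
# directory and rewrites the reference to ``url(logo-<md5 prefix>.png)``.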
def minify_javascript(code):
try:
p = subprocess.Popen(
['uglifyjs'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate(input=code.encode('utf-8'))
return stdout
except OSError:
return jsmin.jsmin(code)
class Page(object):
def __init__(self, path, source_directory, output_directory,
compress_js=True, compress_css=True,
inline_js=False, inline_css=True,
remove_html_comments=False,
git_revision=None):
self.path = path
self.source_directory = source_directory
if not output_directory.endswith('/'):
output_directory += '/'
self.output_directory = output_directory
self.compress_js = compress_js
self.compress_css = compress_css
self.inline_js = inline_js
self.inline_css = inline_css
self.remove_html_comments = remove_html_comments
self.processed_files = [path]
self.git_revision = git_revision
def _parse_html(self):
content = read(self.path)
for whole, type_, parameter in build_inline_regex.findall(content):
if type_ == 'include':
if parameter.startswith('/'):
parameter = parameter[1:]
file_path = os.path.join(os.path.dirname(self.path), parameter)
with open(file_path) as f:
content = content.replace(whole, f.read())
else:
raise NotImplementedError(type_)
for whole, type_, destination_name, bulk in build_regex.findall(content):
if type_ == 'remove':
content = content.replace(whole, '')
continue
else:
output_directory = self.output_directory
destination_name_dir = os.path.dirname(destination_name)
if destination_name_dir.startswith('/'):
destination_name_dir = destination_name_dir[1:]
output_directory = os.path.join(
output_directory,
destination_name_dir
)
combined = []
template = None
if type_ == 'js':
if self.inline_js:
output_directory = self.output_directory
for src in src_regex.findall(bulk):
if src.startswith('/'):
path = self.source_directory + src
else:
path = os.path.join(self.source_directory, src)
this_content = read(path)
self.processed_files.append(path)
if not already_minified(os.path.basename(path)):
this_content = minify_javascript(this_content)
combined.append('/* %s */' % src)
combined.append(this_content.strip())
if self.inline_js:
template = '<script>%s</script>'
else:
tag_template = '<script src="%s"></script>'
elif type_ == 'css':
if self.inline_css:
output_directory = self.output_directory
for href in href_regex.findall(bulk):
if href.startswith('/'):
path = self.source_directory + href
else:
path = os.path.join(self.source_directory, href)
this_content = read(path)
this_content = hash_all_css_images(
this_content,
os.path.dirname(path),
self.source_directory,
output_directory
)
self.processed_files.append(path)
if not already_minified(os.path.basename(path)):
this_content = cssmin.cssmin(this_content)
combined.append('/* %s */' % href)
combined.append(this_content.strip())
if self.inline_css:
template = '<style>%s</style>'
else:
tag_template = '<link rel="stylesheet" href="%s">'
combined.append('') # so it ends with a newline
combined = '\n'.join(
[unicode(x, 'utf-8') if isinstance(x, str) else x
for x in combined]
)
if template:
content = content.replace(
whole,
template % combined
)
else:
if '$hash' in destination_name:
destination_name = destination_name.replace(
'$hash',
hashlib.md5(combined.encode('utf-8')).hexdigest()[:7]
)
if '$date' in destination_name:
destination_name = destination_name.replace(
'$date',
datetime.datetime.utcnow().strftime('%Y-%m-%d')
)
if destination_name.startswith('/'):
destination_name = destination_name[1:]
destination_path = os.path.join(
self.output_directory,
destination_name
)
write(destination_path, combined)
remove = self.output_directory
if remove.endswith('/'):
remove = remove[:-1]
destination_path = destination_path.replace(remove, '')
content = content.replace(
whole,
tag_template % destination_path
)
if self.remove_html_comments:
def comment_replacer(match):
group = match.group()
beginning = group[len('<!--'):].strip()
if beginning.startswith('!'):
return group.replace('<!--!', '<!--')
return ''
content = html_comment_regex.sub(comment_replacer, content)
else:
content = content.replace('<!--!', '<!--')
if '$git_revision_short' in content:
content = content.replace(
'$git_revision_short',
self.get_git_revision(short=True)
)
if '$git_revision' in content:
content = content.replace(
'$git_revision',
self.get_git_revision()
)
return content
def parse(self):
new_content = self._parse_html()
out_path = self.path.replace(
self.source_directory,
self.output_directory
)
write(out_path, new_content)
def get_git_revision(self, short=False):
if self.git_revision:
if short:
return self.git_revision[:10]
else:
return self.git_revision
else:
return get_git_revision(short=short)
def copy_files(source, dest, processed_files):
for each in os.listdir(source):
path = os.path.join(source, each)
if os.path.isdir(path):
copy_files(
path,
os.path.join(dest, each),
processed_files
)
elif each.endswith('~'):
pass
elif path not in processed_files:
mkdir(dest)
shutil.copyfile(path, os.path.join(dest, each))
def run(
source_directory,
output_directory='./dist',
wipe_first=False,
inline_js=False,
inline_css=False,
remove_html_comments=False,
git_revision=None,
):
if wipe_first:
assert output_directory not in source_directory
if os.path.isdir(output_directory):
shutil.rmtree(output_directory)
processed_files = []
if not source_directory:
raise ValueError("no directory to read from set")
if not os.path.isdir(source_directory):
raise IOError('%s is not a directory' % source_directory)
for html_file in _find_html_pages(source_directory):
page = Page(
html_file,
source_directory,
output_directory,
inline_js=inline_js,
inline_css=inline_css,
remove_html_comments=remove_html_comments,
git_revision=git_revision,
)
page.parse()
processed_files.extend(page.processed_files)
copy_files(source_directory, output_directory, processed_files)
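# A programmatic-usage sketch (the paths are assumptions, not from the original
# script): main() below drives run() from the command line, but it can also be
# called directly like this.
def _example_build():
    run(
        source_directory='./src',        # hypothetical tree containing .html pages
        output_directory='./dist',
        wipe_first=True,
        inline_css=True,
        remove_html_comments=True,
    )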
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'source_directory',
help='Where the raw stuff is',
)
parser.add_argument(
'-o',
'--output-directory',
help='Where the generated stuff goes (default ./dist)',
default='./dist',
)
parser.add_argument(
'-w',
'--wipe-first',
help='Clear output directory first',
default=False,
dest='wipe_first',
action='store_true'
)
parser.add_argument(
'--remove-html-comments',
help='Removes all HTML comments',
default=False,
dest='remove_html_comments',
action='store_true'
)
parser.add_argument(
'--inline-css',
help='Make all CSS inline',
default=False,
dest='inline_css',
action='store_true'
)
parser.add_argument(
'--inline-js',
help='Make all JS inline',
default=False,
dest='inline_js',
action='store_true'
)
parser.add_argument(
'--git-revision',
help='Known git revision sha to use',
default='',
)
args = parser.parse_args()
return run(
source_directory=args.source_directory,
output_directory=args.output_directory,
wipe_first=args.wipe_first,
inline_js=args.inline_js,
inline_css=args.inline_css,
remove_html_comments=args.remove_html_comments,
git_revision=args.git_revision,
)
if __name__ == '__main__':
import sys
sys.exit(main())
| mpl-2.0 |
carnell69/kuma | vendor/packages/pyflakes/messages.py | 34 | 3741 | """
Provide the class Message and its subclasses.
"""
class Message(object):
message = ''
message_args = ()
def __init__(self, filename, loc):
self.filename = filename
self.lineno = loc.lineno
self.col = getattr(loc, 'col_offset', 0)
def __str__(self):
return '%s:%s: %s' % (self.filename, self.lineno,
self.message % self.message_args)
class UnusedImport(Message):
message = '%r imported but unused'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class RedefinedWhileUnused(Message):
message = 'redefinition of unused %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class RedefinedInListComp(Message):
message = 'list comprehension redefines %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class ImportShadowedByLoopVar(Message):
message = 'import %r from line %r shadowed by loop variable'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class ImportStarUsed(Message):
message = "'from %s import *' used; unable to detect undefined names"
def __init__(self, filename, loc, modname):
Message.__init__(self, filename, loc)
self.message_args = (modname,)
class UndefinedName(Message):
message = 'undefined name %r'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class DoctestSyntaxError(Message):
message = 'syntax error in doctest'
def __init__(self, filename, loc, position=None):
Message.__init__(self, filename, loc)
if position:
(self.lineno, self.col) = position
self.message_args = ()
class UndefinedExport(Message):
message = 'undefined name %r in __all__'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class UndefinedLocal(Message):
message = ('local variable %r (defined in enclosing scope on line %r) '
'referenced before assignment')
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class DuplicateArgument(Message):
message = 'duplicate argument %r in function definition'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class LateFutureImport(Message):
message = 'future import(s) %r after other statements'
def __init__(self, filename, loc, names):
Message.__init__(self, filename, loc)
self.message_args = (names,)
class UnusedVariable(Message):
"""
Indicates that a variable has been explicitly assigned to but not actually
used.
"""
message = 'local variable %r is assigned to but never used'
def __init__(self, filename, loc, names):
Message.__init__(self, filename, loc)
self.message_args = (names,)
class ReturnWithArgsInsideGenerator(Message):
"""
Indicates a return statement with arguments inside a generator.
"""
message = '\'return\' with argument inside generator'
class ReturnOutsideFunction(Message):
"""
Indicates a return statement outside of a function/method.
"""
message = '\'return\' outside function'
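# Illustrative usage sketch (the fake location class is an assumption, not part
# of pyflakes): any object exposing ``lineno`` (and optionally ``col_offset``),
# such as an AST node, can serve as the ``loc`` argument.
class _FakeLoc(object):
    lineno = 3
    col_offset = 4

_example = UnusedImport('example.py', _FakeLoc(), 'os')
assert str(_example) == "example.py:3: 'os' imported but unused"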
| mpl-2.0 |
NeovaHealth/odoo | openerp/service/wsgi_server.py | 335 | 9490 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
WSGI stack, common code.
"""
import httplib
import urllib
import xmlrpclib
import StringIO
import errno
import logging
import platform
import socket
import sys
import threading
import traceback
import werkzeug.serving
import werkzeug.contrib.fixers
import openerp
import openerp.tools.config as config
import websrv_lib
_logger = logging.getLogger(__name__)
# XML-RPC fault codes. Some care must be taken when changing these: the
# constants are also defined client-side and must remain in sync.
# User code must use the exceptions defined in ``openerp.exceptions`` (not
# create directly ``xmlrpclib.Fault`` objects).
RPC_FAULT_CODE_CLIENT_ERROR = 1 # indistinguishable from app. error.
RPC_FAULT_CODE_APPLICATION_ERROR = 1
RPC_FAULT_CODE_WARNING = 2
RPC_FAULT_CODE_ACCESS_DENIED = 3
RPC_FAULT_CODE_ACCESS_ERROR = 4
def xmlrpc_return(start_response, service, method, params, string_faultcode=False):
"""
Helper to call a service's method with some params, using a wsgi-supplied
``start_response`` callback.
This is the place to look at to see the mapping between core exceptions
and XML-RPC fault codes.
"""
# Map OpenERP core exceptions to XML-RPC fault codes. Specific exceptions
# defined in ``openerp.exceptions`` are mapped to specific fault codes;
# all the other exceptions are mapped to the generic
# RPC_FAULT_CODE_APPLICATION_ERROR value.
# This also mimics SimpleXMLRPCDispatcher._marshaled_dispatch() for
# exception handling.
try:
result = openerp.http.dispatch_rpc(service, method, params)
response = xmlrpclib.dumps((result,), methodresponse=1, allow_none=False, encoding=None)
except Exception, e:
if string_faultcode:
response = xmlrpc_handle_exception_string(e)
else:
response = xmlrpc_handle_exception_int(e)
start_response("200 OK", [('Content-Type','text/xml'), ('Content-Length', str(len(response)))])
return [response]
def xmlrpc_handle_exception_int(e):
if isinstance(e, openerp.osv.orm.except_orm): # legacy
fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, openerp.tools.ustr(e.value))
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.Warning) or isinstance(e, openerp.exceptions.RedirectWarning):
fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, str(e))
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance (e, openerp.exceptions.AccessError):
fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_ERROR, str(e))
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.AccessDenied):
fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.DeferredException):
info = e.traceback
# Which one is the best ?
formatted_info = "".join(traceback.format_exception(*info))
#formatted_info = openerp.tools.exception_to_unicode(e) + '\n' + info
fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
else:
if hasattr(e, 'message') and e.message == 'AccessDenied': # legacy
fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
else:
info = sys.exc_info()
# Which one is the best ?
formatted_info = "".join(traceback.format_exception(*info))
#formatted_info = openerp.tools.exception_to_unicode(e) + '\n' + info
fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
response = xmlrpclib.dumps(fault, allow_none=None, encoding=None)
return response
def xmlrpc_handle_exception_string(e):
if isinstance(e, openerp.osv.orm.except_orm):
fault = xmlrpclib.Fault('warning -- ' + e.name + '\n\n' + e.value, '')
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.Warning):
fault = xmlrpclib.Fault('warning -- Warning\n\n' + str(e), '')
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.AccessError):
fault = xmlrpclib.Fault('warning -- AccessError\n\n' + str(e), '')
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.AccessDenied):
fault = xmlrpclib.Fault('AccessDenied', str(e))
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.DeferredException):
info = e.traceback
formatted_info = "".join(traceback.format_exception(*info))
fault = xmlrpclib.Fault(openerp.tools.ustr(e.message), formatted_info)
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
else:
info = sys.exc_info()
formatted_info = "".join(traceback.format_exception(*info))
fault = xmlrpclib.Fault(openerp.tools.exception_to_unicode(e), formatted_info)
response = xmlrpclib.dumps(fault, allow_none=None, encoding=None)
return response
def wsgi_xmlrpc(environ, start_response):
""" Two routes are available for XML-RPC
/xmlrpc/<service> route returns faultCode as strings. This is a historic
violation of the protocol kept for compatibility.
/xmlrpc/2/<service> is a new route that returns faultCode as int and is
therefore fully compliant.
"""
if environ['REQUEST_METHOD'] == 'POST' and environ['PATH_INFO'].startswith('/xmlrpc/'):
length = int(environ['CONTENT_LENGTH'])
data = environ['wsgi.input'].read(length)
# Distinguish between the 2 faultCode modes
string_faultcode = True
if environ['PATH_INFO'].startswith('/xmlrpc/2/'):
service = environ['PATH_INFO'][len('/xmlrpc/2/'):]
string_faultcode = False
else:
service = environ['PATH_INFO'][len('/xmlrpc/'):]
params, method = xmlrpclib.loads(data)
return xmlrpc_return(start_response, service, method, params, string_faultcode)
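# Client-side sketch (the URL is an assumption, not part of this module): both
# routes are reached with a plain xmlrpclib proxy; only the faultCode format of
# errors differs between them.
def _example_xmlrpc_client(url='http://localhost:8069'):
    common = xmlrpclib.ServerProxy(url + '/xmlrpc/2/common')
    return common.version()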
# WSGI handlers registered through the register_wsgi_handler() function below.
module_handlers = []
# RPC endpoints registered through the register_rpc_endpoint() function below.
rpc_handlers = {}
def register_wsgi_handler(handler):
""" Register a WSGI handler.
Handlers are tried in the order they are added. We might provide a way to
register a handler for specific routes later.
"""
module_handlers.append(handler)
def register_rpc_endpoint(endpoint, handler):
""" Register a handler for a given RPC enpoint.
"""
rpc_handlers[endpoint] = handler
def application_unproxied(environ, start_response):
""" WSGI entry point."""
# cleanup db/uid trackers - they're set at HTTP dispatch in
# web.session.OpenERPSession.send() and at RPC dispatch in
# openerp.service.web_services.objects_proxy.dispatch().
# /!\ The cleanup cannot be done at the end of this `application`
# method because werkzeug still produces relevant logging afterwards
if hasattr(threading.current_thread(), 'uid'):
del threading.current_thread().uid
if hasattr(threading.current_thread(), 'dbname'):
del threading.current_thread().dbname
with openerp.api.Environment.manage():
# Try all handlers until one returns some result (i.e. not None).
wsgi_handlers = [wsgi_xmlrpc]
wsgi_handlers += module_handlers
for handler in wsgi_handlers:
result = handler(environ, start_response)
if result is None:
continue
return result
# We never returned from the loop.
response = 'No handler found.\n'
start_response('404 Not Found', [('Content-Type', 'text/plain'), ('Content-Length', str(len(response)))])
return [response]
def application(environ, start_response):
if config['proxy_mode'] and 'HTTP_X_FORWARDED_HOST' in environ:
return werkzeug.contrib.fixers.ProxyFix(application_unproxied)(environ, start_response)
else:
return application_unproxied(environ, start_response)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
arista-eosplus/ansible-modules-extras | cloud/amazon/ec2_win_password.py | 62 | 4838 | #!/usr/bin/python
DOCUMENTATION = '''
---
module: ec2_win_password
short_description: gets the default administrator password for ec2 windows instances
description:
- Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto.
version_added: "2.0"
author: "Rick Mendes (@rickmendes)"
options:
instance_id:
description:
- The instance id to get the password data from.
required: true
key_file:
description:
- Path to the file containing the key pair used on the instance.
required: true
key_passphrase:
version_added: "2.0"
description:
- The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3.
required: false
default: null
region:
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
wait:
version_added: "2.0"
description:
- Whether or not to wait for the password to be available before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
version_added: "2.0"
description:
- Number of seconds to wait before giving up.
required: false
default: 120
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Example of getting a password
tasks:
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_test_key.pem"
# Example of getting a password with a password protected key
tasks:
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_protected_test_key.pem"
key_passphrase: "secret"
# Example of waiting for a password
tasks:
- name: get the Administrator password
ec2_win_password:
profile: my-boto-profile
instance_id: i-XXXXXX
region: us-east-1
key_file: "~/aws-creds/my_test_key.pem"
wait: yes
wait_timeout: 45
'''
from base64 import b64decode
from os.path import expanduser
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
import datetime
import time
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id = dict(required=True),
key_file = dict(required=True),
key_passphrase = dict(no_log=True, default=None, required=False),
wait = dict(type='bool', default=False, required=False),
wait_timeout = dict(default=120, required=False),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='Boto required for this module.')
instance_id = module.params.get('instance_id')
key_file = expanduser(module.params.get('key_file'))
key_passphrase = module.params.get('key_passphrase')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
ec2 = ec2_connect(module)
if wait:
start = datetime.datetime.now()
end = start + datetime.timedelta(seconds=wait_timeout)
while datetime.datetime.now() < end:
data = ec2.get_password_data(instance_id)
decoded = b64decode(data)
if wait and not decoded:
time.sleep(5)
else:
break
else:
data = ec2.get_password_data(instance_id)
decoded = b64decode(data)
if wait and datetime.datetime.now() >= end:
module.fail_json(msg = "wait for password timeout after %d seconds" % wait_timeout)
f = open(key_file, 'r')
key = RSA.importKey(f.read(), key_passphrase)
cipher = PKCS1_v1_5.new(key)
sentinel = 'password decryption failed!!!'
try:
decrypted = cipher.decrypt(decoded, sentinel)
except ValueError as e:
decrypted = None
if decrypted == None:
module.exit_json(win_password='', changed=False)
else:
if wait:
elapsed = datetime.datetime.now() - start
module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
else:
module.exit_json(win_password=decrypted, changed=True)
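# Standalone sketch of the decryption step above (the default key path is
# hypothetical, not part of this module): given the base64 password data
# returned by EC2 and the instance's private key, this performs the same
# PKCS#1 v1.5 unwrap as main().
def _decrypt_password_data(password_data, key_path='~/aws-creds/my_test_key.pem', passphrase=None):
    with open(expanduser(key_path)) as key_file:
        rsa_key = RSA.importKey(key_file.read(), passphrase)
    return PKCS1_v1_5.new(rsa_key).decrypt(b64decode(password_data), 'decryption failed')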
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
pratapvardhan/pandas | asv_bench/benchmarks/reshape.py | 3 | 3829 | from itertools import product
import numpy as np
from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long
from .pandas_vb_common import setup # noqa
class Melt(object):
goal_time = 0.2
def setup(self):
self.df = DataFrame(np.random.randn(10000, 3), columns=['A', 'B', 'C'])
self.df['id1'] = np.random.randint(0, 10, 10000)
self.df['id2'] = np.random.randint(100, 1000, 10000)
def time_melt_dataframe(self):
melt(self.df, id_vars=['id1', 'id2'])
class Pivot(object):
goal_time = 0.2
def setup(self):
N = 10000
index = date_range('1/1/2000', periods=N, freq='h')
data = {'value': np.random.randn(N * 50),
'variable': np.arange(50).repeat(N),
'date': np.tile(index.values, 50)}
self.df = DataFrame(data)
def time_reshape_pivot_time_series(self):
self.df.pivot('date', 'variable', 'value')
class SimpleReshape(object):
goal_time = 0.2
def setup(self):
arrays = [np.arange(100).repeat(100),
np.roll(np.tile(np.arange(100), 100), 25)]
index = MultiIndex.from_arrays(arrays)
self.df = DataFrame(np.random.randn(10000, 4), index=index)
self.udf = self.df.unstack(1)
def time_stack(self):
self.udf.stack()
def time_unstack(self):
self.df.unstack(1)
class Unstack(object):
goal_time = 0.2
def setup(self):
m = 100
n = 1000
levels = np.arange(m)
index = MultiIndex.from_product([levels] * 2)
columns = np.arange(n)
values = np.arange(m * m * n).reshape(m * m, n)
self.df = DataFrame(values, index, columns)
self.df2 = self.df.iloc[:-1]
def time_full_product(self):
self.df.unstack()
def time_without_last_row(self):
self.df2.unstack()
class SparseIndex(object):
goal_time = 0.2
def setup(self):
NUM_ROWS = 1000
self.df = DataFrame({'A': np.random.randint(50, size=NUM_ROWS),
'B': np.random.randint(50, size=NUM_ROWS),
'C': np.random.randint(-10, 10, size=NUM_ROWS),
'D': np.random.randint(-10, 10, size=NUM_ROWS),
'E': np.random.randint(10, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
self.df = self.df.set_index(['A', 'B', 'C', 'D', 'E'])
def time_unstack(self):
self.df.unstack()
class WideToLong(object):
goal_time = 0.2
def setup(self):
nyrs = 20
nidvars = 20
N = 5000
self.letters = list('ABCD')
yrvars = [l + str(num)
for l, num in product(self.letters, range(1, nyrs + 1))]
columns = [str(i) for i in range(nidvars)] + yrvars
self.df = DataFrame(np.random.randn(N, nidvars + len(yrvars)),
columns=columns)
self.df['id'] = self.df.index
def time_wide_to_long_big(self):
wide_to_long(self.df, self.letters, i='id', j='year')
class PivotTable(object):
goal_time = 0.2
def setup(self):
N = 100000
fac1 = np.array(['A', 'B', 'C'], dtype='O')
fac2 = np.array(['one', 'two'], dtype='O')
ind1 = np.random.randint(0, 3, size=N)
ind2 = np.random.randint(0, 2, size=N)
self.df = DataFrame({'key1': fac1.take(ind1),
'key2': fac2.take(ind2),
'key3': fac2.take(ind2),
'value1': np.random.randn(N),
'value2': np.random.randn(N),
'value3': np.random.randn(N)})
def time_pivot_table(self):
self.df.pivot_table(index='key1', columns=['key2', 'key3'])
| bsd-3-clause |
carmine/open-kilda | services/topology-engine/queue-engine/tests/smoke-tests/deploy-flow-rules.py | 2 | 1675 | #!/usr/bin/env python
# Copyright 2017 Telstra Open Source
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests
from base64 import b64encode
url = "http://localhost:8088/api/v1/flows"
headers = {
'Content-Type': 'application/json',
'correlation_id': 'deploy-flow-1',
'Authorization': 'Basic %s' % b64encode(b"kilda:kilda").decode("ascii")
}
#
# This models one of the first flows used by ATDD. It sends the request to the NB API so that
# kilda will construct the flow path rules.
# TODO: would be better to pull from the same data, to ensure the code bases stay synchronized;
# at the moment, this is hardcoded here, and ATDD has a separate source.
#
j_data = {"flowid":"c3none",
"source":
{"switch-id":"de:ad:be:ef:00:00:00:02", "port-id":1, "vlan-id":0},
"destination":
{"switch-id":"de:ad:be:ef:00:00:00:04", "port-id":2, "vlan-id":0},
"maximum-bandwidth":10000,
"description":"c3none",
"last-updated":"null"}
result = requests.put(url, json=j_data, headers=headers)
print result.status_code
print result.text
| apache-2.0 |
scripnichenko/nova | nova/tests/unit/keymgr/test_not_implemented_key_mgr.py | 79 | 1697 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test cases for the not implemented key manager.
"""
from nova.keymgr import not_implemented_key_mgr
from nova.tests.unit.keymgr import test_key_mgr
class NotImplementedKeyManagerTestCase(test_key_mgr.KeyManagerTestCase):
def _create_key_manager(self):
return not_implemented_key_mgr.NotImplementedKeyManager()
def test_create_key(self):
self.assertRaises(NotImplementedError,
self.key_mgr.create_key, None)
def test_store_key(self):
self.assertRaises(NotImplementedError,
self.key_mgr.store_key, None, None)
def test_copy_key(self):
self.assertRaises(NotImplementedError,
self.key_mgr.copy_key, None, None)
def test_get_key(self):
self.assertRaises(NotImplementedError,
self.key_mgr.get_key, None, None)
def test_delete_key(self):
self.assertRaises(NotImplementedError,
self.key_mgr.delete_key, None, None)
| apache-2.0 |
anthonyfok/frescobaldi | frescobaldi_app/toplevel.py | 5 | 1156 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Importing this module makes all other modules and packages inside this
package available as toplevel modules and packages.
"""
import os
import sys
def install():
from . import __path__ as path
sys.path[0:0] = map(os.path.abspath, path)
del path[:]
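# Usage sketch (the call site is an assumption, not shown in this file): the
# application's startup code is expected to do something like
#
#   import toplevel
#   toplevel.install()
#
# once, so that modules and packages inside this package become importable as
# top-level names.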
| gpl-2.0 |
currychou/1 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/SDL.py | 603 | 1813 | from browser import document
SDL_INIT_VIDEO=0
SDL_GL_DOUBLEBUFFER=1
SDL_GL_DEPTH_SIZE=2
SDL_DOUBLEBUF=3
SDL_ANYFORMAT=4
SDL_ACTIVEEVENT=5
SDL_ALLEVENTS=5
SDL_KEYDOWN=6
SDL_KEYUP=7
SDL_MOUSEMOTION=8
SDL_MOUSEBUTTONDOWN=9
SDL_MOUSEBUTTONUP=10
SDL_JOYAXISMOTION=11
SDL_JOYBALLMOTION=12
SDL_JOYHATMOTION=13
SDL_JOYBUTTONUP=14
SDL_JOYBUTTONDOWN=15
SDL_QUIT=16
SDL_SYSWMEVENT=17
SDL_VIDEORESIZE=18
SDL_VIDEOEXPOSE=19
SDL_NOEVENT=20
SDL_GETEVENT=21
SDL_OPENGL=False
def SDL_WasInit(var):
return True
_attrs={}
_wm={}
def SDL_PeepEvents(num, event, mask):
pass
def SDL_GL_SetAttribute(variable, value):
_attrs[variable]=value
def SDL_GL_GetAttribute(variable):
return _attrs.getvalue(variable, None)
def SDL_GL_SetVideoMode(width, height, depth, flags):
pass
def SDL_WM_SetCaption(title, icontitle):
_wm['title']=title
_wm['icontitle']=icontitle
def SDL_PumpEvents():
pass
def SDL_SetVideoMode(width, height, depth, flags):
pass
def SDL_SetColorKey(surface, key, value):
pass
def SDL_WM_GetCaption():
return _wm.get('title', ''), _wm.get('icontitle', '')
def SDL_UpdateRect(screen, x1, y1, x2, y2):
screen.canvas.style.width=screen.canvas.style.width
def SDL_UpdateRects(screen, rects):
for _rect in rects:
SDL_UpdateRect(screen, _rect)
def SDL_GetVideoSurface():
return _Screen
def SDL_GetVideoInfo():
return
def SDL_VideoModeOK(width, height, depth, flags):
pass
def SDL_SetPalette(surface, sdl_var, colors, flag):
pass
class Screen:
def __init__(self):
self.flags=0
@property
def canvas(self):
return document.get(selector='canvas')[0]
_Screen=Screen()
class SDL_Rect:
def __init__(self, x, y, w, h):
self.x=x
self.y=y
self.w=w
self.h=h
def SDL_Flip(screen):
pass
| gpl-3.0 |
CubicERP/odoo | addons/fleet/fleet.py | 266 | 46376 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import time
import datetime
from openerp import tools
from openerp.osv.orm import except_orm
from openerp.tools.translate import _
from dateutil.relativedelta import relativedelta
def str_to_datetime(strdate):
return datetime.datetime.strptime(strdate, tools.DEFAULT_SERVER_DATE_FORMAT)
class fleet_vehicle_cost(osv.Model):
_name = 'fleet.vehicle.cost'
_description = 'Cost related to a vehicle'
_order = 'date desc, vehicle_id asc'
def _get_odometer(self, cr, uid, ids, odometer_id, arg, context):
res = dict.fromkeys(ids, False)
for record in self.browse(cr,uid,ids,context=context):
if record.odometer_id:
res[record.id] = record.odometer_id.value
return res
def _set_odometer(self, cr, uid, id, name, value, args=None, context=None):
if not value:
raise except_orm(_('Operation not allowed!'), _('Emptying the odometer value of a vehicle is not allowed.'))
date = self.browse(cr, uid, id, context=context).date
if not(date):
date = fields.date.context_today(self, cr, uid, context=context)
vehicle_id = self.browse(cr, uid, id, context=context).vehicle_id
data = {'value': value, 'date': date, 'vehicle_id': vehicle_id.id}
odometer_id = self.pool.get('fleet.vehicle.odometer').create(cr, uid, data, context=context)
return self.write(cr, uid, id, {'odometer_id': odometer_id}, context=context)
_columns = {
'name': fields.related('vehicle_id', 'name', type="char", string='Name', store=True),
'vehicle_id': fields.many2one('fleet.vehicle', 'Vehicle', required=True, help='Vehicle concerned by this log'),
'cost_subtype_id': fields.many2one('fleet.service.type', 'Type', help='Cost type purchased with this cost'),
'amount': fields.float('Total Price'),
'cost_type': fields.selection([('contract', 'Contract'), ('services','Services'), ('fuel','Fuel'), ('other','Other')], 'Category of the cost', help='For internal purpose only', required=True),
'parent_id': fields.many2one('fleet.vehicle.cost', 'Parent', help='Parent cost to this current cost'),
'cost_ids': fields.one2many('fleet.vehicle.cost', 'parent_id', 'Included Services'),
'odometer_id': fields.many2one('fleet.vehicle.odometer', 'Odometer', help='Odometer measure of the vehicle at the moment of this log'),
'odometer': fields.function(_get_odometer, fnct_inv=_set_odometer, type='float', string='Odometer Value', help='Odometer measure of the vehicle at the moment of this log'),
'odometer_unit': fields.related('vehicle_id', 'odometer_unit', type="char", string="Unit", readonly=True),
'date' :fields.date('Date',help='Date when the cost has been executed'),
'contract_id': fields.many2one('fleet.vehicle.log.contract', 'Contract', help='Contract attached to this cost'),
'auto_generated': fields.boolean('Automatically Generated', readonly=True, required=True),
}
_defaults ={
'cost_type': 'other',
}
def create(self, cr, uid, data, context=None):
#make sure that the data are consistent with values of parent and contract records given
if 'parent_id' in data and data['parent_id']:
parent = self.browse(cr, uid, data['parent_id'], context=context)
data['vehicle_id'] = parent.vehicle_id.id
data['date'] = parent.date
data['cost_type'] = parent.cost_type
if 'contract_id' in data and data['contract_id']:
contract = self.pool.get('fleet.vehicle.log.contract').browse(cr, uid, data['contract_id'], context=context)
data['vehicle_id'] = contract.vehicle_id.id
data['cost_subtype_id'] = contract.cost_subtype_id.id
data['cost_type'] = contract.cost_type
if 'odometer' in data and not data['odometer']:
#if the received value for odometer is 0, then remove it from the data as it would result in the creation of an
#odometer log with 0, which is to be avoided
del(data['odometer'])
return super(fleet_vehicle_cost, self).create(cr, uid, data, context=context)
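    def _example_register_contract_cost(self, cr, uid, contract_id, amount, context=None):
        # Hypothetical helper (not part of the original module) illustrating the
        # create() override above: when a contract_id is given, vehicle_id,
        # cost_subtype_id and cost_type are filled in from that contract, so only
        # the link and the amount need to be supplied here.
        return self.create(cr, uid, {'contract_id': contract_id, 'amount': amount}, context=context)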
class fleet_vehicle_tag(osv.Model):
_name = 'fleet.vehicle.tag'
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
class fleet_vehicle_state(osv.Model):
_name = 'fleet.vehicle.state'
_order = 'sequence asc'
_columns = {
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', help="Used to order the note stages")
}
_sql_constraints = [('fleet_state_name_unique','unique(name)', 'State name already exists')]
class fleet_vehicle_model(osv.Model):
def _model_name_get_fnc(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
name = record.modelname
if record.brand_id.name:
name = record.brand_id.name + ' / ' + name
res[record.id] = name
return res
def on_change_brand(self, cr, uid, ids, model_id, context=None):
if not model_id:
return {'value': {'image_medium': False}}
brand = self.pool.get('fleet.vehicle.model.brand').browse(cr, uid, model_id, context=context)
return {
'value': {
'image_medium': brand.image,
}
}
_name = 'fleet.vehicle.model'
_description = 'Model of a vehicle'
_order = 'name asc'
_columns = {
'name': fields.function(_model_name_get_fnc, type="char", string='Name', store=True),
'modelname': fields.char('Model name', required=True),
'brand_id': fields.many2one('fleet.vehicle.model.brand', 'Model Brand', required=True, help='Brand of the vehicle'),
'vendors': fields.many2many('res.partner', 'fleet_vehicle_model_vendors', 'model_id', 'partner_id', string='Vendors'),
'image': fields.related('brand_id', 'image', type="binary", string="Logo"),
'image_medium': fields.related('brand_id', 'image_medium', type="binary", string="Logo (medium)"),
'image_small': fields.related('brand_id', 'image_small', type="binary", string="Logo (small)"),
}
class fleet_vehicle_model_brand(osv.Model):
_name = 'fleet.vehicle.model.brand'
_description = 'Brand model of the vehicle'
_order = 'name asc'
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
'name': fields.char('Brand Name', required=True),
'image': fields.binary("Logo",
help="This field holds the image used as logo for the brand, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized photo", type="binary", multi="_get_image",
store = {
'fleet.vehicle.model.brand': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized logo of the brand. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Smal-sized photo", type="binary", multi="_get_image",
store = {
'fleet.vehicle.model.brand': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized photo of the brand. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
}
class fleet_vehicle(osv.Model):
_inherit = 'mail.thread'
def _vehicle_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
res[record.id] = record.model_id.brand_id.name + '/' + record.model_id.modelname + ' / ' + record.license_plate
return res
def return_action_to_open(self, cr, uid, ids, context=None):
""" This opens the xml view specified in xml_id for the current vehicle """
if context is None:
context = {}
if context.get('xml_id'):
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid ,'fleet', context['xml_id'], context=context)
res['context'] = context
res['context'].update({'default_vehicle_id': ids[0]})
res['domain'] = [('vehicle_id','=', ids[0])]
return res
return False
def act_show_log_cost(self, cr, uid, ids, context=None):
""" This opens log view to view and add new log for this vehicle, groupby default to only show effective costs
@return: the costs log view
"""
if context is None:
context = {}
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid ,'fleet','fleet_vehicle_costs_act', context=context)
res['context'] = context
res['context'].update({
'default_vehicle_id': ids[0],
'search_default_parent_false': True
})
res['domain'] = [('vehicle_id','=', ids[0])]
return res
def _get_odometer(self, cr, uid, ids, odometer_id, arg, context):
res = dict.fromkeys(ids, 0)
for record in self.browse(cr,uid,ids,context=context):
ids = self.pool.get('fleet.vehicle.odometer').search(cr, uid, [('vehicle_id', '=', record.id)], limit=1, order='value desc')
if len(ids) > 0:
res[record.id] = self.pool.get('fleet.vehicle.odometer').browse(cr, uid, ids[0], context=context).value
return res
def _set_odometer(self, cr, uid, id, name, value, args=None, context=None):
if value:
date = fields.date.context_today(self, cr, uid, context=context)
data = {'value': value, 'date': date, 'vehicle_id': id}
return self.pool.get('fleet.vehicle.odometer').create(cr, uid, data, context=context)
def _search_get_overdue_contract_reminder(self, cr, uid, obj, name, args, context):
res = []
for field, operator, value in args:
assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
if (operator == '=' and value == True) or (operator in ('<>', '!=') and value == False):
search_operator = 'in'
else:
search_operator = 'not in'
today = fields.date.context_today(self, cr, uid, context=context)
cr.execute('select cost.vehicle_id, count(contract.id) as contract_number FROM fleet_vehicle_cost cost left join fleet_vehicle_log_contract contract on contract.cost_id = cost.id WHERE contract.expiration_date is not null AND contract.expiration_date < %s AND contract.state IN (\'open\', \'toclose\') GROUP BY cost.vehicle_id', (today,))
res_ids = [x[0] for x in cr.fetchall()]
res.append(('id', search_operator, res_ids))
return res
def _search_contract_renewal_due_soon(self, cr, uid, obj, name, args, context):
res = []
for field, operator, value in args:
assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
if (operator == '=' and value == True) or (operator in ('<>', '!=') and value == False):
search_operator = 'in'
else:
search_operator = 'not in'
today = fields.date.context_today(self, cr, uid, context=context)
datetime_today = datetime.datetime.strptime(today, tools.DEFAULT_SERVER_DATE_FORMAT)
limit_date = str((datetime_today + relativedelta(days=+15)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT))
cr.execute('select cost.vehicle_id, count(contract.id) as contract_number FROM fleet_vehicle_cost cost left join fleet_vehicle_log_contract contract on contract.cost_id = cost.id WHERE contract.expiration_date is not null AND contract.expiration_date > %s AND contract.expiration_date < %s AND contract.state IN (\'open\', \'toclose\') GROUP BY cost.vehicle_id', (today, limit_date))
res_ids = [x[0] for x in cr.fetchall()]
res.append(('id', search_operator, res_ids))
return res
def _get_contract_reminder_fnc(self, cr, uid, ids, field_names, unknow_none, context=None):
res= {}
for record in self.browse(cr, uid, ids, context=context):
overdue = False
due_soon = False
total = 0
name = ''
for element in record.log_contracts:
if element.state in ('open', 'toclose') and element.expiration_date:
current_date_str = fields.date.context_today(self, cr, uid, context=context)
due_time_str = element.expiration_date
current_date = str_to_datetime(current_date_str)
due_time = str_to_datetime(due_time_str)
diff_time = (due_time-current_date).days
if diff_time < 0:
overdue = True
total += 1
if diff_time < 15 and diff_time >= 0:
due_soon = True;
total += 1
if overdue or due_soon:
ids = self.pool.get('fleet.vehicle.log.contract').search(cr,uid,[('vehicle_id', '=', record.id), ('state', 'in', ('open', 'toclose'))], limit=1, order='expiration_date asc')
if len(ids) > 0:
#we display only the name of the oldest overdue/due soon contract
name=(self.pool.get('fleet.vehicle.log.contract').browse(cr, uid, ids[0], context=context).cost_subtype_id.name)
res[record.id] = {
'contract_renewal_overdue': overdue,
'contract_renewal_due_soon': due_soon,
'contract_renewal_total': (total - 1), #we remove 1 from the real total for display purposes
'contract_renewal_name': name,
}
return res
def _get_default_state(self, cr, uid, context):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'vehicle_state_active')
except ValueError:
model_id = False
return model_id
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
Odometer = self.pool['fleet.vehicle.odometer']
LogFuel = self.pool['fleet.vehicle.log.fuel']
LogService = self.pool['fleet.vehicle.log.services']
LogContract = self.pool['fleet.vehicle.log.contract']
Cost = self.pool['fleet.vehicle.cost']
return {
vehicle_id: {
'odometer_count': Odometer.search_count(cr, uid, [('vehicle_id', '=', vehicle_id)], context=context),
'fuel_logs_count': LogFuel.search_count(cr, uid, [('vehicle_id', '=', vehicle_id)], context=context),
'service_count': LogService.search_count(cr, uid, [('vehicle_id', '=', vehicle_id)], context=context),
'contract_count': LogContract.search_count(cr, uid, [('vehicle_id', '=', vehicle_id)], context=context),
'cost_count': Cost.search_count(cr, uid, [('vehicle_id', '=', vehicle_id), ('parent_id', '=', False)], context=context)
}
for vehicle_id in ids
}
_name = 'fleet.vehicle'
_description = 'Information on a vehicle'
_order= 'license_plate asc'
_columns = {
'name': fields.function(_vehicle_name_get_fnc, type="char", string='Name', store=True),
'company_id': fields.many2one('res.company', 'Company'),
'license_plate': fields.char('License Plate', required=True, help='License plate number of the vehicle (ie: plate number for a car)'),
'vin_sn': fields.char('Chassis Number', help='Unique number written on the vehicle motor (VIN/SN number)', copy=False),
'driver_id': fields.many2one('res.partner', 'Driver', help='Driver of the vehicle'),
'model_id': fields.many2one('fleet.vehicle.model', 'Model', required=True, help='Model of the vehicle'),
'log_fuel': fields.one2many('fleet.vehicle.log.fuel', 'vehicle_id', 'Fuel Logs'),
'log_services': fields.one2many('fleet.vehicle.log.services', 'vehicle_id', 'Services Logs'),
'log_contracts': fields.one2many('fleet.vehicle.log.contract', 'vehicle_id', 'Contracts'),
'cost_count': fields.function(_count_all, type='integer', string="Costs" , multi=True),
'contract_count': fields.function(_count_all, type='integer', string='Contracts', multi=True),
'service_count': fields.function(_count_all, type='integer', string='Services', multi=True),
'fuel_logs_count': fields.function(_count_all, type='integer', string='Fuel Logs', multi=True),
'odometer_count': fields.function(_count_all, type='integer', string='Odometer', multi=True),
'acquisition_date': fields.date('Acquisition Date', required=False, help='Date when the vehicle has been bought'),
'color': fields.char('Color', help='Color of the vehicle'),
'state_id': fields.many2one('fleet.vehicle.state', 'State', help='Current state of the vehicle', ondelete="set null"),
'location': fields.char('Location', help='Location of the vehicle (garage, ...)'),
'seats': fields.integer('Seats Number', help='Number of seats of the vehicle'),
'doors': fields.integer('Doors Number', help='Number of doors of the vehicle'),
'tag_ids' :fields.many2many('fleet.vehicle.tag', 'fleet_vehicle_vehicle_tag_rel', 'vehicle_tag_id','tag_id', 'Tags', copy=False),
'odometer': fields.function(_get_odometer, fnct_inv=_set_odometer, type='float', string='Last Odometer', help='Odometer measure of the vehicle at the moment of this log'),
'odometer_unit': fields.selection([('kilometers', 'Kilometers'),('miles','Miles')], 'Odometer Unit', help='Unit of the odometer ',required=True),
'transmission': fields.selection([('manual', 'Manual'), ('automatic', 'Automatic')], 'Transmission', help='Transmission Used by the vehicle'),
'fuel_type': fields.selection([('gasoline', 'Gasoline'), ('diesel', 'Diesel'), ('electric', 'Electric'), ('hybrid', 'Hybrid')], 'Fuel Type', help='Fuel Used by the vehicle'),
'horsepower': fields.integer('Horsepower'),
'horsepower_tax': fields.float('Horsepower Taxation'),
'power': fields.integer('Power', help='Power in kW of the vehicle'),
'co2': fields.float('CO2 Emissions', help='CO2 emissions of the vehicle'),
'image': fields.related('model_id', 'image', type="binary", string="Logo"),
'image_medium': fields.related('model_id', 'image_medium', type="binary", string="Logo (medium)"),
'image_small': fields.related('model_id', 'image_small', type="binary", string="Logo (small)"),
'contract_renewal_due_soon': fields.function(_get_contract_reminder_fnc, fnct_search=_search_contract_renewal_due_soon, type="boolean", string='Has Contracts to renew', multi='contract_info'),
'contract_renewal_overdue': fields.function(_get_contract_reminder_fnc, fnct_search=_search_get_overdue_contract_reminder, type="boolean", string='Has Contracts Overdue', multi='contract_info'),
'contract_renewal_name': fields.function(_get_contract_reminder_fnc, type="text", string='Name of contract to renew soon', multi='contract_info'),
'contract_renewal_total': fields.function(_get_contract_reminder_fnc, type="integer", string='Total of contracts due or overdue minus one', multi='contract_info'),
'car_value': fields.float('Car Value', help='Value of the bought vehicle'),
}
_defaults = {
'doors': 5,
'odometer_unit': 'kilometers',
'state_id': _get_default_state,
}
def on_change_model(self, cr, uid, ids, model_id, context=None):
if not model_id:
return {}
model = self.pool.get('fleet.vehicle.model').browse(cr, uid, model_id, context=context)
return {
'value': {
'image_medium': model.image,
}
}
def create(self, cr, uid, data, context=None):
context = dict(context or {}, mail_create_nolog=True)
vehicle_id = super(fleet_vehicle, self).create(cr, uid, data, context=context)
vehicle = self.browse(cr, uid, vehicle_id, context=context)
self.message_post(cr, uid, [vehicle_id], body=_('%s %s has been added to the fleet!') % (vehicle.model_id.name,vehicle.license_plate), context=context)
return vehicle_id
def write(self, cr, uid, ids, vals, context=None):
"""
This function writes an entry in the openchatter whenever we change important information
on the vehicle, like the model, the driver, the state of the vehicle or its license plate
"""
for vehicle in self.browse(cr, uid, ids, context):
changes = []
if 'model_id' in vals and vehicle.model_id.id != vals['model_id']:
value = self.pool.get('fleet.vehicle.model').browse(cr,uid,vals['model_id'],context=context).name
oldmodel = vehicle.model_id.name or _('None')
changes.append(_("Model: from '%s' to '%s'") %(oldmodel, value))
if 'driver_id' in vals and vehicle.driver_id.id != vals['driver_id']:
value = self.pool.get('res.partner').browse(cr,uid,vals['driver_id'],context=context).name
olddriver = (vehicle.driver_id.name) or _('None')
changes.append(_("Driver: from '%s' to '%s'") %(olddriver, value))
if 'state_id' in vals and vehicle.state_id.id != vals['state_id']:
value = self.pool.get('fleet.vehicle.state').browse(cr,uid,vals['state_id'],context=context).name
oldstate = vehicle.state_id.name or _('None')
changes.append(_("State: from '%s' to '%s'") %(oldstate, value))
if 'license_plate' in vals and vehicle.license_plate != vals['license_plate']:
old_license_plate = vehicle.license_plate or _('None')
changes.append(_("License Plate: from '%s' to '%s'") %(old_license_plate, vals['license_plate']))
if len(changes) > 0:
self.message_post(cr, uid, [vehicle.id], body=", ".join(changes), context=context)
super(fleet_vehicle, self).write(cr, uid, ids, vals, context)
return True
class fleet_vehicle_odometer(osv.Model):
_name='fleet.vehicle.odometer'
_description='Odometer log for a vehicle'
_order='date desc'
def _vehicle_log_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
name = record.vehicle_id.name
if not name:
name = record.date
elif record.date:
name += ' / '+ record.date
res[record.id] = name
return res
def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
if not vehicle_id:
return {}
odometer_unit = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context).odometer_unit
return {
'value': {
'unit': odometer_unit,
}
}
_columns = {
'name': fields.function(_vehicle_log_name_get_fnc, type="char", string='Name', store=True),
'date': fields.date('Date'),
'value': fields.float('Odometer Value', group_operator="max"),
'vehicle_id': fields.many2one('fleet.vehicle', 'Vehicle', required=True),
'unit': fields.related('vehicle_id', 'odometer_unit', type="char", string="Unit", readonly=True),
}
_defaults = {
'date': fields.date.context_today,
}
class fleet_vehicle_log_fuel(osv.Model):
def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
if not vehicle_id:
return {}
vehicle = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context)
odometer_unit = vehicle.odometer_unit
driver = vehicle.driver_id.id
return {
'value': {
'odometer_unit': odometer_unit,
'purchaser_id': driver,
}
}
def on_change_liter(self, cr, uid, ids, liter, price_per_liter, amount, context=None):
#need to cast to float because the value received from the web client may be an integer (Javascript and JSON do not
#make any difference between 3.0 and 3). This causes a problem if you encode, for example, 2 liters at 1.5 per
#liter => the total is computed as 3.0, which then triggers an onchange that recomputes price_per_liter as 3/2=1 (instead
#of 3.0/2=1.5)
#If there is no change in the result, we return an empty dict to prevent an infinite loop due to the 3 intertwined
#onchanges. And in order to verify that there is no change in the result, we have to limit the precision of the
#computation to 2 decimals
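# Illustrative example: with liter=2 and price_per_liter=1.5, amount is recomputed as round(2 * 1.5, 2) == 3.0;
# the onchange this triggers on amount then recomputes price_per_liter as round(3.0 / 2, 2) == 1.5, which equals
# the current value, so that call returns {} and the onchange chain stops.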
liter = float(liter)
price_per_liter = float(price_per_liter)
amount = float(amount)
if liter > 0 and price_per_liter > 0 and round(liter*price_per_liter,2) != amount:
return {'value' : {'amount' : round(liter * price_per_liter,2),}}
elif amount > 0 and liter > 0 and round(amount/liter,2) != price_per_liter:
return {'value' : {'price_per_liter' : round(amount / liter,2),}}
elif amount > 0 and price_per_liter > 0 and round(amount/price_per_liter,2) != liter:
return {'value' : {'liter' : round(amount / price_per_liter,2),}}
else :
return {}
def on_change_price_per_liter(self, cr, uid, ids, liter, price_per_liter, amount, context=None):
#need to cast to float because the value received from the web client may be an integer (Javascript and JSON do not
#make any difference between 3.0 and 3). This causes a problem if you encode, for example, 2 liters at 1.5 per
#liter => the total is computed as 3.0, which then triggers an onchange that recomputes price_per_liter as 3/2=1 (instead
#of 3.0/2=1.5)
#If there is no change in the result, we return an empty dict to prevent an infinite loop due to the 3 intertwined
#onchanges. And in order to verify that there is no change in the result, we have to limit the precision of the
#computation to 2 decimals
liter = float(liter)
price_per_liter = float(price_per_liter)
amount = float(amount)
if liter > 0 and price_per_liter > 0 and round(liter*price_per_liter,2) != amount:
return {'value' : {'amount' : round(liter * price_per_liter,2),}}
elif amount > 0 and price_per_liter > 0 and round(amount/price_per_liter,2) != liter:
return {'value' : {'liter' : round(amount / price_per_liter,2),}}
elif amount > 0 and liter > 0 and round(amount/liter,2) != price_per_liter:
return {'value' : {'price_per_liter' : round(amount / liter,2),}}
else :
return {}
def on_change_amount(self, cr, uid, ids, liter, price_per_liter, amount, context=None):
#need to cast to float because the value received from the web client may be an integer (Javascript and JSON do not
#make any difference between 3.0 and 3). This causes a problem if you encode, for example, 2 liters at 1.5 per
#liter => the total is computed as 3.0, which then triggers an onchange that recomputes price_per_liter as 3/2=1 (instead
#of 3.0/2=1.5)
#If there is no change in the result, we return an empty dict to prevent an infinite loop due to the 3 intertwined
#onchanges. And in order to verify that there is no change in the result, we have to limit the precision of the
#computation to 2 decimals
liter = float(liter)
price_per_liter = float(price_per_liter)
amount = float(amount)
if amount > 0 and liter > 0 and round(amount/liter,2) != price_per_liter:
return {'value': {'price_per_liter': round(amount / liter,2),}}
elif amount > 0 and price_per_liter > 0 and round(amount/price_per_liter,2) != liter:
return {'value': {'liter': round(amount / price_per_liter,2),}}
elif liter > 0 and price_per_liter > 0 and round(liter*price_per_liter,2) != amount:
return {'value': {'amount': round(liter * price_per_liter,2),}}
else :
return {}
def _get_default_service_type(self, cr, uid, context):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'type_service_refueling')
except ValueError:
model_id = False
return model_id
_name = 'fleet.vehicle.log.fuel'
_description = 'Fuel log for vehicles'
_inherits = {'fleet.vehicle.cost': 'cost_id'}
_columns = {
'liter': fields.float('Liter'),
'price_per_liter': fields.float('Price Per Liter'),
'purchaser_id': fields.many2one('res.partner', 'Purchaser', domain="['|',('customer','=',True),('employee','=',True)]"),
'inv_ref': fields.char('Invoice Reference', size=64),
'vendor_id': fields.many2one('res.partner', 'Supplier', domain="[('supplier','=',True)]"),
'notes': fields.text('Notes'),
'cost_id': fields.many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade'),
'cost_amount': fields.related('cost_id', 'amount', string='Amount', type='float', store=True), #we need to keep this field as a related field with store=True because the graph view doesn't support (1) addressing fields from an inherited table or (2) fields that aren't stored in the database
}
_defaults = {
'date': fields.date.context_today,
'cost_subtype_id': _get_default_service_type,
'cost_type': 'fuel',
}
class fleet_vehicle_log_services(osv.Model):
def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
if not vehicle_id:
return {}
vehicle = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context)
odometer_unit = vehicle.odometer_unit
driver = vehicle.driver_id.id
return {
'value': {
'odometer_unit': odometer_unit,
'purchaser_id': driver,
}
}
def _get_default_service_type(self, cr, uid, context):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'type_service_service_8')
except ValueError:
model_id = False
return model_id
_inherits = {'fleet.vehicle.cost': 'cost_id'}
_name = 'fleet.vehicle.log.services'
_description = 'Services for vehicles'
_columns = {
'purchaser_id': fields.many2one('res.partner', 'Purchaser', domain="['|',('customer','=',True),('employee','=',True)]"),
'inv_ref': fields.char('Invoice Reference'),
'vendor_id': fields.many2one('res.partner', 'Supplier', domain="[('supplier','=',True)]"),
'cost_amount': fields.related('cost_id', 'amount', string='Amount', type='float', store=True), #we need to keep this field as a related field with store=True because the graph view doesn't support (1) addressing fields from an inherited table or (2) fields that aren't stored in the database
'notes': fields.text('Notes'),
'cost_id': fields.many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade'),
}
_defaults = {
'date': fields.date.context_today,
'cost_subtype_id': _get_default_service_type,
'cost_type': 'services'
}
class fleet_service_type(osv.Model):
_name = 'fleet.service.type'
_description = 'Type of services available on a vehicle'
_columns = {
'name': fields.char('Name', required=True, translate=True),
'category': fields.selection([('contract', 'Contract'), ('service', 'Service'), ('both', 'Both')], 'Category', required=True, help='Choose whether the service refers to contracts, vehicle services or both'),
}
class fleet_vehicle_log_contract(osv.Model):
def scheduler_manage_auto_costs(self, cr, uid, context=None):
#This method is called by a cron task
#It creates costs for contracts having the "recurring cost" field set, depending on their frequency
#For example, if a contract has a recurring cost of 200 with a weekly frequency, this method creates a cost of 200 on the first day of each week, from the date of the last recurring cost in the database to today
#If the contract does not yet have any recurring costs in the database, the method generates the recurring costs from the start_date to today
#The created costs are associated with a contract through the many2one field contract_id
#If the contract has no start_date, no cost will be created, even if the contract has recurring costs
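# Illustrative example (hypothetical dates): for a monthly contract whose last auto-generated cost is dated
# 2015-03-01, the loop below creates costs dated 2015-04-01, 2015-05-01, ... up to today, and never goes
# past the contract's expiration_date.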
vehicle_cost_obj = self.pool.get('fleet.vehicle.cost')
d = datetime.datetime.strptime(fields.date.context_today(self, cr, uid, context=context), tools.DEFAULT_SERVER_DATE_FORMAT).date()
contract_ids = self.pool.get('fleet.vehicle.log.contract').search(cr, uid, [('state','!=','closed')], offset=0, limit=None, order=None,context=None, count=False)
deltas = {'yearly': relativedelta(years=+1), 'monthly': relativedelta(months=+1), 'weekly': relativedelta(weeks=+1), 'daily': relativedelta(days=+1)}
for contract in self.pool.get('fleet.vehicle.log.contract').browse(cr, uid, contract_ids, context=context):
if not contract.start_date or contract.cost_frequency == 'no':
continue
found = False
last_cost_date = contract.start_date
if contract.generated_cost_ids:
last_autogenerated_cost_id = vehicle_cost_obj.search(cr, uid, ['&', ('contract_id','=',contract.id), ('auto_generated','=',True)], offset=0, limit=1, order='date desc',context=context, count=False)
if last_autogenerated_cost_id:
found = True
last_cost_date = vehicle_cost_obj.browse(cr, uid, last_autogenerated_cost_id[0], context=context).date
startdate = datetime.datetime.strptime(last_cost_date, tools.DEFAULT_SERVER_DATE_FORMAT).date()
if found:
startdate += deltas.get(contract.cost_frequency)
while (startdate <= d) & (startdate <= datetime.datetime.strptime(contract.expiration_date, tools.DEFAULT_SERVER_DATE_FORMAT).date()):
data = {
'amount': contract.cost_generated,
'date': startdate.strftime(tools.DEFAULT_SERVER_DATE_FORMAT),
'vehicle_id': contract.vehicle_id.id,
'cost_subtype_id': contract.cost_subtype_id.id,
'contract_id': contract.id,
'auto_generated': True
}
cost_id = self.pool.get('fleet.vehicle.cost').create(cr, uid, data, context=context)
startdate += deltas.get(contract.cost_frequency)
return True
def scheduler_manage_contract_expiration(self, cr, uid, context=None):
#This method is called by a cron task
#It manages the state of a contract, possibly by posting a message on the vehicle concerned and updating its status
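# Open contracts whose expiration date falls before today + 15 days (including ones already overdue) are
# switched to the 'toclose' state, and one chatter message per vehicle summarises how many contracts need attention.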
datetime_today = datetime.datetime.strptime(fields.date.context_today(self, cr, uid, context=context), tools.DEFAULT_SERVER_DATE_FORMAT)
limit_date = (datetime_today + relativedelta(days=+15)).strftime(tools.DEFAULT_SERVER_DATE_FORMAT)
ids = self.search(cr, uid, ['&', ('state', '=', 'open'), ('expiration_date', '<', limit_date)], offset=0, limit=None, order=None, context=context, count=False)
res = {}
for contract in self.browse(cr, uid, ids, context=context):
if contract.vehicle_id.id in res:
res[contract.vehicle_id.id] += 1
else:
res[contract.vehicle_id.id] = 1
for vehicle, value in res.items():
self.pool.get('fleet.vehicle').message_post(cr, uid, vehicle, body=_('%s contract(s) need(s) to be renewed and/or closed!') % (str(value)), context=context)
return self.write(cr, uid, ids, {'state': 'toclose'}, context=context)
def run_scheduler(self, cr, uid, context=None):
self.scheduler_manage_auto_costs(cr, uid, context=context)
self.scheduler_manage_contract_expiration(cr, uid, context=context)
return True
def _vehicle_contract_name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
name = record.vehicle_id.name
if record.cost_subtype_id.name:
name += ' / '+ record.cost_subtype_id.name
if record.date:
name += ' / '+ record.date
res[record.id] = name
return res
def on_change_vehicle(self, cr, uid, ids, vehicle_id, context=None):
if not vehicle_id:
return {}
odometer_unit = self.pool.get('fleet.vehicle').browse(cr, uid, vehicle_id, context=context).odometer_unit
return {
'value': {
'odometer_unit': odometer_unit,
}
}
def compute_next_year_date(self, strdate):
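# A contract is assumed to run for one year by default; note that the fixed 365-day delta used here
# can be one day off when the interval spans a leap day.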
oneyear = datetime.timedelta(days=365)
curdate = str_to_datetime(strdate)
return datetime.datetime.strftime(curdate + oneyear, tools.DEFAULT_SERVER_DATE_FORMAT)
def on_change_start_date(self, cr, uid, ids, strdate, enddate, context=None):
if (strdate):
return {'value': {'expiration_date': self.compute_next_year_date(strdate),}}
return {}
def get_days_left(self, cr, uid, ids, prop, unknow_none, context=None):
"""return a dict with as value for each contract an integer
if the contract is open (or to close) and overdue, the value is 0
if the contract is closed or has no expiration date, the value is -1
otherwise the value is the number of days left before the contract expires
"""
res = {}
for record in self.browse(cr, uid, ids, context=context):
if (record.expiration_date and (record.state == 'open' or record.state == 'toclose')):
today = str_to_datetime(time.strftime(tools.DEFAULT_SERVER_DATE_FORMAT))
renew_date = str_to_datetime(record.expiration_date)
diff_time = (renew_date-today).days
res[record.id] = diff_time > 0 and diff_time or 0
else:
res[record.id] = -1
return res
def act_renew_contract(self, cr, uid, ids, context=None):
assert len(ids) == 1, "This operation should only be done for 1 single contract at a time, as it is supposed to open a window as a result"
for element in self.browse(cr, uid, ids, context=context):
#compute end date
startdate = str_to_datetime(element.start_date)
enddate = str_to_datetime(element.expiration_date)
diffdate = (enddate - startdate)
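# The copied contract starts the day after the old one expires, and its expiration date is pushed out
# by the old contract's duration.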
default = {
'date': fields.date.context_today(self, cr, uid, context=context),
'start_date': datetime.datetime.strftime(str_to_datetime(element.expiration_date) + datetime.timedelta(days=1), tools.DEFAULT_SERVER_DATE_FORMAT),
'expiration_date': datetime.datetime.strftime(enddate + diffdate, tools.DEFAULT_SERVER_DATE_FORMAT),
}
newid = super(fleet_vehicle_log_contract, self).copy(cr, uid, element.id, default, context=context)
mod, modid = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'fleet_vehicle_log_contract_form')
return {
'name':_("Renew Contract"),
'view_mode': 'form',
'view_id': modid,
'view_type': 'tree,form',
'res_model': 'fleet.vehicle.log.contract',
'type': 'ir.actions.act_window',
'nodestroy': True,
'domain': '[]',
'res_id': newid,
'context': {'active_id':newid},
}
def _get_default_contract_type(self, cr, uid, context=None):
try:
model, model_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'fleet', 'type_contract_leasing')
except ValueError:
model_id = False
return model_id
def on_change_indic_cost(self, cr, uid, ids, cost_ids, context=None):
totalsum = 0.0
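# cost_ids arrives as a list of one2many commands; only 3-element commands whose last item is a dict
# (e.g. (0, 0, values) for a new line) carry an amount that can be summed here.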
for element in cost_ids:
if element and len(element) == 3 and isinstance(element[2], dict):
totalsum += element[2].get('amount', 0.0)
return {
'value': {
'sum_cost': totalsum,
}
}
def _get_sum_cost(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for contract in self.browse(cr, uid, ids, context=context):
totalsum = 0
for cost in contract.cost_ids:
totalsum += cost.amount
res[contract.id] = totalsum
return res
_inherits = {'fleet.vehicle.cost': 'cost_id'}
_name = 'fleet.vehicle.log.contract'
_description = 'Contract information on a vehicle'
_order='state desc,expiration_date'
_columns = {
'name': fields.function(_vehicle_contract_name_get_fnc, type="text", string='Name', store=True),
'start_date': fields.date('Contract Start Date', help='Date when the coverage of the contract begins'),
'expiration_date': fields.date('Contract Expiration Date', help='Date when the coverage of the contract expires (by default, one year after the start date)'),
'days_left': fields.function(get_days_left, type='integer', string='Warning Date'),
'insurer_id' :fields.many2one('res.partner', 'Supplier'),
'purchaser_id': fields.many2one('res.partner', 'Contractor', help='Person for whom the contract is signed'),
'ins_ref': fields.char('Contract Reference', size=64, copy=False),
'state': fields.selection([('open', 'In Progress'), ('toclose','To Close'), ('closed', 'Terminated')],
'Status', readonly=True, help='Choose whether the contract is still valid or not',
copy=False),
'notes': fields.text('Terms and Conditions', help='Write here all supplementary information related to this contract', copy=False),
'cost_generated': fields.float('Recurring Cost Amount', help="Costs paid at regular intervals, depending on the cost frequency. If the cost frequency is set to unique, the cost will be logged at the start date"),
'cost_frequency': fields.selection([('no','No'), ('daily', 'Daily'), ('weekly','Weekly'), ('monthly','Monthly'), ('yearly','Yearly')], 'Recurring Cost Frequency', help='Frequency of the recurring cost', required=True),
'generated_cost_ids': fields.one2many('fleet.vehicle.cost', 'contract_id', 'Generated Costs'),
'sum_cost': fields.function(_get_sum_cost, type='float', string='Indicative Costs Total'),
'cost_id': fields.many2one('fleet.vehicle.cost', 'Cost', required=True, ondelete='cascade'),
'cost_amount': fields.related('cost_id', 'amount', string='Amount', type='float', store=True), #we need to keep this field as a related field with store=True because the graph view doesn't support (1) addressing fields from an inherited table or (2) fields that aren't stored in the database
}
_defaults = {
'purchaser_id': lambda self, cr, uid, ctx: self.pool.get('res.users').browse(cr, uid, uid, context=ctx).partner_id.id or False,
'date': fields.date.context_today,
'start_date': fields.date.context_today,
'state':'open',
'expiration_date': lambda self, cr, uid, ctx: self.compute_next_year_date(fields.date.context_today(self, cr, uid, context=ctx)),
'cost_frequency': 'no',
'cost_subtype_id': _get_default_contract_type,
'cost_type': 'contract',
}
def contract_close(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'closed'}, context=context)
def contract_open(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'open'}, context=context)
class fleet_contract_state(osv.Model):
_name = 'fleet.contract.state'
_description = 'Contains the different possible status of a leasing contract'
_columns = {
'name':fields.char('Contract Status', required=True),
}
| agpl-3.0 |
jehutting/kivy | examples/widgets/carousel_buttons.py | 40 | 1031 | '''
Carousel example with button inside.
This is a tiny test for the scroll distance/timeout,
and to ensure that touch down/up events are dispatched if no gesture is done.
'''
from kivy.uix.carousel import Carousel
from kivy.uix.gridlayout import GridLayout
from kivy.app import App
from kivy.lang import Builder
Builder.load_string('''
<Page>:
cols: 3
Label:
text: str(id(root))
Button
Button
Button
Button
text: 'load(page 3)'
on_release:
carousel = root.parent.parent
carousel.load_slide(carousel.slides[2])
Button
Button
text: 'prev'
on_release:
root.parent.parent.load_previous()
Button
Button
text: 'next'
on_release:
root.parent.parent.load_next()
''')
class Page(GridLayout):
pass
class TestApp(App):
def build(self):
root = Carousel()
for x in range(10):
root.add_widget(Page())
return root
if __name__ == '__main__':
TestApp().run()
| mit |
tivek/conan | conans/test/integration/install_selected_packages_test.py | 3 | 4982 | import unittest
from conans.test.utils.tools import TestClient, TestServer
import os
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.paths import CONANFILE
from conans.model.ref import ConanFileReference, PackageReference
from conans.util.files import load
class InstallSelectedPackagesTest(unittest.TestCase):
def setUp(self):
test_server = TestServer()
self.servers = {"default": test_server}
self.client = TestClient(servers=self.servers, users={"default": [("lasote", "mypass")]})
self.package_ids = self._upload_some_packages(self.client)
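# _upload_some_packages exports Hello0/0.1@lasote/stable and uploads three binary packages
# (Windows, Linux and Linux/gcc 4.6), so package_ids holds three package ids.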
self.new_client = TestClient(servers=self.servers,
users={"default": [("lasote", "mypass")]})
def install_all_test(self):
# Should retrieve the three packages
self.new_client.run("download Hello0/0.1@lasote/stable")
p1 = self.new_client.paths.packages(self.ref)
packages = os.listdir(p1)
self.assertEquals(len(packages), 3)
def install_some_reference_test(self):
# Should retrieve the specified packages
self.new_client.run("download Hello0/0.1@lasote/stable -p %s" % self.package_ids[0])
packages = os.listdir(self.new_client.paths.packages(self.ref))
self.assertEquals(len(packages), 1)
self.assertEquals(packages[0], self.package_ids[0])
self.new_client.run("download Hello0/0.1@lasote/stable -p %s -p %s" % (self.package_ids[0],
self.package_ids[1]))
packages = os.listdir(self.new_client.paths.packages(self.ref))
self.assertEquals(len(packages), 2)
def download_recipe_twice_test(self):
expected_conanfile_contents = self.files[CONANFILE]
self.new_client.run("download Hello0/0.1@lasote/stable")
got_conanfile = load(os.path.join(self.new_client.paths.export(self.ref), CONANFILE))
self.assertEquals(expected_conanfile_contents, got_conanfile)
self.new_client.run("download Hello0/0.1@lasote/stable")
got_conanfile = load(os.path.join(self.new_client.paths.export(self.ref), CONANFILE))
self.assertEquals(expected_conanfile_contents, got_conanfile)
self.new_client.run("download Hello0/0.1@lasote/stable")
got_conanfile = load(os.path.join(self.new_client.paths.export(self.ref), CONANFILE))
self.assertEquals(expected_conanfile_contents, got_conanfile)
def download_packages_twice_test(self):
expected_header_contents = self.files["helloHello0.h"]
package_folder = self.new_client.paths.package(PackageReference(self.ref, self.package_ids[0]))
self.new_client.run("download Hello0/0.1@lasote/stable")
got_header = load(os.path.join(package_folder, "include", "helloHello0.h"))
self.assertEquals(expected_header_contents, got_header)
self.new_client.run("download Hello0/0.1@lasote/stable")
got_header = load(os.path.join(package_folder, "include", "helloHello0.h"))
self.assertEquals(expected_header_contents, got_header)
self.new_client.run("download Hello0/0.1@lasote/stable")
got_header = load(os.path.join(package_folder, "include", "helloHello0.h"))
self.assertEquals(expected_header_contents, got_header)
def install_all_but_no_packages_test(self):
# Remove all from remote
self.new_client.run("remove Hello* -f -r default")
# Try to install all
self.new_client.run("download Hello0/0.1@lasote/stable", ignore_error=True)
self.assertIn("'Hello0/0.1@lasote/stable' not found in remote", self.new_client.user_io.out)
# Upload only the recipe
self.new_client.save(self.files)
self.new_client.run("export . lasote/stable")
self.new_client.run("upload Hello0/0.1@lasote/stable --all")
# And try to download all
self.new_client.run("download Hello0/0.1@lasote/stable")
self.assertIn("No remote binary packages found in remote", self.new_client.user_io.out)
def _upload_some_packages(self, client):
self.ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
self.files = cpp_hello_conan_files("Hello0", "0.1")
# No build.
self.files[CONANFILE] = self.files[CONANFILE].replace("def build(self):", "def build(self):\n return\n")
client.save(self.files)
client.run("export . lasote/stable")
client.run("install Hello0/0.1@lasote/stable -s os=Windows --build missing")
client.run("install Hello0/0.1@lasote/stable -s os=Linux --build missing")
client.run("install Hello0/0.1@lasote/stable -s os=Linux -s compiler=gcc -s "
"compiler.version=4.6 -s compiler.libcxx=libstdc++ --build missing")
client.run("upload Hello0/0.1@lasote/stable --all")
return os.listdir(self.client.paths.packages(self.ref))
| mit |
h-hirokawa/ansible | lib/ansible/plugins/connection/lxc.py | 12 | 7870 | # (c) 2015, Joerg Thalheim <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import shutil
import traceback
import select
import fcntl
import errno
from ansible import errors
from ansible import constants as C
from ansible.plugins.connection import ConnectionBase
from ansible.utils.unicode import to_bytes
HAS_LIBLXC = False
try:
import lxc as _lxc
HAS_LIBLXC = True
except ImportError:
pass
class Connection(ConnectionBase):
''' Local lxc based connections '''
transport = 'lxc'
has_pipelining = True
become_methods = frozenset(C.BECOME_METHODS)
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.container_name = self._play_context.remote_addr
self.container = None
def _connect(self):
''' connect to the lxc container '''
super(Connection, self)._connect()
if not HAS_LIBLXC:
msg = "lxc bindings for python2 are not installed"
raise errors.AnsibleError(msg)
if self.container:
return
self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name)
self.container = _lxc.Container(self.container_name)
if self.container.state == "STOPPED":
raise errors.AnsibleError("%s is not running" % self.container_name)
def _communicate(self, pid, in_data, stdin, stdout, stderr):
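# Pump data between the attached process and our pipe ends: feed in_data to its stdin (when given),
# drain stdout/stderr with select() until both reach EOF, then reap the child and return
# (returncode, stdout, stderr).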
buf = { stdout: [], stderr: [] }
read_fds = [stdout, stderr]
if in_data:
write_fds = [stdin]
else:
write_fds = []
while len(read_fds) > 0 or len(write_fds) > 0:
try:
ready_reads, ready_writes, _ = select.select(read_fds, write_fds, [])
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
for fd in ready_writes:
in_data = in_data[os.write(fd, in_data):]
if len(in_data) == 0:
write_fds.remove(fd)
for fd in ready_reads:
data = os.read(fd, 32768)
if not data:
read_fds.remove(fd)
buf[fd].append(data)
(pid, returncode) = os.waitpid(pid, 0)
return returncode, b"".join(buf[stdout]), b"".join(buf[stderr])
def _set_nonblocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
return fd
def exec_command(self, cmd, in_data=None, sudoable=False):
''' run a command in the lxc container '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
executable = to_bytes(self._play_context.executable, errors='strict')
local_cmd = [executable, '-c', to_bytes(cmd, errors='strict')]
read_stdout, write_stdout = None, None
read_stderr, write_stderr = None, None
read_stdin, write_stdin = None, None
try:
read_stdout, write_stdout = os.pipe()
read_stderr, write_stderr = os.pipe()
kwargs = {
'stdout': self._set_nonblocking(write_stdout),
'stderr': self._set_nonblocking(write_stderr),
'env_policy': _lxc.LXC_ATTACH_CLEAR_ENV
}
if in_data:
read_stdin, write_stdin = os.pipe()
kwargs['stdin'] = self._set_nonblocking(read_stdin)
self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name)
pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs)
if pid == -1:
msg = "failed to attach to container %s" % self.container_name
raise errors.AnsibleError(msg)
write_stdout = os.close(write_stdout)
write_stderr = os.close(write_stderr)
if read_stdin:
read_stdin = os.close(read_stdin)
return self._communicate(pid,
in_data,
write_stdin,
read_stdout,
read_stderr)
finally:
fds = [read_stdout,
write_stdout,
read_stderr,
write_stderr,
read_stdin,
write_stdin]
for fd in fds:
if fd:
os.close(fd)
def put_file(self, in_path, out_path):
''' transfer a file from local to lxc '''
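# The copy itself runs inside the container via attach_wait(), so out_path is resolved in the
# container's filesystem while in_path is opened on the host.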
super(Connection, self).put_file(in_path, out_path)
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name)
in_path = to_bytes(in_path, errors='strict')
out_path = to_bytes(out_path, errors='strict')
if not os.path.exists(in_path):
msg = "file or module does not exist: %s" % in_path
raise errors.AnsibleFileNotFound(msg)
try:
src_file = open(in_path, "rb")
except IOError:
traceback.print_exc()
raise errors.AnsibleError("failed to open input file to %s" % in_path)
try:
def write_file(args):
with open(out_path, 'wb+') as dst_file:
shutil.copyfileobj(src_file, dst_file)
try:
self.container.attach_wait(write_file, None)
except IOError:
traceback.print_exc()
msg = "failed to transfer file to %s" % out_path
raise errors.AnsibleError(msg)
finally:
src_file.close()
def fetch_file(self, in_path, out_path):
''' fetch a file from lxc to local '''
super(Connection, self).fetch_file(in_path, out_path)
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name)
in_path = to_bytes(in_path, errors='strict')
out_path = to_bytes(out_path, errors='strict')
try:
dst_file = open(out_path, "wb")
except IOError:
traceback.print_exc()
msg = "failed to open output file %s" % out_path
raise errors.AnsibleError(msg)
try:
def write_file(args):
try:
with open(in_path, 'rb') as src_file:
shutil.copyfileobj(src_file, dst_file)
finally:
# this is needed in the lxc child process
# to flush internal python buffers
dst_file.close()
try:
self.container.attach_wait(write_file, None)
except IOError:
traceback.print_exc()
msg = "failed to transfer file from %s to %s" % (in_path, out_path)
raise errors.AnsibleError(msg)
finally:
dst_file.close()
def close(self):
''' terminate the connection; nothing to do here '''
super(Connection, self).close()
self._connected = False
| gpl-3.0 |
passiweinberger/nupic | examples/opf/experiments/multistep/hotgym_best_tp_5step/description.py | 32 | 3186 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
config = \
{ 'modelParams': { 'clParams': { 'clVerbosity': 0},
'sensorParams': { 'encoders': { 'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'n': 28,
'name': u'consumption',
'type': 'AdaptiveScalarEncoder',
'w': 21},
'timestamp_dayOfWeek': { 'dayOfWeek': ( 21,
3),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': ( 21,
1),
'type': 'DateEncoder'},
'timestamp_weekend': None},
'verbosity': 0},
'spParams': { },
'tpParams': { 'activationThreshold': 13,
'minThreshold': 9,
'verbosity': 0}}}
mod = importBaseDescription('../hotgym/description.py', config)
locals().update(mod.__dict__)
| agpl-3.0 |
shashank971/edx-platform | common/lib/capa/capa/tests/test_hint_functionality.py | 41 | 34139 | # -*- coding: utf-8 -*-
"""
Tests of extended hints
"""
import unittest
from ddt import ddt, data, unpack
# With the use of ddt, some of the data expected_string cases below are naturally long stretches
# of text without whitespace. I think it's best to leave such lines intact
# in the test code. Therefore:
# pylint: disable=line-too-long
# For our many ddt data cases, prefer a compact form of { .. }
# pylint: disable=bad-continuation
from . import new_loncapa_problem, load_fixture
class HintTest(unittest.TestCase):
"""Base class for tests of extended hinting functionality."""
def correctness(self, problem_id, choice):
"""Grades the problem and returns the 'correctness' string from cmap."""
student_answers = {problem_id: choice}
cmap = self.problem.grade_answers(answers=student_answers) # pylint: disable=no-member
return cmap[problem_id]['correctness']
def get_hint(self, problem_id, choice):
"""Grades the problem and returns its hint from cmap or the empty string."""
student_answers = {problem_id: choice}
cmap = self.problem.grade_answers(answers=student_answers) # pylint: disable=no-member
adict = cmap.cmap.get(problem_id)
if adict:
return adict['msg']
else:
return ''
# It is a little surprising how much more complicated TextInput is than all the other cases.
@ddt
class TextInputHintsTest(HintTest):
"""
Test Text Input Hints Test
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'Blue')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_2',
'trigger_type': 'single',
'hint_label': u'Correct',
'correctness': True,
'student_answer': [u'Blue'],
'question_type': 'stringresponse',
'hints': [{'text': 'The red light is scattered by water molecules leaving only blue light.'}]}
)
@data(
{'problem_id': u'1_2_1', u'choice': u'GermanyΩ',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">I do not think so.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'franceΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Viva la France!Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'FranceΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Viva la France!Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'Mexico',
'expected_string': ''},
{'problem_id': u'1_2_1', u'choice': u'USAΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Less well known, but yes, there is a Paris, Texas.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'usaΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Less well known, but yes, there is a Paris, Texas.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'uSAxΩ',
'expected_string': u''},
{'problem_id': u'1_2_1', u'choice': u'NICKLANDΩ',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">The country name does not end in LANDΩ</div></div>'},
{'problem_id': u'1_3_1', u'choice': u'Blue',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">The red light is scattered by water molecules leaving only blue light.</div></div>'},
{'problem_id': u'1_3_1', u'choice': u'blue',
'expected_string': u''},
{'problem_id': u'1_3_1', u'choice': u'b',
'expected_string': u''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class TextInputExtendedHintsCaseInsensitive(HintTest):
"""Test Text Input Extended hints Case Insensitive"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_5_1', 'choice': 'abc', 'expected_string': ''}, # wrong answer yielding no hint
{'problem_id': u'1_5_1', 'choice': 'A', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-label">Woo Hoo: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'a', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-label">Woo Hoo: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'B', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'b', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'C', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'c', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-text">hint4</div></div>'},
# regexp cases
{'problem_id': u'1_5_1', 'choice': 'FGGG', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-text">hint6</div></div>'},
{'problem_id': u'1_5_1', 'choice': 'fgG', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-text">hint6</div></div>'},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class TextInputExtendedHintsCaseSensitive(HintTest):
"""Sometimes the semantics can be encoded in the class name."""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_6_1', 'choice': 'abc', 'expected_string': ''},
{'problem_id': u'1_6_1', 'choice': 'A', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'a', 'expected_string': u''},
{'problem_id': u'1_6_1', 'choice': 'B', 'expected_string':
u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'b', 'expected_string': u''},
{'problem_id': u'1_6_1', 'choice': 'C', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'c', 'expected_string': u''},
# regexp cases
{'problem_id': u'1_6_1', 'choice': 'FGG', 'expected_string':
u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint6</div></div>'},
{'problem_id': u'1_6_1', 'choice': 'fgG', 'expected_string': u''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
message_text = self.get_hint(problem_id, choice)
self.assertEqual(message_text, expected_string)
@ddt
class TextInputExtendedHintsCompatible(HintTest):
"""
Compatibility test with mixed old and new style additional_answer tags.
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_7_1', 'choice': 'A', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_7_1', 'choice': 'B', 'correct': 'correct', 'expected_string': ''},
{'problem_id': u'1_7_1', 'choice': 'C', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_7_1', 'choice': 'D', 'correct': 'incorrect', 'expected_string': ''},
# check going through conversion with difficult chars
{'problem_id': u'1_7_1', 'choice': """<&"'>""", 'correct': 'correct', 'expected_string': ''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, correct, expected_string):
message_text = self.get_hint(problem_id, choice)
self.assertEqual(message_text, expected_string)
self.assertEqual(self.correctness(problem_id, choice), correct)
@ddt
class TextInputExtendedHintsRegex(HintTest):
"""
Extended hints where the answer is regex mode.
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
@data(
{'problem_id': u'1_8_1', 'choice': 'ABwrong', 'correct': 'incorrect', 'expected_string': ''},
{'problem_id': u'1_8_1', 'choice': 'ABC', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'ABBBBC', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'aBc', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'BBBB', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'bbb', 'correct': 'correct',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'C', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'c', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint4</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'D', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint6</div></div>'},
{'problem_id': u'1_8_1', 'choice': 'd', 'correct': 'incorrect',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint6</div></div>'},
)
@unpack
def test_text_input_hints(self, problem_id, choice, correct, expected_string):
message_text = self.get_hint(problem_id, choice)
self.assertEqual(message_text, expected_string)
self.assertEqual(self.correctness(problem_id, choice), correct)
@ddt
class NumericInputHintsTest(HintTest):
"""
This class consists of a suite of test cases to be run on the numeric input problem defined in the XML fixture loaded below.
"""
xml = load_fixture('extended_hints_numeric_input.xml')
problem = new_loncapa_problem(xml) # this problem is properly constructed
def test_tracking_log(self):
self.get_hint(u'1_2_1', u'1.141')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'trigger_type': 'single',
'hint_label': u'Nice',
'correctness': True,
'student_answer': [u'1.141'],
'question_type': 'numericalresponse',
'hints': [{'text': 'The square root of two turns up in the strangest places.'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': '1.141',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Nice: </div><div class="hint-text">The square root of two turns up in the strangest places.</div></div>'},
{'problem_id': u'1_3_1', 'choice': '4',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Pretty easy, uh?.</div></div>'},
# should get hint, when correct via numeric-tolerance
{'problem_id': u'1_2_1', 'choice': '1.15',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Nice: </div><div class="hint-text">The square root of two turns up in the strangest places.</div></div>'},
# when they answer wrong, nothing
{'problem_id': u'1_2_1', 'choice': '2', 'expected_string': ''},
)
@unpack
def test_numeric_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class CheckboxHintsTest(HintTest):
"""
This class consists of a suite of test cases to be run on the checkbox problem defined in the XML fixture loaded below.
"""
xml = load_fixture('extended_hints_checkbox.xml')
problem = new_loncapa_problem(xml) # this problem is properly constructed
@data(
{'problem_id': u'1_2_1', 'choice': [u'choice_0'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">You are right that apple is a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_1'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">Mushroom is a fungus, not a fruit.</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">You are right that grape is a fruit</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_3'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_4'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">I do not know what a Camero is but it is not a fruit.</div></div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_0', u'choice_1'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Almost right: </div><div class="hint-text">You are right that apple is a fruit, but there is one you are missing. Also, mushroom is not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_1', u'choice_2'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">You are right that grape is a fruit, but there is one you are missing. Also, mushroom is not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': [u'choice_0', u'choice_2'],
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="feedback-hint-multi"><div class="hint-text">You are right that apple is a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">You are right that grape is a fruit</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_0'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">No, sorry, a banana is a fruit.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_1'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_3'],
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprouts are vegetables.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_0', u'choice_1'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Very funny: </div><div class="hint-text">Making a banana split?</div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_1', u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
{'problem_id': u'1_3_1', 'choice': [u'choice_0', u'choice_2'],
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">No, sorry, a banana is a fruit.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
# check for interaction between compoundhint and correct/incorrect
{'problem_id': u'1_4_1', 'choice': [u'choice_0', u'choice_1'], # compound
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">AB</div></div>'},
{'problem_id': u'1_4_1', 'choice': [u'choice_0', u'choice_2'], # compound
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">AC</div></div>'},
# check for labeling where multiple child hints have labels
# These are some tricky cases
{'problem_id': '1_5_1', 'choice': ['choice_0', 'choice_1'],
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">AA: </div><div class="feedback-hint-multi"><div class="hint-text">aa</div></div></div>'},
{'problem_id': '1_5_1', 'choice': ['choice_0'],
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">aa</div><div class="hint-text">bb</div></div></div>'},
{'problem_id': '1_5_1', 'choice': ['choice_1'],
'expected_string': ''},
{'problem_id': '1_5_1', 'choice': [],
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">BB: </div><div class="feedback-hint-multi"><div class="hint-text">bb</div></div></div>'},
{'problem_id': '1_6_1', 'choice': ['choice_0'],
'expected_string': '<div class="feedback-hint-incorrect"><div class="feedback-hint-multi"><div class="hint-text">aa</div></div></div>'},
{'problem_id': '1_6_1', 'choice': ['choice_0', 'choice_1'],
'expected_string': '<div class="feedback-hint-correct"><div class="hint-text">compoundo</div></div>'},
# The user selects *nothing*, but can still get "unselected" feedback
{'problem_id': '1_7_1', 'choice': [],
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">bb</div></div></div>'},
# 100% not match of sel/unsel feedback
{'problem_id': '1_7_1', 'choice': ['choice_1'],
'expected_string': ''},
# Here we have the correct combination, and that makes feedback too
{'problem_id': '1_7_1', 'choice': ['choice_0'],
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="feedback-hint-multi"><div class="hint-text">aa</div><div class="hint-text">bb</div></div></div>'},
)
@unpack
def test_checkbox_hints(self, problem_id, choice, expected_string):
self.maxDiff = None # pylint: disable=invalid-name
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
class CheckboxHintsTestTracking(HintTest):
"""
Test the rather complicated tracking log output for checkbox cases.
"""
xml = """
<problem>
<p>question</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Apple
<choicehint selected="true">A true</choicehint>
<choicehint selected="false">A false</choicehint>
</choice>
<choice correct="false">Banana
</choice>
<choice correct="true">Cronut
<choicehint selected="true">C true</choicehint>
</choice>
<compoundhint value="A C">A C Compound</compoundhint>
</checkboxgroup>
</choiceresponse>
</problem>
"""
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test checkbox tracking log - by far the most complicated case"""
# A -> 1 hint
self.get_hint(u'1_2_1', [u'choice_0'])
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'hint_label': u'Incorrect',
'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_1',
'choice_all': ['choice_0', 'choice_1', 'choice_2'],
'correctness': False,
'trigger_type': 'single',
'student_answer': [u'choice_0'],
'hints': [{'text': 'A true', 'trigger': [{'choice': 'choice_0', 'selected': True}]}],
'question_type': 'choiceresponse'}
)
# B C -> 2 hints
self.problem.capa_module.runtime.track_function.reset_mock()
self.get_hint(u'1_2_1', [u'choice_1', u'choice_2'])
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'hint_label': u'Incorrect',
'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_1',
'choice_all': ['choice_0', 'choice_1', 'choice_2'],
'correctness': False,
'trigger_type': 'single',
'student_answer': [u'choice_1', u'choice_2'],
'hints': [
{'text': 'A false', 'trigger': [{'choice': 'choice_0', 'selected': False}]},
{'text': 'C true', 'trigger': [{'choice': 'choice_2', 'selected': True}]}
],
'question_type': 'choiceresponse'}
)
# A C -> 1 Compound hint
self.problem.capa_module.runtime.track_function.reset_mock()
self.get_hint(u'1_2_1', [u'choice_0', u'choice_2'])
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'hint_label': u'Correct',
'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_1',
'choice_all': ['choice_0', 'choice_1', 'choice_2'],
'correctness': True,
'trigger_type': 'compound',
'student_answer': [u'choice_0', u'choice_2'],
'hints': [
{'text': 'A C Compound',
'trigger': [{'choice': 'choice_0', 'selected': True}, {'choice': 'choice_2', 'selected': True}]}
],
'question_type': 'choiceresponse'}
)
@ddt
class MultpleChoiceHintsTest(HintTest):
"""
    This class tests the multiple choice problem defined in the XML fixture loaded below.
"""
xml = load_fixture('extended_hints_multiple_choice.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'choice_2')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_2', 'trigger_type': 'single',
'student_answer': [u'choice_2'], 'correctness': False, 'question_type': 'multiplechoiceresponse',
'hint_label': 'OOPS', 'hints': [{'text': 'Apple is a fruit.'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': u'choice_0',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-text">Mushroom is a fungus, not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_1',
'expected_string': ''},
{'problem_id': u'1_3_1', 'choice': u'choice_1',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Potato is a root vegetable.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_2',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">OUTSTANDING: </div><div class="hint-text">Apple is indeed a fruit.</div></div>'},
{'problem_id': u'1_3_1', 'choice': u'choice_2',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">OOPS: </div><div class="hint-text">Apple is a fruit.</div></div>'},
{'problem_id': u'1_3_1', 'choice': u'choice_9',
'expected_string': ''},
)
@unpack
def test_multiplechoice_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class MultpleChoiceHintsWithHtmlTest(HintTest):
"""
    This class tests the multiple choice problem (with HTML in its hints) defined in the XML fixture loaded below.
"""
xml = load_fixture('extended_hints_multiple_choice_with_html.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_2_1', u'choice_0')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'trigger_type': 'single',
'student_answer': [u'choice_0'], 'correctness': False, 'question_type': 'multiplechoiceresponse',
'hint_label': 'Incorrect', 'hints': [{'text': 'Mushroom <img src="#" ale="#"/>is a fungus, not a fruit.'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': u'choice_0',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">Mushroom <img src="#" ale="#"/>is a fungus, not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_1',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">Potato is <img src="#" ale="#"/> not a fruit.</div></div>'},
{'problem_id': u'1_2_1', 'choice': u'choice_2',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text"><a href="#">Apple</a> is a fruit.</div></div>'}
)
@unpack
def test_multiplechoice_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class DropdownHintsTest(HintTest):
"""
    This class tests the dropdown problem defined in the XML fixture loaded below.
"""
xml = load_fixture('extended_hints_dropdown.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'FACES')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_2', 'trigger_type': 'single',
'student_answer': [u'FACES'], 'correctness': True, 'question_type': 'optionresponse',
'hint_label': 'Correct', 'hints': [{'text': 'With lots of makeup, doncha know?'}]}
)
@data(
{'problem_id': u'1_2_1', 'choice': 'Multiple Choice',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Good Job: </div><div class="hint-text">Yes, multiple choice is the right answer.</div></div>'},
{'problem_id': u'1_2_1', 'choice': 'Text Input',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">No, text input problems do not present options.</div></div>'},
{'problem_id': u'1_2_1', 'choice': 'Numerical Input',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">No, numerical input problems do not present options.</div></div>'},
{'problem_id': u'1_3_1', 'choice': 'FACES',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">With lots of makeup, doncha know?</div></div>'},
{'problem_id': u'1_3_1', 'choice': 'dogs',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">NOPE: </div><div class="hint-text">Not dogs, not cats, not toads</div></div>'},
{'problem_id': u'1_3_1', 'choice': 'wrongo',
'expected_string': ''},
# Regression case where feedback includes answer substring
{'problem_id': u'1_4_1', 'choice': 'AAA',
'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">AAABBB1</div></div>'},
{'problem_id': u'1_4_1', 'choice': 'BBB',
'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">AAABBB2</div></div>'},
{'problem_id': u'1_4_1', 'choice': 'not going to match',
'expected_string': ''},
)
@unpack
def test_dropdown_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
class ErrorConditionsTest(HintTest):
"""
    Erroneous XML should raise an exception.
"""
def test_error_conditions_illegal_element(self):
xml_with_errors = load_fixture('extended_hints_with_errors.xml')
with self.assertRaises(Exception):
new_loncapa_problem(xml_with_errors) # this problem is improperly constructed
| agpl-3.0 |
felixma/nova | nova/tests/unit/api/openstack/compute/test_plugin_framework.py | 17 | 1307 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
class PluginTest(test.NoDBTestCase):
@mock.patch("nova.api.openstack.APIRouterV21.api_extension_namespace")
def test_plugin_framework_index(self, mock_namespace):
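        # Point the v2.1 API router at the test extension namespace so the
        # 'test-basic' plugin under test is loaded into the WSGI app below.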
mock_namespace.return_value = 'nova.api.v21.test_extensions'
app = fakes.wsgi_app_v21(init_only='test-basic')
req = fakes.HTTPRequest.blank('/v2/fake/test')
res = req.get_response(app)
self.assertEqual(200, res.status_int)
resp_json = jsonutils.loads(res.body)
self.assertEqual('val', resp_json['param'])
| apache-2.0 |
eatbyte/Swift | test/probe/test_object_metadata_replication.py | 3 | 9649 | #!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import StringIO
from tempfile import mkdtemp
from textwrap import dedent
import functools
import unittest
import os
import shutil
import uuid
from swift.common import internal_client, utils
from test.probe.brain import BrainSplitter
from test.probe.common import ReplProbeTest
def _sync_methods(object_server_config_paths):
"""
Get the set of all configured sync_methods for the object-replicator
sections in the list of config paths.
"""
sync_methods = set()
for config_path in object_server_config_paths:
options = utils.readconf(config_path, 'object-replicator')
sync_methods.add(options.get('sync_method', 'rsync'))
return sync_methods
def expected_failure_with_ssync(m):
"""
Wrapper for probetests that don't pass if you use ssync
"""
@functools.wraps(m)
def wrapper(self, *args, **kwargs):
obj_conf = self.configs['object-server']
config_paths = [v for k, v in obj_conf.items()
if k in self.brain.handoff_numbers]
using_ssync = 'ssync' in _sync_methods(config_paths)
failed = False
try:
return m(self, *args, **kwargs)
except AssertionError:
failed = True
if not using_ssync:
raise
finally:
if using_ssync and not failed:
self.fail('This test is expected to fail with ssync')
return wrapper
class Test(ReplProbeTest):
def setUp(self):
"""
Reset all environment and start all servers.
"""
super(Test, self).setUp()
self.container_name = 'container-%s' % uuid.uuid4()
self.object_name = 'object-%s' % uuid.uuid4()
self.brain = BrainSplitter(self.url, self.token, self.container_name,
self.object_name, 'object')
self.tempdir = mkdtemp()
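        # Write a minimal proxy pipeline config for the InternalClient that the
        # helper methods below use to PUT/POST/GET/DELETE the test object.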
conf_path = os.path.join(self.tempdir, 'internal_client.conf')
conf_body = """
[DEFAULT]
swift_dir = /etc/swift
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
object_post_as_copy = false
[filter:cache]
use = egg:swift#memcache
[filter:catch_errors]
use = egg:swift#catch_errors
"""
with open(conf_path, 'w') as f:
f.write(dedent(conf_body))
self.int_client = internal_client.InternalClient(conf_path, 'test', 1)
def tearDown(self):
super(Test, self).tearDown()
shutil.rmtree(self.tempdir)
def _put_object(self, headers=None):
headers = headers or {}
self.int_client.upload_object(StringIO(u'stuff'), self.account,
self.container_name,
self.object_name, headers)
def _post_object(self, headers):
self.int_client.set_object_metadata(self.account, self.container_name,
self.object_name, headers)
def _delete_object(self):
self.int_client.delete_object(self.account, self.container_name,
self.object_name)
def _get_object(self, headers=None, expect_statuses=(2,)):
return self.int_client.get_object(self.account,
self.container_name,
self.object_name,
headers,
acceptable_statuses=expect_statuses)
def _get_object_metadata(self):
return self.int_client.get_object_metadata(self.account,
self.container_name,
self.object_name)
def test_object_delete_is_replicated(self):
self.brain.put_container(policy_index=0)
# put object
self._put_object()
# put newer object with sysmeta to first server subset
self.brain.stop_primary_half()
self._put_object()
self.brain.start_primary_half()
# delete object on second server subset
self.brain.stop_handoff_half()
self._delete_object()
self.brain.start_handoff_half()
# run replicator
self.get_to_final_state()
# check object deletion has been replicated on first server set
self.brain.stop_primary_half()
self._get_object(expect_statuses=(4,))
self.brain.start_primary_half()
# check object deletion persists on second server set
self.brain.stop_handoff_half()
self._get_object(expect_statuses=(4,))
# put newer object to second server set
self._put_object()
self.brain.start_handoff_half()
# run replicator
self.get_to_final_state()
# check new object has been replicated on first server set
self.brain.stop_primary_half()
self._get_object()
self.brain.start_primary_half()
# check new object persists on second server set
self.brain.stop_handoff_half()
self._get_object()
@expected_failure_with_ssync
def test_sysmeta_after_replication_with_subsequent_post(self):
sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
usermeta = {'x-object-meta-bar': 'meta-bar'}
self.brain.put_container(policy_index=0)
# put object
self._put_object()
# put newer object with sysmeta to first server subset
self.brain.stop_primary_half()
self._put_object(headers=sysmeta)
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
self.brain.start_primary_half()
# post some user meta to second server subset
self.brain.stop_handoff_half()
self._post_object(usermeta)
metadata = self._get_object_metadata()
for key in usermeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], usermeta[key])
for key in sysmeta:
self.assertFalse(key in metadata)
self.brain.start_handoff_half()
# run replicator
self.get_to_final_state()
# check user metadata has been replicated to first server subset
# and sysmeta is unchanged
self.brain.stop_primary_half()
metadata = self._get_object_metadata()
expected = dict(sysmeta)
expected.update(usermeta)
for key in expected.keys():
self.assertTrue(key in metadata, key)
self.assertEqual(metadata[key], expected[key])
self.brain.start_primary_half()
# check user metadata and sysmeta both on second server subset
self.brain.stop_handoff_half()
metadata = self._get_object_metadata()
for key in expected.keys():
self.assertTrue(key in metadata, key)
self.assertEqual(metadata[key], expected[key])
def test_sysmeta_after_replication_with_prior_post(self):
sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
usermeta = {'x-object-meta-bar': 'meta-bar'}
self.brain.put_container(policy_index=0)
# put object
self._put_object()
# put user meta to first server subset
self.brain.stop_handoff_half()
self._post_object(headers=usermeta)
metadata = self._get_object_metadata()
for key in usermeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], usermeta[key])
self.brain.start_handoff_half()
# put newer object with sysmeta to second server subset
self.brain.stop_primary_half()
self._put_object(headers=sysmeta)
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
self.brain.start_primary_half()
# run replicator
self.get_to_final_state()
# check stale user metadata is not replicated to first server subset
# and sysmeta is unchanged
self.brain.stop_primary_half()
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
for key in usermeta:
self.assertFalse(key in metadata)
self.brain.start_primary_half()
# check stale user metadata is removed from second server subset
# and sysmeta is replicated
self.brain.stop_handoff_half()
metadata = self._get_object_metadata()
for key in sysmeta:
self.assertTrue(key in metadata)
self.assertEqual(metadata[key], sysmeta[key])
for key in usermeta:
self.assertFalse(key in metadata)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
ArcherSys/ArcherSys | Lib/lib2to3/fixes/fix_itertools.py | 1 | 4784 | """ Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
imports from itertools are fixed in fix_itertools_import.py
If itertools is imported as something else (e.g. import itertools as it;
it.izip(spam, eggs)) method calls will not get fixed.
"""
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixItertools(fixer_base.BaseFix):
BM_compatible = True
it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
PATTERN = """
power< it='itertools'
trailer<
dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
|
power< func=%(it_funcs)s trailer< '(' [any] ')' > >
""" %(locals())
# Needs to be run after fix_(map|zip|filter)
run_order = 6
def transform(self, node, results):
prefix = None
func = results['func'][0]
if ('it' in results and
func.value not in ('ifilterfalse', 'izip_longest')):
dot, it = (results['dot'], results['it'])
# Remove the 'itertools'
prefix = it.prefix
it.remove()
# Replace the node which contains ('.', 'function') with the
# function (to be consistent with the second part of the pattern)
dot.remove()
func.parent.replace(func)
prefix = prefix or func.prefix
func.replace(Name(func.value[1:], prefix=prefix))
| mit |
kmoocdev2/edx-platform | openedx/features/course_experience/__init__.py | 1 | 3584 | """
Unified course experience settings and helper methods.
"""
from django.utils.translation import ugettext as _
from openedx.core.djangoapps.util.user_messages import UserMessageCollection
from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag, WaffleFlagNamespace
# Namespace for course experience waffle flags.
WAFFLE_FLAG_NAMESPACE = WaffleFlagNamespace(name='course_experience')
# Waffle flag to enable the separate course outline page and full width content.
COURSE_OUTLINE_PAGE_FLAG = CourseWaffleFlag(WAFFLE_FLAG_NAMESPACE, 'course_outline_page', flag_undefined_default=True)
# Waffle flag to enable a single unified "Course" tab.
UNIFIED_COURSE_TAB_FLAG = CourseWaffleFlag(WAFFLE_FLAG_NAMESPACE, 'unified_course_tab', flag_undefined_default=True)
# Waffle flag to enable the sock on the footer of the home and courseware pages.
DISPLAY_COURSE_SOCK_FLAG = CourseWaffleFlag(WAFFLE_FLAG_NAMESPACE, 'display_course_sock')
# Waffle flag to let learners access a course before its start date.
COURSE_PRE_START_ACCESS_FLAG = CourseWaffleFlag(WAFFLE_FLAG_NAMESPACE, 'pre_start_access')
# Waffle flag to enable a review page link from the unified home page.
SHOW_REVIEWS_TOOL_FLAG = CourseWaffleFlag(WAFFLE_FLAG_NAMESPACE, 'show_reviews_tool')
# Waffle flag to enable the setting of course goals.
ENABLE_COURSE_GOALS = CourseWaffleFlag(WAFFLE_FLAG_NAMESPACE, 'enable_course_goals')
# Waffle flag to control the display of the hero
SHOW_UPGRADE_MSG_ON_COURSE_HOME = CourseWaffleFlag(WAFFLE_FLAG_NAMESPACE, 'show_upgrade_msg_on_course_home')
# Waffle flag to control the display of the upgrade deadline message
UPGRADE_DEADLINE_MESSAGE = CourseWaffleFlag(WAFFLE_FLAG_NAMESPACE, 'upgrade_deadline_message')
# Waffle flag to switch between the 'welcome message' and 'latest update' on the course home page.
# Important Admin Note: This is meant to be configured using waffle_utils course
# override only. Either do not create the actual waffle flag, or be sure to unset the
# flag even for Superusers.
LATEST_UPDATE_FLAG = CourseWaffleFlag(WAFFLE_FLAG_NAMESPACE, 'latest_update')
# Waffle flag to enable the use of Bootstrap for course experience pages
USE_BOOTSTRAP_FLAG = CourseWaffleFlag(WAFFLE_FLAG_NAMESPACE, 'use_bootstrap', flag_undefined_default=True)
def course_home_page_title(course): # pylint: disable=unused-argument
"""
Returns the title for the course home page.
"""
return _('Course')
def default_course_url_name(course_id, type=None):
"""
Returns the default course URL name for the current user.
Arguments:
        course_id (CourseKey): The course id of the current course.
        type (str): Optional page type; 'video' returns the video page URL name.
    """
if type == 'video':
return 'video'
if COURSE_OUTLINE_PAGE_FLAG.is_enabled(course_id):
return 'openedx.course_experience.course_home'
else:
return 'courseware'
def course_home_url_name(course_key):
"""
Returns the course home page's URL name for the current user.
Arguments:
course_key (CourseKey): The course key for which the home url is being
requested.
"""
if UNIFIED_COURSE_TAB_FLAG.is_enabled(course_key):
return 'openedx.course_experience.course_home'
else:
return 'info'
class CourseHomeMessages(UserMessageCollection):
"""
This set of messages appear above the outline on the course home page.
"""
NAMESPACE = 'course_home_level_messages'
@classmethod
def get_namespace(cls):
"""
Returns the namespace of the message collection.
"""
return cls.NAMESPACE
| agpl-3.0 |
megawidget/ebb | vendor/gtest-1.7.0/test/gtest_filter_unittest.py | 2826 | 21261 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' nor 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
args : Arguments to pass to the to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
ltilve/chromium | third_party/tlslite/tlslite/utils/openssl_tripledes.py | 202 | 1788 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""OpenSSL/M2Crypto 3DES implementation."""
from .cryptomath import *
from .tripledes import *
if m2cryptoLoaded:
def new(key, mode, IV):
return OpenSSL_TripleDES(key, mode, IV)
class OpenSSL_TripleDES(TripleDES):
def __init__(self, key, mode, IV):
TripleDES.__init__(self, key, mode, IV, "openssl")
self.key = key
self.IV = IV
def _createContext(self, encrypt):
context = m2.cipher_ctx_new()
cipherType = m2.des_ede3_cbc()
m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
return context
def encrypt(self, plaintext):
TripleDES.encrypt(self, plaintext)
context = self._createContext(1)
ciphertext = m2.cipher_update(context, plaintext)
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return bytearray(ciphertext)
def decrypt(self, ciphertext):
TripleDES.decrypt(self, ciphertext)
context = self._createContext(0)
#I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
#To work around this, we append sixteen zeros to the string, below:
plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
#If this bug is ever fixed, then plaintext will end up having a garbage
#plaintext block on the end. That's okay - the below code will ignore it.
plaintext = plaintext[:len(ciphertext)]
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return bytearray(plaintext) | bsd-3-clause |
i5o/openshot-sugar | openshot/openshot/blender/scripts/earth.py | 3 | 13247 | # OpenShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas
#
# This file is part of OpenShot Video Editor (http://launchpad.net/openshot/).
#
# OpenShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenShot Video Editor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
# Import Blender's python API. This only works when the script is being
# run from the context of Blender. Blender contains it's own version of Python
# with this library pre-installed.
import bpy
# Load a font
def load_font(font_path):
""" Load a new TTF font into Blender, and return the font object """
# get the original list of fonts (before we add a new one)
original_fonts = bpy.data.fonts.keys()
# load new font
bpy.ops.font.open(filepath=font_path)
# get the new list of fonts (after we added a new one)
for font_name in bpy.data.fonts.keys():
if font_name not in original_fonts:
return bpy.data.fonts[font_name]
# no new font was added
return None
# Debug Info:
# ./blender -b test.blend -P demo.py
# -b = background mode
# -P = run a Python script within the context of the project file
# Init all of the variables needed by this script. Because Blender executes
# this script, OpenShot will inject a dictionary of the required parameters
# before this script is executed.
params = {
'title' : 'Oh Yeah! OpenShot!',
'extrude' : 0.1,
'bevel_depth' : 0.02,
'spacemode' : 'CENTER',
'text_size' : 1.5,
'width' : 1.0,
'fontname' : 'Bfont',
'color' : [0.8,0.8,0.8],
'alpha' : 1.0,
'output_path' : '/tmp/',
'fps' : 24,
'quality' : 90,
'file_format' : 'PNG',
'color_mode' : 'RGBA',
'horizon_color' : [0.57, 0.57, 0.57],
'resolution_x' : 1920,
'resolution_y' : 1080,
'resolution_percentage' : 100,
'start_frame' : 20,
'end_frame' : 25,
'animation' : True,
}
#INJECT_PARAMS_HERE
# The remainder of this script will modify the current Blender .blend project
# file, and adjust the settings. The .blend file is specified in the XML file
# that defines this template in OpenShot.
#----------------------------------------------------------------------------
import math
#depart = {"title":"Paris",
# "lat_deg": 48, "lat_min": 51, "lat_sec": 24, "lat_dir": "N",
# "lon_deg": 2, "lon_min": 21, "lon_sec": 7, "lon_dir": "E",
# }
#
#arrive = {"title":"New York",
# "lat_deg": 40, "lat_min": 42, "lat_sec": 51, "lat_dir": "N",
# "lon_deg": 74, "lon_min": 0, "lon_sec": 23, "lon_dir": "O",
# }
depart = {
"lat_deg": params["depart_lat_deg"], "lat_min": params["depart_lat_min"], "lat_sec": params["depart_lat_sec"], "lat_dir": params["depart_lat_dir"],
"lon_deg": params["depart_lon_deg"], "lon_min": params["depart_lon_min"], "lon_sec": params["depart_lon_sec"], "lon_dir": params["depart_lon_dir"],
}
arrive = {
"lat_deg": params["arrive_lat_deg"], "lat_min": params["arrive_lat_min"], "lat_sec": params["arrive_lat_sec"], "lat_dir": params["arrive_lat_dir"],
"lon_deg": params["arrive_lon_deg"], "lon_min": params["arrive_lon_min"], "lon_sec": params["arrive_lon_sec"], "lon_dir": params["arrive_lon_dir"],
}
point_a = {}
point_b = {}
point_c = {}
point_d = {}
def get_latitude(direction, degrees, minutes, seconds):
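    """ Convert a degrees/minutes/seconds latitude to decimal degrees;
    the result is negated for 'N' (northern) latitudes. """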
latitude = 0.0
if direction == "N":
# North of the equator
latitude = -(degrees + minutes / 60.0 + seconds / 3600.0)
else:
# South of the equator
latitude = degrees + minutes / 60.0 + seconds / 3600.0
return latitude
def get_longitude(direction, degrees, minutes, seconds):
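    """ Convert a degrees/minutes/seconds longitude to decimal degrees;
    positive for 'E' (eastern) longitudes, negative otherwise. """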
longitude = 0.0
if direction == "E":
        # East of the prime meridian
longitude = degrees + minutes / 60.0 + seconds / 3600.0
else:
        # West of the prime meridian
longitude = - (degrees + minutes / 60.0 + seconds / 3600.0)
return longitude
def check_longitude(depart_longitude, arrive_longitude):
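    """ Return the departure longitude, shifted by 360 degrees when the direct
    difference to the arrival longitude would cross the antimeridian, so the
    interpolation between the two points takes the shorter way around. """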
if -180 < (arrive_longitude - depart_longitude) and (arrive_longitude - depart_longitude) < 180:
return depart_longitude
else:
if depart_longitude < 0:
return depart_longitude + 360
else:
return depart_longitude - 360
# Calculate latitude / longitude for depart and arrive points
sphere_radius = 10.0
point_a["lat"] = get_latitude(depart["lat_dir"], depart["lat_deg"], depart["lat_min"], depart["lat_sec"])
point_a["lon"] = get_longitude(depart["lon_dir"], depart["lon_deg"], depart["lon_min"], depart["lon_sec"])
point_b["lat"] = get_latitude(arrive["lat_dir"], arrive["lat_deg"], arrive["lat_min"], arrive["lat_sec"])
point_b["lon"] = get_longitude(arrive["lon_dir"], arrive["lon_deg"], arrive["lon_min"], arrive["lon_sec"])
point_a["lon_Z"] = check_longitude(point_a["lon"], point_b["lon"])
point_b["lon_Z"] = point_b["lon"]
point_a["x"] = sphere_radius * math.cos(math.radians(point_a["lat"])) * math.sin(math.radians(point_a["lon"]))
point_b["x"] = sphere_radius * math.cos(math.radians(point_b["lat"])) * math.sin(math.radians(point_b["lon"]))
point_a["y"] = sphere_radius * math.cos(math.radians(point_a["lat"])) * math.cos(math.radians(point_a["lon"]))
point_b["y"] = sphere_radius * math.cos(math.radians(point_b["lat"])) * math.cos(math.radians(point_b["lon"]))
point_a["z"] = sphere_radius * math.sin(math.radians(point_a["lat"]))
point_b["z"] = sphere_radius * math.sin(math.radians(point_b["lat"]))
# Get angle between A & B points
ab_angle_radians = math.acos((point_a["x"] * point_b["x"] + point_a["y"] * point_b["y"] + point_a["z"] * point_b["z"]) / (sphere_radius * sphere_radius))
ab_angle_degrees = ab_angle_radians * 180 / math.pi
# calculate points C & D
point_c["lat"] = point_a["lat"] + 0.25 * (point_b["lat"] - point_a["lat"])
point_c["lon"] = point_a["lon_Z"] + 0.25 * (point_b["lon_Z"] - point_a["lon_Z"])
point_d["lat"] = point_a["lat"] + 0.75 * (point_b["lat"] - point_a["lat"])
point_d["lon"] = point_a["lon_Z"] + 0.75 * (point_b["lon_Z"] - point_a["lon_Z"])
# radius of CD line segment
location_CD = (sphere_radius + 1.0) / math.cos(ab_angle_radians / 4.0)
print("EmptyPointA Transform Rotation: Y= %f Z= %f" % (point_a["lat"], point_a["lon_Z"]))
print("EmptyPointB Transform Rotation: Y= %f Z= %f" % (point_b["lat"], point_b["lon_Z"]))
print("EmptyPointC Transform Rotation: Y= %f Z= %f" % (point_c["lat"], point_c["lon"]))
print("EmptyPointD Transform Rotation: Y= %f Z= %f" % (point_d["lat"], point_d["lon"]))
print("EmptyPointC.001 Transform Location: X= %f" % location_CD)
print("EmptyPointD.001 Transform Location: X= %f" % location_CD)
print("EmptyCam Frame 20 ->Transform Rotation: Y= %f Z= %f And press I key" % (point_a["lat"], point_a["lon_Z"]))
print("EmptyCam Frame 80 ->Transform Rotation: Y= %f Z= %f And press I key" % (point_b["lat"], point_b["lon_Z"]))
# Set Blender properties
bpy.data.objects["EmptyPointA"].rotation_euler = (0.0, math.radians(point_a["lat"]), math.radians(point_a["lon_Z"]))
bpy.data.objects["EmptyPointB"].rotation_euler = (0.0, math.radians(point_b["lat"]), math.radians(point_b["lon_Z"]))
bpy.data.objects["EmptyPointC"].rotation_euler = (0.0, math.radians(point_c["lat"]), math.radians(point_c["lon"]))
bpy.data.objects["EmptyPointD"].rotation_euler = (0.0, math.radians(point_d["lat"]), math.radians(point_d["lon"]))
bpy.data.objects["EmptyPointC.001"].location.x = location_CD
bpy.data.objects["EmptyPointD.001"].location.x = location_CD
# set Y rotation on the camera
bpy.data.actions["EmptyCamAction"].fcurves[1].keyframe_points[0].co = (20.0, math.radians(point_a["lat"]))
bpy.data.actions["EmptyCamAction"].fcurves[1].keyframe_points[0].handle_left.y = math.radians(point_a["lat"])
bpy.data.actions["EmptyCamAction"].fcurves[1].keyframe_points[0].handle_right.y = math.radians(point_a["lat"])
bpy.data.actions["EmptyCamAction"].fcurves[1].keyframe_points[1].co = (80.0, math.radians(point_b["lat"]))
bpy.data.actions["EmptyCamAction"].fcurves[1].keyframe_points[1].handle_left.y = math.radians(point_b["lat"])
bpy.data.actions["EmptyCamAction"].fcurves[1].keyframe_points[1].handle_right.y = math.radians(point_b["lat"])
# set Z rotation on the camera
bpy.data.actions["EmptyCamAction"].fcurves[2].keyframe_points[0].co = (20.0, math.radians(point_a["lon_Z"]))
bpy.data.actions["EmptyCamAction"].fcurves[2].keyframe_points[0].handle_left.y = math.radians(point_a["lon_Z"])
bpy.data.actions["EmptyCamAction"].fcurves[2].keyframe_points[0].handle_right.y = math.radians(point_a["lon_Z"])
bpy.data.actions["EmptyCamAction"].fcurves[2].keyframe_points[1].co = (80.0, math.radians(point_b["lon_Z"]))
bpy.data.actions["EmptyCamAction"].fcurves[2].keyframe_points[1].handle_left.y = math.radians(point_b["lon_Z"])
bpy.data.actions["EmptyCamAction"].fcurves[2].keyframe_points[1].handle_right.y = math.radians(point_b["lon_Z"])
# set world texture (i.e. the globe texture)
if params["map_texture"]:
bpy.data.textures["Texture.002"].image.filepath = params["map_texture"]
# Get font object
font = None
if params["fontname"] != "Bfont":
# Add font so it's available to Blender
font = load_font(params["fontname"])
else:
# Get default font
font = bpy.data.fonts["Bfont"]
# Modify Text for Departure
text_object = bpy.data.curves["Text"]
text_object.body = params["depart_title"]
text_object.extrude = params["extrude"]
text_object.bevel_depth = params["bevel_depth"]
text_object.size = params["text_size"]
text_object.space_character = params["width"]
text_object.font = font
material_object = bpy.data.materials["Material.001"]
material_object.diffuse_color = params["diffuse_color"]
material_object.specular_color = params["specular_color"]
material_object.specular_intensity = params["specular_intensity"]
material_object.alpha = params["alpha"]
# Modify Text for Arrival
text_object = bpy.data.curves["Text.001"]
text_object.body = params["arrive_title"]
text_object.extrude = params["extrude"]
text_object.bevel_depth = params["bevel_depth"]
text_object.size = params["text_size"]
text_object.space_character = params["width"]
text_object.font = font
material_object = bpy.data.materials["Material.003"]
material_object.diffuse_color = params["diffuse_color"]
material_object.specular_color = params["specular_color"]
material_object.specular_intensity = params["specular_intensity"]
material_object.alpha = params["alpha"]
# Modify the Line Material and Pins
material_object = bpy.data.materials["Material.002"]
material_object.diffuse_color = params["diffuse_color"]
material_object.specular_color = params["specular_color"]
material_object.specular_intensity = params["specular_intensity"]
material_object.alpha = params["alpha"]
material_object = bpy.data.materials["Material.004"]
material_object.diffuse_color = params["diffuse_color"]
material_object.specular_color = params["specular_color"]
material_object.specular_intensity = params["specular_intensity"]
material_object.alpha = params["alpha"]
material_object = bpy.data.materials["Material.005"]
material_object.diffuse_color = params["diffuse_color"]
material_object.specular_color = params["specular_color"]
material_object.specular_intensity = params["specular_intensity"]
material_object.alpha = params["alpha"]
# Set the render options. It is important that these are set
# to the same values as the current OpenShot project. These
# params are automatically set by OpenShot
bpy.context.scene.render.filepath = params["output_path"]
bpy.context.scene.render.fps = params["fps"]
#bpy.context.scene.render.quality = params["quality"]
try:
bpy.context.scene.render.file_format = params["file_format"]
bpy.context.scene.render.color_mode = params["color_mode"]
except:
bpy.context.scene.render.image_settings.file_format = params["file_format"]
bpy.context.scene.render.image_settings.color_mode = params["color_mode"]
bpy.data.worlds[0].horizon_color = params["horizon_color"]
bpy.context.scene.render.resolution_x = params["resolution_x"]
bpy.context.scene.render.resolution_y = params["resolution_y"]
bpy.context.scene.render.resolution_percentage = params["resolution_percentage"]
bpy.context.scene.frame_start = params["start_frame"]
bpy.context.scene.frame_end = params["end_frame"]
# Animation Speed (use Blender's time remapping to slow or speed up animation)
animation_speed = int(params["animation_speed"]) # time remapping multiplier
new_length = int(params["end_frame"]) * animation_speed # new length (in frames)
bpy.context.scene.frame_end = new_length
bpy.context.scene.render.frame_map_old = 1
bpy.context.scene.render.frame_map_new = animation_speed
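# Worked example of the remap above (illustrative numbers, not taken from a
# real project): with end_frame=250 and animation_speed=2, new_length is 500;
# frame_map_old=1 / frame_map_new=2 stretches each source frame over two
# rendered frames, so the animation plays back at half speed over 500 frames.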
if params["start_frame"] == params["end_frame"]:
bpy.context.scene.frame_start = params["end_frame"]
bpy.context.scene.frame_end = params["end_frame"]
# Render the current animation to the params["output_path"] folder
bpy.ops.render.render(animation=params["animation"])
| gpl-3.0 |
batra-mlp-lab/DIGITS | tools/download_data/downloader.py | 19 | 2242 | # Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
import os
import shutil
import urllib
class DataDownloader(object):
"""Base class for downloading data and setting it up for DIGITS"""
def __init__(self, outdir, clean=False, file_extension='png'):
"""
Arguments:
outdir -- directory where to download and create the dataset
if this directory doesn't exist, it will be created
Keyword arguments:
clean -- delete outdir first if it exists
file_extension -- image format for output images
"""
self.outdir = outdir
self.mkdir(self.outdir, clean=clean)
self.file_extension = file_extension.lower()
def getData(self):
"""
This is the main function that should be called by the users!
Downloads the dataset and prepares it for DIGITS consumption
"""
for url in self.urlList():
self.__downloadFile(url)
self.uncompressData()
self.processData()
print "Dataset directory is created successfully at '%s'" % self.outdir
def urlList(self):
"""
return a list of (url, output_file) tuples
"""
raise NotImplementedError
def uncompressData(self):
"""
uncompress the downloaded files
"""
raise NotImplementedError
def processData(self):
"""
Process the downloaded files and prepare the data for DIGITS
"""
raise NotImplementedError
def __downloadFile(self, url):
"""
Downloads the url
"""
download_path = os.path.join(self.outdir, os.path.basename(url))
if not os.path.exists(download_path):
print "Downloading url=%s ..." % url
urllib.urlretrieve(url, download_path)
def mkdir(self, d, clean=False):
"""
Safely create a directory
Arguments:
d -- the directory name
Keyword arguments:
clean -- if True and the directory already exists, it will be deleted and recreated
"""
if os.path.exists(d):
if clean:
shutil.rmtree(d)
else:
return
os.mkdir(d)
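# Minimal sketch of a concrete downloader, assuming a hypothetical archive URL;
# the URL, file names and unpacking logic are illustrative only and not part of
# DIGITS. It shows how the three hooks above are meant to be filled in.
class ExampleDownloader(DataDownloader):
    def urlList(self):
        # getData() passes each entry straight to the internal download helper,
        # which derives the local file name from the URL's basename
        return ['http://example.com/datasets/example-images.tar.gz']
    def uncompressData(self):
        import tarfile
        archive = os.path.join(self.outdir, 'example-images.tar.gz')
        with tarfile.open(archive) as tf:
            tf.extractall(self.outdir)
    def processData(self):
        # reorganize the extracted files into DIGITS-friendly folders here
        pass
# Typical use: ExampleDownloader('/tmp/example-data', clean=True).getData()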
| bsd-3-clause |
AMOboxTV/AMOBox.LegoBuild | script.module.unidecode/lib/unidecode/x060.py | 250 | 4642 | data = (
'Huai ', # 0x00
'Tai ', # 0x01
'Song ', # 0x02
'Wu ', # 0x03
'Ou ', # 0x04
'Chang ', # 0x05
'Chuang ', # 0x06
'Ju ', # 0x07
'Yi ', # 0x08
'Bao ', # 0x09
'Chao ', # 0x0a
'Min ', # 0x0b
'Pei ', # 0x0c
'Zuo ', # 0x0d
'Zen ', # 0x0e
'Yang ', # 0x0f
'Kou ', # 0x10
'Ban ', # 0x11
'Nu ', # 0x12
'Nao ', # 0x13
'Zheng ', # 0x14
'Pa ', # 0x15
'Bu ', # 0x16
'Tie ', # 0x17
'Gu ', # 0x18
'Hu ', # 0x19
'Ju ', # 0x1a
'Da ', # 0x1b
'Lian ', # 0x1c
'Si ', # 0x1d
'Chou ', # 0x1e
'Di ', # 0x1f
'Dai ', # 0x20
'Yi ', # 0x21
'Tu ', # 0x22
'You ', # 0x23
'Fu ', # 0x24
'Ji ', # 0x25
'Peng ', # 0x26
'Xing ', # 0x27
'Yuan ', # 0x28
'Ni ', # 0x29
'Guai ', # 0x2a
'Fu ', # 0x2b
'Xi ', # 0x2c
'Bi ', # 0x2d
'You ', # 0x2e
'Qie ', # 0x2f
'Xuan ', # 0x30
'Cong ', # 0x31
'Bing ', # 0x32
'Huang ', # 0x33
'Xu ', # 0x34
'Chu ', # 0x35
'Pi ', # 0x36
'Xi ', # 0x37
'Xi ', # 0x38
'Tan ', # 0x39
'Koraeru ', # 0x3a
'Zong ', # 0x3b
'Dui ', # 0x3c
'[?] ', # 0x3d
'Ki ', # 0x3e
'Yi ', # 0x3f
'Chi ', # 0x40
'Ren ', # 0x41
'Xun ', # 0x42
'Shi ', # 0x43
'Xi ', # 0x44
'Lao ', # 0x45
'Heng ', # 0x46
'Kuang ', # 0x47
'Mu ', # 0x48
'Zhi ', # 0x49
'Xie ', # 0x4a
'Lian ', # 0x4b
'Tiao ', # 0x4c
'Huang ', # 0x4d
'Die ', # 0x4e
'Hao ', # 0x4f
'Kong ', # 0x50
'Gui ', # 0x51
'Heng ', # 0x52
'Xi ', # 0x53
'Xiao ', # 0x54
'Shu ', # 0x55
'S ', # 0x56
'Kua ', # 0x57
'Qiu ', # 0x58
'Yang ', # 0x59
'Hui ', # 0x5a
'Hui ', # 0x5b
'Chi ', # 0x5c
'Jia ', # 0x5d
'Yi ', # 0x5e
'Xiong ', # 0x5f
'Guai ', # 0x60
'Lin ', # 0x61
'Hui ', # 0x62
'Zi ', # 0x63
'Xu ', # 0x64
'Chi ', # 0x65
'Xiang ', # 0x66
'Nu ', # 0x67
'Hen ', # 0x68
'En ', # 0x69
'Ke ', # 0x6a
'Tong ', # 0x6b
'Tian ', # 0x6c
'Gong ', # 0x6d
'Quan ', # 0x6e
'Xi ', # 0x6f
'Qia ', # 0x70
'Yue ', # 0x71
'Peng ', # 0x72
'Ken ', # 0x73
'De ', # 0x74
'Hui ', # 0x75
'E ', # 0x76
'Kyuu ', # 0x77
'Tong ', # 0x78
'Yan ', # 0x79
'Kai ', # 0x7a
'Ce ', # 0x7b
'Nao ', # 0x7c
'Yun ', # 0x7d
'Mang ', # 0x7e
'Yong ', # 0x7f
'Yong ', # 0x80
'Yuan ', # 0x81
'Pi ', # 0x82
'Kun ', # 0x83
'Qiao ', # 0x84
'Yue ', # 0x85
'Yu ', # 0x86
'Yu ', # 0x87
'Jie ', # 0x88
'Xi ', # 0x89
'Zhe ', # 0x8a
'Lin ', # 0x8b
'Ti ', # 0x8c
'Han ', # 0x8d
'Hao ', # 0x8e
'Qie ', # 0x8f
'Ti ', # 0x90
'Bu ', # 0x91
'Yi ', # 0x92
'Qian ', # 0x93
'Hui ', # 0x94
'Xi ', # 0x95
'Bei ', # 0x96
'Man ', # 0x97
'Yi ', # 0x98
'Heng ', # 0x99
'Song ', # 0x9a
'Quan ', # 0x9b
'Cheng ', # 0x9c
'Hui ', # 0x9d
'Wu ', # 0x9e
'Wu ', # 0x9f
'You ', # 0xa0
'Li ', # 0xa1
'Liang ', # 0xa2
'Huan ', # 0xa3
'Cong ', # 0xa4
'Yi ', # 0xa5
'Yue ', # 0xa6
'Li ', # 0xa7
'Nin ', # 0xa8
'Nao ', # 0xa9
'E ', # 0xaa
'Que ', # 0xab
'Xuan ', # 0xac
'Qian ', # 0xad
'Wu ', # 0xae
'Min ', # 0xaf
'Cong ', # 0xb0
'Fei ', # 0xb1
'Bei ', # 0xb2
'Duo ', # 0xb3
'Cui ', # 0xb4
'Chang ', # 0xb5
'Men ', # 0xb6
'Li ', # 0xb7
'Ji ', # 0xb8
'Guan ', # 0xb9
'Guan ', # 0xba
'Xing ', # 0xbb
'Dao ', # 0xbc
'Qi ', # 0xbd
'Kong ', # 0xbe
'Tian ', # 0xbf
'Lun ', # 0xc0
'Xi ', # 0xc1
'Kan ', # 0xc2
'Kun ', # 0xc3
'Ni ', # 0xc4
'Qing ', # 0xc5
'Chou ', # 0xc6
'Dun ', # 0xc7
'Guo ', # 0xc8
'Chan ', # 0xc9
'Liang ', # 0xca
'Wan ', # 0xcb
'Yuan ', # 0xcc
'Jin ', # 0xcd
'Ji ', # 0xce
'Lin ', # 0xcf
'Yu ', # 0xd0
'Huo ', # 0xd1
'He ', # 0xd2
'Quan ', # 0xd3
'Tan ', # 0xd4
'Ti ', # 0xd5
'Ti ', # 0xd6
'Nie ', # 0xd7
'Wang ', # 0xd8
'Chuo ', # 0xd9
'Bu ', # 0xda
'Hun ', # 0xdb
'Xi ', # 0xdc
'Tang ', # 0xdd
'Xin ', # 0xde
'Wei ', # 0xdf
'Hui ', # 0xe0
'E ', # 0xe1
'Rui ', # 0xe2
'Zong ', # 0xe3
'Jian ', # 0xe4
'Yong ', # 0xe5
'Dian ', # 0xe6
'Ju ', # 0xe7
'Can ', # 0xe8
'Cheng ', # 0xe9
'De ', # 0xea
'Bei ', # 0xeb
'Qie ', # 0xec
'Can ', # 0xed
'Dan ', # 0xee
'Guan ', # 0xef
'Duo ', # 0xf0
'Nao ', # 0xf1
'Yun ', # 0xf2
'Xiang ', # 0xf3
'Zhui ', # 0xf4
'Die ', # 0xf5
'Huang ', # 0xf6
'Chun ', # 0xf7
'Qiong ', # 0xf8
'Re ', # 0xf9
'Xing ', # 0xfa
'Ce ', # 0xfb
'Bian ', # 0xfc
'Hun ', # 0xfd
'Zong ', # 0xfe
'Ti ', # 0xff
)
| gpl-2.0 |
IAmTheOneTheyCallNeo/vigor_aosp_kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
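# Illustrative trace (made-up numbers): after add_stats(d, 'lat', 10),
# add_stats(d, 'lat', 30) and add_stats(d, 'lat', 50), d['lat'] is
# (10, 50, 35, 3). Note the (avg + value) / 2 update is a smoothed running
# value, not the true arithmetic mean of all samples (which would be 30).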
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
paulnovo/ITK | Utilities/Doxygen/mcdoc.py | 6 | 5562 | #!/usr/bin/env python
import sys, os, re, glob, cStringIO
def usage():
print >> sys.stderr, """usage: mcdoc.py set group file [files...]
Add the tag "\ingroup group" to all the doxygen comments with a \class
tag in them.
usage: mcdoc.py check group file [files...]
Check that the tag "\ingroup group" is in all the doxygen comments with a \class
tag in them. If the tag is not there, a warning is displayed with the file name, the
line number and the class name. The return value is 0 when all the doxygen comments
have the tag, and 1 when at least one doxygen comment doesn't have it.
usage: mcdoc.py massive-set [ITK-source]
Add the tag "\ingroup module" to all the headers in ITK, where 'module' is the
module name of the header.
usage: mcdoc.py massive-check [ITK-source]
Check that all the headers in ITK have their module name in their \ingroup tag.
As for 'check', a warning is displayed if the tag is missing and 1 is returned.
"""
def setGroup( fname, group ):
# print >> sys.stderr, "Processing", fname
f = open( fname, "r" )
out = cStringIO.StringIO()
# load everything in memory
fcontent = f.read()
f.close()
# now parse all the doxygen fields
last = 0
for m in re.finditer(r"/\*\*(.*?)\*/", fcontent, re.DOTALL):
# write what is before the doxygen field to the output
out.write(fcontent[last:m.start(1)])
last = m.end(1)
dcontent = m.group(1)
# we don't care about doxygen fields not about a class
if r"\class" in dcontent and dcontent != " \class classname ":
# do we have a line with the expected content?
if re.search(r"\ingroup .*"+group+"( |$)", dcontent, re.MULTILINE):
# yes - just keep the content unchanged
out.write(dcontent)
else:
# add the expected group
if "\n" in dcontent:
# this is a multiline content. Find the indent
indent = re.search("( *)(\*|$)", dcontent).group(1)
lastLine = dcontent.splitlines()[-1]
if re.match(r'^ *$', lastLine):
out.write(dcontent+"* \\ingroup "+group+"\n"+indent)
else:
out.write(dcontent.rstrip()+"\n"+indent+"* \\ingroup "+group+"\n"+indent)
else:
out.write(dcontent+" \\ingroup "+group+" ")
else:
out.write(dcontent)
out.write(fcontent[last:])
# we can save the content to the original file
f = open( fname, "w" )
f.write( out.getvalue() )
f.close()
def checkGroup( fname, group ):
# print >> sys.stderr, "Checking", fname
f = open( fname, "r" )
# load everything in memory
fcontent = f.read()
f.close()
# now parse all the doxygen fields
ret = 0
for m in re.finditer(r"/\*\*(.*?)\*/", fcontent, re.DOTALL):
dcontent = m.group(1)
# we don't care about doxygen fields not about a class
if r"\class" in dcontent and dcontent != " \class classname ":
# do we have a line with the expected content?
if not re.search(r"\ingroup .*"+group+"( |$)", dcontent, re.MULTILINE):
# get class name and the line for debug output
cname = re.search(r"\class +([^ ]*)", dcontent).group(1).strip()
line = len(fcontent[:m.start(1)].splitlines())
print >> sys.stderr, r'%s:%s: error: "\ingroup %s" not set in class %s.' % (fname, line, group, cname)
ret = 1
return ret
def main():
# first arg is the command
command = sys.argv[1]
if command == "set":
if len(sys.argv) < 4:
usage()
return 1
# second arg is the module name, and the rest are the files to process
module = sys.argv[2]
files = sys.argv[3:]
for fname in files:
setGroup(fname, module)
return 0
elif command == "massive-set":
if len(sys.argv) < 2:
usage()
return 1
if len(sys.argv) >= 3:
d = sys.argv[2]
else:
d = sys.path[0]+"/../.."
cmm = os.path.abspath(d+"/*/*/*/itk-module.cmake")
for fname in glob.glob(cmm):
f = file(fname, "r")
mcontent = f.read()
f.close()
module = re.search(r"itk_module\(([^ )]+)", mcontent).group(1)
dname = os.path.dirname(fname)
for fname2 in glob.glob(dname+"/include/*.h"):
setGroup(fname2, module)
return 0
elif command == "check":
if len(sys.argv) < 4:
usage()
return 1
# second arg is the module name, and the rest are the files to process
module = sys.argv[2]
files = sys.argv[3:]
ret = 0
count = 0
for fname in files:
if os.path.isdir(fname):
for fname2 in glob.glob(fname+"/*.h"):
count += 1
ret = max( ret, checkGroup(fname2, module) )
else:
count += 1
ret = max( ret, checkGroup(fname, module) )
print >> sys.stderr, count, "headers checked."
return ret
elif command == "massive-check":
if len(sys.argv) < 2:
usage()
return 1
if len(sys.argv) >= 3:
d = sys.argv[2]
else:
d = sys.path[0]+"/../.."
cmm = os.path.abspath(d+"/*/*/*/itk-module.cmake")
ret = 0
count = 0
for fname in glob.glob(cmm):
f = file(fname, "r")
mcontent = f.read()
f.close()
module = re.search(r"itk_module\(([^ )]+)", mcontent).group(1)
dname = os.path.dirname(fname)
for fname2 in glob.glob(dname+"/include/*.h"):
count += 1
ret = max( ret, checkGroup(fname2, module) )
print >> sys.stderr, count, "headers checked."
return ret
else:
print >> sys.stderr, "Unknown command", command
usage()
return 1
if __name__ == "__main__":
ret = main()
sys.exit(ret)
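# Example invocations (module name and paths are illustrative):
#   python mcdoc.py set ITKCommon Modules/Core/Common/include/*.h
#   python mcdoc.py check ITKCommon Modules/Core/Common/include
#   python mcdoc.py massive-check /path/to/ITK-source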
| apache-2.0 |
mihirkelkar/BuildingMachineLearningSystemsWithPython | ch02/figure4_5_sklearn.py | 22 | 2475 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
COLOUR_FIGURE = False
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from load import load_dataset
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
feature_names = [
'area',
'perimeter',
'compactness',
'length of kernel',
'width of kernel',
'asymmetry coefficient',
'length of kernel groove',
]
def plot_decision(features, labels, num_neighbors=1):
'''Plots decision boundary for KNN
Parameters
----------
features : ndarray
labels : sequence
Returns
-------
fig : Matplotlib Figure
ax : Matplotlib Axes
'''
y0, y1 = features[:, 2].min() * .9, features[:, 2].max() * 1.1
x0, x1 = features[:, 0].min() * .9, features[:, 0].max() * 1.1
X = np.linspace(x0, x1, 1000)
Y = np.linspace(y0, y1, 1000)
X, Y = np.meshgrid(X, Y)
model = KNeighborsClassifier(num_neighbors)
model.fit(features[:, (0,2)], labels)
C = model.predict(np.vstack([X.ravel(), Y.ravel()]).T).reshape(X.shape)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .7, .7), (.7, 1., .7), (.7, .7, 1.)])
else:
cmap = ListedColormap([(1., 1., 1.), (.2, .2, .2), (.6, .6, .6)])
fig,ax = plt.subplots()
ax.set_xlim(x0, x1)
ax.set_ylim(y0, y1)
ax.set_xlabel(feature_names[0])
ax.set_ylabel(feature_names[2])
ax.pcolormesh(X, Y, C, cmap=cmap)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .0, .0), (.1, .6, .1), (.0, .0, 1.)])
ax.scatter(features[:, 0], features[:, 2], c=labels, cmap=cmap)
else:
for lab, ma in zip(range(3), "Do^"):
ax.plot(features[labels == lab, 0], features[
labels == lab, 2], ma, c=(1., 1., 1.), ms=6)
return fig,ax
features, labels = load_dataset('seeds')
names = sorted(set(labels))
labels = np.array([names.index(ell) for ell in labels])
fig,ax = plot_decision(features, labels)
fig.tight_layout()
fig.savefig('figure4sklearn.png')
features -= features.mean(0)
features /= features.std(0)
fig,ax = plot_decision(features, labels)
fig.tight_layout()
fig.savefig('figure5sklearn.png')
fig,ax = plot_decision(features, labels, 11)
fig.tight_layout()
fig.savefig('figure5sklearn_with_11_neighbors.png')
| mit |
kohnle-lernmodule/exeLearningPlus1_04 | twisted/web/html.py | 14 | 1101 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""I hold HTML generation helpers.
"""
#t.w imports
from twisted.web import resource
import traceback, string
from cStringIO import StringIO
from microdom import escape
def PRE(text):
"Wrap <pre> tags around some text and HTML-escape it."
return "<pre>"+escape(text)+"</pre>"
def UL(lst):
io = StringIO()
io.write("<ul>\n")
for el in lst:
io.write("<li> %s</li>\n" % el)
io.write("</ul>")
return io.getvalue()
def linkList(lst):
io = StringIO()
io.write("<ul>\n")
for hr, el in lst:
io.write('<li> <a href="%s">%s</a></li>\n' % (hr, el))
io.write("</ul>")
return io.getvalue()
def output(func, *args, **kw):
"""output(func, *args, **kw) -> html string
Either return the result of a function (which presumably returns an
HTML-legal string) or an HTMLized traceback describing why that function
didn't run.
"""
try:
return apply(func, args, kw)
except:
io = StringIO()
traceback.print_exc(file=io)
return PRE(io.getvalue())
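# Example (hypothetical renderer function): failures inside render_index are
# returned as an HTML-escaped traceback instead of propagating:
#   page = output(render_index, request)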
| gpl-2.0 |
mitar/django | django/contrib/messages/storage/__init__.py | 116 | 1185 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
def get_storage(import_path):
"""
Imports the message storage class described by import_path, where
import_path is the full Python path to the class.
"""
try:
dot = import_path.rindex('.')
except ValueError:
raise ImproperlyConfigured("%s isn't a Python path." % import_path)
module, classname = import_path[:dot], import_path[dot + 1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module %s: "%s"' %
(module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" '
'class.' % (module, classname))
# Callable with the same interface as the storage classes i.e. accepts a
# 'request' object. It is wrapped in a lambda to stop 'settings' being used at
# the module level
default_storage = lambda request: get_storage(settings.MESSAGE_STORAGE)(request)
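# Example (illustrative; any storage class importable by path works):
#   storage_cls = get_storage('django.contrib.messages.storage.session.SessionStorage')
#   storage = storage_cls(request)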
| bsd-3-clause |
cocoloco69/pynet | ANSIBLE/library/eos_vxlan_vtep.py | 5 | 13989 | #!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
DOCUMENTATION = """
---
module: eos_vxlan_vtep
short_description: Manages Vxlan global VTEP flood list
description:
- The eos_vxlan_vtep module manages the Vxlan global VTEP flood list
configured on Arista EOS nodes that are operating as VTEPs
version_added: 1.0.0
category: VXLAN
author: Arista EOS+
requirements:
- Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.3.0 or later
notes:
- All configuration is idempotent unless otherwise specified
- Supports eos metaparameters for using the eAPI transport
- Supports stateful resource configuration.
options:
name:
description:
- The unique interface identifier name. The interface name must use
the full interface name (no abbreviated names). For example,
interfaces should be specified as Ethernet1 not Et1
- "Note: The name parameter only accepts Vxlan1 as the identifier"
required: true
default: null
choices: []
aliases: []
version_added: 1.0.0
vtep:
description:
- Specifies the remote endpoint IP address to add to the global VTEP
flood list. Valid values for the vtep parameter are unicast IPv4
addresses
required: true
default: null
choices: []
aliases: []
version_added: 1.0.0
vlan:
description:
- Specifies the VLAN ID to associate the VTEP with. If the VLAN
argument is not used, the VTEP is configured on the global
flood list.
required: false
default: null
choices: []
aliases: []
version_added: 1.0.1
"""
EXAMPLES = """
- name: Ensures that 1.1.1.1 is in the global flood list
eos_vxlan_vtep: name=Vxlan1 state=present vtep=1.1.1.1
- name: Ensures that 2.2.2.2 is not in the global flood list
eos_vxlan_vtep: name=Vxlan1 state=absent vtep=2.2.2.2
"""
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
try:
import pyeapi
PYEAPI_AVAILABLE = True
except ImportError:
PYEAPI_AVAILABLE = False
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
DEFAULT_CONNECTION = 'localhost'
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosAnsibleModule(AnsibleModule):
meta_args = {
'config': dict(),
'username': dict(),
'password': dict(),
'host': dict(),
'connection': dict(default=DEFAULT_CONNECTION),
'transport': dict(choices=TRANSPORTS),
'port': dict(),
'debug': dict(type='bool', default='false'),
'logging': dict(type='bool', default='true')
}
stateful_args = {
'state': dict(default='present', choices=['present', 'absent']),
}
def __init__(self, stateful=True, *args, **kwargs):
kwargs['argument_spec'].update(self.meta_args)
self._stateful = stateful
if stateful:
kwargs['argument_spec'].update(self.stateful_args)
super(EosAnsibleModule, self).__init__(*args, **kwargs)
self.result = dict(changed=False, changes=dict())
self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
self._logging = kwargs.get('logging') or self.params['logging']
self.log('DEBUG flag is %s' % self._debug)
self.debug('pyeapi_version', self.check_pyeapi())
self.debug('stateful', self._stateful)
self.debug('params', self.params)
self._attributes = self.map_argument_spec()
self.validate()
self._node = self.connect()
self._instance = None
self.desired_state = self.params['state'] if self._stateful else None
self.exit_after_flush = kwargs.get('exit_after_flush')
@property
def instance(self):
if self._instance:
return self._instance
func = self.func('instance')
if not func:
self.fail('Module does not support "instance"')
try:
self._instance = func(self)
except Exception as exc:
self.fail('instance[error]: %s' % exc.message)
self.log("called instance: %s" % self._instance)
return self._instance
@property
def attributes(self):
return self._attributes
@property
def node(self):
if self._node:
return self._node
self._node = self.connect()
return self._node
def check_pyeapi(self):
if not PYEAPI_AVAILABLE:
self.fail('Unable to import pyeapi, is it installed?')
return pyeapi.__version__
def map_argument_spec(self):
"""map_argument_spec maps only the module argument spec to attrs
This method will map the argument spec minus the meta_args to attrs
and return the attrs. This returns a dict object that includes only
the original argspec plus the stateful_args (if self._stateful=True)
Returns:
dict: Returns a dict object that includes the original
argument_spec plus stateful_args with values minus meta_args
"""
keys = set(self.params).difference(self.meta_args)
attrs = dict()
attrs = dict([(k, self.params[k]) for k in self.params if k in keys])
if 'CHECKMODE' in attrs:
del attrs['CHECKMODE']
return attrs
def validate(self):
for key, value in self.attributes.iteritems():
func = self.func('validate_%s' % key)
if func:
self.attributes[key] = func(value)
def create(self):
if not self.check_mode:
func = self.func('create')
if not func:
self.fail('Module must define "create" function')
return self.invoke(func, self)
def remove(self):
if not self.check_mode:
func = self.func('remove')
if not func:
self.fail('Module must define "remove" function')
return self.invoke(func, self)
def flush(self, exit_after_flush=False):
self.exit_after_flush = exit_after_flush
if self.desired_state == 'present' or not self._stateful:
if self.instance.get('state') == 'absent':
changed = self.create()
self.result['changed'] = changed or True
self.refresh()
changeset = self.attributes.viewitems() - self.instance.viewitems()
if self._debug:
self.debug('desired_state', self.attributes)
self.debug('current_state', self.instance)
changes = self.update(changeset)
if changes:
self.result['changes'] = changes
self.result['changed'] = True
self._attributes.update(changes)
flush = self.func('flush')
if flush:
self.invoke(flush, self)
elif self.desired_state == 'absent' and self._stateful:
if self.instance.get('state') == 'present':
changed = self.remove()
self.result['changed'] = changed or True
elif self._stateful:
if self.desired_state != self.instance.get('state'):
changed = self.invoke(self.instance.get('state'))
self.result['changed'] = changed or True
self.refresh()
self.result['instance'] = self.instance
if self.exit_after_flush:
self.exit()
def update(self, changeset):
changes = dict()
for key, value in changeset:
if value is not None:
changes[key] = value
func = self.func('set_%s' % key)
if func and not self.check_mode:
try:
self.invoke(func, self)
except Exception as exc:
self.fail(exc.message)
return changes
def connect(self):
if self.params['config']:
pyeapi.load_config(self.params['config'])
config = dict()
if self.params['connection']:
config = pyeapi.config_for(self.params['connection'])
if not config:
msg = 'Connection name "%s" not found' % self.params['connection']
self.fail(msg)
if self.params['username']:
config['username'] = self.params['username']
if self.params['password']:
config['password'] = self.params['password']
if self.params['transport']:
config['transport'] = self.params['transport']
if self.params['port']:
config['port'] = self.params['port']
if self.params['host']:
config['host'] = self.params['host']
if 'transport' not in config:
self.fail('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
node = pyeapi.client.Node(connection, **config)
try:
resp = node.enable('show version')
self.debug('eos_version', resp[0]['result']['version'])
self.debug('eos_model', resp[0]['result']['modelName'])
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
self.fail('unable to connect to %s' % node)
else:
self.log('Connected to node %s' % node)
self.debug('node', str(node))
return node
def config(self, commands):
self.result['changed'] = True
if not self.check_mode:
self.node.config(commands)
def api(self, module):
return self.node.api(module)
def func(self, name):
return globals().get(name)
def invoke(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
self.fail(exc.message)
def invoke_function(self, name, *args, **kwargs):
func = self.func(name)
if func:
return self.invoke(func, *args, **kwargs)
def fail(self, msg):
self.invoke_function('on_fail', self)
self.log('ERROR: %s' % msg, syslog.LOG_ERR)
self.fail_json(msg=msg)
def exit(self):
self.invoke_function('on_exit', self)
self.log('Module completed successfully')
self.exit_json(**self.result)
def refresh(self):
self._instance = None
def debug(self, key, value):
if self._debug:
if 'debug' not in self.result:
self.result['debug'] = dict()
self.result['debug'][key] = value
def log(self, message, priority=None):
if self._logging:
syslog.openlog('ansible-eos')
priority = priority or DEFAULT_SYSLOG_PRIORITY
syslog.syslog(priority, str(message))
@classmethod
def add_state(cls, name):
cls.stateful_args['state']['choices'].append(name)
#<<EOS_COMMON_MODULE_END>>
def instance(module):
""" Returns an instance of Vlan based on vlanid
"""
name = module.attributes['name']
vtep = module.attributes['vtep']
vlan = module.attributes['vlan']
result = module.node.api('interfaces').get(name)
_instance = dict(name=name, vtep=vtep, vlan=vlan, state='absent')
if vlan in result['vlans']:
if vtep in result['vlans'][vlan]['flood_list']:
_instance['state'] = 'present'
elif not vlan:
if vtep in result['flood_list']:
_instance['state'] = 'present'
return _instance
def create(module):
""" Creates a new vtep in the global flood list
"""
name = module.attributes['name']
vtep = module.attributes['vtep']
vlan = module.attributes['vlan']
module.log('Invoked create for eos_vxlan_vtep[%s]' % vtep)
module.node.api('interfaces').add_vtep(name, vtep, vlan=vlan)
def remove(module):
""" Removes an existing vtep from the global flood list
"""
name = module.attributes['name']
vtep = module.attributes['vtep']
vlan = module.attributes['vlan']
module.log('Invoked remove for eos_vxlan_vtep[%s]' % vtep)
module.node.api('interfaces').remove_vtep(name, vtep, vlan=vlan)
def main():
""" The main module routine called when the module is run by Ansible
"""
argument_spec = dict(
name=dict(required=True),
vtep=dict(required=True),
vlan=dict()
)
module = EosAnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
module.flush(True)
main()
| apache-2.0 |
littledogboy/zulip | zerver/worker/queue_processors.py | 115 | 13794 | from __future__ import absolute_import
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.core.handlers.base import BaseHandler
from zerver.models import get_user_profile_by_email, \
get_user_profile_by_id, get_prereg_user_by_email, get_client
from zerver.lib.context_managers import lockfile
from zerver.lib.queue import SimpleQueueClient, queue_json_publish
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.lib.notifications import handle_missedmessage_emails, enqueue_welcome_emails, \
clear_followup_emails_queue, send_local_email_template_with_delay
from zerver.lib.actions import do_send_confirmation_email, \
do_update_user_activity, do_update_user_activity_interval, do_update_user_presence, \
internal_send_message, check_send_message, extract_recipients, \
handle_push_notification
from zerver.lib.digest import handle_digest_email
from zerver.lib.email_mirror import process_message as mirror_email
from zerver.decorator import JsonableError
from zerver.lib.socket import req_redis_key
from confirmation.models import Confirmation
from zerver.lib.db import reset_queries
from django.core.mail import EmailMessage
from zerver.lib.redis_utils import get_redis_client
import os
import sys
import ujson
from collections import defaultdict
import email
import time
import datetime
import logging
import simplejson
import StringIO
def assign_queue(queue_name, enabled=True):
def decorate(clazz):
clazz.queue_name = queue_name
if enabled:
register_worker(queue_name, clazz)
return clazz
return decorate
worker_classes = {}
def register_worker(queue_name, clazz):
worker_classes[queue_name] = clazz
def get_worker(queue_name):
return worker_classes[queue_name]()
def get_active_worker_queues():
return worker_classes.iterkeys()
class QueueProcessingWorker(object):
def __init__(self):
self.q = SimpleQueueClient()
def consume_wrapper(self, data):
try:
self.consume(data)
except Exception:
self._log_problem()
if not os.path.exists(settings.QUEUE_ERROR_DIR):
os.mkdir(settings.QUEUE_ERROR_DIR)
fname = '%s.errors' % (self.queue_name,)
fn = os.path.join(settings.QUEUE_ERROR_DIR, fname)
line = '%s\t%s\n' % (time.asctime(), ujson.dumps(data))
lock_fn = fn + '.lock'
with lockfile(lock_fn):
with open(fn, 'a') as f:
f.write(line)
reset_queries()
def _log_problem(self):
logging.exception("Problem handling data on queue %s" % (self.queue_name,))
def start(self):
self.q.register_json_consumer(self.queue_name, self.consume_wrapper)
self.q.start_consuming()
def stop(self):
self.q.stop_consuming()
if settings.MAILCHIMP_API_KEY:
from postmonkey import PostMonkey, MailChimpException
@assign_queue('signups')
class SignupWorker(QueueProcessingWorker):
def __init__(self):
super(SignupWorker, self).__init__()
if settings.MAILCHIMP_API_KEY:
self.pm = PostMonkey(settings.MAILCHIMP_API_KEY, timeout=10)
def consume(self, data):
merge_vars=data['merge_vars']
# This should clear out any invitation reminder emails
clear_followup_emails_queue(data["EMAIL"])
if settings.MAILCHIMP_API_KEY and settings.PRODUCTION:
try:
self.pm.listSubscribe(
id=settings.ZULIP_FRIENDS_LIST_ID,
email_address=data['EMAIL'],
merge_vars=merge_vars,
double_optin=False,
send_welcome=False)
except MailChimpException, e:
if e.code == 214:
logging.warning("Attempted to sign up already existing email to list: %s" % (data['EMAIL'],))
else:
raise e
email = data.get("EMAIL")
name = merge_vars.get("NAME")
enqueue_welcome_emails(email, name)
@assign_queue('invites')
class ConfirmationEmailWorker(QueueProcessingWorker):
def consume(self, data):
invitee = get_prereg_user_by_email(data["email"])
referrer = get_user_profile_by_email(data["referrer_email"])
do_send_confirmation_email(invitee, referrer)
# queue invitation reminder for two days from now.
link = Confirmation.objects.get_link_for_object(invitee)
send_local_email_template_with_delay([{'email': data["email"], 'name': ""}],
"zerver/emails/invitation/invitation_reminder_email",
{'activate_url': link,
'referrer': referrer,
'voyager': settings.VOYAGER,
'external_host': settings.EXTERNAL_HOST,
'support_email': settings.ZULIP_ADMINISTRATOR},
datetime.timedelta(days=2),
tags=["invitation-reminders"],
sender={'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'})
@assign_queue('user_activity')
class UserActivityWorker(QueueProcessingWorker):
def consume(self, event):
user_profile = get_user_profile_by_id(event["user_profile_id"])
client = get_client(event["client"])
log_time = timestamp_to_datetime(event["time"])
query = event["query"]
do_update_user_activity(user_profile, client, query, log_time)
@assign_queue('user_activity_interval')
class UserActivityIntervalWorker(QueueProcessingWorker):
def consume(self, event):
user_profile = get_user_profile_by_id(event["user_profile_id"])
log_time = timestamp_to_datetime(event["time"])
do_update_user_activity_interval(user_profile, log_time)
@assign_queue('user_presence')
class UserPresenceWorker(QueueProcessingWorker):
def consume(self, event):
logging.info("Received event: %s" % (event),)
user_profile = get_user_profile_by_id(event["user_profile_id"])
client = get_client(event["client"])
log_time = timestamp_to_datetime(event["time"])
status = event["status"]
do_update_user_presence(user_profile, client, log_time, status)
@assign_queue('missedmessage_emails')
class MissedMessageWorker(QueueProcessingWorker):
def start(self):
while True:
missed_events = self.q.drain_queue("missedmessage_emails", json=True)
by_recipient = defaultdict(list)
for event in missed_events:
logging.info("Received event: %s" % (event,))
by_recipient[event['user_profile_id']].append(event)
for user_profile_id, events in by_recipient.items():
handle_missedmessage_emails(user_profile_id, events)
reset_queries()
# Aggregate all messages received every 2 minutes to let someone finish sending a batch
# of messages
time.sleep(2 * 60)
@assign_queue('missedmessage_mobile_notifications')
class PushNotificationsWorker(QueueProcessingWorker):
def consume(self, data):
handle_push_notification(data['user_profile_id'], data)
def make_feedback_client():
sys.path.append(os.path.join(os.path.dirname(__file__), '../../api'))
import zulip
return zulip.Client(
client="ZulipFeedback/0.1",
email=settings.DEPLOYMENT_ROLE_NAME,
api_key=settings.DEPLOYMENT_ROLE_KEY,
verbose=True,
site=settings.FEEDBACK_TARGET)
# We probably could stop running this queue worker at all if ENABLE_FEEDBACK is False
@assign_queue('feedback_messages')
class FeedbackBot(QueueProcessingWorker):
def start(self):
if settings.ENABLE_FEEDBACK and settings.FEEDBACK_EMAIL is None:
self.staging_client = make_feedback_client()
self.staging_client._register(
'forward_feedback',
method='POST',
url='deployments/feedback',
make_request=(lambda request: {'message': simplejson.dumps(request)}),
)
QueueProcessingWorker.start(self)
def consume(self, event):
if not settings.ENABLE_FEEDBACK:
return
if settings.FEEDBACK_EMAIL is not None:
to_email = settings.FEEDBACK_EMAIL
subject = "Zulip feedback from %s" % (event["sender_email"],)
content = event["content"]
from_email = '"%s" <%s>' % (event["sender_full_name"], event["sender_email"])
headers = {'Reply-To' : '"%s" <%s>' % (event["sender_full_name"], event["sender_email"])}
msg = EmailMessage(subject, content, from_email, [to_email], headers=headers)
msg.send()
else:
self.staging_client.forward_feedback(event)
@assign_queue('error_reports')
class ErrorReporter(QueueProcessingWorker):
def start(self):
if settings.DEPLOYMENT_ROLE_KEY:
self.staging_client = make_feedback_client()
self.staging_client._register(
'forward_error',
method='POST',
url='deployments/report_error',
make_request=(lambda type, report: {'type': type, 'report': simplejson.dumps(report)}),
)
QueueProcessingWorker.start(self)
def consume(self, event):
if not settings.DEPLOYMENT_ROLE_KEY:
return
self.staging_client.forward_error(event['type'], event['report'])
@assign_queue('slow_queries')
class SlowQueryWorker(QueueProcessingWorker):
def start(self):
while True:
self.process_one_batch()
# Aggregate all slow query messages in 1-minute chunks to avoid message spam
time.sleep(1 * 60)
def process_one_batch(self):
slow_queries = self.q.drain_queue("slow_queries", json=True)
if settings.ERROR_BOT is None:
return
if len(slow_queries) > 0:
topic = "%s: slow queries" % (settings.STATSD_PREFIX,)
content = ""
for query in slow_queries:
content += " %s\n" % (query,)
internal_send_message(settings.ERROR_BOT, "stream", "logs", topic, content)
reset_queries()
@assign_queue("message_sender")
class MessageSenderWorker(QueueProcessingWorker):
def __init__(self):
super(MessageSenderWorker, self).__init__()
self.redis_client = get_redis_client()
self.handler = BaseHandler()
self.handler.load_middleware()
def consume(self, event):
server_meta = event['server_meta']
environ = {'REQUEST_METHOD': 'SOCKET',
'SCRIPT_NAME': '',
'PATH_INFO': '/json/send_message',
'SERVER_NAME': 'localhost',
'SERVER_PORT': 9993,
'SERVER_PROTOCOL': 'ZULIP_SOCKET/1.0',
'wsgi.version': (1, 0),
'wsgi.input': StringIO.StringIO(),
'wsgi.errors': sys.stderr,
'wsgi.multithread': False,
'wsgi.multiprocess': True,
'wsgi.run_once': False,
'zulip.emulated_method': 'POST'}
# We're mostly using a WSGIRequest for convenience
environ.update(server_meta['request_environ'])
request = WSGIRequest(environ)
request._request = event['request']
request.csrf_processing_done = True
user_profile = get_user_profile_by_id(server_meta['user_id'])
request._cached_user = user_profile
resp = self.handler.get_response(request)
server_meta['time_request_finished'] = time.time()
server_meta['worker_log_data'] = request._log_data
resp_content = resp.content
result = {'response': ujson.loads(resp_content), 'req_id': event['req_id'],
'server_meta': server_meta}
redis_key = req_redis_key(event['req_id'])
self.redis_client.hmset(redis_key, {'status': 'complete',
'response': resp_content});
queue_json_publish(server_meta['return_queue'], result, lambda e: None)
@assign_queue('digest_emails')
class DigestWorker(QueueProcessingWorker):
# Who gets a digest is entirely determined by the enqueue_digest_emails
# management command, not here.
def consume(self, event):
logging.info("Received digest event: %s" % (event,))
handle_digest_email(event["user_profile_id"], event["cutoff"])
@assign_queue('email_mirror')
class MirrorWorker(QueueProcessingWorker):
# Mirrors emails received by the email gateway into Zulip
# (see zerver.lib.email_mirror.process_message, imported above as mirror_email).
def consume(self, event):
mirror_email(email.message_from_string(event["message"].encode("utf-8")),
rcpt_to=event["rcpt_to"], pre_checked=True)
@assign_queue('test')
class TestWorker(QueueProcessingWorker):
# This worker allows you to test the queue worker infrastructure without
# creating significant side effects. It can be useful in development or
# for troubleshooting prod/staging. It pulls a message off the test queue
# and appends it to a file in /tmp.
def consume(self, event):
fn = settings.ZULIP_WORKER_TEST_FILE
message = ujson.dumps(event)
logging.info("TestWorker should append this message to %s: %s" % (fn, message))
with open(fn, 'a') as f:
f.write(message + '\n')
| apache-2.0 |
saurabh6790/pow-lib | webnotes/utils/nestedset.py | 24 | 7568 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Tree (Hierarchical) Nested Set Model (nsm)
#
# To use the nested set model,
# use the following pattern
# 1. name your parent field "parent_<doctype>" (e.g. "parent_item_group"), or set a property nsm_parent_field on the document class with your parent field's name
# 2. have a field called "old_parent" in your fields list - this identifies whether the parent has been changed
# 3. call update_nsm(doc_obj) in the on_update method
# ------------------------------------------
from __future__ import unicode_literals
import webnotes
from webnotes import msgprint, _
# called in the on_update method
def update_nsm(doc_obj):
# get fields, data from the DocType
pf, opf = 'parent_node', 'old_parent'
if str(doc_obj.__class__)=='webnotes.model.doc.Document':
# passed as a Document object
d = doc_obj
else:
# passed as a DocType object
d = doc_obj.doc
if hasattr(doc_obj,'nsm_parent_field'):
pf = doc_obj.nsm_parent_field
if hasattr(doc_obj,'nsm_oldparent_field'):
opf = doc_obj.nsm_oldparent_field
p, op = d.fields.get(pf, ''), d.fields.get(opf, '')
# has parent changed (?) or parent is None (root)
if not d.lft and not d.rgt:
update_add_node(d, p or '', pf)
elif op != p:
update_move_node(d, pf)
# set old parent
d.fields[opf] = p
webnotes.conn.set_value(d.doctype, d.name, opf, p or '')
# reload
d._loadfromdb()
def update_add_node(doc, parent, parent_field):
"""
insert a new node
"""
from webnotes.utils import now
n = now()
doctype = doc.doctype
name = doc.name
# get the last sibling of the parent
if parent:
left, right = webnotes.conn.sql("select lft, rgt from `tab%s` where name=%s" \
% (doctype, "%s"), parent)[0]
validate_loop(doc.doctype, doc.name, left, right)
else: # root
right = webnotes.conn.sql("select ifnull(max(rgt),0)+1 from `tab%s` where ifnull(`%s`,'') =''" % (doctype, parent_field))[0][0]
right = right or 1
# update all on the right
webnotes.conn.sql("update `tab%s` set rgt = rgt+2, modified='%s' where rgt >= %s" %(doctype,n,right))
webnotes.conn.sql("update `tab%s` set lft = lft+2, modified='%s' where lft >= %s" %(doctype,n,right))
# update index of new node
if webnotes.conn.sql("select * from `tab%s` where lft=%s or rgt=%s"% (doctype, right, right+1)):
webnotes.msgprint("Nested set error. Please send mail to support")
raise Exception
webnotes.conn.sql("update `tab%s` set lft=%s, rgt=%s, modified='%s' where name='%s'" % (doctype,right,right+1,n,name))
return right
def update_move_node(doc, parent_field):
parent = doc.fields.get(parent_field)
if parent:
new_parent = webnotes.conn.sql("""select lft, rgt from `tab%s`
where name = %s""" % (doc.doctype, '%s'), parent, as_dict=1)[0]
validate_loop(doc.doctype, doc.name, new_parent.lft, new_parent.rgt)
# move to dark side
webnotes.conn.sql("""update `tab%s` set lft = -lft, rgt = -rgt
where lft >= %s and rgt <= %s"""% (doc.doctype, '%s', '%s'), (doc.lft, doc.rgt))
# shift left
diff = doc.rgt - doc.lft + 1
webnotes.conn.sql("""update `tab%s` set lft = lft -%s, rgt = rgt - %s
where lft > %s"""% (doc.doctype, '%s', '%s', '%s'), (diff, diff, doc.rgt))
# shift left rgts of ancestors whose only rgts must shift
webnotes.conn.sql("""update `tab%s` set rgt = rgt - %s
where lft < %s and rgt > %s"""% (doc.doctype, '%s', '%s', '%s'),
(diff, doc.lft, doc.rgt))
if parent:
new_parent = webnotes.conn.sql("""select lft, rgt from `tab%s`
where name = %s""" % (doc.doctype, '%s'), parent, as_dict=1)[0]
# set parent lft, rgt
webnotes.conn.sql("""update `tab%s` set rgt = rgt + %s
where name = %s"""% (doc.doctype, '%s', '%s'), (diff, parent))
# shift right at new parent
webnotes.conn.sql("""update `tab%s` set lft = lft + %s, rgt = rgt + %s
where lft > %s""" % (doc.doctype, '%s', '%s', '%s'),
(diff, diff, new_parent.rgt))
# shift right rgts of ancestors whose only rgts must shift
webnotes.conn.sql("""update `tab%s` set rgt = rgt + %s
where lft < %s and rgt > %s""" % (doc.doctype, '%s', '%s', '%s'),
(diff, new_parent.lft, new_parent.rgt))
new_diff = new_parent.rgt - doc.lft
else:
# new root
max_rgt = webnotes.conn.sql("""select max(rgt) from `tab%s`""" % doc.doctype)[0][0]
new_diff = max_rgt + 1 - doc.lft
# bring back from dark side
webnotes.conn.sql("""update `tab%s` set lft = -lft + %s, rgt = -rgt + %s
where lft < 0"""% (doc.doctype, '%s', '%s'), (new_diff, new_diff))
def rebuild_tree(doctype, parent_field):
"""
call rebuild_node for all root nodes
"""
# get all roots
webnotes.conn.auto_commit_on_many_writes = 1
right = 1
result = webnotes.conn.sql("SELECT name FROM `tab%s` WHERE `%s`='' or `%s` IS NULL ORDER BY name ASC" % (doctype, parent_field, parent_field))
for r in result:
right = rebuild_node(doctype, r[0], right, parent_field)
webnotes.conn.auto_commit_on_many_writes = 0
def rebuild_node(doctype, parent, left, parent_field):
"""
reset lft, rgt and recursive call for all children
"""
from webnotes.utils import now
n = now()
# the right value of this node is the left value + 1
right = left+1
# get all children of this node
result = webnotes.conn.sql("SELECT name FROM `tab%s` WHERE `%s`='%s'" % (doctype, parent_field, parent))
for r in result:
right = rebuild_node(doctype, r[0], right, parent_field)
# we've got the left value, and now that we've processed
# the children of this node we also know the right value
webnotes.conn.sql("UPDATE `tab%s` SET lft=%s, rgt=%s, modified='%s' WHERE name='%s'" % (doctype,left,right,n,parent))
#return the right value of this node + 1
return right+1
def validate_loop(doctype, name, lft, rgt):
"""check if item not an ancestor (loop)"""
if name in webnotes.conn.sql_list("""select name from `tab%s` where lft <= %s and rgt >= %s""" % (doctype,
"%s", "%s"), (lft, rgt)):
webnotes.throw("""Item cannot be added to its own descendents.""")
class DocTypeNestedSet(object):
def on_update(self):
update_nsm(self)
self.validate_ledger()
def on_trash(self):
parent = self.doc.fields[self.nsm_parent_field]
if not parent:
msgprint(_("Root ") + self.doc.doctype + _(" cannot be deleted."), raise_exception=1)
self.doc.fields[self.nsm_parent_field] = ""
update_nsm(self)
def before_rename(self, newdn, olddn, merge=False, group_fname="is_group"):
if merge:
is_group = webnotes.conn.get_value(self.doc.doctype, newdn, group_fname)
if self.doc.fields[group_fname] != is_group:
webnotes.throw(_("""Merging is only possible between Group-to-Group or
Ledger-to-Ledger"""))
def after_rename(self, olddn, newdn, merge=False):
if merge:
parent_field = "parent_" + self.doc.doctype.replace(" ", "_").lower()
rebuild_tree(self.doc.doctype, parent_field)
def validate_one_root(self):
if not self.doc.fields[self.nsm_parent_field]:
if webnotes.conn.sql("""select count(*) from `tab%s` where
ifnull(%s, '')=''""" % (self.doc.doctype, self.nsm_parent_field))[0][0] > 1:
webnotes.throw(_("""Multiple root nodes not allowed."""))
def validate_ledger(self, group_identifier="is_group"):
if self.doc.fields.get(group_identifier) == "No":
if webnotes.conn.sql("""select name from `tab%s` where %s=%s and docstatus!=2""" %
(self.doc.doctype, self.nsm_parent_field, '%s'), (self.doc.name)):
webnotes.throw(self.doc.doctype + ": " + self.doc.name +
_(" can not be marked as a ledger as it has existing child"))
| mit |
40223119/2015cda | static/Brython3.1.3-20150514-095342/Lib/unittest/signals.py | 1016 | 2403 | import signal
import weakref
from functools import wraps
__unittest = True
class _InterruptHandler(object):
def __init__(self, default_handler):
self.called = False
self.original_handler = default_handler
if isinstance(default_handler, int):
if default_handler == signal.SIG_DFL:
# Pretend it's signal.default_int_handler instead.
default_handler = signal.default_int_handler
elif default_handler == signal.SIG_IGN:
# Not quite the same thing as SIG_IGN, but the closest we
# can make it: do nothing.
def default_handler(unused_signum, unused_frame):
pass
else:
raise TypeError("expected SIGINT signal handler to be "
"signal.SIG_IGN, signal.SIG_DFL, or a "
"callable object")
self.default_handler = default_handler
def __call__(self, signum, frame):
installed_handler = signal.getsignal(signal.SIGINT)
if installed_handler is not self:
# if we aren't the installed handler, then delegate immediately
# to the default handler
self.default_handler(signum, frame)
if self.called:
self.default_handler(signum, frame)
self.called = True
for result in _results.keys():
result.stop()
_results = weakref.WeakKeyDictionary()
def registerResult(result):
_results[result] = 1
def removeResult(result):
return bool(_results.pop(result, None))
_interrupt_handler = None
def installHandler():
global _interrupt_handler
if _interrupt_handler is None:
default_handler = signal.getsignal(signal.SIGINT)
_interrupt_handler = _InterruptHandler(default_handler)
signal.signal(signal.SIGINT, _interrupt_handler)
def removeHandler(method=None):
if method is not None:
@wraps(method)
def inner(*args, **kwargs):
initial = signal.getsignal(signal.SIGINT)
removeHandler()
try:
return method(*args, **kwargs)
finally:
signal.signal(signal.SIGINT, initial)
return inner
global _interrupt_handler
if _interrupt_handler is not None:
signal.signal(signal.SIGINT, _interrupt_handler.original_handler)
| gpl-3.0 |
piqoni/onadata | onadata/apps/api/viewsets/team_viewset.py | 3 | 4075 | from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from rest_framework import filters
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework.permissions import DjangoObjectPermissions
from onadata.libs.mixins.last_modified_mixin import LastModifiedMixin
from onadata.libs.serializers.team_serializer import TeamSerializer
from onadata.apps.api.models import Team
from onadata.apps.api.tools import add_user_to_team, remove_user_from_team
class TeamViewSet(LastModifiedMixin, ModelViewSet):
"""
This endpoint allows you to create, update and view team information.
## GET List of Teams
Provides a json list of teams and the projects the team is assigned to.
<pre class="prettyprint">
<b>GET</b> /api/v1/teams
</pre>
> Example
>
> curl -X GET https://ona.io/api/v1/teams
> Response
>
> [
> {
> "url": "https://ona.io/api/v1/teams/1",
> "name": "Owners",
> "organization": "bruize",
> "projects": []
> },
> {
> "url": "https://ona.io/api/v1/teams/2",
> "name": "demo team",
> "organization": "bruize",
> "projects": []
> }
> ]
## GET Team Info for a specific team.
Shows teams details and the projects the team is assigned to, where:
* `pk` - unique identifier for the team
<pre class="prettyprint">
<b>GET</b> /api/v1/teams/<code>{pk}</code>
</pre>
> Example
>
> curl -X GET https://ona.io/api/v1/teams/1
> Response
>
> {
> "url": "https://ona.io/api/v1/teams/1",
> "name": "Owners",
> "organization": "bruize",
> "projects": []
> }
## List members of a team
A list of usernames is the response for members of the team.
<pre class="prettyprint">
<b>GET</b> /api/v1/teams/<code>{pk}/members</code>
</pre>
> Example
>
> curl -X GET https://ona.io/api/v1/teams/1/members
> Response
>
> ["member1"]
>
## Add a user to a team
POST `{"username": "someusername"}`
to `/api/v1/teams/<pk>/members` to add a user to
the specified team.
A list of usernames is the response for members of the team.
<pre class="prettyprint">
<b>POST</b> /api/v1/teams/<code>{pk}</code>/members
</pre>
> Response
>
> ["someusername"]
"""
queryset = Team.objects.all()
serializer_class = TeamSerializer
lookup_field = 'pk'
extra_lookup_fields = None
permission_classes = [DjangoObjectPermissions]
filter_backends = (filters.DjangoObjectPermissionsFilter,)
@action(methods=['DELETE', 'GET', 'POST'])
def members(self, request, *args, **kwargs):
team = self.get_object()
data = {}
status_code = status.HTTP_200_OK
if request.method in ['DELETE', 'POST']:
username = request.DATA.get('username') or\
request.QUERY_PARAMS.get('username')
if username:
try:
user = User.objects.get(username__iexact=username)
except User.DoesNotExist:
status_code = status.HTTP_400_BAD_REQUEST
data['username'] = [
_(u"User `%(username)s` does not exist."
% {'username': username})]
else:
if request.method == 'POST':
add_user_to_team(team, user)
elif request.method == 'DELETE':
remove_user_from_team(team, user)
status_code = status.HTTP_201_CREATED
else:
status_code = status.HTTP_400_BAD_REQUEST
data['username'] = [_(u"This field is required.")]
if status_code in [status.HTTP_200_OK, status.HTTP_201_CREATED]:
data = [u.username for u in team.user_set.all()]
return Response(data, status=status_code)
| bsd-2-clause |
pierrelb/RMG-Py | scripts/standardizeModelSpeciesNames.py | 8 | 3903 | #!/usr/bin/env python
# encoding: utf-8
"""
This script enables the automatic renaming of species names of two or more Chemkin files (and
associated species dictionaries) so that they use consistent, matching names. Simply
pass the paths of the Chemkin files and species dictionaries on the
command-line, e.g.
$ python standardizeModelSpeciesNames.py --model1 /path/to/chem1.inp /path/to/species_dictionary1.txt --model2 /path/to/chem2.inp /path/to/species_dictionary2.txt
The resulting files are saved as ``chem1.inp`` and
``species_dictionary1.txt``, ``chem2.inp``, ``species_dictionary2.txt`` and so forth in the execution directory.
"""
import os.path
import argparse
from rmgpy.chemkin import loadChemkinFile, saveChemkinFile, saveSpeciesDictionary, saveTransportFile
from rmgpy.rmg.model import ReactionModel
################################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model1', metavar='FILE', type=str, nargs='+',
help='the Chemkin files and species dictionaries of the first model')
parser.add_argument('--model2', metavar='FILE', type=str, nargs='+',
help='the Chemkin files and species dictionaries of the second model')
parser.add_argument('--model3', metavar='FILE', type=str, nargs='+',
help='the Chemkin files and species dictionaries of the third model')
parser.add_argument('--model4', metavar='FILE', type=str, nargs='+',
help='the Chemkin files and species dictionaries of the fourth model')
parser.add_argument('--model5', metavar='FILE', type=str, nargs='+',
help='the Chemkin files and species dictionaries of the fifth model')
args = parser.parse_args()
transport = False
inputModelFiles = []
for model in [args.model1, args.model2, args.model3, args.model4, args.model5]:
if model is None: continue
if len(model) == 2:
inputModelFiles.append((model[0], model[1], None))
elif len(model) == 3:
transport = True
inputModelFiles.append((model[0], model[1], model[2]))
else:
raise Exception
outputChemkinFile = 'chem.inp'
outputSpeciesDictionary = 'species_dictionary.txt'
outputTransportFile = 'tran.dat' if transport else None
# Load the models to merge
models = []
for chemkin, speciesPath, transportPath in inputModelFiles:
print 'Loading model #{0:d}...'.format(len(models)+1)
model = ReactionModel()
model.species, model.reactions = loadChemkinFile(chemkin, speciesPath, transportPath=transportPath)
models.append(model)
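# Build the union of species across all models: each species is compared by
# graph isomorphism against those collected so far, and speciesIndices[i][j]
# records the position of species j of model i within the combined allSpecies list.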
allSpecies = []
speciesIndices = [[] for i in range(len(models))]
for i, model in enumerate(models):
speciesIndices[i] = []
for j, species in enumerate(model.species):
for index, species0 in enumerate(allSpecies):
if species0.isIsomorphic(species):
speciesIndices[i].append(index)
break;
else:
allSpecies.append(species)
speciesIndices[i].append(allSpecies.index(species))
# Reassign species names and labels according to the list of all species in all models
# We must retain the original thermochemistry
for i, model in enumerate(models):
for j, species in enumerate(model.species):
index = speciesIndices[i][j]
species.label = allSpecies[index].label
species.index = allSpecies[index].index
# Resave the models
saveChemkinFile('chem{0}.inp'.format(i+1), model.species, model.reactions)
saveSpeciesDictionary('species_dictionary{0}.txt'.format(i+1), model.species)
print 'Saving of new models with consistent names is complete!' | mit |
aprefontaine/TMScheduler | django/contrib/comments/moderation.py | 23 | 13333 | """
A generic comment-moderation system which allows configuration of
moderation options on a per-model basis.
To use, do two things:
1. Create or import a subclass of ``CommentModerator`` defining the
options you want.
2. Import ``moderator`` from this module and register one or more
models, passing the models and the ``CommentModerator`` options
class you want to use.
Example
-------
First, we define a simple model class which might represent entries in
a weblog::
from django.db import models
class Entry(models.Model):
title = models.CharField(max_length=250)
body = models.TextField()
pub_date = models.DateField()
enable_comments = models.BooleanField()
Then we create a ``CommentModerator`` subclass specifying some
moderation options::
from django.contrib.comments.moderation import CommentModerator, moderator
class EntryModerator(CommentModerator):
email_notification = True
enable_field = 'enable_comments'
And finally register it for moderation::
moderator.register(Entry, EntryModerator)
This sample class would apply two moderation steps to each new
comment submitted on an Entry:
* If the entry's ``enable_comments`` field is set to ``False``, the
comment will be rejected (immediately deleted).
* If the comment is successfully posted, an email notification of the
comment will be sent to site staff.
For a full list of built-in moderation options and other
configurability, see the documentation for the ``CommentModerator``
class.
"""
import datetime
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.comments import signals
from django.db.models.base import ModelBase
from django.template import Context, loader
from django.contrib import comments
from django.contrib.sites.models import Site
class AlreadyModerated(Exception):
"""
Raised when a model which is already registered for moderation is
attempting to be registered again.
"""
pass
class NotModerated(Exception):
"""
Raised when a model which is not registered for moderation is
attempting to be unregistered.
"""
pass
class CommentModerator(object):
"""
Encapsulates comment-moderation options for a given model.
This class is not designed to be used directly, since it doesn't
enable any of the available moderation options. Instead, subclass
it and override attributes to enable different options::
``auto_close_field``
If this is set to the name of a ``DateField`` or
``DateTimeField`` on the model for which comments are
being moderated, new comments for objects of that model
will be disallowed (immediately deleted) when a certain
number of days have passed after the date specified in
that field. Must be used in conjunction with
``close_after``, which specifies the number of days past
which comments should be disallowed. Default value is
``None``.
``auto_moderate_field``
Like ``auto_close_field``, but instead of outright
deleting new comments when the requisite number of days
have elapsed, it will simply set the ``is_public`` field
of new comments to ``False`` before saving them. Must be
used in conjunction with ``moderate_after``, which
specifies the number of days past which comments should be
moderated. Default value is ``None``.
``close_after``
If ``auto_close_field`` is used, this must specify the
number of days past the value of the field specified by
``auto_close_field`` after which new comments for an
object should be disallowed. Default value is ``None``.
``email_notification``
If ``True``, any new comment on an object of this model
which survives moderation will generate an email to site
staff. Default value is ``False``.
``enable_field``
If this is set to the name of a ``BooleanField`` on the
model for which comments are being moderated, new comments
on objects of that model will be disallowed (immediately
deleted) whenever the value of that field is ``False`` on
the object the comment would be attached to. Default value
is ``None``.
``moderate_after``
If ``auto_moderate_field`` is used, this must specify the number
of days past the value of the field specified by
``auto_moderate_field`` after which new comments for an
object should be marked non-public. Default value is
``None``.
Most common moderation needs can be covered by changing these
attributes, but further customization can be obtained by
subclassing and overriding the following methods. Each method will
be called with three arguments: ``comment``, which is the comment
being submitted, ``content_object``, which is the object the
comment will be attached to, and ``request``, which is the
``HttpRequest`` in which the comment is being submitted::
``allow``
Should return ``True`` if the comment should be allowed to
post on the content object, and ``False`` otherwise (in
which case the comment will be immediately deleted).
``email``
If email notification of the new comment should be sent to
site staff or moderators, this method is responsible for
sending the email.
``moderate``
Should return ``True`` if the comment should be moderated
(in which case its ``is_public`` field will be set to
``False`` before saving), and ``False`` otherwise (in
which case the ``is_public`` field will not be changed).
Subclasses which want to introspect the model for which comments
are being moderated can do so through the attribute ``_model``,
which will be the model class.
"""
auto_close_field = None
auto_moderate_field = None
close_after = None
email_notification = False
enable_field = None
moderate_after = None
def __init__(self, model):
self._model = model
def _get_delta(self, now, then):
"""
Internal helper which will return a ``datetime.timedelta``
representing the time between ``now`` and ``then``. Assumes
``now`` is a ``datetime.date`` or ``datetime.datetime`` later
than ``then``.
If ``now`` and ``then`` are not of the same type due to one of
them being a ``datetime.date`` and the other being a
``datetime.datetime``, both will be coerced to
``datetime.date`` before calculating the delta.
"""
if now.__class__ is not then.__class__:
now = datetime.date(now.year, now.month, now.day)
then = datetime.date(then.year, then.month, then.day)
if now < then:
raise ValueError("Cannot determine moderation rules because date field is set to a value in the future")
return now - then
def allow(self, comment, content_object, request):
"""
Determine whether a given comment is allowed to be posted on
a given object.
Return ``True`` if the comment should be allowed, ``False
otherwise.
"""
if self.enable_field:
if not getattr(content_object, self.enable_field):
return False
if self.auto_close_field and self.close_after:
if self._get_delta(datetime.datetime.now(), getattr(content_object, self.auto_close_field)).days >= self.close_after:
return False
return True
def moderate(self, comment, content_object, request):
"""
Determine whether a given comment on a given object should be
allowed to show up immediately, or should be marked non-public
and await approval.
Return ``True`` if the comment should be moderated (marked
non-public), ``False`` otherwise.
"""
if self.auto_moderate_field and self.moderate_after:
if self._get_delta(datetime.datetime.now(), getattr(content_object, self.auto_moderate_field)).days >= self.moderate_after:
return True
return False
def email(self, comment, content_object, request):
"""
Send email notification of a new comment to site staff when email
notifications have been requested.
"""
if not self.email_notification:
return
recipient_list = [manager_tuple[1] for manager_tuple in settings.MANAGERS]
t = loader.get_template('comments/comment_notification_email.txt')
c = Context({ 'comment': comment,
'content_object': content_object })
subject = '[%s] New comment posted on "%s"' % (Site.objects.get_current().name,
content_object)
message = t.render(c)
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, recipient_list, fail_silently=True)
class Moderator(object):
"""
Handles moderation of a set of models.
An instance of this class will maintain a list of one or more
models registered for comment moderation, and their associated
moderation classes, and apply moderation to all incoming comments.
To register a model, obtain an instance of ``Moderator`` (this
module exports one as ``moderator``), and call its ``register``
method, passing the model class and a moderation class (which
should be a subclass of ``CommentModerator``). Note that both of
these should be the actual classes, not instances of the classes.
To cease moderation for a model, call the ``unregister`` method,
passing the model class.
For convenience, both ``register`` and ``unregister`` can also
accept a list of model classes in place of a single model; this
allows easier registration of multiple models with the same
``CommentModerator`` class.
The actual moderation is applied in two phases: one prior to
saving a new comment, and the other immediately after saving. The
pre-save moderation may mark a comment as non-public or mark it to
be removed; the post-save moderation may delete a comment which
was disallowed (there is currently no way to prevent the comment
being saved once before removal) and, if the comment is still
around, will send any notification emails the comment generated.
"""
def __init__(self):
self._registry = {}
self.connect()
def connect(self):
"""
Hook up the moderation methods to pre- and post-save signals
from the comment models.
"""
signals.comment_will_be_posted.connect(self.pre_save_moderation, sender=comments.get_model())
signals.comment_was_posted.connect(self.post_save_moderation, sender=comments.get_model())
def register(self, model_or_iterable, moderation_class):
"""
Register a model or a list of models for comment moderation,
using a particular moderation class.
Raise ``AlreadyModerated`` if any of the models are already
registered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model in self._registry:
raise AlreadyModerated("The model '%s' is already being moderated" % model._meta.module_name)
self._registry[model] = moderation_class(model)
def unregister(self, model_or_iterable):
"""
Remove a model or a list of models from the list of models
whose comments will be moderated.
Raise ``NotModerated`` if any of the models are not currently
registered for moderation.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotModerated("The model '%s' is not currently being moderated" % model._meta.module_name)
del self._registry[model]
def pre_save_moderation(self, sender, comment, request, **kwargs):
"""
Apply any necessary pre-save moderation steps to new
comments.
"""
model = comment.content_type.model_class()
if model not in self._registry:
return
content_object = comment.content_object
moderation_class = self._registry[model]
# Comment will be disallowed outright (HTTP 403 response)
if not moderation_class.allow(comment, content_object, request):
return False
if moderation_class.moderate(comment, content_object, request):
comment.is_public = False
def post_save_moderation(self, sender, comment, request, **kwargs):
"""
Apply any necessary post-save moderation steps to new
comments.
"""
model = comment.content_type.model_class()
if model not in self._registry:
return
self._registry[model].email(comment, comment.content_object, request)
# Import this instance in your own code to use in registering
# your models for moderation.
moderator = Moderator()
| bsd-3-clause |
polaris/boids | lib/googletest-82b11b8/googletest/test/gtest_xml_test_utils.py | 1815 | 8876 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for gtest_xml_output"""
__author__ = '[email protected] (Sean Mcafee)'
import re
from xml.dom import minidom, Node
import gtest_test_utils
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
class GTestXMLTestCase(gtest_test_utils.TestCase):
"""
Base class for tests of Google Test's XML output functionality.
"""
def AssertEquivalentNodes(self, expected_node, actual_node):
"""
Asserts that actual_node (a DOM node object) is equivalent to
expected_node (another DOM node object), in that either both of
them are CDATA nodes and have the same value, or both are DOM
elements and actual_node meets all of the following conditions:
* It has the same tag name as expected_node.
* It has the same set of attributes as expected_node, each with
the same value as the corresponding attribute of expected_node.
Exceptions are any attribute named "time", which need only be
convertible to a floating-point number, and any attribute named
"type_param", which only has to be non-empty.
* It has an equivalent set of child nodes (including elements and
CDATA sections) as expected_node. Note that we ignore the
order of the children as they are not guaranteed to be in any
particular order.
"""
if expected_node.nodeType == Node.CDATA_SECTION_NODE:
self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
return
self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
self.assertEquals(expected_node.tagName, actual_node.tagName)
expected_attributes = expected_node.attributes
actual_attributes = actual_node .attributes
self.assertEquals(
expected_attributes.length, actual_attributes.length,
'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
actual_node.tagName, expected_attributes.keys(),
actual_attributes.keys()))
for i in range(expected_attributes.length):
expected_attr = expected_attributes.item(i)
actual_attr = actual_attributes.get(expected_attr.name)
self.assert_(
actual_attr is not None,
'expected attribute %s not found in element %s' %
(expected_attr.name, actual_node.tagName))
self.assertEquals(
expected_attr.value, actual_attr.value,
' values of attribute %s in element %s differ: %s vs %s' %
(expected_attr.name, actual_node.tagName,
expected_attr.value, actual_attr.value))
expected_children = self._GetChildren(expected_node)
actual_children = self._GetChildren(actual_node)
self.assertEquals(
len(expected_children), len(actual_children),
'number of child elements differ in element ' + actual_node.tagName)
for child_id, child in expected_children.iteritems():
self.assert_(child_id in actual_children,
'<%s> is not in <%s> (in element %s)' %
(child_id, actual_children, actual_node.tagName))
self.AssertEquivalentNodes(child, actual_children[child_id])
identifying_attribute = {
'testsuites': 'name',
'testsuite': 'name',
'testcase': 'name',
'failure': 'message',
}
def _GetChildren(self, element):
"""
Fetches all of the child nodes of element, a DOM Element object.
Returns them as the values of a dictionary keyed by the IDs of the
children. For <testsuites>, <testsuite> and <testcase> elements, the ID
is the value of their "name" attribute; for <failure> elements, it is
the value of the "message" attribute; CDATA sections and non-whitespace
text nodes are concatenated into a single CDATA section with ID
"detail". An exception is raised if any element other than the above
four is encountered, if two child elements with the same identifying
attributes are encountered, or if any other type of node is encountered.
"""
children = {}
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.assert_(child.tagName in self.identifying_attribute,
'Encountered unknown element <%s>' % child.tagName)
childID = child.getAttribute(self.identifying_attribute[child.tagName])
self.assert_(childID not in children)
children[childID] = child
elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
if 'detail' not in children:
if (child.nodeType == Node.CDATA_SECTION_NODE or
not child.nodeValue.isspace()):
children['detail'] = child.ownerDocument.createCDATASection(
child.nodeValue)
else:
children['detail'].nodeValue += child.nodeValue
else:
self.fail('Encountered unexpected node type %d' % child.nodeType)
return children
def NormalizeXml(self, element):
"""
Normalizes Google Test's XML output to eliminate references to transient
information that may change from run to run.
* The "time" attribute of <testsuites>, <testsuite> and <testcase>
elements is replaced with a single asterisk, if it contains
only digit characters.
* The "timestamp" attribute of <testsuites> elements is replaced with a
single asterisk, if it contains a valid ISO8601 datetime value.
* The "type_param" attribute of <testcase> elements is replaced with a
single asterisk (if it is non-empty) as it is the type name returned
by the compiler and is platform dependent.
* The line info reported in the first line of the "message"
attribute and CDATA section of <failure> elements is replaced with the
file's basename and a single asterisk for the line number.
* The directory names in file paths are removed.
* The stack traces are removed.
"""
if element.tagName == 'testsuites':
timestamp = element.getAttributeNode('timestamp')
timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
'*', timestamp.value)
if element.tagName in ('testsuites', 'testsuite', 'testcase'):
time = element.getAttributeNode('time')
time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
type_param = element.getAttributeNode('type_param')
if type_param and type_param.value:
type_param.value = '*'
elif element.tagName == 'failure':
source_line_pat = r'^.*[/\\](.*:)\d+\n'
# Replaces the source line information with a normalized form.
message = element.getAttributeNode('message')
message.value = re.sub(source_line_pat, '\\1*\n', message.value)
for child in element.childNodes:
if child.nodeType == Node.CDATA_SECTION_NODE:
# Replaces the source line information with a normalized form.
cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
# Removes the actual stack trace.
child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*',
'', cdata)
for child in element.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.NormalizeXml(child)
| mit |
Donkyhotay/MoonPy | zope/index/topic/interfaces.py | 1 | 1798 | ##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Basic interfaces shared between different types of index.
$Id: interfaces.py 28610 2004-12-09 20:56:05Z jim $
"""
from zope.interface import Interface
class ITopicQuerying(Interface):
"""Query over topics, seperated by white space."""
def search(query, operator='and'):
"""Execute a search given by 'query' as a list/tuple of filter ids.
'operator' can be 'and' or 'or' to search for matches in all
or any filter.
Return an IISet of docids
"""
class ITopicFilteredSet(Interface):
"""Interface for filtered sets used by topic indexes."""
def clear():
"""Remove all entries from the index."""
def index_doc(docid, context):
"""Add an object's info to the index."""
def unindex_doc(docid):
"""Remove an object with id 'docid' from the index."""
def getId():
"""Return the id of the filter itself."""
def setExpression(expr):
"""Set the filter expression, e.g. 'context.meta_type=='...'"""
def getExpression():
"""Return the filter expression."""
def getIds():
"""Return an IISet of docids."""
| gpl-3.0 |
neudesk/neucloud | openstack_dashboard/dashboards/project/stacks/sro.py | 2 | 1640 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template.defaultfilters import title # noqa
from django.template.loader import render_to_string # noqa
from horizon.utils import filters
def stack_info(stack, stack_image):
stack.stack_status_desc = title(
filters.replace_underscores(stack.stack_status))
if stack.stack_status_reason:
stack.stack_status_reason = title(
filters.replace_underscores(stack.stack_status_reason)
)
context = {}
context['stack'] = stack
context['stack_image'] = stack_image
return render_to_string('project/stacks/_stack_info.html',
context)
def resource_info(resource):
resource.resource_status_desc = title(
filters.replace_underscores(resource.resource_status)
)
if resource.resource_status_reason:
resource.resource_status_reason = title(
filters.replace_underscores(resource.resource_status_reason)
)
context = {}
context['resource'] = resource
return render_to_string('project/stacks/_resource_info.html',
context)
| apache-2.0 |
albmarvil/The-Eternal-Sorrow | dependencies/luabind/boost-build/test/conditionals.py | 4 | 1328 | #!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2002, 2003, 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test conditional properties
from BoostBuild import Tester, List
import os
from string import strip
t = Tester()
# Arrange a project which will build only if
# 'a.cpp' is compiled with "STATIC" define.
t.write("project-root.jam", "import gcc ;")
t.write("a.cpp", """
#ifdef STATIC
int main() { return 0; }
#endif
""")
t.write("Jamfile", "exe a : a.cpp : <link>static:<define>STATIC ;")
t.run_build_system("link=static")
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
t.write("Jamfile", """
project : requirements <link>static:<define>STATIC ;
exe a : a.cpp ;
""")
t.rm("bin")
t.run_build_system("link=static")
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
# Regression test for a bug found by Ali Azarbayejani.
# Conditionals inside usage requirement were not evaluated.
# This breaks
t.write("Jamfile", """
lib l : l.cpp : : : <link>static:<define>STATIC ;
exe a : a.cpp l ;
""")
t.write("l.cpp", "")
t.write("l.cpp", """
int i;
""")
t.rm("bin")
t.run_build_system("link=static")
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
t.cleanup()
| apache-2.0 |
swarna-k/MyDiary | flask/lib/python2.7/site-packages/sqlalchemy/orm/path_registry.py | 60 | 8370 | # orm/path_registry.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Path tracking utilities, representing mapper graph traversals.
"""
from .. import inspection
from .. import util
from .. import exc
from itertools import chain
from .base import class_mapper
import logging
log = logging.getLogger(__name__)
def _unreduce_path(path):
return PathRegistry.deserialize(path)
_WILDCARD_TOKEN = "*"
_DEFAULT_TOKEN = "_sa_default"
class PathRegistry(object):
"""Represent query load paths and registry functions.
Basically represents structures like:
(<User mapper>, "orders", <Order mapper>, "items", <Item mapper>)
These structures are generated by things like
query options (joinedload(), subqueryload(), etc.) and are
used to compose keys stored in the query._attributes dictionary
for various options.
They are then re-composed at query compile/result row time as
the query is formed and as rows are fetched, where they again
serve to compose keys to look up options in the context.attributes
dictionary, which is copied from query._attributes.
The path structure has a limited amount of caching, where each
"root" ultimately pulls from a fixed registry associated with
the first mapper, that also contains elements for each of its
property keys. However paths longer than two elements, which
are the exception rather than the rule, are generated on an
as-needed basis.
"""
is_token = False
is_root = False
def __eq__(self, other):
return other is not None and \
self.path == other.path
def set(self, attributes, key, value):
log.debug("set '%s' on path '%s' to '%s'", key, self, value)
attributes[(key, self.path)] = value
def setdefault(self, attributes, key, value):
log.debug("setdefault '%s' on path '%s' to '%s'", key, self, value)
attributes.setdefault((key, self.path), value)
def get(self, attributes, key, value=None):
key = (key, self.path)
if key in attributes:
return attributes[key]
else:
return value
def __len__(self):
return len(self.path)
@property
def length(self):
return len(self.path)
def pairs(self):
path = self.path
for i in range(0, len(path), 2):
yield path[i], path[i + 1]
def contains_mapper(self, mapper):
for path_mapper in [
self.path[i] for i in range(0, len(self.path), 2)
]:
if path_mapper.is_mapper and \
path_mapper.isa(mapper):
return True
else:
return False
def contains(self, attributes, key):
return (key, self.path) in attributes
def __reduce__(self):
return _unreduce_path, (self.serialize(), )
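# serialize() flattens the path into [(mapped class, attribute key), ...]
# pairs; deserialize() rebuilds the registry from that structure, which is how
# __reduce__ above keeps paths picklable.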
def serialize(self):
path = self.path
return list(zip(
[m.class_ for m in [path[i] for i in range(0, len(path), 2)]],
[path[i].key for i in range(1, len(path), 2)] + [None]
))
@classmethod
def deserialize(cls, path):
if path is None:
return None
p = tuple(chain(*[(class_mapper(mcls),
class_mapper(mcls).attrs[key]
if key is not None else None)
for mcls, key in path]))
if p and p[-1] is None:
p = p[0:-1]
return cls.coerce(p)
@classmethod
def per_mapper(cls, mapper):
return EntityRegistry(
cls.root, mapper
)
@classmethod
def coerce(cls, raw):
return util.reduce(lambda prev, next: prev[next], raw, cls.root)
def token(self, token):
if token.endswith(':' + _WILDCARD_TOKEN):
return TokenRegistry(self, token)
elif token.endswith(":" + _DEFAULT_TOKEN):
return TokenRegistry(self.root, token)
else:
raise exc.ArgumentError("invalid token: %s" % token)
def __add__(self, other):
return util.reduce(
lambda prev, next: prev[next],
other.path, self)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.path, )
class RootRegistry(PathRegistry):
"""Root registry, defers to mappers so that
paths are maintained per-root-mapper.
"""
path = ()
has_entity = False
is_aliased_class = False
is_root = True
def __getitem__(self, entity):
return entity._path_registry
PathRegistry.root = RootRegistry()
class TokenRegistry(PathRegistry):
def __init__(self, parent, token):
self.token = token
self.parent = parent
self.path = parent.path + (token,)
has_entity = False
is_token = True
def generate_for_superclasses(self):
if not self.parent.is_aliased_class and not self.parent.is_root:
for ent in self.parent.mapper.iterate_to_root():
yield TokenRegistry(self.parent.parent[ent], self.token)
else:
yield self
def __getitem__(self, entity):
raise NotImplementedError()
class PropRegistry(PathRegistry):
def __init__(self, parent, prop):
# restate this path in terms of the
# given MapperProperty's parent.
insp = inspection.inspect(parent[-1])
if not insp.is_aliased_class or insp._use_mapper_path:
parent = parent.parent[prop.parent]
elif insp.is_aliased_class and insp.with_polymorphic_mappers:
if prop.parent is not insp.mapper and \
prop.parent in insp.with_polymorphic_mappers:
subclass_entity = parent[-1]._entity_for_mapper(prop.parent)
parent = parent.parent[subclass_entity]
self.prop = prop
self.parent = parent
self.path = parent.path + (prop,)
def __str__(self):
return " -> ".join(
str(elem) for elem in self.path
)
@util.memoized_property
def has_entity(self):
return hasattr(self.prop, "mapper")
@util.memoized_property
def entity(self):
return self.prop.mapper
@util.memoized_property
def _wildcard_path_loader_key(self):
"""Given a path (mapper A, prop X), replace the prop with the wildcard,
e.g. (mapper A, 'relationship:.*') or (mapper A, 'column:.*'), then
return within the ("loader", path) structure.
"""
return ("loader",
self.parent.token(
"%s:%s" % (
self.prop.strategy_wildcard_key, _WILDCARD_TOKEN)
).path
)
@util.memoized_property
def _default_path_loader_key(self):
return ("loader",
self.parent.token(
"%s:%s" % (self.prop.strategy_wildcard_key,
_DEFAULT_TOKEN)
).path
)
@util.memoized_property
def _loader_key(self):
return ("loader", self.path)
@property
def mapper(self):
return self.entity
@property
def entity_path(self):
return self[self.entity]
def __getitem__(self, entity):
if isinstance(entity, (int, slice)):
return self.path[entity]
else:
return EntityRegistry(
self, entity
)
class EntityRegistry(PathRegistry, dict):
is_aliased_class = False
has_entity = True
def __init__(self, parent, entity):
self.key = entity
self.parent = parent
self.is_aliased_class = entity.is_aliased_class
self.entity = entity
self.path = parent.path + (entity,)
self.entity_path = self
@property
def mapper(self):
return inspection.inspect(self.entity).mapper
def __bool__(self):
return True
__nonzero__ = __bool__
def __getitem__(self, entity):
if isinstance(entity, (int, slice)):
return self.path[entity]
else:
return dict.__getitem__(self, entity)
def __missing__(self, key):
self[key] = item = PropRegistry(self, key)
return item
| bsd-3-clause |
sogis/Quantum-GIS | tests/src/python/test_qgspallabeling_tests.py | 12 | 10817 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsPalLabeling: base suite of render check tests
Class is meant to be inherited by classes that test different labeling outputs
See <qgis-src-dir>/tests/testdata/labeling/README.rst for description.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Larry Shaffer'
__date__ = '07/16/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
import os
from PyQt4.QtCore import Qt, QPointF
from PyQt4.QtGui import QFont
from qgis.core import QgsPalLayerSettings
from utilities import svgSymbolsPath
# noinspection PyPep8Naming
class TestPointBase(object):
def __init__(self):
"""Dummy assignments, intended to be overridden in subclasses"""
self.lyr = QgsPalLayerSettings()
""":type: QgsPalLayerSettings"""
# noinspection PyArgumentList
self._TestFont = QFont() # will become a standard test font
self._Pal = None
""":type: QgsPalLabeling"""
self._Canvas = None
""":type: QgsMapCanvas"""
# custom mismatches per group/test (should not mask any needed anomaly)
# e.g. self._Mismatches['TestClassName'] = 300
# check base output class's checkTest() or subclasses for any defaults
self._Mismatches = dict()
# custom color tolerances per group/test: 1 - 20 (0 default, 20 max)
# (should not mask any needed anomaly)
# e.g. self._ColorTols['TestClassName'] = 10
# check base output class's checkTest() or subclasses for any defaults
self._ColorTols = dict()
# noinspection PyMethodMayBeStatic
def checkTest(self, **kwargs):
"""Intended to be overridden in subclasses"""
pass
def test_default_label(self):
# Default label placement, with text size in points
self._Mismatches['TestCanvasPoint'] = 776
self._ColorTols['TestComposerPdfPoint'] = 2
self.checkTest()
def test_text_size_map_unit(self):
# Label text size in map units
self.lyr.fontSizeInMapUnits = True
font = QFont(self._TestFont)
font.setPointSizeF(460)
self.lyr.textFont = font
self._Mismatches['TestCanvasPoint'] = 776
self._ColorTols['TestComposerPdfPoint'] = 2
self.checkTest()
def test_text_color(self):
self._Mismatches['TestCanvasPoint'] = 774
self._ColorTols['TestComposerPdfPoint'] = 2
# Label color change
self.lyr.textColor = Qt.blue
self.checkTest()
def test_background_rect(self):
self._Mismatches['TestComposerImageVsCanvasPoint'] = 800
self._Mismatches['TestComposerImagePoint'] = 800
self.lyr.shapeDraw = True
self._Mismatches['TestCanvasPoint'] = 776
self._ColorTols['TestComposerPdfPoint'] = 1
self.checkTest()
def test_background_rect_w_offset(self):
# Label rectangular background
self._Mismatches['TestComposerImageVsCanvasPoint'] = 800
self._Mismatches['TestComposerImagePoint'] = 800
# verify fix for issues
# http://hub.qgis.org/issues/9057
# http://gis.stackexchange.com/questions/86900
self.lyr.fontSizeInMapUnits = True
font = QFont(self._TestFont)
font.setPointSizeF(460)
self.lyr.textFont = font
self.lyr.shapeDraw = True
self.lyr.shapeOffsetUnits = QgsPalLayerSettings.MapUnits
self.lyr.shapeOffset = QPointF(-2900.0, -450.0)
self._Mismatches['TestCanvasPoint'] = 774
self._ColorTols['TestComposerPdfPoint'] = 2
self.checkTest()
def test_background_svg(self):
# Label SVG background
self.lyr.fontSizeInMapUnits = True
font = QFont(self._TestFont)
font.setPointSizeF(460)
self.lyr.textFont = font
self.lyr.shapeDraw = True
self.lyr.shapeType = QgsPalLayerSettings.ShapeSVG
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
self.lyr.shapeSVGFile = svg
self.lyr.shapeSizeUnits = QgsPalLayerSettings.MapUnits
self.lyr.shapeSizeType = QgsPalLayerSettings.SizeBuffer
self.lyr.shapeSize = QPointF(100.0, 0.0)
self._Mismatches['TestComposerPdfVsComposerPoint'] = 580
self._Mismatches['TestCanvasPoint'] = 776
self._ColorTols['TestComposerPdfPoint'] = 2
self.checkTest()
def test_background_svg_w_offset(self):
# Label SVG background
self.lyr.fontSizeInMapUnits = True
font = QFont(self._TestFont)
font.setPointSizeF(460)
self.lyr.textFont = font
self.lyr.shapeDraw = True
self.lyr.shapeType = QgsPalLayerSettings.ShapeSVG
svg = os.path.join(
svgSymbolsPath(), 'backgrounds', 'background_square.svg')
self.lyr.shapeSVGFile = svg
self.lyr.shapeSizeUnits = QgsPalLayerSettings.MapUnits
self.lyr.shapeSizeType = QgsPalLayerSettings.SizeBuffer
self.lyr.shapeSize = QPointF(100.0, 0.0)
self.lyr.shapeOffsetUnits = QgsPalLayerSettings.MapUnits
self.lyr.shapeOffset = QPointF(-2850.0, 500.0)
self._Mismatches['TestComposerPdfVsComposerPoint'] = 760
self._Mismatches['TestCanvasPoint'] = 776
self._ColorTols['TestComposerPdfPoint'] = 2
self.checkTest()
def test_partials_labels_enabled(self):
# Set Big font size
font = QFont(self._TestFont)
font.setPointSizeF(84)
self.lyr.textFont = font
# Enable partials labels
self._Pal.setShowingPartialsLabels(True)
self._Pal.saveEngineSettings()
self._Mismatches['TestCanvasPoint'] = 779
self._ColorTols['TestComposerPdfPoint'] = 2
self.checkTest()
def test_partials_labels_disabled(self):
# Set Big font size
font = QFont(self._TestFont)
font.setPointSizeF(84)
self.lyr.textFont = font
# Disable partials labels
self._Pal.setShowingPartialsLabels(False)
self._Pal.saveEngineSettings()
self.checkTest()
def test_buffer(self):
# Label with buffer
self.lyr.bufferDraw = True
self.lyr.bufferSize = 2
self.checkTest()
def test_shadow(self):
# Label with shadow
self.lyr.shadowDraw = True
self.lyr.shadowOffsetDist = 2
self.lyr.shadowTransparency = 0
self.checkTest()
# noinspection PyPep8Naming
class TestLineBase(object):
def __init__(self):
"""Dummy assignments, intended to be overridden in subclasses"""
self.lyr = QgsPalLayerSettings()
""":type: QgsPalLayerSettings"""
# noinspection PyArgumentList
self._TestFont = QFont() # will become a standard test font
self._Pal = None
""":type: QgsPalLabeling"""
self._Canvas = None
""":type: QgsMapCanvas"""
# custom mismatches per group/test (should not mask any needed anomaly)
# e.g. self._Mismatches['TestClassName'] = 300
# check base output class's checkTest() or subclasses for any defaults
self._Mismatches = dict()
# custom color tolerances per group/test: 1 - 20 (0 default, 20 max)
# (should not mask any needed anomaly)
# e.g. self._ColorTols['TestClassName'] = 10
# check base output class's checkTest() or subclasses for any defaults
self._ColorTols = dict()
# noinspection PyMethodMayBeStatic
def checkTest(self, **kwargs):
"""Intended to be overridden in subclasses"""
pass
def test_line_placement_above_line_orientation(self):
# Line placement, above, follow line orientation
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.placementFlags = QgsPalLayerSettings.AboveLine
self.checkTest()
def test_line_placement_online(self):
# Line placement, on line
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.placementFlags = QgsPalLayerSettings.OnLine
self.checkTest()
def test_line_placement_below_line_orientation(self):
# Line placement, below, follow line orientation
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.placementFlags = QgsPalLayerSettings.BelowLine
self.checkTest()
def test_line_placement_above_map_orientation(self):
# Line placement, above, follow map orientation
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.placementFlags = QgsPalLayerSettings.AboveLine | QgsPalLayerSettings.MapOrientation
self.checkTest()
def test_line_placement_below_map_orientation(self):
# Line placement, below, follow map orientation
self.lyr.placement = QgsPalLayerSettings.Line
self.lyr.placementFlags = QgsPalLayerSettings.BelowLine | QgsPalLayerSettings.MapOrientation
self.checkTest()
def test_curved_placement_online(self):
# Curved placement, on line
self.lyr.placement = QgsPalLayerSettings.Curved
self.lyr.placementFlags = QgsPalLayerSettings.OnLine
self.checkTest()
def test_curved_placement_above(self):
# Curved placement, on line
self.lyr.placement = QgsPalLayerSettings.Curved
self.lyr.placementFlags = QgsPalLayerSettings.AboveLine | QgsPalLayerSettings.MapOrientation
self.checkTest()
def test_curved_placement_below(self):
# Curved placement, on line
self.lyr.placement = QgsPalLayerSettings.Curved
self.lyr.placementFlags = QgsPalLayerSettings.BelowLine | QgsPalLayerSettings.MapOrientation
self.checkTest()
# noinspection PyPep8Naming
def suiteTests():
"""
Use to define which tests are run when PAL_SUITE is set.
Use sp_vs_suite for comparison of server and composer outputs to canvas
"""
sp_suite = [
# 'test_default_label',
# 'test_text_size_map_unit',
# 'test_text_color',
# 'test_background_rect',
# 'test_background_rect_w_offset',
# 'test_background_svg',
# 'test_background_svg_w_offset',
# 'test_partials_labels_enabled',
# 'test_partials_labels_disabled',
]
sp_vs_suite = [
#'test_something_specific',
]
# extended separately for finer control of PAL_SUITE (comment-out undesired)
sp_vs_suite.extend(sp_suite)
return {
'sp_suite': sp_suite,
'sp_vs_suite': sp_vs_suite
}
if __name__ == '__main__':
pass
| gpl-2.0 |
abdullah2891/remo | vendor-local/lib/python/unidecode/x0fa.py | 252 | 4406 | data = (
'Chey ', # 0x00
'Thak ', # 0x01
'Thak ', # 0x02
'Thang ', # 0x03
'Thayk ', # 0x04
'Thong ', # 0x05
'Pho ', # 0x06
'Phok ', # 0x07
'Hang ', # 0x08
'Hang ', # 0x09
'Hyen ', # 0x0a
'Hwak ', # 0x0b
'Wu ', # 0x0c
'Huo ', # 0x0d
'[?] ', # 0x0e
'[?] ', # 0x0f
'Zhong ', # 0x10
'[?] ', # 0x11
'Qing ', # 0x12
'[?] ', # 0x13
'[?] ', # 0x14
'Xi ', # 0x15
'Zhu ', # 0x16
'Yi ', # 0x17
'Li ', # 0x18
'Shen ', # 0x19
'Xiang ', # 0x1a
'Fu ', # 0x1b
'Jing ', # 0x1c
'Jing ', # 0x1d
'Yu ', # 0x1e
'[?] ', # 0x1f
'Hagi ', # 0x20
'[?] ', # 0x21
'Zhu ', # 0x22
'[?] ', # 0x23
'[?] ', # 0x24
'Yi ', # 0x25
'Du ', # 0x26
'[?] ', # 0x27
'[?] ', # 0x28
'[?] ', # 0x29
'Fan ', # 0x2a
'Si ', # 0x2b
'Guan ', # 0x2c
'[?]', # 0x2d
'[?]', # 0x2e
'[?]', # 0x2f
'[?]', # 0x30
'[?]', # 0x31
'[?]', # 0x32
'[?]', # 0x33
'[?]', # 0x34
'[?]', # 0x35
'[?]', # 0x36
'[?]', # 0x37
'[?]', # 0x38
'[?]', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'[?]', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'[?]', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'[?]', # 0xa0
'[?]', # 0xa1
'[?]', # 0xa2
'[?]', # 0xa3
'[?]', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
'[?]', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'[?]', # 0xae
'[?]', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| bsd-3-clause |
google/grr | grr/core/grr_response_core/lib/type_info.py | 1 | 14909 | #!/usr/bin/env python
"""Typing information for flow arguments.
This contains objects that are used to provide type annotations for flow
parameters. These annotations are used to assist in rendering the UI for
starting flows and for validating arguments.
"""
import logging
from typing import Optional
from typing import Text
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import serialization
from grr_response_core.lib.registry import MetaclassRegistry
from grr_response_core.lib.util import precondition
class Error(Exception):
"""Base error class."""
# TODO(hanuszczak): Consider getting rid of this class. Standard `ValueError`
# and `TypeError` should be used instead.
class TypeValueError(Error, ValueError):
"""Value is not valid."""
class UnknownArg(TypeValueError):
"""Raised for unknown flow args."""
class TypeInfoObject(metaclass=MetaclassRegistry):
"""Definition of the interface for flow arg typing information."""
# The delegate type this TypeInfoObject manages.
_type = None
def __init__(self,
name="",
default=None,
description="",
friendly_name="",
hidden=False):
"""Build a TypeInfo type descriptor.
Args:
name: The name of the parameter that this Type info corresponds to.
default: The default value that should be specified if the parameter was
not set.
description: A string describing this flow argument.
friendly_name: A human readable name which may be provided.
hidden: Should the argument be hidden from the UI.
"""
self.name = name
self.default = default
self.description = description
self.hidden = hidden
if not friendly_name:
friendly_name = name.replace("_", " ").capitalize()
self.friendly_name = friendly_name
# It is generally impossible to check the default value here
# because this happens before any configuration is loaded (i.e. at
# import time). Hence default values which depend on the config
# system can't be tested. We just assume that the default value is
# sensible since its hard coded in the code.
def GetType(self):
"""Returns the type class described by this type info."""
return self._type
def GetDefault(self):
"""Return the default value for this TypeInfoObject."""
return self.default
def Validate(self, value):
"""Confirm that the value is valid for this type.
Args:
value: The value being used to initialize the flow.
Raises:
TypeValueError: On value not conforming to type.
Returns:
A potentially modified value if we can use the provided value to construct
a valid input.
"""
return value
def FromString(self, string):
return string
def ToString(self, value):
return str(value)
def Help(self):
"""Returns a helpful string describing this type info."""
return "%s\n Description: %s\n Default: %s" % (self.name,
self.description,
self.GetDefault())
class RDFValueType(TypeInfoObject):
"""An arg which must be an RDFValue."""
rdfclass = rdfvalue.RDFValue
def __init__(self, rdfclass=None, **kwargs):
"""An arg which must be an RDFValue.
Args:
rdfclass: The RDFValue class that this arg must be.
**kwargs: Passthrough to base class.
"""
super().__init__(**kwargs)
self._type = self.rdfclass = rdfclass
def Validate(self, value):
"""Validate an RDFValue instance.
Args:
value: An RDFValue instance or something which may be used to instantiate
the correct instance.
Raises:
TypeValueError: If the value is not a valid RDFValue instance or the
required type.
Returns:
A Valid RDFValue instance.
"""
# Allow None as a default.
if value is None:
return
if not isinstance(value, self.rdfclass):
# Try to coerce the type to the correct rdf_class.
try:
return self.rdfclass(value)
except rdfvalue.InitializeError:
raise TypeValueError("Value for arg %s should be an %s" %
(self.name, self.rdfclass.__name__))
return value
def FromString(self, string):
return serialization.FromHumanReadable(self.rdfclass, string)
# This file can't depend on rdf_structs (cyclic dependency), so
# args/return values of type EnumContainer are not type annotated.
class RDFEnumType(TypeInfoObject):
"""An arg which must be an stringified enum value."""
def __init__(self, enum_container, **kwargs):
super().__init__(**kwargs)
self._enum_container = enum_container
def Value(self, value: Optional[str]):
if value is None:
return
if isinstance(value, str):
return self.FromString(value)
raise ValueError("Invalid value {value} for RDFEnumType.")
def FromString(self, string: str):
return self._enum_container.FromString(string)
class RDFStructDictType(TypeInfoObject):
"""An arg which must be a dict that maps into an RDFStruct."""
rdfclass = rdfvalue.RDFValue
def __init__(self, rdfclass=None, **kwargs):
"""An arg which must be an RDFStruct.
Args:
rdfclass: The RDFStruct subclass that this arg must be.
**kwargs: Passthrough to base class.
"""
super().__init__(**kwargs)
self._type = self.rdfclass = rdfclass
def Validate(self, value):
"""Validate the value.
Args:
value: Value is expected to be a dict-like object that a given RDFStruct
can be initialized from.
Raises:
TypeValueError: If the value is not a valid dict-like object that a given
RDFStruct can be initialized from.
Returns:
A valid instance of self.rdfclass or None.
"""
if value is None:
return None
if not isinstance(value, self.rdfclass):
# Try to coerce the type to the correct rdf_class.
try:
r = self.rdfclass()
r.FromDict(value)
return r
except (AttributeError, TypeError, rdfvalue.InitializeError):
# AttributeError is raised if value contains items that don't
# belong to the given rdfstruct.
# TypeError will be raised if value is not a dict-like object.
raise TypeValueError("Value for arg %s should be an %s" %
(self.name, self.rdfclass.__name__))
return value
def FromString(self, string):
return self.rdfclass.FromSerializedBytes(string)
class TypeDescriptorSet(object):
"""This is a collection of type descriptors.
This collection is effectively immutable. Add/Remove operations create a new
set instead of modifying the existing one.
"""
def __init__(self, *descriptors):
self.descriptors = list(descriptors)
self.descriptor_names = [x.name for x in descriptors]
self.descriptor_map = dict([(desc.name, desc) for desc in descriptors])
def __getitem__(self, item):
return self.descriptor_map[item]
def __contains__(self, item):
return item in self.descriptor_map
def get(self, item, default=None): # pylint: disable=g-bad-name
return self.descriptor_map.get(item, default)
def __iter__(self):
return iter(self.descriptors)
def __str__(self):
result = "\n ".join(
["%s: %s" % (x.name, x.description) for x in self.descriptors])
return "<TypeDescriptorSet for %s>\n %s\n</TypeDescriptorSet>\n" % (
self.__class__.__name__, result)
def __add__(self, other):
return self.Add(other)
def __radd__(self, other):
return self.Add(other)
def __iadd__(self, other):
return self.Add(other)
def Add(self, other):
"""Returns a copy of this set with a new element added."""
new_descriptors = []
for desc in self.descriptors + other.descriptors:
if desc not in new_descriptors:
new_descriptors.append(desc)
return TypeDescriptorSet(*new_descriptors)
def Append(self, desc):
"""Append the descriptor to this set."""
if desc not in self.descriptors:
self.descriptors.append(desc)
self.descriptor_map[desc.name] = desc
self.descriptor_names.append(desc.name)
def HasDescriptor(self, descriptor_name):
"""Checks whether this set has an element with the given name."""
return descriptor_name in self.descriptor_map
def Remove(self, *descriptor_names):
"""Returns a copy of this set without elements with given names."""
new_descriptor_map = self.descriptor_map.copy()
for name in descriptor_names:
new_descriptor_map.pop(name, None)
new_descriptors = [
desc for desc in self.descriptors
if desc in new_descriptor_map.values()
]
return TypeDescriptorSet(*new_descriptors)
def ParseArgs(self, args):
"""Parse and validate the args.
Note we pop all the args we consume here - so if there are any args we don't
know about, args will not be an empty dict after this. This allows the same
args to be parsed by several TypeDescriptorSets.
Args:
args: A dictionary of arguments that this TypeDescriptorSet might use. If
this dict does not have a required parameter, we still yield its default
value.
Yields:
A (name, value) tuple of the parsed args.
"""
for descriptor in self:
# Get the value from the kwargs or, if not specified, the default.
value = args.pop(descriptor.name, None)
if value is None:
# No need to validate the default value.
value = descriptor.default
else:
try:
# Validate this value - this should raise if the value provided is not
# acceptable to the type descriptor.
value = descriptor.Validate(value)
except Exception:
logging.error("Invalid value %s for arg %s", value, descriptor.name)
raise
yield descriptor.name, value
class Bool(TypeInfoObject):
"""A True or False value."""
_type = bool
def Validate(self, value):
if value not in [True, False]:
raise TypeValueError("Value must be True or False")
return value
def FromString(self, string):
"""Parse a bool from a string."""
if string.lower() in ("false", "no", "n"):
return False
if string.lower() in ("true", "yes", "y"):
return True
raise TypeValueError("%s is not recognized as a boolean value." % string)
class List(TypeInfoObject):
"""A list type. Turns another type into a list of those types."""
_type = list
def __init__(self, validator=None, **kwargs):
self.validator = validator
super().__init__(**kwargs)
def Validate(self, value):
"""Validate a potential list."""
if isinstance(value, str):
raise TypeValueError("Value must be an iterable not a string.")
elif not isinstance(value, (list, tuple)):
raise TypeValueError("%r not a valid List" % value)
# Validate each value in the list validates against our type.
return [self.validator.Validate(val) for val in value]
def FromString(self, string):
result = []
if string:
for x in string.split(","):
x = x.strip()
result.append(self.validator.FromString(x))
return result
def ToString(self, value):
return ",".join([self.validator.ToString(x) for x in value])
class String(TypeInfoObject):
"""A String type."""
_type = Text
def __init__(self, default: Text = "", **kwargs):
precondition.AssertType(default, Text)
super().__init__(default=default, **kwargs)
def Validate(self, value: Text) -> Text:
if not isinstance(value, Text):
raise TypeValueError("'{}' is not a valid string".format(value))
return value
def FromString(self, string: Text) -> Text:
precondition.AssertType(string, Text)
return string
def ToString(self, value: Text) -> Text:
precondition.AssertType(value, Text)
return value
class Bytes(TypeInfoObject):
"""A Bytes type."""
_type = bytes
def __init__(self, default: bytes = b"", **kwargs):
precondition.AssertType(default, bytes)
super().__init__(default=default, **kwargs)
def Validate(self, value: bytes) -> bytes:
if not isinstance(value, bytes):
raise TypeValueError("%s not a valid string" % value)
return value
def FromString(self, string: Text) -> bytes:
precondition.AssertType(string, Text)
return string.encode("utf-8")
def ToString(self, value: bytes) -> Text:
precondition.AssertType(value, bytes)
return value.decode("utf-8")
class Integer(TypeInfoObject):
"""An Integer number type."""
_type = int
def Validate(self, value):
if value is None:
value = 0
if not isinstance(value, int):
raise TypeValueError("Invalid value %s for Integer" % value)
return value
def FromString(self, string):
try:
return int(string)
except ValueError:
raise TypeValueError("Invalid value %s for Integer" % string)
class Float(Integer):
"""Type info describing a float."""
_type = float
def Validate(self, value):
try:
value = float(value)
except (ValueError, TypeError):
raise TypeValueError("Invalid value %s for Float" % value)
return value
def FromString(self, string):
try:
return float(string)
except (ValueError, TypeError):
raise TypeValueError("Invalid value %s for Float" % string)
class Choice(TypeInfoObject):
"""A choice from a set of allowed values."""
def __init__(self, choices=None, validator=None, **kwargs):
self.choices = choices
self.validator = validator or String()
super().__init__(**kwargs)
def Validate(self, value):
self.validator.Validate(value)
if value not in self.choices:
raise TypeValueError("%s not a valid instance string." % value)
return value
class MultiChoice(TypeInfoObject):
"""Choose a list of values from a set of allowed values."""
def __init__(self, choices=None, validator=None, **kwargs):
"""Create a multichoice object and validate choices.
Args:
choices: list of available choices
validator: validator to use for each of the list *items*; the validator for
the top level is always a List.
**kwargs: passed through to parent class.
"""
self.choices = choices
subvalidator = validator or String()
self.validator = List(validator=subvalidator)
# Check the choices match the validator
for choice in self.choices:
subvalidator.Validate(choice)
super().__init__(**kwargs)
def Validate(self, values):
self.validator.Validate(values)
for value in values:
if value not in self.choices:
raise TypeValueError("%s not a valid instance string." % value)
if len(values) != len(set(values)):
raise TypeValueError("Duplicate choice in: %s." % values)
return values
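# Illustrative sketch, not part of the original module: MultiChoice validates
# each selected item with the per-item validator, rejects values outside
# `choices`, and rejects duplicate selections. The choice values below are
# invented for the example.
#
#   color = MultiChoice(choices=["red", "green", "blue"], name="color")
#   color.Validate(["red", "blue"])   # -> ["red", "blue"]
#   color.Validate(["red", "red"])    # raises TypeValueError (duplicate choice)
#   color.Validate(["purple"])        # raises TypeValueError (not in choices)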
| apache-2.0 |
weylin/CloudBot | plugins/wikipedia.py | 2 | 1810 | """Searches wikipedia and returns first sentence of article
Scaevolus 2009"""
import re
import requests
from lxml import etree
from cloudbot import hook
from cloudbot.util import formatting
# security
parser = etree.XMLParser(resolve_entities=False, no_network=True)
api_prefix = "http://en.wikipedia.org/w/api.php"
search_url = api_prefix + "?action=opensearch&format=xml"
random_url = api_prefix + "?action=query&format=xml&list=random&rnlimit=1&rnnamespace=0"
paren_re = re.compile(r'\s*\(.*\)$')
@hook.command("wiki", "wikipedia", "w")
def wiki(text, reply):
"""<phrase> - Gets first sentence of Wikipedia article on <phrase>."""
try:
request = requests.get(search_url, params={'search': text.strip()})
request.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
reply("Could not get Wikipedia page: {}".format(e))
raise
x = etree.fromstring(request.text, parser=parser)
ns = '{http://opensearch.org/searchsuggest2}'
items = x.findall(ns + 'Section/' + ns + 'Item')
if not items:
if x.find('error') is not None:
return 'Could not get Wikipedia page: %(code)s: %(info)s' % x.find('error').attrib
else:
return 'No results found.'
def extract(item):
return [item.find(ns + i).text for i in
('Text', 'Description', 'Url')]
title, desc, url = extract(items[0])
if 'may refer to' in desc:
title, desc, url = extract(items[1])
title = paren_re.sub('', title)
if title.lower() not in desc.lower():
desc = title + desc
desc = ' '.join(desc.split()) # remove excess spaces
desc = formatting.truncate(desc, 200)
return '{} :: {}'.format(desc, requests.utils.quote(url, ':/%'))
| gpl-3.0 |
mikoim/funstuff | system simulation/2015/cell automaton/forest/bmp.py | 2 | 1356 | __author__ = 'Eshin Kunishima'
__license__ = 'MIT'
from PIL import Image
from forest import Forest
wood = Image.open('tile/wood.png')
bamboo = Image.open('tile/bamboo.png')
fire = Image.open('tile/fire.png')
soil = Image.open('tile/soil.png')
pool = Image.open('tile/pool.png')
road = Image.open('tile/road.png')
mountain = Image.open('tile/mountain.png')
def forest2bmp(forest: Forest, filename: str):
bmp = Image.new('RGB', (forest.x * 32, forest.y * 32), (255, 255, 255))
for y in range(forest.y):
for x in range(forest.x):
cell = str(forest.get_cell(x, y))
dx = x * 32
dy = y * 32
if cell == 'W':
bmp.paste(wood, (dx, dy))
elif cell == 'B':
bmp.paste(bamboo, (dx, dy))
elif cell == 'F':
bmp.paste(fire, (dx, dy))
elif cell == 'S':
bmp.paste(soil, (dx, dy))
elif cell == 'P':
bmp.paste(pool, (dx, dy))
elif cell == 'R':
bmp.paste(road, (dx, dy))
elif cell == 'M':
bmp.paste(mountain, (dx, dy))
bmp.save(filename, 'PNG')
f = Forest()
f.loads(open('default.txt', mode='r').read())
forest2bmp(f, 'r_0.png')
for t in range(24 * 7):
f.next_generation()
forest2bmp(f, 'r_%d.png' % (t + 1))
| mit |
dfunckt/django | tests/servers/test_liveserverthread.py | 96 | 1123 | from django.db import DEFAULT_DB_ALIAS, connections
from django.test import LiveServerTestCase, TestCase
class LiveServerThreadTest(TestCase):
def run_live_server_thread(self, connections_override=None):
thread = LiveServerTestCase._create_server_thread(connections_override)
thread.daemon = True
thread.start()
thread.is_ready.wait()
thread.terminate()
def test_closes_connections(self):
conn = connections[DEFAULT_DB_ALIAS]
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
self.skipTest("the sqlite backend's close() method is a no-op when using an in-memory database")
# Pass a connection to the thread to check they are being closed.
connections_override = {DEFAULT_DB_ALIAS: conn}
saved_sharing = conn.allow_thread_sharing
try:
conn.allow_thread_sharing = True
self.assertTrue(conn.is_usable())
self.run_live_server_thread(connections_override)
self.assertFalse(conn.is_usable())
finally:
conn.allow_thread_sharing = saved_sharing
| bsd-3-clause |
bhargav2408/python-for-android | python-build/python-libs/gdata/tests/gdata_tests/books/service_test.py | 127 | 2096 | #!/usr/bin/python
__author__ = "James Sams <[email protected]>"
import unittest
import getpass
import atom
import gdata.books
import gdata.books.service
from gdata import test_data
username = ""
password = ""
class BookCRUDTests(unittest.TestCase):
def setUp(self):
self.service = gdata.books.service.BookService(email=username,
password=password, source="Google-PythonGdataTest-1")
if username and password:
self.authenticated = True
self.service.ProgrammaticLogin()
else:
self.authenticated = False
def testPublicSearch(self):
entry = self.service.get_by_google_id("b7GZr5Btp30C")
self.assertEquals((entry.creator[0].text, entry.dc_title[0].text),
('John Rawls', 'A theory of justice'))
feed = self.service.search_by_keyword(isbn="9780198250548")
feed1 = self.service.search("9780198250548")
self.assertEquals(len(feed.entry), 1)
self.assertEquals(len(feed1.entry), 1)
def testLibraryCrd(self):
"""
The success of the create operations assumes the book was not already
in the library. If it was, there will not be a failure, but a successful
add will not actually be tested.
"""
if not self.authenticated:
return
entry = self.service.get_by_google_id("b7GZr5Btp30C")
entry = self.service.add_item_to_library(entry)
lib = list(self.service.get_library())
self.assert_(entry.to_dict()['title'] in
[x.to_dict()['title'] for x in lib])
self.service.remove_item_from_library(entry)
lib = list(self.service.get_library())
self.assert_(entry.to_dict()['title'] not in
[x.to_dict()['title'] for x in lib])
def testAnnotations(self):
"annotations do not behave as expected"
pass
if __name__ == "__main__":
print "Please use a test account. May cause data loss."
username = raw_input("Google Username: ").strip()
password = getpass.getpass()
unittest.main()
| apache-2.0 |
mtagle/airflow | tests/providers/google/cloud/hooks/test_compute.py | 4 | 40568 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
import unittest
import mock
from mock import PropertyMock
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.compute import ComputeEngineHook, GceOperationStatus
from tests.providers.google.cloud.utils.base_gcp_mock import (
GCP_PROJECT_ID_HOOK_UNIT_TEST, mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
GCE_ZONE = 'zone'
GCE_INSTANCE = 'instance'
GCE_INSTANCE_TEMPLATE = 'instance-template'
GCE_REQUEST_ID = 'request_id'
GCE_INSTANCE_GROUP_MANAGER = 'instance_group_manager'
class TestGcpComputeHookNoDefaultProjectId(unittest.TestCase):
def setUp(self):
with mock.patch('airflow.providers.google.cloud.hooks.base.CloudBaseHook.__init__',
new=mock_base_gcp_hook_no_default_project_id):
self.gce_hook_no_project_id = ComputeEngineHook(gcp_conn_id='test')
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.compute.build")
def test_gce_client_creation(self, mock_build, mock_authorize):
result = self.gce_hook_no_project_id.get_conn()
mock_build.assert_called_once_with(
'compute', 'v1', http=mock_authorize.return_value, cache_discovery=False
)
self.assertEqual(mock_build.return_value, result)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_start_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
start_method = get_conn.return_value.instances.return_value.start
execute_method = start_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.start_instance(
project_id='example-project',
zone=GCE_ZONE,
resource_id=GCE_INSTANCE)
self.assertIsNone(res)
start_method.assert_called_once_with(instance='instance', project='example-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(project_id='example-project',
operation_name='operation_id',
zone='zone')
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_start_instance_no_project_id(self, wait_for_operation_to_complete, get_conn, mock_project_id):
start_method = get_conn.return_value.instances.return_value.start
execute_method = start_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
with self.assertRaises(AirflowException) as cm:
self.gce_hook_no_project_id.start_instance(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE)
start_method.assert_not_called()
execute_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
wait_for_operation_to_complete.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_stop_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
stop_method = get_conn.return_value.instances.return_value.stop
execute_method = stop_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.stop_instance(
project_id='example-project',
zone=GCE_ZONE,
resource_id=GCE_INSTANCE)
self.assertIsNone(res)
stop_method.assert_called_once_with(instance='instance', project='example-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(project_id='example-project',
operation_name='operation_id',
zone='zone')
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_stop_instance_no_project_id(self, wait_for_operation_to_complete, get_conn, mock_project_id):
stop_method = get_conn.return_value.instances.return_value.stop
execute_method = stop_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
with self.assertRaises(AirflowException) as cm:
self.gce_hook_no_project_id.stop_instance(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE)
stop_method.assert_not_called()
execute_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
wait_for_operation_to_complete.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_set_machine_type_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
set_machine_type_method = get_conn.return_value.instances.return_value.setMachineType
execute_method = set_machine_type_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.set_machine_type(
body={},
project_id='example-project',
zone=GCE_ZONE,
resource_id=GCE_INSTANCE)
self.assertIsNone(res)
set_machine_type_method.assert_called_once_with(body={}, instance='instance',
project='example-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(project_id='example-project',
operation_name='operation_id',
zone='zone')
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_set_machine_type_no_project_id(self, wait_for_operation_to_complete, get_conn, mock_project_id):
set_machine_type_method = get_conn.return_value.instances.return_value.setMachineType
execute_method = set_machine_type_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
with self.assertRaises(AirflowException) as cm:
self.gce_hook_no_project_id.set_machine_type(
body={},
zone=GCE_ZONE,
resource_id=GCE_INSTANCE)
set_machine_type_method.assert_not_called()
execute_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
wait_for_operation_to_complete.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_template_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
get_method = get_conn.return_value.instanceTemplates.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.get_instance_template(
resource_id=GCE_INSTANCE_TEMPLATE,
project_id='example-project'
)
self.assertIsNotNone(res)
get_method.assert_called_once_with(instanceTemplate='instance-template', project='example-project')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_template_no_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
get_method = get_conn.return_value.instanceTemplates.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
with self.assertRaises(AirflowException) as cm:
self.gce_hook_no_project_id.get_instance_template(
resource_id=GCE_INSTANCE_TEMPLATE
)
get_method.assert_not_called()
execute_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
wait_for_operation_to_complete.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_insert_instance_template_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
insert_method = get_conn.return_value.instanceTemplates.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.insert_instance_template(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
body={},
request_id=GCE_REQUEST_ID
)
self.assertIsNone(res)
insert_method.assert_called_once_with(body={}, project='example-project', requestId='request_id')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(project_id='example-project',
operation_name='operation_id')
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_insert_instance_template_no_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
insert_method = get_conn.return_value.instanceTemplates.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
with self.assertRaises(AirflowException) as cm:
self.gce_hook_no_project_id.insert_instance_template(
body={},
request_id=GCE_REQUEST_ID
)
insert_method.assert_not_called()
execute_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
wait_for_operation_to_complete.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_group_manager_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
get_method = get_conn.return_value.instanceGroupManagers.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.get_instance_group_manager(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER
)
self.assertIsNotNone(res)
get_method.assert_called_once_with(instanceGroupManager='instance_group_manager',
project='example-project',
zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_group_manager_no_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
get_method = get_conn.return_value.instanceGroupManagers.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
with self.assertRaises(AirflowException) as cm:
self.gce_hook_no_project_id.get_instance_group_manager(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER
)
get_method.assert_not_called()
execute_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
wait_for_operation_to_complete.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_patch_instance_group_manager_overridden_project_id(self,
wait_for_operation_to_complete, get_conn):
patch_method = get_conn.return_value.instanceGroupManagers.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook_no_project_id.patch_instance_group_manager(
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER,
body={},
request_id=GCE_REQUEST_ID
)
self.assertIsNone(res)
patch_method.assert_called_once_with(
body={},
instanceGroupManager='instance_group_manager',
project='example-project',
requestId='request_id',
zone='zone'
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(operation_name='operation_id',
project_id='example-project',
zone='zone')
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=None
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_patch_instance_group_manager_no_project_id(
self, wait_for_operation_to_complete, get_conn, mock_project_id
):
patch_method = get_conn.return_value.instanceGroupManagers.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
with self.assertRaises(AirflowException) as cm:
self.gce_hook_no_project_id.patch_instance_group_manager(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER,
body={},
request_id=GCE_REQUEST_ID
)
patch_method.assert_not_called()
execute_method.assert_not_called()
err = cm.exception
self.assertIn("The project id must be passed", str(err))
wait_for_operation_to_complete.assert_not_called()
class TestGcpComputeHookDefaultProjectId(unittest.TestCase):
def setUp(self):
with mock.patch('airflow.providers.google.cloud.hooks.base.CloudBaseHook.__init__',
new=mock_base_gcp_hook_default_project_id):
self.gce_hook = ComputeEngineHook(gcp_conn_id='test')
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_start_instance(self, wait_for_operation_to_complete, get_conn, mock_project_id):
start_method = get_conn.return_value.instances.return_value.start
execute_method = start_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.start_instance(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE)
self.assertIsNone(res)
start_method.assert_called_once_with(instance='instance', project='example-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(project_id='example-project',
operation_name='operation_id',
zone='zone')
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_start_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
start_method = get_conn.return_value.instances.return_value.start
execute_method = start_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.start_instance(
project_id='new-project',
zone=GCE_ZONE,
resource_id=GCE_INSTANCE)
self.assertIsNone(res)
start_method.assert_called_once_with(instance='instance', project='new-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(project_id='new-project',
operation_name='operation_id',
zone='zone')
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_stop_instance(self, wait_for_operation_to_complete, get_conn, mock_project_id):
stop_method = get_conn.return_value.instances.return_value.stop
execute_method = stop_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.stop_instance(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE)
self.assertIsNone(res)
stop_method.assert_called_once_with(instance='instance', project='example-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(project_id='example-project',
operation_name='operation_id',
zone='zone')
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_stop_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
stop_method = get_conn.return_value.instances.return_value.stop
execute_method = stop_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.stop_instance(
project_id='new-project',
zone=GCE_ZONE,
resource_id=GCE_INSTANCE)
self.assertIsNone(res)
stop_method.assert_called_once_with(instance='instance', project='new-project', zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(project_id='new-project',
operation_name='operation_id',
zone='zone')
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_set_machine_type_instance(self, wait_for_operation_to_complete, get_conn, mock_project_id):
execute_method = get_conn.return_value.instances.return_value.setMachineType.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.set_machine_type(
body={},
zone=GCE_ZONE,
resource_id=GCE_INSTANCE)
self.assertIsNone(res)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(project_id='example-project',
operation_name='operation_id',
zone='zone')
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_set_machine_type_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
execute_method = get_conn.return_value.instances.return_value.setMachineType.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.set_machine_type(
project_id='new-project',
body={},
zone=GCE_ZONE,
resource_id=GCE_INSTANCE)
self.assertIsNone(res)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(project_id='new-project',
operation_name='operation_id',
zone='zone')
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_template(self, wait_for_operation_to_complete, get_conn, mock_project_id):
get_method = get_conn.return_value.instanceTemplates.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.get_instance_template(
resource_id=GCE_INSTANCE_TEMPLATE)
self.assertIsNotNone(res)
get_method.assert_called_once_with(instanceTemplate='instance-template', project='example-project')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_template_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
get_method = get_conn.return_value.instanceTemplates.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.get_instance_template(
project_id='new-project',
resource_id=GCE_INSTANCE_TEMPLATE)
self.assertIsNotNone(res)
get_method.assert_called_once_with(instanceTemplate='instance-template', project='new-project')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_insert_instance_template(self, wait_for_operation_to_complete, get_conn, mock_project_id):
insert_method = get_conn.return_value.instanceTemplates.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.insert_instance_template(
body={},
request_id=GCE_REQUEST_ID
)
self.assertIsNone(res)
insert_method.assert_called_once_with(body={}, project='example-project', requestId='request_id')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(project_id='example-project',
operation_name='operation_id')
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_insert_instance_template_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
insert_method = get_conn.return_value.instanceTemplates.return_value.insert
execute_method = insert_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.insert_instance_template(
project_id='new-project',
body={},
request_id=GCE_REQUEST_ID
)
self.assertIsNone(res)
insert_method.assert_called_once_with(body={}, project='new-project', requestId='request_id')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(project_id='new-project',
operation_name='operation_id')
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_group_manager(self, wait_for_operation_to_complete, get_conn, mock_project_id):
get_method = get_conn.return_value.instanceGroupManagers.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.get_instance_group_manager(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER
)
self.assertIsNotNone(res)
get_method.assert_called_once_with(instanceGroupManager='instance_group_manager',
project='example-project',
zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_get_instance_group_manager_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
get_method = get_conn.return_value.instanceGroupManagers.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.get_instance_group_manager(
project_id='new-project',
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER
)
self.assertIsNotNone(res)
get_method.assert_called_once_with(instanceGroupManager='instance_group_manager',
project='new-project',
zone='zone')
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_not_called()
@mock.patch(
'airflow.providers.google.cloud.hooks.base.CloudBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_patch_instance_group_manager(self, wait_for_operation_to_complete, get_conn, mock_project_id):
patch_method = get_conn.return_value.instanceGroupManagers.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.patch_instance_group_manager(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER,
body={},
request_id=GCE_REQUEST_ID
)
self.assertIsNone(res)
patch_method.assert_called_once_with(
body={},
instanceGroupManager='instance_group_manager',
project='example-project',
requestId='request_id',
zone='zone'
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(operation_name='operation_id',
project_id='example-project',
zone='zone')
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete'
)
def test_patch_instance_group_manager_overridden_project_id(self,
wait_for_operation_to_complete,
get_conn):
patch_method = get_conn.return_value.instanceGroupManagers.return_value.patch
execute_method = patch_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.patch_instance_group_manager(
project_id='new-project',
zone=GCE_ZONE,
resource_id=GCE_INSTANCE_GROUP_MANAGER,
body={},
request_id=GCE_REQUEST_ID
)
self.assertIsNone(res)
patch_method.assert_called_once_with(
body={},
instanceGroupManager='instance_group_manager',
project='new-project',
requestId='request_id',
zone='zone'
)
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(operation_name='operation_id',
project_id='new-project',
zone='zone')
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._check_global_operation_status'
)
def test_wait_for_operation_to_complete_no_zone(self, mock_operation_status, mock_get_conn):
service = "test-service"
project_id = "test-project"
operation_name = "test-operation"
num_retries = self.gce_hook.num_retries
# Test success
mock_get_conn.return_value = service
mock_operation_status.return_value = {'status': GceOperationStatus.DONE, 'error': None}
self.gce_hook._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name,
zone=None
)
mock_operation_status.assert_called_once_with(service=service,
operation_name=operation_name,
project_id=project_id,
num_retries=num_retries
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch(
'airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._check_global_operation_status'
)
def test_wait_for_operation_to_complete_no_zone_error(self, mock_operation_status, mock_get_conn):
service = "test-service"
project_id = "test-project"
operation_name = "test-operation"
# Test error
mock_get_conn.return_value = service
mock_operation_status.return_value = {'status': GceOperationStatus.DONE,
'error': {'errors': "some nasty errors"},
'httpErrorStatusCode': 400,
'httpErrorMessage': 'sample msg'
}
with self.assertRaises(AirflowException):
self.gce_hook._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name,
zone=None
)
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn')
@mock.patch('airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._check_zone_operation_status')
def test_wait_for_operation_to_complete_with_zone(self, mock_operation_status, mock_get_conn):
service = "test-service"
project_id = "test-project"
operation_name = "test-operation"
zone = 'west-europe3'
num_retries = self.gce_hook.num_retries
# Test success
mock_get_conn.return_value = service
mock_operation_status.return_value = {'status': GceOperationStatus.DONE, 'error': None}
self.gce_hook._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name,
zone=zone
)
mock_operation_status.assert_called_once_with(service, operation_name, project_id, zone, num_retries)
| apache-2.0 |
mhogg/scipy | scipy/sparse/bsr.py | 66 | 20937 | """Compressed Block Sparse Row matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['bsr_matrix', 'isspmatrix_bsr']
from warnings import warn
import numpy as np
from .data import _data_matrix, _minmax_mixin
from .compressed import _cs_matrix
from .base import isspmatrix, _formats
from .sputils import isshape, getdtype, to_native, upcast, get_index_dtype
from . import _sparsetools
from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_pass1,
bsr_matmat_pass2, bsr_transpose, bsr_sort_indices)
class bsr_matrix(_cs_matrix, _minmax_mixin):
"""Block Sparse Row matrix
This can be instantiated in several ways:
bsr_matrix(D, [blocksize=(R,C)])
where D is a dense matrix or 2-D ndarray.
bsr_matrix(S, [blocksize=(R,C)])
with another sparse matrix S (equivalent to S.tobsr())
bsr_matrix((M, N), [blocksize=(R,C), dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])
where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
bsr_matrix((data, indices, indptr), [shape=(M, N)])
is the standard BSR representation where the block column
indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
and their corresponding block values are stored in
``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not
supplied, the matrix dimensions are inferred from the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
Data array of the matrix
indices
BSR format index array
indptr
BSR format index pointer array
blocksize
Block size of the matrix
has_sorted_indices
Whether indices are sorted
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
**Summary of BSR format**
The Block Compressed Row (BSR) format is very similar to the Compressed
Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
sub matrices like the last example below. Block matrices often arise in
vector-valued finite element discretizations. In such cases, BSR is
considerably more efficient than CSR and CSC for many sparse arithmetic
operations.
**Blocksize**
The blocksize (R,C) must evenly divide the shape of the matrix (M,N).
That is, R and C must satisfy the relationship ``M % R = 0`` and
``N % C = 0``.
If no blocksize is specified, a simple heuristic is applied to determine
an appropriate blocksize.
Examples
--------
>>> from scipy.sparse import bsr_matrix
>>> bsr_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 0, 1, 2, 2, 2])
>>> col = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3 ,4, 5, 6])
>>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
>>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()
array([[1, 1, 0, 0, 2, 2],
[1, 1, 0, 0, 2, 2],
[0, 0, 0, 0, 3, 3],
[0, 0, 0, 0, 3, 3],
[4, 4, 5, 5, 6, 6],
[4, 4, 5, 5, 6, 6]])
"""
def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
_data_matrix.__init__(self)
if isspmatrix(arg1):
if isspmatrix_bsr(arg1) and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.tobsr(blocksize=blocksize)
self._set_self(arg1)
elif isinstance(arg1,tuple):
if isshape(arg1):
# it's a tuple of matrix dimensions (M,N)
self.shape = arg1
M,N = self.shape
# process blocksize
if blocksize is None:
blocksize = (1,1)
else:
if not isshape(blocksize):
raise ValueError('invalid blocksize=%s' % blocksize)
blocksize = tuple(blocksize)
self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))
R,C = blocksize
if (M % R) != 0 or (N % C) != 0:
raise ValueError('shape must be multiple of blocksize')
idx_dtype = get_index_dtype(maxval=N//C)
self.indices = np.zeros(0, dtype=idx_dtype)
self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)
elif len(arg1) == 2:
# (data,(row,col)) format
from .coo import coo_matrix
self._set_self(coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize))
elif len(arg1) == 3:
# (data,indices,indptr) format
(data, indices, indptr) = arg1
idx_dtype = get_index_dtype((indices, indptr), check_contents=True)
self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
self.data = np.array(data, copy=copy, dtype=getdtype(dtype, data))
else:
raise ValueError('unrecognized bsr_matrix constructor usage')
else:
# must be dense
try:
arg1 = np.asarray(arg1)
except:
raise ValueError("unrecognized form for"
" %s_matrix constructor" % self.format)
from .coo import coo_matrix
arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize)
self._set_self(arg1)
if shape is not None:
self.shape = shape # spmatrix will check for errors
else:
if self.shape is None:
# shape not already set, try to infer dimensions
try:
M = len(self.indptr) - 1
N = self.indices.max() + 1
except:
raise ValueError('unable to infer matrix dimensions')
else:
R,C = self.blocksize
self.shape = (M*R,N*C)
if self.shape is None:
if shape is None:
# TODO infer shape here
raise ValueError('need to infer shape')
else:
self.shape = shape
if dtype is not None:
self.data = self.data.astype(dtype)
self.check_format(full_check=False)
def check_format(self, full_check=True):
"""check whether the matrix format is valid
*Parameters*:
full_check:
True - rigorous check, O(N) operations : default
False - basic check, O(1) operations
"""
M,N = self.shape
R,C = self.blocksize
# index arrays should have integer data types
if self.indptr.dtype.kind != 'i':
warn("indptr array has non-integer dtype (%s)"
% self.indptr.dtype.name)
if self.indices.dtype.kind != 'i':
warn("indices array has non-integer dtype (%s)"
% self.indices.dtype.name)
idx_dtype = get_index_dtype((self.indices, self.indptr))
self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
self.indices = np.asarray(self.indices, dtype=idx_dtype)
self.data = to_native(self.data)
# check array shapes
if self.indices.ndim != 1 or self.indptr.ndim != 1:
raise ValueError("indices, and indptr should be 1-D")
if self.data.ndim != 3:
raise ValueError("data should be 3-D")
# check index pointer
if (len(self.indptr) != M//R + 1):
raise ValueError("index pointer size (%d) should be (%d)" %
(len(self.indptr), M//R + 1))
if (self.indptr[0] != 0):
raise ValueError("index pointer should start with 0")
# check index and data arrays
if (len(self.indices) != len(self.data)):
raise ValueError("indices and data should have the same size")
if (self.indptr[-1] > len(self.indices)):
raise ValueError("Last value of index pointer should be less than "
"the size of index and data arrays")
self.prune()
if full_check:
# check format validity (more expensive)
if self.nnz > 0:
if self.indices.max() >= N//C:
raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max()))
if self.indices.min() < 0:
raise ValueError("column index values must be >= 0")
if np.diff(self.indptr).min() < 0:
raise ValueError("index pointer values must form a "
"non-decreasing sequence")
# if not self.has_sorted_indices():
# warn('Indices were not in sorted order. Sorting indices.')
# self.sort_indices(check_first=False)
def _get_blocksize(self):
return self.data.shape[1:]
blocksize = property(fget=_get_blocksize)
def getnnz(self):
R,C = self.blocksize
return int(self.indptr[-1] * R * C)
nnz = property(fget=getnnz)
def __repr__(self):
nnz = self.getnnz()
format = self.getformat()
return "<%dx%d sparse matrix of type '%s'\n" \
"\twith %d stored elements (blocksize = %dx%d) in %s format>" % \
(self.shape + (self.dtype.type, nnz) + self.blocksize +
(_formats[format][1],))
def diagonal(self):
"""Returns the main diagonal of the matrix
"""
M,N = self.shape
R,C = self.blocksize
y = np.empty(min(M,N), dtype=upcast(self.dtype))
_sparsetools.bsr_diagonal(M//R, N//C, R, C,
self.indptr, self.indices,
np.ravel(self.data), y)
return y
##########################
# NotImplemented methods #
##########################
def getdata(self,ind):
raise NotImplementedError
def __getitem__(self,key):
raise NotImplementedError
def __setitem__(self,key,val):
raise NotImplementedError
######################
# Arithmetic methods #
######################
def matvec(self, other):
return self * other
def matmat(self, other):
return self * other
def _mul_vector(self, other):
M,N = self.shape
R,C = self.blocksize
result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))
bsr_matvec(M//R, N//C, R, C,
self.indptr, self.indices, self.data.ravel(),
other, result)
return result
def _mul_multivector(self,other):
R,C = self.blocksize
M,N = self.shape
n_vecs = other.shape[1] # number of column vectors
result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))
bsr_matvecs(M//R, N//C, n_vecs, R, C,
self.indptr, self.indices, self.data.ravel(),
other.ravel(), result.ravel())
return result
def _mul_sparse_matrix(self, other):
M, K1 = self.shape
K2, N = other.shape
R,n = self.blocksize
# convert to this format
if isspmatrix_bsr(other):
C = other.blocksize[1]
else:
C = 1
from .csr import isspmatrix_csr
if isspmatrix_csr(other) and n == 1:
other = other.tobsr(blocksize=(n,C), copy=False) # lightweight conversion
else:
other = other.tobsr(blocksize=(n,C))
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=(M//R)*(N//C))
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
csr_matmat_pass1(M//R, N//C,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
other.indptr.astype(idx_dtype),
other.indices.astype(idx_dtype),
indptr)
bnnz = indptr[-1]
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=bnnz)
indptr = indptr.astype(idx_dtype)
indices = np.empty(bnnz, dtype=idx_dtype)
data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))
bsr_matmat_pass2(M//R, N//C, R, C, n,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
np.ravel(self.data),
other.indptr.astype(idx_dtype),
other.indices.astype(idx_dtype),
np.ravel(other.data),
indptr,
indices,
data)
data = data.reshape(-1,R,C)
# TODO eliminate zeros
return bsr_matrix((data,indices,indptr),shape=(M,N),blocksize=(R,C))
######################
# Conversion methods #
######################
def tobsr(self,blocksize=None,copy=False):
if blocksize not in [None, self.blocksize]:
return self.tocsr().tobsr(blocksize=blocksize)
if copy:
return self.copy()
else:
return self
def tocsr(self):
return self.tocoo(copy=False).tocsr()
# TODO make this more efficient
def tocsc(self):
return self.tocoo(copy=False).tocsc()
def tocoo(self,copy=True):
"""Convert this matrix to COOrdinate format.
When copy=False the data array will be shared between
this matrix and the resultant coo_matrix.
"""
M,N = self.shape
R,C = self.blocksize
indptr_diff = np.diff(self.indptr)
if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:
# Check for potential overflow
indptr_diff_limited = indptr_diff.astype(np.intp)
if np.any(indptr_diff_limited != indptr_diff):
raise ValueError("Matrix too big to convert")
indptr_diff = indptr_diff_limited
row = (R * np.arange(M//R)).repeat(indptr_diff)
row = row.repeat(R*C).reshape(-1,R,C)
row += np.tile(np.arange(R).reshape(-1,1), (1,C))
row = row.reshape(-1)
col = (C * self.indices).repeat(R*C).reshape(-1,R,C)
col += np.tile(np.arange(C), (R,1))
col = col.reshape(-1)
data = self.data.reshape(-1)
if copy:
data = data.copy()
from .coo import coo_matrix
return coo_matrix((data,(row,col)), shape=self.shape)
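# Illustrative note, not part of the original scipy source: with copy=False
# the returned coo_matrix shares the underlying data buffer, so in-place
# modifications are visible through both objects. A minimal sketch:
#
#   A = bsr_matrix(np.eye(4), blocksize=(2, 2))
#   C = A.tocoo(copy=False)
#   C.data[:] = 0   # also zeroes the blocks stored in A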
def transpose(self):
R,C = self.blocksize
M,N = self.shape
NBLK = self.nnz//(R*C)
if self.nnz == 0:
return bsr_matrix((N,M), blocksize=(C,R),
dtype=self.dtype)
indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)
indices = np.empty(NBLK, dtype=self.indices.dtype)
data = np.empty((NBLK,C,R), dtype=self.data.dtype)
bsr_transpose(M//R, N//C, R, C,
self.indptr, self.indices, self.data.ravel(),
indptr, indices, data.ravel())
return bsr_matrix((data,indices,indptr), shape=(N,M))
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
def eliminate_zeros(self):
R,C = self.blocksize
M,N = self.shape
mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) # nonzero blocks
nonzero_blocks = mask.nonzero()[0]
if len(nonzero_blocks) == 0:
return # nothing to do
self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]
from .csr import csr_matrix
# modifies self.indptr and self.indices *in place*
# since CSR constructor may end up in making copies (in case
# our index arrays are invalid in some way), play it safe
proxy = csr_matrix((mask,self.indices,self.indptr),shape=(M//R,N//C))
proxy.indices = self.indices
proxy.indptr = self.indptr
proxy.eliminate_zeros()
self.prune()
def sum_duplicates(self):
raise NotImplementedError
def sort_indices(self):
"""Sort the indices of this matrix *in place*
"""
if self.has_sorted_indices:
return
R,C = self.blocksize
M,N = self.shape
bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())
self.has_sorted_indices = True
def prune(self):
""" Remove empty space after all non-zero elements.
"""
R,C = self.blocksize
M,N = self.shape
if len(self.indptr) != M//R + 1:
raise ValueError("index pointer has invalid length")
bnnz = self.indptr[-1]
if len(self.indices) < bnnz:
raise ValueError("indices array has too few elements")
if len(self.data) < bnnz:
raise ValueError("data array has too few elements")
self.data = self.data[:bnnz]
self.indices = self.indices[:bnnz]
# utility functions
def _binopt(self, other, op, in_shape=None, out_shape=None):
"""Apply the binary operation fn to two sparse matrices."""
# Ideally we'd take the GCDs of the blocksize dimensions
# and explode self and other to match.
other = self.__class__(other, blocksize=self.blocksize)
# e.g. bsr_plus_bsr, etc.
fn = getattr(_sparsetools, self.format + op + self.format)
R,C = self.blocksize
max_bnnz = len(self.data) + len(other.data)
idx_dtype = get_index_dtype((self.indptr, self.indices,
other.indptr, other.indices),
maxval=max_bnnz)
indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
indices = np.empty(max_bnnz, dtype=idx_dtype)
bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
if op in bool_ops:
data = np.empty(R*C*max_bnnz, dtype=np.bool_)
else:
data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))
fn(self.shape[0]//R, self.shape[1]//C, R, C,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
np.ravel(self.data),
other.indptr.astype(idx_dtype),
other.indices.astype(idx_dtype),
np.ravel(other.data),
indptr,
indices,
data)
actual_bnnz = indptr[-1]
indices = indices[:actual_bnnz]
data = data[:R*C*actual_bnnz]
if actual_bnnz < max_bnnz/2:
indices = indices.copy()
data = data.copy()
data = data.reshape(-1,R,C)
return self.__class__((data, indices, indptr), shape=self.shape)
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data,self.indices.copy(),self.indptr.copy()),
shape=self.shape,dtype=data.dtype)
else:
return self.__class__((data,self.indices,self.indptr),
shape=self.shape,dtype=data.dtype)
# # these functions are used by the parent class
    # # to remove redundancy between bsc_matrix and bsr_matrix
# def _swap(self,x):
# """swap the members of x if this is a column-oriented matrix
# """
# return (x[0],x[1])
def isspmatrix_bsr(x):
return isinstance(x, bsr_matrix)
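# Illustrative sketch of the cleanup helpers defined above. It assumes SciPy is
# installed and uses an arbitrary 6x6 matrix stored as 2x2 blocks, one of which
# is explicitly all zero.
if __name__ == '__main__':
    import numpy as np
    from scipy.sparse import bsr_matrix as _bsr

    indptr = np.array([0, 2, 2, 2])
    indices = np.array([0, 1])
    data = np.array([np.ones((2, 2)), np.zeros((2, 2))])  # second block is all zero
    A = _bsr((data, indices, indptr), shape=(6, 6))
    A.eliminate_zeros()   # drops the explicitly stored all-zero block
    A.sort_indices()      # no-op here, indices are already sorted
    print(A.nnz)          # only the block of ones remains stored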
| bsd-3-clause |
techaddict/spark | examples/src/main/python/ml/imputer_example.py | 79 | 1513 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example demonstrating Imputer.
Run with:
bin/spark-submit examples/src/main/python/ml/imputer_example.py
"""
# $example on$
from pyspark.ml.feature import Imputer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("ImputerExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame([
(1.0, float("nan")),
(2.0, float("nan")),
(float("nan"), 3.0),
(4.0, 4.0),
(5.0, 5.0)
], ["a", "b"])
imputer = Imputer(inputCols=["a", "b"], outputCols=["out_a", "out_b"])
model = imputer.fit(df)
model.transform(df).show()
# $example off$
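    # Illustrative variant (output column names are hypothetical): the Imputer's
    # strategy parameter also accepts "median", reusing the df defined above.
    median_imputer = Imputer(inputCols=["a", "b"], outputCols=["med_a", "med_b"])
    median_imputer.setStrategy("median")
    median_imputer.fit(df).transform(df).show()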
spark.stop()
| apache-2.0 |
pinterest/pinball | tests/pinball/workflow/job_executor_test.py | 1 | 13681 | # Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation tests for the job executor."""
import mock
import os
import subprocess
import time
import unittest
from pinball.config.pinball_config import PinballConfig
from pinball.workflow.event import Event
from pinball.workflow.job import ShellJob
from pinball.workflow.job_executor import ExecutionRecord
from pinball.workflow.job_executor import ShellJobExecutor
__author__ = 'Pawel Garbacki'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
class ShellJobExecutorTestCase(unittest.TestCase):
def setUp(self):
self._data_builder = mock.Mock()
self._emailer = mock.Mock()
job = ShellJob(name='some_job',
command='printf "line1\\nline2\\nline3";'
'printf "line1\\nline2" >&2',
emails=['[email protected]'],
warn_timeout_sec=10,
abort_timeout_sec=20)
self._executor = ShellJobExecutor('some_workflow', '123', 'some_job',
job, self._data_builder,
self._emailer)
# Set PinballConfig to enable s3 log saver
PinballConfig.S3_LOGS_DIR_PREFIX = 's3n://pinball/tmp/'
PinballConfig.S3_LOGS_DIR = \
PinballConfig.S3_LOGS_DIR_PREFIX \
+ PinballConfig.JOB_LOG_PATH_PREFIX
@mock.patch('pinball.workflow.log_saver.S3FileLogSaver._get_or_create_s3_key')
@mock.patch('__builtin__.open')
def test_token_lost(self, open_mock, get_s3_key_mock):
file_mock = mock.MagicMock()
open_mock.return_value = file_mock
file_mock.__enter__.return_value = file_mock
s3_key_mock = mock.MagicMock()
get_s3_key_mock.return_value = s3_key_mock
s3_key_mock.__enter__.return_value = s3_key_mock
execution_record = ExecutionRecord(start_time=10)
self._executor.job.history = [execution_record]
self.assertFalse(self._executor.prepare())
file_mock.write.assert_called_once_with('executor failed to renew job '
'ownership on time\n')
get_s3_key_mock.assert_called_once_with('s3n://pinball/tmp/pinball_job_logs/'
'some_workflow/123/some_job.10.pinlog')
@mock.patch('pinball.workflow.log_saver.S3FileLogSaver.open')
@mock.patch('os.path.exists')
@mock.patch('__builtin__.open')
def test_events(self, open_mock, exists_mock, s3_open_mock):
file_mock = mock.MagicMock()
open_mock.return_value = file_mock
file_mock.__enter__.return_value = file_mock
exists_mock.return_value = True
some_event = Event(creator='some_creator')
some_other_event = Event(creator='some_other_creator')
self._executor.job.events = [some_event, some_other_event]
self.assertTrue(self._executor.prepare())
self.assertEqual(1, len(self._executor.job.history))
execution_record = self._executor.job.history[0]
self.assertEqual([some_event, some_other_event],
execution_record.events)
self.assertEqual(s3_open_mock.call_count, 2)
def test_disabled(self):
self._executor.job.disabled = True
self.assertTrue(self._executor.prepare())
self.assertEqual(1, len(self._executor.job.history))
execution_record = self._executor.job.history[0]
self.assertEqual('DISABLED', execution_record.info)
@mock.patch('subprocess.Popen')
def test_execute_cleanup(self, subprocess_mock):
self._executor.job.cleanup_template = 'cleanup %(kill_id)s'
execution_record = ExecutionRecord()
execution_record.properties['kill_id'] = ['123', '456']
self._executor.job.history = [execution_record]
self._executor._execute_cleanup()
env = os.environ.copy()
env.pop('DJANGO_SETTINGS_MODULE', None)
subprocess_mock.assert_called_with('cleanup 123,456',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
env=env,
preexec_fn=os.setsid)
@mock.patch('time.time')
def test_check_timeout_noop(self, time_mock):
execution_record = ExecutionRecord(start_time=10)
self._executor.job.history = [execution_record]
time_mock.return_value = 15
self._executor._check_timeouts()
self.assertEqual(
0, self._emailer.send_job_timeout_warning_message.call_count)
time_mock.return_value = 25
self._data_builder.get_schedule.return_value = None
job_execution_data = mock.Mock()
self._data_builder.get_execution.return_value = job_execution_data
self._executor._check_timeouts()
self._data_builder.get_schedule.assert_called_once_with(
'some_workflow')
self._data_builder.get_execution.assert_called_once_with(
'some_workflow', '123', 'some_job', 0)
self._emailer.send_job_timeout_warning_message.assert_called_once_with(
['[email protected]'], job_execution_data)
time_mock.return_value = 35
self._executor._check_timeouts()
self.assertTrue(self._executor._aborted)
@mock.patch('pinball.workflow.log_saver.S3FileLogSaver._get_or_create_s3_key')
@mock.patch('os.path.exists')
@mock.patch('__builtin__.open')
def test_execute(self, open_mock, exists_mock, get_s3_key_mock):
file_mock = mock.MagicMock()
open_mock.return_value = file_mock
file_mock.__enter__.return_value = file_mock
s3_key_mock = mock.MagicMock()
get_s3_key_mock.return_value = s3_key_mock
s3_key_mock.__enter__.return_value = s3_key_mock
self.assertTrue(self._executor.prepare())
self.assertTrue(self._executor.execute())
file_mock.write.assert_has_calls(
[mock.call('line1\n'), mock.call('line2\n'), mock.call('line3'),
mock.call('line1\n'), mock.call('line2')],
any_order=True)
self.assertEqual(file_mock.write.call_count, 5)
self.assertEqual(1, len(self._executor.job.history))
execution_record = self._executor.job.history[0]
self.assertEqual(0, execution_record.exit_code)
self.assertEqual(2, get_s3_key_mock.call_count)
@mock.patch('pinball.workflow.log_saver.S3FileLogSaver._get_or_create_s3_key')
@mock.patch('os.makedirs')
@mock.patch('__builtin__.open')
def test_abort(self, open_mock, _, get_s3_key_mock):
file_mock = mock.MagicMock()
open_mock.return_value = file_mock
file_mock.__enter__.return_value = file_mock
s3_key_mock = mock.MagicMock()
get_s3_key_mock.return_value = s3_key_mock
s3_key_mock.__enter__.return_value = s3_key_mock
self.assertTrue(self._executor.prepare())
self._executor.abort()
self.assertFalse(self._executor.execute())
self.assertEqual(1, len(self._executor.job.history))
execution_record = self._executor.job.history[0]
self.assertEqual(1, execution_record.exit_code)
@mock.patch('pinball.workflow.log_saver.S3FileLogSaver._get_or_create_s3_key')
@mock.patch('os.path.exists')
@mock.patch('__builtin__.open')
def test_execute_long_line(self, open_mock, exists_mock, get_s3_key_mock):
file_mock = mock.MagicMock()
open_mock.return_value = file_mock
file_mock.__enter__.return_value = file_mock
s3_key_mock = mock.MagicMock()
get_s3_key_mock.return_value = s3_key_mock
s3_key_mock.__enter__.return_value = s3_key_mock
job = ShellJob(name='some_job',
command="printf \"%s\"" % ('a' * 20000),
emails=['[email protected]'],
warn_timeout_sec=10,
abort_timeout_sec=20)
executor = ShellJobExecutor('some_workflow', '123', 'some_job',
job, self._data_builder,
self._emailer)
self.assertTrue(executor.prepare())
self.assertTrue(executor.execute())
file_mock.write.assert_has_calls(
[mock.call('a' * 16384), mock.call('a' * 3616)])
self.assertEqual(1, len(executor.job.history))
execution_record = executor.job.history[0]
self.assertEqual(0, execution_record.exit_code)
self.assertEqual(2, get_s3_key_mock.call_count)
@mock.patch('pinball.workflow.log_saver.S3FileLogSaver._get_or_create_s3_key')
@mock.patch('os.path.exists')
@mock.patch('__builtin__.open')
def test_execute_env_var(self, open_mock, exists_mock, get_s3_key_mock):
file_mock = mock.MagicMock()
open_mock.return_value = file_mock
file_mock.__enter__.return_value = file_mock
s3_key_mock = mock.MagicMock()
get_s3_key_mock.return_value = s3_key_mock
s3_key_mock.__enter__.return_value = s3_key_mock
job_name = 'some_job'
workflow_name = 'some_workflow'
instance = '123'
job = ShellJob(name=job_name,
command="echo $PINBALL_WORKFLOW && "
"echo $PINBALL_JOB && "
"echo $PINBALL_INSTANCE && "
"echo $PINBALL_EXECUTION && "
"echo $PINBALL_BASE_URL",
emails=['[email protected]'],
warn_timeout_sec=10,
abort_timeout_sec=20)
executor = ShellJobExecutor(workflow_name, instance, job_name,
job, self._data_builder,
self._emailer)
execution_record = ExecutionRecord(instance=instance,
start_time=time.time())
execution_record.end_time = time.time()
execution_record.exit_code = 1
job.history.append(execution_record)
self.assertTrue(executor.prepare())
self.assertTrue(executor.execute())
file_mock.write.assert_has_calls(
[mock.call(workflow_name + '\n'),
mock.call(job_name + '\n'),
mock.call(instance + '\n'),
mock.call('1\n')])
self.assertEqual(len(executor.job.history), 2)
self.assertEqual(get_s3_key_mock.call_count, len(executor.job.history))
latest_execution_record = executor.job.history[1]
self.assertEqual(latest_execution_record.exit_code, 0)
exists_mock.assert_any_call(
'/tmp/pinball_job_logs/{wf}/{inst}'.format(
wf=workflow_name, inst=instance
),
)
exists_mock.assert_any_call(
'/tmp/pinball_job_logs/{wf}/{inst}/some_job.{ts}.stdout'.format(
wf=workflow_name, inst=instance, ts=int(latest_execution_record.start_time)
),
)
exists_mock.assert_any_call(
'/tmp/pinball_job_logs/{wf}/{inst}/some_job.{ts}.stderr'.format(
wf=workflow_name, inst=instance, ts=int(latest_execution_record.start_time)
),
)
def test_process_log_line(self):
job = ShellJob(name='some_job',
command="echo ok",
emails=['[email protected]'],
warn_timeout_sec=10,
abort_timeout_sec=20)
executor = ShellJobExecutor('some_workflow', '123', 'some_job', job,
self._data_builder,
self._emailer)
import time
execution_record = ExecutionRecord(instance=123456,
start_time=time.time())
executor.job.history.append(execution_record)
executor._process_log_line("PINBALL:kv_job_url=j_id1|j_url1\n")
executor._process_log_line("PINBALL:kv_job_url=j_id2|j_url2\n")
executor._process_log_line("PINBALL:kv_job_url=j_id2|j_url2\n")
executor._process_log_line("PINBALL:kill_id=qubole1/123\n")
executor._process_log_line("PINBALL:kill_id=qubole2/456\n")
executor._process_log_line("PINBALL:kill_id=qubole1/123\n")
erp = executor._get_last_execution_record().properties
self.assertEqual(len(erp), 2)
self.assertIn('kv_job_url', erp.keys())
self.assertEqual(type(erp['kv_job_url']), list)
self.assertEqual(len(erp['kv_job_url']), 2)
self.assertEqual(erp['kv_job_url'], ['j_id1|j_url1', 'j_id2|j_url2'])
self.assertIn('kill_id', erp.keys())
self.assertEqual(type(erp['kill_id']), list)
self.assertEqual(len(erp['kill_id']), 2)
self.assertEqual(erp['kill_id'], ['qubole1/123', 'qubole2/456'])
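# The PINBALL:key=value lines exercised above are the convention this executor
# uses to harvest properties from job stdout: duplicates are dropped and each
# key accumulates a list. A standard unittest entry point (only a guess at how
# this module would be invoked directly) is:
if __name__ == '__main__':
    unittest.main()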
| apache-2.0 |
milokim/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 1891 | 3300 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
self.callchain = common_callchain
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
lthall/Leonard_ardupilot | Tools/LogAnalyzer/tests/TestCompass.py | 66 | 6392 | from LogAnalyzer import Test,TestResult
import DataflashLog
from functools import reduce
import math
class TestCompass(Test):
'''test for compass offsets and throttle interference'''
def __init__(self):
Test.__init__(self)
self.name = "Compass"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
def vec_len(x):
return math.sqrt(x[0]**2+x[1]**2+x[2]**2)
def FAIL():
self.result.status = TestResult.StatusType.FAIL
def WARN():
if self.result.status != TestResult.StatusType.FAIL:
self.result.status = TestResult.StatusType.WARN
try:
warnOffset = 300
failOffset = 500
param_offsets = (
logdata.parameters["COMPASS_OFS_X"],
logdata.parameters["COMPASS_OFS_Y"],
logdata.parameters["COMPASS_OFS_Z"]
)
if vec_len(param_offsets) > failOffset:
FAIL()
self.result.statusMessage = "FAIL: Large compass offset params (X:%.2f, Y:%.2f, Z:%.2f)\n" % (param_offsets[0],param_offsets[1],param_offsets[2])
elif vec_len(param_offsets) > warnOffset:
WARN()
self.result.statusMessage = "WARN: Large compass offset params (X:%.2f, Y:%.2f, Z:%.2f)\n" % (param_offsets[0],param_offsets[1],param_offsets[2])
if "MAG" in logdata.channels:
max_log_offsets = zip(
map(lambda x: x[1],logdata.channels["MAG"]["OfsX"].listData),
map(lambda x: x[1],logdata.channels["MAG"]["OfsY"].listData),
map(lambda x: x[1],logdata.channels["MAG"]["OfsZ"].listData)
)
max_log_offsets = reduce(lambda x,y: x if vec_len(x) > vec_len(y) else y, max_log_offsets)
if vec_len(max_log_offsets) > failOffset:
FAIL()
self.result.statusMessage += "FAIL: Large compass offset in MAG data (X:%.2f, Y:%.2f, Z:%.2f)\n" % (max_log_offsets[0],max_log_offsets[1],max_log_offsets[2])
elif vec_len(max_log_offsets) > warnOffset:
WARN()
self.result.statusMessage += "WARN: Large compass offset in MAG data (X:%.2f, Y:%.2f, Z:%.2f)\n" % (max_log_offsets[0],max_log_offsets[1],max_log_offsets[2])
# check for mag field length change, and length outside of recommended range
if "MAG" in logdata.channels:
percentDiffThresholdWARN = 0.25
percentDiffThresholdFAIL = 0.35
minMagFieldThreshold = 120.0
maxMagFieldThreshold = 550.0
index = 0
length = len(logdata.channels["MAG"]["MagX"].listData)
magField = []
(minMagField, maxMagField) = (None,None)
(minMagFieldLine, maxMagFieldLine) = (None,None)
zerosFound = False
while index<length:
mx = logdata.channels["MAG"]["MagX"].listData[index][1]
my = logdata.channels["MAG"]["MagY"].listData[index][1]
mz = logdata.channels["MAG"]["MagZ"].listData[index][1]
if ((mx==0) and (my==0) and (mz==0)): # sometimes they're zero, not sure why, same reason as why we get NaNs as offsets?
zerosFound = True
else:
mf = math.sqrt(mx*mx + my*my + mz*mz)
magField.append(mf)
if mf<minMagField:
minMagField = mf
minMagFieldLine = logdata.channels["MAG"]["MagX"].listData[index][0]
if mf>maxMagField:
maxMagField = mf
maxMagFieldLine = logdata.channels["MAG"]["MagX"].listData[index][0]
if index == 0:
(minMagField, maxMagField) = (mf,mf)
index += 1
if minMagField is None:
FAIL()
self.result.statusMessage = self.result.statusMessage + "No valid mag data found\n"
else:
percentDiff = (maxMagField-minMagField) / minMagField
if percentDiff > percentDiffThresholdFAIL:
FAIL()
self.result.statusMessage = self.result.statusMessage + "Large change in mag_field (%.2f%%)\n" % (percentDiff*100)
elif percentDiff > percentDiffThresholdWARN:
WARN()
self.result.statusMessage = self.result.statusMessage + "Moderate change in mag_field (%.2f%%)\n" % (percentDiff*100)
else:
self.result.statusMessage = self.result.statusMessage + "mag_field interference within limits (%.2f%%)\n" % (percentDiff*100)
if minMagField < minMagFieldThreshold:
self.result.statusMessage = self.result.statusMessage + "Min mag field length (%.2f) < recommended (%.2f)\n" % (minMagField,minMagFieldThreshold)
if maxMagField > maxMagFieldThreshold:
self.result.statusMessage = self.result.statusMessage + "Max mag field length (%.2f) > recommended (%.2f)\n" % (maxMagField,maxMagFieldThreshold)
if verbose:
self.result.statusMessage = self.result.statusMessage + "Min mag_field of %.2f on line %d\n" % (minMagField,minMagFieldLine)
self.result.statusMessage = self.result.statusMessage + "Max mag_field of %.2f on line %d\n" % (maxMagField,maxMagFieldLine)
if zerosFound:
if self.result.status == TestResult.StatusType.GOOD:
WARN()
self.result.statusMessage = self.result.statusMessage + "All zeros found in MAG X/Y/Z log data\n"
else:
self.result.statusMessage = self.result.statusMessage + "No MAG data, unable to test mag_field interference\n"
except KeyError as e:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = str(e) + ' not found'
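# Quick worked example of the offset check used above: the warn/fail thresholds
# of 300/500 apply to the length of the (X, Y, Z) offset vector. The sample
# offsets below are hypothetical.
if __name__ == '__main__':
    offsets = (180.0, 220.0, 150.0)
    length = math.sqrt(sum(c ** 2 for c in offsets))
    print 'offset vector length: %.1f (warn > 300, fail > 500)' % length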
| gpl-3.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/numpy/distutils/command/install_clib.py | 258 | 1315 | from __future__ import division, absolute_import, print_function
import os
from distutils.core import Command
from distutils.ccompiler import new_compiler
from numpy.distutils.misc_util import get_cmd
class install_clib(Command):
description = "Command to install installable C libraries"
user_options = []
def initialize_options(self):
self.install_dir = None
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install', ('install_lib', 'install_dir'))
def run (self):
build_clib_cmd = get_cmd("build_clib")
build_dir = build_clib_cmd.build_clib
# We need the compiler to get the library name -> filename association
if not build_clib_cmd.compiler:
compiler = new_compiler(compiler=None)
compiler.customize(self.distribution)
else:
compiler = build_clib_cmd.compiler
for l in self.distribution.installed_libraries:
target_dir = os.path.join(self.install_dir, l.target_dir)
name = compiler.library_filename(l.name)
source = os.path.join(build_dir, name)
self.mkpath(target_dir)
self.outfiles.append(self.copy_file(source, target_dir)[0])
def get_outputs(self):
return self.outfiles
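# Usage note (sketch only): numpy.distutils normally registers this command
# itself, so a setup script rarely references it directly. Explicit wiring
# would look roughly like:
#
#     from numpy.distutils.core import setup
#     setup(..., cmdclass={'install_clib': install_clib})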
| artistic-2.0 |
maxking/hamper | hamper/plugins/bitly.py | 2 | 3641 | import re
import urllib
import urllib2
import json
from hamper.interfaces import ChatPlugin
class Bitly(ChatPlugin):
name = 'bitly'
priority = 2
# Regex is taken from:
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
regex = ur"""
( # Capture 1: entire matched URL
(?:
(?P<prot>https?://) # http or https protocol
| # or
www\d{0,3}[.] # "www.", "www1.", "www2." ... "www999."
| # or
[a-z0-9.\-]+[.][a-z]{2,4}/ # looks like domain name
# followed by a slash
)
(?: # One or more:
[^\s()<>]+ # Run of non-space, non-()<>
| # or
\(([^\s()<>]+|(\([^\s()<>]+\)))*\) # balanced parens, up to 2 levels
)+
(?: # End with:
\(([^\s()<>]+|(\([^\s()<>]+\)))*\) # balanced parens, up to 2 levels
| # or
[^\s`!()\[\]{};:'".,<>?] # not a space or one of
# these punct chars
)
)
"""
def setup(self, loader):
self.regex = re.compile(self.regex, re.VERBOSE | re.IGNORECASE | re.U)
self.api_url = 'https://api-ssl.bitly.com/v3/shorten'
self.username = loader.config['bitly']['login']
self.api_key = loader.config['bitly']['api_key']
# If an exclude value is found in the url
# it will not be shortened
self.excludes = ['imgur.com', 'gist.github.com', 'pastebin.com']
# Make sure they've configured the bitly config values.
try:
self.username = loader.config['bitly']['login']
self.api_key = loader.config['bitly']['api_key']
except KeyError:
print ('\nTo use the bitly plugin you need to set your bitly login'
'\nand api_key in your config file.\n'
'Example:\n'
'bitly:\n'
" login: '123456789000'\n"
" api_key: '1234678901234567890123467890123456'\n")
quit()
def message(self, bot, comm):
match = self.regex.search(comm['message'])
# Found a url
if match:
# base url isn't % encoded, python 2.7 doesn't do this well, and I
# couldn't figure it out.
long_url = match.group(0)
# Only shorten urls which are longer than a bitly url (12 chars)
if len(long_url) <= 12:
return False
# Don't shorten url's which are in the exclude list
for item in self.excludes:
if item in long_url.lower():
return False
# Bitly requires a valid URI
if not match.group('prot'):
long_url = 'http://' + long_url
# Bitly requires valid % encoded urls
params = urllib.urlencode({'login': self.username,
'apiKey': self.api_key,
'longUrl': long_url})
req = urllib2.Request(self.api_url, data=params)
response = urllib2.urlopen(req)
data = json.load(response)
if data['status_txt'] == 'OK':
bot.reply(comm, "{0[user]}'s shortened url is {1[url]}"
.format(comm, data['data']))
# Always let the other plugins run
return False
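# Illustrative sketch of the URL detection above; the message text is made up.
# Only matches longer than a bit.ly link (12 characters) and not on the exclude
# list get shortened.
if __name__ == '__main__':
    pattern = re.compile(Bitly.regex, re.VERBOSE | re.IGNORECASE | re.U)
    match = pattern.search(u'see http://www.example.com/some/long/path for details')
    if match:
        print match.group(0)  # http://www.example.com/some/long/path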
| mit |
nrsimha/cookiecutter-django-jingo | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/jingo_paginator/helpers.py | 2 | 1063 | # import re
from jingo import register
from jinja2 import Markup
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# from django.shortcuts import render
from django.template.loader import render_to_string
@register.function
def paginate_list(request, object_list, items_per_page=8):
'''
Returns paginated list
'''
paginator = Paginator(object_list, items_per_page)
page = request.GET.get('page')
try:
items = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
items = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
items = paginator.page(paginator.num_pages)
return items
@register.function
def pager(request, paginated_objects, css_classes=None, align='pagination-centered'):
'''
Renders pagination pager
'''
return Markup(render_to_string('jingo_paginator/pager.html', {"request": request, "pager": paginated_objects, "align": align}))
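# Usage note (sketch only): both helpers are exposed to Jinja2 templates via
# ``@register.function``, so a template might call them along these lines; the
# variable names are illustrative.
#
#     {% set items = paginate_list(request, object_list, 10) %}
#     {{ pager(request, items) }}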
| mit |
teltek/edx-platform | openedx/features/course_experience/tests/views/test_welcome_message.py | 13 | 4546 | """
Tests for course welcome messages.
"""
import ddt
from django.urls import reverse
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from .test_course_updates import create_course_update, remove_course_updates
TEST_PASSWORD = 'test'
TEST_WELCOME_MESSAGE = '<h2>Welcome!</h2>'
def welcome_message_url(course):
"""
Returns the URL for the welcome message view.
"""
return reverse(
'openedx.course_experience.welcome_message_fragment_view',
kwargs={
'course_id': unicode(course.id),
}
)
def latest_update_url(course):
"""
Returns the URL for the latest update view.
"""
return reverse(
'openedx.course_experience.latest_update_fragment_view',
kwargs={
'course_id': unicode(course.id),
}
)
def dismiss_message_url(course):
"""
Returns the URL for the dismiss message endpoint.
"""
return reverse(
'openedx.course_experience.dismiss_welcome_message',
kwargs={
'course_id': unicode(course.id),
}
)
@ddt.ddt
class TestWelcomeMessageView(ModuleStoreTestCase):
"""
Tests for the course welcome message fragment view.
Also tests the LatestUpdate view because the functionality is similar.
"""
def setUp(self):
"""Set up the simplest course possible, then set up and enroll our fake user in the course."""
super(TestWelcomeMessageView, self).setUp()
with self.store.default_store(ModuleStoreEnum.Type.split):
self.course = CourseFactory.create()
with self.store.bulk_operations(self.course.id):
# Create a basic course structure
chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
section = ItemFactory.create(category='sequential', parent_location=chapter.location)
ItemFactory.create(category='vertical', parent_location=section.location)
self.user = UserFactory(password=TEST_PASSWORD)
CourseEnrollment.enroll(self.user, self.course.id)
self.client.login(username=self.user.username, password=TEST_PASSWORD)
def tearDown(self):
remove_course_updates(self.user, self.course)
super(TestWelcomeMessageView, self).tearDown()
@ddt.data(welcome_message_url, latest_update_url)
def test_message_display(self, url_generator):
create_course_update(self.course, self.user, 'First Update', date='January 1, 2000')
create_course_update(self.course, self.user, 'Second Update', date='January 1, 2017')
create_course_update(self.course, self.user, 'Retroactive Update', date='January 1, 2010')
response = self.client.get(url_generator(self.course))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Second Update')
self.assertContains(response, 'Dismiss')
@ddt.data(welcome_message_url, latest_update_url)
def test_replace_urls(self, url_generator):
img_url = 'img.png'
create_course_update(self.course, self.user, "<img src='/static/{url}'>".format(url=img_url))
response = self.client.get(url_generator(self.course))
self.assertContains(response, "/asset-v1:{org}+{course}+{run}+type@asset+block/{url}".format(
org=self.course.id.org,
course=self.course.id.course,
run=self.course.id.run,
url=img_url,
))
@ddt.data(welcome_message_url, latest_update_url)
def test_empty_message(self, url_generator):
response = self.client.get(url_generator(self.course))
self.assertEqual(response.status_code, 204)
def test_dismiss_welcome_message(self):
        # Latest update is dismissed in JS and has no server/backend component.
create_course_update(self.course, self.user, 'First Update', date='January 1, 2017')
response = self.client.get(welcome_message_url(self.course))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'First Update')
self.client.post(dismiss_message_url(self.course))
response = self.client.get(welcome_message_url(self.course))
self.assertNotIn('First Update', response)
self.assertEqual(response.status_code, 204)
| agpl-3.0 |
Deepali-AddWeb/chomchom | gulp/node_modules/node-gyp/gyp/pylib/gyp/generator/analyzer.py | 1382 | 30567 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
test_targets: unqualified target names to search for. Any target in this list
that depends upon a file in |files| is output regardless of the type of target
or chain of dependencies.
additional_compile_targets: Unqualified targets to search for in addition to
test_targets. Targets in the combined list that depend upon a file in |files|
are not necessarily output. For example, if the target is of type none then the
target is not output (but one of the descendants of the target will be).
The following is output:
error: only supplied if there is an error.
compile_targets: minimal set of targets that directly or indirectly (for
targets of type none) depend on the files in |files| and is one of the
supplied targets or a target that one of the supplied targets depends on.
The expectation is this set of targets is passed into a build step. This list
always contains the output of test_targets as well.
test_targets: set of targets from the supplied |test_targets| that either
directly or indirectly depend upon a file in |files|. This list is useful
if additional processing needs to be done for certain targets after the
build, such as running tests.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case test_targets and compile_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets that were not found.
Example:
Consider a graph like the following:
A D
/ \
B C
A depends upon both B and C, A is of type none and B and C are executables.
D is an executable, has no dependencies and nothing depends on it.
If |additional_compile_targets| = ["A"], |test_targets| = ["B", "C"] and
files = ["b.cc", "d.cc"] (B depends upon b.cc and D depends upon d.cc), then
the following is output:
|compile_targets| = ["B"] B must be built as it depends upon the changed file b.cc
and the supplied target A depends upon it. A is not output as a build_target
as it is of type none with no rules and actions.
|test_targets| = ["B"] B directly depends upon the change file b.cc.
Even though the file d.cc, which D depends upon, has changed, D is not output
as it was not supplied by way of |additional_compile_targets| or |test_targets|.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
In Gyp the "all" target is shorthand for the root targets in the files passed
to gyp. For example, if file "a.gyp" contains targets "a1" and
"a2", and file "b.gyp" contains targets "b1" and "b2" and "a2" has a dependency
on "b2" and gyp is supplied "a.gyp" then "all" consists of "a1" and "a2".
Notice that "b1" and "b2" are not in the "all" target as "b.gyp" was not
directly supplied to gyp. OTOH if both "a.gyp" and "b.gyp" are supplied to gyp
then the "all" target includes "b1" and "b2".
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
debug = False
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'
# MatchStatus is used to indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
generator_default_variables[dirname] = '!!!'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
def _AddSources(sources, base_path, base_path_components, result):
"""Extracts valid sources from |sources| and adds them to |result|. Each
source file is relative to |base_path|, but may contain '..'. To make
resolving '..' easier |base_path_components| contains each of the
directories in |base_path|. Additionally each source may contain variables.
Such sources are ignored as it is assumed dependencies on them are expressed
and tracked in some other means."""
# NOTE: gyp paths are always posix style.
for source in sources:
if not len(source) or source.startswith('!!!') or source.startswith('$'):
continue
# variable expansion may lead to //.
org_source = source
source = source[0] + source[1:].replace('//', '/')
if source.startswith('../'):
source = _ResolveParent(source, base_path_components)
if len(source):
result.append(source)
continue
result.append(base_path + source)
if debug:
print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
results):
if 'inputs' in action:
_AddSources(action['inputs'], base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
# |target| is either absolute or relative and in the format of the OS. Gyp
# source paths are always posix. Convert |target| to a posix path relative to
# |toplevel_dir_|. This is done to make it easy to build source paths.
base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
base_path_components = base_path.split('/')
# Add a trailing '/' so that _AddSources() can easily build paths.
if len(base_path):
base_path += '/'
if debug:
print 'ExtractSources', target, base_path
results = []
if 'sources' in target_dict:
_AddSources(target_dict['sources'], base_path, base_path_components,
results)
# Include the inputs from any actions. Any changes to these affect the
# resulting output.
if 'actions' in target_dict:
for action in target_dict['actions']:
_ExtractSourcesFromAction(action, base_path, base_path_components,
results)
if 'rules' in target_dict:
for rule in target_dict['rules']:
_ExtractSourcesFromAction(rule, base_path, base_path_components, results)
return results
class Target(object):
"""Holds information about a particular target:
deps: set of Targets this Target depends upon. This is not recursive, only the
direct dependent Targets.
match_status: one of the MatchStatus values.
back_deps: set of Targets that have a dependency on this Target.
visited: used during iteration to indicate whether we've visited this target.
This is used for two iterations, once in building the set of Targets and
again in _GetBuildTargets().
name: fully qualified name of the target.
requires_build: True if the target type is such that it needs to be built.
See _DoesTargetTypeRequireBuild for details.
added_to_compile_targets: used when determining if the target was added to the
set of targets that needs to be built.
in_roots: true if this target is a descendant of one of the root nodes.
is_executable: true if the type of target is executable.
is_static_library: true if the type of target is static_library.
is_or_has_linked_ancestor: true if the target does a link (eg executable), or
if there is a target in back_deps that does a link."""
def __init__(self, name):
self.deps = set()
self.match_status = MATCH_STATUS_TBD
self.back_deps = set()
self.name = name
# TODO(sky): I don't like hanging this off Target. This state is specific
# to certain functions and should be isolated there.
self.visited = False
self.requires_build = False
self.added_to_compile_targets = False
self.in_roots = False
self.is_executable = False
self.is_static_library = False
self.is_or_has_linked_ancestor = False
class Config(object):
"""Details what we're looking for
files: set of files to search for
targets: see file description for details."""
def __init__(self):
self.files = []
self.targets = set()
self.additional_compile_target_names = set()
self.test_target_names = set()
def Init(self, params):
"""Initializes Config. This is a separate method as it raises an exception
if there is a parse error."""
generator_flags = params.get('generator_flags', {})
config_path = generator_flags.get('config_path', None)
if not config_path:
return
try:
f = open(config_path, 'r')
config = json.load(f)
f.close()
except IOError:
raise Exception('Unable to open file ' + config_path)
except ValueError as e:
raise Exception('Unable to parse config file ' + config_path + str(e))
if not isinstance(config, dict):
raise Exception('config_path must be a JSON file containing a dictionary')
self.files = config.get('files', [])
self.additional_compile_target_names = set(
config.get('additional_compile_targets', []))
self.test_target_names = set(config.get('test_targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
"""Returns true if the build file |build_file| is either in |files| or
one of the files included by |build_file| is in |files|. |toplevel_dir| is
the root of the source tree."""
if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
if debug:
print 'gyp file modified', build_file
return True
# First element of included_files is the file itself.
if len(data[build_file]['included_files']) <= 1:
return False
for include_file in data[build_file]['included_files'][1:]:
# |included_files| are relative to the directory of the |build_file|.
rel_include_file = \
_ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
if _ToLocalPath(toplevel_dir, rel_include_file) in files:
if debug:
print 'included gyp file modified, gyp_file=', build_file, \
'included file=', rel_include_file
return True
return False
def _GetOrCreateTargetByName(targets, target_name):
"""Creates or returns the Target at targets[target_name]. If there is no
Target for |target_name| one is created. Returns a tuple of whether a new
Target was created and the Target."""
if target_name in targets:
return False, targets[target_name]
target = Target(target_name)
targets[target_name] = target
return True, target
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return bool(target_dict['type'] != 'none' or
target_dict.get('actions') or target_dict.get('rules'))
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
build_files):
"""Returns a tuple of the following:
. A dictionary mapping from fully qualified name to Target.
. A list of the targets that have a source file in |files|.
. Targets that constitute the 'all' target. See description at top of file
for details on the 'all' target.
This sets the |match_status| of the targets that contain any of the source
files in |files| to MATCH_STATUS_MATCHES.
|toplevel_dir| is the root of the source tree."""
# Maps from target name to Target.
name_to_target = {}
# Targets that matched.
matching_targets = []
# Queue of targets to visit.
targets_to_visit = target_list[:]
# Maps from build file to a boolean indicating whether the build file is in
# |files|.
build_file_in_files = {}
# Root targets across all files.
roots = set()
# Set of Targets in |build_files|.
build_file_targets = set()
while len(targets_to_visit) > 0:
target_name = targets_to_visit.pop()
created_target, target = _GetOrCreateTargetByName(name_to_target,
target_name)
if created_target:
roots.add(target)
elif target.visited:
continue
target.visited = True
target.requires_build = _DoesTargetTypeRequireBuild(
target_dicts[target_name])
target_type = target_dicts[target_name]['type']
target.is_executable = target_type == 'executable'
target.is_static_library = target_type == 'static_library'
target.is_or_has_linked_ancestor = (target_type == 'executable' or
target_type == 'shared_library')
build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
if not build_file in build_file_in_files:
build_file_in_files[build_file] = \
_WasBuildFileModified(build_file, data, files, toplevel_dir)
if build_file in build_files:
build_file_targets.add(target)
# If a build file (or any of its included files) is modified we assume all
# targets in the file are modified.
if build_file_in_files[build_file]:
print 'matching target from modified build file', target_name
target.match_status = MATCH_STATUS_MATCHES
matching_targets.append(target)
else:
sources = _ExtractSources(target_name, target_dicts[target_name],
toplevel_dir)
for source in sources:
if _ToGypPath(os.path.normpath(source)) in files:
print 'target', target_name, 'matches', source
target.match_status = MATCH_STATUS_MATCHES
matching_targets.append(target)
break
# Add dependencies to visit as well as updating back pointers for deps.
for dep in target_dicts[target_name].get('dependencies', []):
targets_to_visit.append(dep)
created_dep_target, dep_target = _GetOrCreateTargetByName(name_to_target,
dep)
if not created_dep_target:
roots.discard(dep_target)
target.deps.add(dep_target)
dep_target.back_deps.add(target)
return name_to_target, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
"""Returns a tuple of the following:
. mapping (dictionary) from unqualified name to Target for all the
Targets in |to_find|.
. any target names not found. If this is empty all targets were found."""
result = {}
if not to_find:
return {}, []
to_find = set(to_find)
for target_name in all_targets.keys():
extracted = gyp.common.ParseQualifiedTarget(target_name)
if len(extracted) > 1 and extracted[1] in to_find:
to_find.remove(extracted[1])
result[extracted[1]] = all_targets[target_name]
if not to_find:
return result, []
return result, [x for x in to_find]
def _DoesTargetDependOnMatchingTargets(target):
"""Returns true if |target| or any of its dependencies is one of the
targets containing the files supplied as input to analyzer. This updates
|matches| of the Targets as it recurses.
target: the Target to look for."""
if target.match_status == MATCH_STATUS_DOESNT_MATCH:
return False
if target.match_status == MATCH_STATUS_MATCHES or \
target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
return True
for dep in target.deps:
if _DoesTargetDependOnMatchingTargets(dep):
target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
print '\t', target.name, 'matches by dep', dep.name
return True
target.match_status = MATCH_STATUS_DOESNT_MATCH
return False
def _GetTargetsDependingOnMatchingTargets(possible_targets):
"""Returns the list of Targets in |possible_targets| that depend (either
directly on indirectly) on at least one of the targets containing the files
supplied as input to analyzer.
possible_targets: targets to search from."""
found = []
print 'Targets that matched by dependency:'
for target in possible_targets:
if _DoesTargetDependOnMatchingTargets(target):
found.append(target)
return found
def _AddCompileTargets(target, roots, add_if_no_ancestor, result):
"""Recurses through all targets that depend on |target|, adding all targets
that need to be built (and are in |roots|) to |result|.
roots: set of root targets.
add_if_no_ancestor: If true and there are no ancestors of |target| then add
|target| to |result|. |target| must still be in |roots|.
result: targets that need to be built are added here."""
if target.visited:
return
target.visited = True
target.in_roots = target in roots
for back_dep_target in target.back_deps:
_AddCompileTargets(back_dep_target, roots, False, result)
target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
target.in_roots |= back_dep_target.in_roots
target.is_or_has_linked_ancestor |= (
back_dep_target.is_or_has_linked_ancestor)
# Always add 'executable' targets. Even though they may be built by other
# targets that depend upon them it makes detection of what is going to be
# built easier.
# And always add static_libraries that have no dependencies on them from
# linkables. This is necessary as the other dependencies on them may be
# static libraries themselves, which are not compile time dependencies.
if target.in_roots and \
(target.is_executable or
(not target.added_to_compile_targets and
(add_if_no_ancestor or target.requires_build)) or
(target.is_static_library and add_if_no_ancestor and
not target.is_or_has_linked_ancestor)):
print '\t\tadding to compile targets', target.name, 'executable', \
target.is_executable, 'added_to_compile_targets', \
target.added_to_compile_targets, 'add_if_no_ancestor', \
add_if_no_ancestor, 'requires_build', target.requires_build, \
'is_static_library', target.is_static_library, \
'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor
result.add(target)
target.added_to_compile_targets = True
def _GetCompileTargets(matching_targets, supplied_targets):
"""Returns the set of Targets that require a build.
matching_targets: targets that changed and need to be built.
supplied_targets: set of targets supplied to analyzer to search from."""
result = set()
for target in matching_targets:
print 'finding compile targets for match', target.name
_AddCompileTargets(target, supplied_targets, True, result)
return result
def _WriteOutput(params, **values):
"""Writes the output, either to stdout or a file is specified."""
if 'error' in values:
print 'Error:', values['error']
if 'status' in values:
print values['status']
if 'targets' in values:
values['targets'].sort()
print 'Supplied targets that depend on changed files:'
for target in values['targets']:
print '\t', target
if 'invalid_targets' in values:
values['invalid_targets'].sort()
print 'The following targets were not found:'
for target in values['invalid_targets']:
print '\t', target
if 'build_targets' in values:
values['build_targets'].sort()
print 'Targets that require a build:'
for target in values['build_targets']:
print '\t', target
if 'compile_targets' in values:
values['compile_targets'].sort()
print 'Targets that need to be built:'
for target in values['compile_targets']:
print '\t', target
if 'test_targets' in values:
values['test_targets'].sort()
print 'Test targets:'
for target in values['test_targets']:
print '\t', target
output_path = params.get('generator_flags', {}).get(
'analyzer_output_path', None)
if not output_path:
print json.dumps(values)
return
try:
f = open(output_path, 'w')
f.write(json.dumps(values) + '\n')
f.close()
except IOError as e:
print 'Error writing to output file', output_path, str(e)
def _WasGypIncludeFileModified(params, files):
"""Returns true if one of the files in |files| is in the set of included
files."""
if params['options'].includes:
for include in params['options'].includes:
if _ToGypPath(os.path.normpath(include)) in files:
print 'Include file modified, assuming all changed', include
return True
return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
class TargetCalculator(object):
"""Calculates the matching test_targets and matching compile_targets."""
def __init__(self, files, additional_compile_target_names, test_target_names,
data, target_list, target_dicts, toplevel_dir, build_files):
self._additional_compile_target_names = set(additional_compile_target_names)
self._test_target_names = set(test_target_names)
self._name_to_target, self._changed_targets, self._root_targets = (
_GenerateTargets(data, target_list, target_dicts, toplevel_dir,
frozenset(files), build_files))
self._unqualified_mapping, self.invalid_targets = (
_GetUnqualifiedToTargetMapping(self._name_to_target,
self._supplied_target_names_no_all()))
def _supplied_target_names(self):
return self._additional_compile_target_names | self._test_target_names
def _supplied_target_names_no_all(self):
"""Returns the supplied test targets without 'all'."""
result = self._supplied_target_names();
result.discard('all')
return result
def is_build_impacted(self):
"""Returns true if the supplied files impact the build at all."""
return self._changed_targets
def find_matching_test_target_names(self):
"""Returns the set of output test targets."""
assert self.is_build_impacted()
# Find the test targets first. 'all' is special cased to mean all the
    # root targets. To deal with this, all of the supplied |test_targets| are expanded
# to include the root targets during lookup. If any of the root targets
# match, we remove it and replace it with 'all'.
test_target_names_no_all = set(self._test_target_names)
test_target_names_no_all.discard('all')
test_targets_no_all = _LookupTargets(test_target_names_no_all,
self._unqualified_mapping)
test_target_names_contains_all = 'all' in self._test_target_names
if test_target_names_contains_all:
test_targets = [x for x in (set(test_targets_no_all) |
set(self._root_targets))]
else:
test_targets = [x for x in test_targets_no_all]
print 'supplied test_targets'
for target_name in self._test_target_names:
print '\t', target_name
print 'found test_targets'
for target in test_targets:
print '\t', target.name
print 'searching for matching test targets'
matching_test_targets = _GetTargetsDependingOnMatchingTargets(test_targets)
matching_test_targets_contains_all = (test_target_names_contains_all and
set(matching_test_targets) &
set(self._root_targets))
if matching_test_targets_contains_all:
# Remove any of the targets for all that were not explicitly supplied,
      # 'all' is subsequently added to the matching names below.
matching_test_targets = [x for x in (set(matching_test_targets) &
set(test_targets_no_all))]
print 'matched test_targets'
for target in matching_test_targets:
print '\t', target.name
matching_target_names = [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in matching_test_targets]
if matching_test_targets_contains_all:
matching_target_names.append('all')
print '\tall'
return matching_target_names
def find_matching_compile_target_names(self):
"""Returns the set of output compile targets."""
assert self.is_build_impacted();
# Compile targets are found by searching up from changed targets.
# Reset the visited status for _GetBuildTargets.
for target in self._name_to_target.itervalues():
target.visited = False
supplied_targets = _LookupTargets(self._supplied_target_names_no_all(),
self._unqualified_mapping)
if 'all' in self._supplied_target_names():
supplied_targets = [x for x in (set(supplied_targets) |
set(self._root_targets))]
print 'Supplied test_targets & compile_targets'
for target in supplied_targets:
print '\t', target.name
print 'Finding compile targets'
compile_targets = _GetCompileTargets(self._changed_targets,
supplied_targets)
return [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in compile_targets]
def GenerateOutput(target_list, target_dicts, data, params):
"""Called by gyp as the final stage. Outputs results."""
config = Config()
try:
config.Init(params)
if not config.files:
raise Exception('Must specify files to analyze via config_path generator '
'flag')
toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
if debug:
print 'toplevel_dir', toplevel_dir
if _WasGypIncludeFileModified(params, config.files):
result_dict = { 'status': all_changed_string,
'test_targets': list(config.test_target_names),
'compile_targets': list(
config.additional_compile_target_names |
config.test_target_names) }
_WriteOutput(params, **result_dict)
return
calculator = TargetCalculator(config.files,
config.additional_compile_target_names,
config.test_target_names, data,
target_list, target_dicts, toplevel_dir,
params['build_files'])
if not calculator.is_build_impacted():
result_dict = { 'status': no_dependency_string,
'test_targets': [],
'compile_targets': [] }
if calculator.invalid_targets:
result_dict['invalid_targets'] = calculator.invalid_targets
_WriteOutput(params, **result_dict)
return
test_target_names = calculator.find_matching_test_target_names()
compile_target_names = calculator.find_matching_compile_target_names()
found_at_least_one_target = compile_target_names or test_target_names
result_dict = { 'test_targets': test_target_names,
'status': found_dependency_string if
found_at_least_one_target else no_dependency_string,
'compile_targets': list(
set(compile_target_names) |
set(test_target_names)) }
if calculator.invalid_targets:
result_dict['invalid_targets'] = calculator.invalid_targets
_WriteOutput(params, **result_dict)
except Exception as e:
_WriteOutput(params, error=str(e))
| gpl-2.0 |
edmorley/treeherder | treeherder/auth/backends.py | 2 | 7662 | import json
import logging
import time
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from jose import jwt
from rest_framework.exceptions import AuthenticationFailed
from treeherder.config.settings import (AUTH0_CLIENTID,
AUTH0_DOMAIN)
logger = logging.getLogger(__name__)
# The JSON Web Key Set (jwks), which is a set of keys
# containing the public keys that should be used to verify
# any JWT issued by the authorization server. Auth0 exposes
# a JWKS endpoint for each tenant, which is found at
# 'https://' + AUTH0_DOMAIN + '/.well-known/jwks.json'. This endpoint
# will contain the JWK used to sign all Auth0 issued JWTs for this tenant.
# Reference: https://auth0.com/docs/jwks
# The jwks is under our (Mozilla's) control; changing it would be a major change
# announced well in advance. To avoid the additional HTTP request, as well as the
# possibility of receiving a 503 status code, we read its content from a static
# JSON file instead.
with open('treeherder/auth/jwks.json') as f:
jwks = json.load(f)
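# Illustrative sketch, not from the original module: a JWKS document is a JSON
# object with a "keys" list, and _get_user_info() below only relies on the RSA
# fields shown here (the values are placeholders, not real key material):
#
# {
#     "keys": [
#         {"kty": "RSA", "kid": "<key id>", "use": "sig",
#          "n": "<base64url-encoded modulus>", "e": "AQAB"}
#     ]
# }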
class AuthBackend:
def _get_access_token_expiry(self, request):
expiration_timestamp_in_seconds = request.META.get('HTTP_ACCESS_TOKEN_EXPIRES_AT')
if not expiration_timestamp_in_seconds:
raise AuthenticationFailed('Access-Token-Expires-At header is expected')
try:
return int(expiration_timestamp_in_seconds)
except ValueError:
raise AuthenticationFailed('Access-Token-Expires-At header value is invalid')
def _get_access_token(self, request):
auth = request.META.get('HTTP_AUTHORIZATION')
if not auth:
raise AuthenticationFailed('Authorization header is expected')
parts = auth.split()
if len(parts) != 2 or parts[0].lower() != 'bearer':
raise AuthenticationFailed("Authorization header must be of form 'Bearer {token}'")
token = parts[1]
return token
def _get_id_token(self, request):
id_token = request.META.get('HTTP_ID_TOKEN')
if not id_token:
raise AuthenticationFailed('Id-Token header is expected')
return id_token
def _get_id_token_expiry(self, user_info):
# `exp` is the expiration of the ID token in seconds since the epoch:
# https://auth0.com/docs/tokens/id-token#id-token-payload
# https://openid.net/specs/openid-connect-core-1_0.html#IDToken
return user_info['exp']
def _get_username_from_userinfo(self, user_info):
"""
Get the user's username from the jwt sub property
"""
subject = user_info['sub']
email = user_info['email']
if "Mozilla-LDAP" in subject:
return "mozilla-ldap/" + email
elif "email" in subject:
return "email/" + email
elif "github" in subject:
return "github/" + email
elif "google" in subject:
return "google/" + email
# Firefox account
elif "oauth2" in subject:
return "oauth2/" + email
else:
raise AuthenticationFailed("Unrecognized identity")
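# Illustrative examples for the mapping above (identities assumed, not taken from
# real accounts): a user_info of {'sub': 'ad|Mozilla-LDAP|jdoe', 'email':
# 'jdoe@mozilla.com'} maps to 'mozilla-ldap/jdoe@mozilla.com', while
# {'sub': 'github|1234567', 'email': 'octocat@example.com'} maps to
# 'github/octocat@example.com'.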
def _get_user_info(self, access_token, id_token):
"""
Extracts the user info payload from the Id Token.
Example return value:
{
"at_hash": "<HASH>",
"aud": "<HASH>",
"email_verified": true,
"email": "[email protected]",
"exp": 1551259495,
"family_name": "Surname",
"given_name": "Firstname",
"https://sso.mozilla.com/claim/groups": [
"all_scm_level_1",
"all_scm_level_2",
"all_scm_level_3",
# ...
],
"iat": 1550654695,
"iss": "https://auth.mozilla.auth0.com/",
"name": "Firstname Surname",
"nickname": "Firstname Surname",
"nonce": "<HASH>",
"picture": "<GRAVATAR_URL>",
"sub": "ad|Mozilla-LDAP|fsurname",
"updated_at": "2019-02-20T09:24:55.449Z",
}
"""
# JWT Validator
# Per https://auth0.com/docs/quickstart/backend/python/01-authorization#create-the-jwt-validation-decorator
try:
unverified_header = jwt.get_unverified_header(id_token)
except jwt.JWTError:
raise AuthError('Unable to decode the Id token header')
if 'kid' not in unverified_header:
raise AuthError('Id token header missing RSA key ID')
rsa_key = None
for key in jwks["keys"]:
if key["kid"] == unverified_header["kid"]:
rsa_key = {
"kty": key["kty"],
"kid": key["kid"],
"use": key["use"],
"n": key["n"],
"e": key["e"]
}
break
if not rsa_key:
raise AuthError('Id token using unrecognised RSA key ID')
try:
# https://python-jose.readthedocs.io/en/latest/jwt/api.html#jose.jwt.decode
user_info = jwt.decode(
id_token,
rsa_key,
algorithms=['RS256'],
audience=AUTH0_CLIENTID,
access_token=access_token,
issuer="https://"+AUTH0_DOMAIN+"/"
)
return user_info
except jwt.ExpiredSignatureError:
raise AuthError('Id token is expired')
except jwt.JWTClaimsError:
raise AuthError("Incorrect claims: please check the audience and issuer")
except jwt.JWTError:
raise AuthError("Invalid header: Unable to parse authentication")
def _calculate_session_expiry(self, request, user_info):
"""Returns the number of seconds after which the Django session should expire."""
access_token_expiry_timestamp = self._get_access_token_expiry(request)
id_token_expiry_timestamp = self._get_id_token_expiry(user_info)
now_in_seconds = int(time.time())
# The session length is set to match whichever token expiration time is closer.
earliest_expiration_timestamp = min(access_token_expiry_timestamp, id_token_expiry_timestamp)
seconds_until_expiry = earliest_expiration_timestamp - now_in_seconds
if seconds_until_expiry <= 0:
raise AuthError('Session expiry time has already passed!')
return seconds_until_expiry
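# Worked example for the calculation above (timestamps assumed): if the access
# token expires 3600 seconds from now and the ID token's `exp` is 7200 seconds
# from now, earliest_expiration_timestamp is the access-token one, so the Django
# session is set to expire in roughly 3600 seconds.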
def authenticate(self, request):
access_token = self._get_access_token(request)
id_token = self._get_id_token(request)
user_info = self._get_user_info(access_token, id_token)
username = self._get_username_from_userinfo(user_info)
seconds_until_expiry = self._calculate_session_expiry(request, user_info)
logger.debug('Updating session to expire in %i seconds', seconds_until_expiry)
request.session.set_expiry(seconds_until_expiry)
try:
return User.objects.get(username=username)
except ObjectDoesNotExist:
# The user doesn't already exist, so create it since we allow
# anyone with SSO access to create an account on Treeherder.
logger.debug('Creating new user: %s', username)
return User.objects.create_user(username, email=user_info['email'])
def get_user(self, user_id):
try:
return User._default_manager.get(pk=user_id)
except User.DoesNotExist:
return None
class AuthError(Exception):
pass
| mpl-2.0 |
hesseltuinhof/mxnet | example/rcnn/rcnn/io/rpn.py | 14 | 9509 | """
RPN:
data =
{'data': [num_images, c, h, w],
'im_info': [num_images, 4] (optional)}
label =
{'gt_boxes': [num_boxes, 5] (optional),
'label': [batch_size, 1] <- [batch_size, num_anchors, feat_height, feat_width],
'bbox_target': [batch_size, num_anchors, feat_height, feat_width],
'bbox_weight': [batch_size, num_anchors, feat_height, feat_width]}
"""
import logging
import numpy as np
import numpy.random as npr
from ..logger import logger
from ..config import config
from .image import get_image, tensor_vstack
from ..processing.generate_anchor import generate_anchors
from ..processing.bbox_transform import bbox_overlaps, bbox_transform
def get_rpn_testbatch(roidb):
"""
return a dict of testbatch
:param roidb: ['image', 'flipped']
:return: data, label, im_info
"""
assert len(roidb) == 1, 'Single batch only'
imgs, roidb = get_image(roidb)
im_array = imgs[0]
im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
data = {'data': im_array,
'im_info': im_info}
label = {}
return data, label, im_info
def get_rpn_batch(roidb):
"""
prototype for rpn batch: data, im_info, gt_boxes
:param roidb: ['image', 'flipped'] + ['gt_boxes', 'boxes', 'gt_classes']
:return: data, label
"""
assert len(roidb) == 1, 'Single batch only'
imgs, roidb = get_image(roidb)
im_array = imgs[0]
im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
# gt boxes: (x1, y1, x2, y2, cls)
if roidb[0]['gt_classes'].size > 0:
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((roidb[0]['boxes'].shape[0], 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
else:
gt_boxes = np.empty((0, 5), dtype=np.float32)
data = {'data': im_array,
'im_info': im_info}
label = {'gt_boxes': gt_boxes}
return data, label
def assign_anchor(feat_shape, gt_boxes, im_info, feat_stride=16,
scales=(8, 16, 32), ratios=(0.5, 1, 2), allowed_border=0):
"""
assign ground truth boxes to anchor positions
:param feat_shape: infer output shape
:param gt_boxes: assign ground truth
:param im_info: filter out anchors overlapped with edges
:param feat_stride: anchor position step
:param scales: used to generate anchors, affects num_anchors (per location)
:param ratios: aspect ratios of generated anchors
:param allowed_border: filter out anchors with edge overlap > allowed_border
:return: dict of label
'label': of shape (batch_size, 1) <- (batch_size, num_anchors, feat_height, feat_width)
'bbox_target': of shape (batch_size, num_anchors * 4, feat_height, feat_width)
'bbox_inside_weight': *todo* mark the assigned anchors
'bbox_outside_weight': used to normalize the bbox_loss, all weights sum to RPN_POSITIVE_WEIGHT
"""
def _unmap(data, count, inds, fill=0):
"""" unmap a subset inds of data into original data of size count """
if len(data.shape) == 1:
ret = np.empty((count,), dtype=np.float32)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count,) + data.shape[1:], dtype=np.float32)
ret.fill(fill)
ret[inds, :] = data
return ret
im_info = im_info[0]
scales = np.array(scales, dtype=np.float32)
base_anchors = generate_anchors(base_size=feat_stride, ratios=list(ratios), scales=scales)
num_anchors = base_anchors.shape[0]
feat_height, feat_width = feat_shape[-2:]
logger.debug('anchors: %s' % base_anchors)
logger.debug('anchor shapes: %s' % np.hstack((base_anchors[:, 2::4] - base_anchors[:, 0::4],
base_anchors[:, 3::4] - base_anchors[:, 1::4])))
logger.debug('im_info %s' % im_info)
logger.debug('height %d width %d' % (feat_height, feat_width))
logger.debug('gt_boxes shape %s' % np.array(gt_boxes.shape))
logger.debug('gt_boxes %s' % gt_boxes)
# 1. generate proposals from bbox deltas and shifted anchors
shift_x = np.arange(0, feat_width) * feat_stride
shift_y = np.arange(0, feat_height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = num_anchors
K = shifts.shape[0]
all_anchors = base_anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
all_anchors = all_anchors.reshape((K * A, 4))
total_anchors = int(K * A)
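# Illustrative shapes for the construction above (feature-map size assumed): with
# the default scales (8, 16, 32) and ratios (0.5, 1, 2) there are A = 9 base
# anchors per location; a 38x50 feature map gives K = 1900 shift positions, so
# all_anchors has shape (17100, 4) and total_anchors == 17100.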
# only keep anchors inside the image
inds_inside = np.where((all_anchors[:, 0] >= -allowed_border) &
(all_anchors[:, 1] >= -allowed_border) &
(all_anchors[:, 2] < im_info[1] + allowed_border) &
(all_anchors[:, 3] < im_info[0] + allowed_border))[0]
logger.debug('total_anchors %d' % total_anchors)
logger.debug('inds_inside %d' % len(inds_inside))
# keep only inside anchors
anchors = all_anchors[inds_inside, :]
logger.debug('anchors shape %s' % np.array(anchors.shape))
# label: 1 is positive, 0 is negative, -1 is dont care
labels = np.empty((len(inds_inside),), dtype=np.float32)
labels.fill(-1)
if gt_boxes.size > 0:
# overlap between the anchors and the gt boxes
# overlaps (ex, gt)
overlaps = bbox_overlaps(anchors.astype(np.float), gt_boxes.astype(np.float))
argmax_overlaps = overlaps.argmax(axis=1)
max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]
gt_argmax_overlaps = overlaps.argmax(axis=0)
gt_max_overlaps = overlaps[gt_argmax_overlaps, np.arange(overlaps.shape[1])]
gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
if not config.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels first so that positive labels can clobber them
labels[max_overlaps < config.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
# fg label: for each gt, anchor with highest overlap
labels[gt_argmax_overlaps] = 1
# fg label: above threshold IoU
labels[max_overlaps >= config.TRAIN.RPN_POSITIVE_OVERLAP] = 1
if config.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels last so that negative labels can clobber positives
labels[max_overlaps < config.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
else:
labels[:] = 0
# subsample positive labels if we have too many
num_fg = int(config.TRAIN.RPN_FG_FRACTION * config.TRAIN.RPN_BATCH_SIZE)
fg_inds = np.where(labels == 1)[0]
if len(fg_inds) > num_fg:
disable_inds = npr.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)
if logger.level == logging.INFO:
disable_inds = fg_inds[:(len(fg_inds) - num_fg)]
labels[disable_inds] = -1
# subsample negative labels if we have too many
num_bg = config.TRAIN.RPN_BATCH_SIZE - np.sum(labels == 1)
bg_inds = np.where(labels == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = npr.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)
if logger.level == logging.INFO:
disable_inds = bg_inds[:(len(bg_inds) - num_bg)]
labels[disable_inds] = -1
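# Worked example of the subsampling above (config values assumed): with
# RPN_BATCH_SIZE = 256 and RPN_FG_FRACTION = 0.5, num_fg is 128; if only 20
# anchors are labelled foreground, num_bg becomes 256 - 20 = 236, and any excess
# foreground/background anchors are disabled by setting their label to -1.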
bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)
if gt_boxes.size > 0:
bbox_targets[:] = bbox_transform(anchors, gt_boxes[argmax_overlaps, :4])
bbox_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
bbox_weights[labels == 1, :] = np.array(config.TRAIN.RPN_BBOX_WEIGHTS)
if logger.level == logging.DEBUG:
_sums = bbox_targets[labels == 1, :].sum(axis=0)
_squared_sums = (bbox_targets[labels == 1, :] ** 2).sum(axis=0)
_counts = np.sum(labels == 1)
means = _sums / (_counts + 1e-14)
stds = np.sqrt(_squared_sums / _counts - means ** 2)
logger.debug('means %s' % means)
logger.debug('stdevs %s' % stds)
# map up to original set of anchors
labels = _unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)
bbox_weights = _unmap(bbox_weights, total_anchors, inds_inside, fill=0)
if logger.level == logging.DEBUG:
if gt_boxes.size > 0:
logger.debug('rpn: max max_overlaps %f' % np.max(max_overlaps))
logger.debug('rpn: num_positives %f' % np.sum(labels == 1))
logger.debug('rpn: num_negatives %f' % np.sum(labels == 0))
_fg_sum = np.sum(labels == 1)
_bg_sum = np.sum(labels == 0)
_count = 1
logger.debug('rpn: num_positive avg %f' % (_fg_sum / _count))
logger.debug('rpn: num_negative avg %f' % (_bg_sum / _count))
labels = labels.reshape((1, feat_height, feat_width, A)).transpose(0, 3, 1, 2)
labels = labels.reshape((1, A * feat_height * feat_width))
bbox_targets = bbox_targets.reshape((1, feat_height, feat_width, A * 4)).transpose(0, 3, 1, 2)
bbox_weights = bbox_weights.reshape((1, feat_height, feat_width, A * 4)).transpose((0, 3, 1, 2))
label = {'label': labels,
'bbox_target': bbox_targets,
'bbox_weight': bbox_weights}
return label
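# Illustrative output shapes (feature-map size assumed): with A = 9 anchors per
# location and a 38x50 feature map, the dict returned above holds
#   label['label'].shape       == (1, 9 * 38 * 50)
#   label['bbox_target'].shape == (1, 36, 38, 50)
#   label['bbox_weight'].shape == (1, 36, 38, 50)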
| apache-2.0 |
thinkerou/grpc | examples/python/interceptors/headers/greeter_server.py | 13 | 1638 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter server."""
from concurrent import futures
import time
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
from request_header_validator_interceptor import RequestHeaderValidatorInterceptor
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def SayHello(self, request, context):
return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
def serve():
header_validator = RequestHeaderValidatorInterceptor(
'one-time-password', '42', grpc.StatusCode.UNAUTHENTICATED,
'Access denied!')
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
interceptors=(header_validator,))
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
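# Hypothetical client-side sketch, not part of this example: the interceptor
# registered above rejects any RPC that does not carry the expected header, so a
# matching client would attach it as call metadata, e.g.
#
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = helloworld_pb2_grpc.GreeterStub(channel)
#   response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'),
#                            metadata=(('one-time-password', '42'),))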
if __name__ == '__main__':
serve()
| apache-2.0 |
rvalyi/OpenUpgrade | addons/mail/wizard/mail_compose_message.py | 32 | 19668 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import re
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
# main mako-like expression pattern
EXPRESSION_PATTERN = re.compile('(\$\{.+?\})')
class mail_compose_message(osv.TransientModel):
""" Generic message composition wizard. You may inherit from this wizard
at model and view levels to provide specific features.
The behavior of the wizard depends on the composition_mode field:
- 'comment': post on a record. The wizard is pre-populated via ``get_record_data``
- 'mass_mail': wizard in mass mailing mode where the mail details can
contain template placeholders that will be merged with actual data
before being sent to each recipient.
"""
_name = 'mail.compose.message'
_inherit = 'mail.message'
_description = 'Email composition wizard'
_log_access = True
_batch_size = 500
def default_get(self, cr, uid, fields, context=None):
""" Handle composition mode. Some details about context keys:
- comment: default mode, model and ID of a record the user comments
- default_model or active_model
- default_res_id or active_id
- reply: active_id of a message the user replies to
- default_parent_id or message_id or active_id: ID of the
mail.message we reply to
- message.res_model or default_model
- message.res_id or default_res_id
- mass_mail: model and IDs of records the user mass-mails
- active_ids: record IDs
- default_model or active_model
"""
if context is None:
context = {}
result = super(mail_compose_message, self).default_get(cr, uid, fields, context=context)
# v6.1 compatibility mode
result['composition_mode'] = result.get('composition_mode', context.get('mail.compose.message.mode'))
result['model'] = result.get('model', context.get('active_model'))
result['res_id'] = result.get('res_id', context.get('active_id'))
result['parent_id'] = result.get('parent_id', context.get('message_id'))
# default values according to composition mode - NOTE: reply is deprecated, fall back on comment
if result['composition_mode'] == 'reply':
result['composition_mode'] = 'comment'
vals = {}
if 'active_domain' in context: # not context.get() because we want to keep global [] domains
vals['use_active_domain'] = True
vals['active_domain'] = '%s' % context.get('active_domain')
if result['composition_mode'] == 'comment':
vals.update(self.get_record_data(cr, uid, result, context=context))
for field in vals:
if field in fields:
result[field] = vals[field]
# TDE HACK: as mailboxes used default_model='res.users' and default_res_id=uid
# (because of lack of an accessible pid), creating a message on its own
# profile may crash (res_users does not allow writing on it)
# Posting on its own profile works (res_users redirect to res_partner)
# but when creating the mail.message to create the mail.compose.message
# access rights issues may rise
# We therefore directly change the model and res_id
if result['model'] == 'res.users' and result['res_id'] == uid:
result['model'] = 'res.partner'
result['res_id'] = self.pool.get('res.users').browse(cr, uid, uid).partner_id.id
return result
def _get_composition_mode_selection(self, cr, uid, context=None):
return [('comment', 'Post on a document'),
('mass_mail', 'Email Mass Mailing'),
('mass_post', 'Post on Multiple Documents')]
_columns = {
'composition_mode': fields.selection(
lambda s, *a, **k: s._get_composition_mode_selection(*a, **k),
string='Composition mode'),
'partner_ids': fields.many2many('res.partner',
'mail_compose_message_res_partner_rel',
'wizard_id', 'partner_id', 'Additional Contacts'),
'use_active_domain': fields.boolean('Use active domain'),
'active_domain': fields.char('Active domain', readonly=True),
'attachment_ids': fields.many2many('ir.attachment',
'mail_compose_message_ir_attachments_rel',
'wizard_id', 'attachment_id', 'Attachments'),
'is_log': fields.boolean('Log an Internal Note',
help='Whether the message is an internal note (comment mode only)'),
# mass mode options
'notify': fields.boolean('Notify followers',
help='Notify followers of the document (mass post only)'),
'same_thread': fields.boolean('Replies in the document',
help='Replies to the messages will go into the selected document (mass mail only)'),
}
# TODO: change same_thread to False in trunk (requires view update)
_defaults = {
'composition_mode': 'comment',
'body': lambda self, cr, uid, ctx={}: '',
'subject': lambda self, cr, uid, ctx={}: False,
'partner_ids': lambda self, cr, uid, ctx={}: [],
'same_thread': True,
}
def check_access_rule(self, cr, uid, ids, operation, context=None):
""" Access rules of mail.compose.message:
- create: allowed if the wizard has a model but no res_id, i.e. a message
created in mass mail mode
- otherwise: fall back on the mail.message access rules
"""
if isinstance(ids, (int, long)):
ids = [ids]
# Author condition (CREATE (mass_mail))
if operation == 'create' and uid != SUPERUSER_ID:
# read mail_compose_message.ids to have their values
message_values = {}
cr.execute('SELECT DISTINCT id, model, res_id FROM "%s" WHERE id = ANY (%%s) AND res_id = 0' % self._table, (ids,))
for id, rmod, rid in cr.fetchall():
message_values[id] = {'model': rmod, 'res_id': rid}
# remove from the set to check the ids that mail_compose_message accepts
author_ids = [mid for mid, message in message_values.iteritems()
if message.get('model') and not message.get('res_id')]
ids = list(set(ids) - set(author_ids))
return super(mail_compose_message, self).check_access_rule(cr, uid, ids, operation, context=context)
def _notify(self, cr, uid, newid, context=None, force_send=False, user_signature=True):
""" Override specific notify method of mail.message, because we do
not want that feature in the wizard. """
return
def get_record_data(self, cr, uid, values, context=None):
""" Returns a defaults-like dict with initial values for the composition
wizard when sending an email related a previous email (parent_id) or
a document (model, res_id). This is based on previously computed default
values. """
if context is None:
context = {}
result, subject = {}, False
if values.get('parent_id'):
parent = self.pool.get('mail.message').browse(cr, uid, values.get('parent_id'), context=context)
result['record_name'] = parent.record_name
subject = tools.ustr(parent.subject or parent.record_name or '')
if not values.get('model'):
result['model'] = parent.model
if not values.get('res_id'):
result['res_id'] = parent.res_id
partner_ids = values.get('partner_ids', list()) + [partner.id for partner in parent.partner_ids]
if context.get('is_private') and parent.author_id:  # if the message is private, also add the author to the partner list
partner_ids += [parent.author_id.id]
result['partner_ids'] = partner_ids
elif values.get('model') and values.get('res_id'):
doc_name_get = self.pool[values.get('model')].name_get(cr, uid, [values.get('res_id')], context=context)
result['record_name'] = doc_name_get and doc_name_get[0][1] or ''
subject = tools.ustr(result['record_name'])
re_prefix = _('Re:')
if subject and not (subject.startswith('Re:') or subject.startswith(re_prefix)):
subject = "%s %s" % (re_prefix, subject)
result['subject'] = subject
return result
#------------------------------------------------------
# Wizard validation and send
#------------------------------------------------------
def send_mail(self, cr, uid, ids, context=None):
""" Process the wizard content and proceed with sending the related
email(s), rendering any template patterns on the fly if needed. """
if context is None:
context = {}
# clean the context (hint: mass mailing sets some default values that
# could be wrongly interpreted by mail_mail)
context.pop('default_email_to', None)
context.pop('default_partner_ids', None)
for wizard in self.browse(cr, uid, ids, context=context):
mass_mode = wizard.composition_mode in ('mass_mail', 'mass_post')
active_model_pool = self.pool[wizard.model if wizard.model else 'mail.thread']
if not hasattr(active_model_pool, 'message_post'):
context['thread_model'] = wizard.model
active_model_pool = self.pool['mail.thread']
# wizard works in batch mode: [res_id] or active_ids or active_domain
if mass_mode and wizard.use_active_domain and wizard.model:
res_ids = self.pool[wizard.model].search(cr, uid, eval(wizard.active_domain), context=context)
elif mass_mode and wizard.model and context.get('active_ids'):
res_ids = context['active_ids']
else:
res_ids = [wizard.res_id]
sliced_res_ids = [res_ids[i:i + self._batch_size] for i in range(0, len(res_ids), self._batch_size)]
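# Illustrative sketch of the slicing above (record count assumed): with
# _batch_size = 500 and 1200 matching records, sliced_res_ids holds three
# slices of 500, 500 and 200 ids, and the loop below posts or queues the
# generated mail values one slice at a time.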
for res_ids in sliced_res_ids:
all_mail_values = self.get_mail_values(cr, uid, wizard, res_ids, context=context)
for res_id, mail_values in all_mail_values.iteritems():
if wizard.composition_mode == 'mass_mail':
self.pool['mail.mail'].create(cr, uid, mail_values, context=context)
else:
subtype = 'mail.mt_comment'
if context.get('mail_compose_log') or (wizard.composition_mode == 'mass_post' and not wizard.notify): # log a note: subtype is False
subtype = False
if wizard.composition_mode == 'mass_post':
context = dict(context,
mail_notify_force_send=False, # do not send emails directly but use the queue instead
mail_create_nosubscribe=True) # add context key to avoid subscribing the author
active_model_pool.message_post(cr, uid, [res_id], type='comment', subtype=subtype, context=context, **mail_values)
return {'type': 'ir.actions.act_window_close'}
def get_mail_values(self, cr, uid, wizard, res_ids, context=None):
"""Generate the values that will be used by send_mail to create mail_messages
or mail_mails. """
results = dict.fromkeys(res_ids, False)
rendered_values, default_recipients = {}, {}
mass_mail_mode = wizard.composition_mode == 'mass_mail'
# render all template-based value at once
if mass_mail_mode and wizard.model:
rendered_values = self.render_message_batch(cr, uid, wizard, res_ids, context=context)
for res_id in res_ids:
# static wizard (mail.message) values
mail_values = {
'subject': wizard.subject,
'body': wizard.body,
'parent_id': wizard.parent_id and wizard.parent_id.id,
'partner_ids': [partner.id for partner in wizard.partner_ids],
'attachment_ids': [attach.id for attach in wizard.attachment_ids],
'author_id': wizard.author_id.id,
'email_from': wizard.email_from,
'record_name': wizard.record_name,
}
# mass mailing: rendering override wizard static values
if mass_mail_mode and wizard.model:
# always keep a copy, reset record name (avoid browsing records)
mail_values.update(notification=True, model=wizard.model, res_id=res_id, record_name=False)
# auto deletion of mail_mail
if 'mail_auto_delete' in context:
mail_values['auto_delete'] = context.get('mail_auto_delete')
# rendered values using template
email_dict = rendered_values[res_id]
mail_values['partner_ids'] += email_dict.pop('partner_ids', [])
mail_values.update(email_dict)
if wizard.same_thread:
mail_values.pop('reply_to')
elif not mail_values.get('reply_to'):
mail_values['reply_to'] = mail_values['email_from']
# mail_mail values: body -> body_html, partner_ids -> recipient_ids
mail_values['body_html'] = mail_values.get('body', '')
mail_values['recipient_ids'] = [(4, id) for id in mail_values.pop('partner_ids', [])]
# process attachments: should not be encoded before being processed by message_post / mail_mail create
mail_values['attachments'] = [(name, base64.b64decode(enc_cont)) for name, enc_cont in email_dict.pop('attachments', list())]
attachment_ids = []
for attach_id in mail_values.pop('attachment_ids'):
new_attach_id = self.pool.get('ir.attachment').copy(cr, uid, attach_id, {'res_model': self._name, 'res_id': wizard.id}, context=context)
attachment_ids.append(new_attach_id)
mail_values['attachment_ids'] = self.pool['mail.thread']._message_preprocess_attachments(
cr, uid, mail_values.pop('attachments', []),
attachment_ids, 'mail.message', 0, context=context)
results[res_id] = mail_values
return results
#------------------------------------------------------
# Template rendering
#------------------------------------------------------
def render_message_batch(self, cr, uid, wizard, res_ids, context=None):
"""Generate template-based values of wizard, for the document records given
by res_ids. This method is meant to be inherited by email_template that
will produce a more complete dictionary, using Jinja2 templates.
Each template is generated for all res_ids, allowing to parse the template
once, and render it multiple times. This is useful for mass mailing where
template rendering represent a significant part of the process.
Default recipients are also computed, based on mail_thread method
message_get_default_recipients. This allows to ensure a mass mailing has
always some recipients specified.
:param browse wizard: current mail.compose.message browse record
:param list res_ids: list of record ids
:return dict results: for each res_id, the generated template values for
subject, body, email_from and reply_to
"""
subjects = self.render_template_batch(cr, uid, wizard.subject, wizard.model, res_ids, context=context)
bodies = self.render_template_batch(cr, uid, wizard.body, wizard.model, res_ids, context=context, post_process=True)
emails_from = self.render_template_batch(cr, uid, wizard.email_from, wizard.model, res_ids, context=context)
replies_to = self.render_template_batch(cr, uid, wizard.reply_to, wizard.model, res_ids, context=context)
ctx = dict(context, thread_model=wizard.model)
default_recipients = self.pool['mail.thread'].message_get_default_recipients(cr, uid, res_ids, context=ctx)
results = dict.fromkeys(res_ids, False)
for res_id in res_ids:
results[res_id] = {
'subject': subjects[res_id],
'body': bodies[res_id],
'email_from': emails_from[res_id],
'reply_to': replies_to[res_id],
}
results[res_id].update(default_recipients.get(res_id, dict()))
return results
def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
""" Render the given template text, replace mako-like expressions ``${expr}``
with the result of evaluating these expressions with an evaluation context
containing:
* ``user``: browse_record of the current user
* ``object``: browse_record of the document record this mail is
related to
* ``context``: the context passed to the mail composition wizard
:param str template: the template text to render
:param str model: model name of the document record this mail is related to
:param list res_ids: list of record ids
"""
if context is None:
context = {}
results = dict.fromkeys(res_ids, False)
for res_id in res_ids:
def merge(match):
exp = str(match.group()[2:-1]).strip()
result = eval(exp, {
'user': self.pool.get('res.users').browse(cr, uid, uid, context=context),
'object': self.pool[model].browse(cr, uid, res_id, context=context),
'context': dict(context), # copy context to prevent side-effects of eval
})
return result and tools.ustr(result) or ''
results[res_id] = template and EXPRESSION_PATTERN.sub(merge, template)
return results
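# Illustrative sketch of the rendering above (model and values assumed): with
# model='res.partner' and a template such as
#   'Dear ${object.name}, your reference is ${object.id}.'
# each ${...} expression is evaluated against the browse record of every res_id,
# so a partner named 'Agrolait' with id 7 would render as
#   'Dear Agrolait, your reference is 7.'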
# Compatibility methods
def render_template(self, cr, uid, template, model, res_id, context=None):
return self.render_template_batch(cr, uid, template, model, [res_id], context)[res_id]
def render_message(self, cr, uid, wizard, res_id, context=None):
return self.render_message_batch(cr, uid, wizard, [res_id], context)[res_id]
| agpl-3.0 |
nervenXC/topical_word_embeddings | TWE-2/gensim/models/lsimodel.py | 12 | 34640 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Module for Latent Semantic Analysis (aka Latent Semantic Indexing) in Python.
Implements scalable truncated Singular Value Decomposition in Python. The SVD
decomposition can be updated with new observations at any time (online, incremental,
memory-efficient training).
This module actually contains several algorithms for decomposition of large corpora, a
combination of which effectively and transparently allows building LSI models for:
* corpora much larger than RAM: only constant memory is needed, independent of
the corpus size (though still dependent on the feature set size)
* corpora that are streamed: documents are only accessed sequentially, no
random-access
* corpora that cannot be even temporarily stored: each document can only be
seen once and must be processed immediately (one-pass algorithm)
* distributed computing for very large corpora, making use of a cluster of
machines
Wall-clock `performance on the English Wikipedia <http://radimrehurek.com/gensim/wiki.html>`_
(2G corpus positions, 3.2M documents, 100K features, 0.5G non-zero entries in the final TF-IDF matrix),
requesting the top 400 LSI factors:
====================================================== ============ ==================
algorithm serial distributed
====================================================== ============ ==================
one-pass merge algorithm 5h14m 1h41m
multi-pass stochastic algo (with 2 power iterations) 5h39m N/A [1]_
====================================================== ============ ==================
*serial* = Core 2 Duo MacBook Pro 2.53Ghz, 4GB RAM, libVec
*distributed* = cluster of four logical nodes on three physical machines, each
with dual core Xeon 2.0GHz, 4GB RAM, ATLAS
.. [1] The stochastic algo could be distributed too, but most time is already spent
reading/decompressing the input from disk in its 4 passes. The extra network
traffic due to data distribution across cluster nodes would likely make it
*slower*.
"""
import logging
import sys
import numpy
import scipy.linalg
import scipy.sparse
from scipy.sparse import sparsetools
from gensim import interfaces, matutils, utils
from six import iterkeys
from six.moves import xrange
logger = logging.getLogger('gensim.models.lsimodel')
# accuracy defaults for the multi-pass stochastic algo
P2_EXTRA_DIMS = 100 # set to `None` for dynamic P2_EXTRA_DIMS=k
P2_EXTRA_ITERS = 2
def clip_spectrum(s, k, discard=0.001):
"""
Given eigenvalues `s`, return how many factors should be kept to avoid
storing spurious (tiny, numerically unstable) values.
This will ignore the tail of the spectrum with relative combined mass < min(`discard`, 1/k).
The returned value is clipped against `k` (= never return more than `k`).
"""
# compute relative contribution of eigenvalues towards the energy spectrum
rel_spectrum = numpy.abs(1.0 - numpy.cumsum(s / numpy.sum(s)))
# ignore the last `discard` mass (or 1/k, whichever is smaller) of the spectrum
small = 1 + len(numpy.where(rel_spectrum > min(discard, 1.0 / k))[0])
k = min(k, small) # clip against k
logger.info("keeping %i factors (discarding %.3f%% of energy spectrum)" %
(k, 100 * rel_spectrum[k - 1]))
return k
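# Worked example for clip_spectrum (singular values assumed): for
# s = numpy.array([5.0, 3.0, 1.0, 1e-9]) and k = 4, the relative spectrum drops
# below the default discard threshold of 0.001 after the third factor, so the
# function returns 3 and the near-zero trailing factor is dropped.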
def asfarray(a, name=''):
if not a.flags.f_contiguous:
logger.debug("converting %s array %s to FORTRAN order" % (a.shape, name))
a = numpy.asfortranarray(a)
return a
def ascarray(a, name=''):
if not a.flags.contiguous:
logger.debug("converting %s array %s to C order" % (a.shape, name))
a = numpy.ascontiguousarray(a)
return a
class Projection(utils.SaveLoad):
def __init__(self, m, k, docs=None, use_svdlibc=False, power_iters=P2_EXTRA_ITERS, extra_dims=P2_EXTRA_DIMS):
"""
Construct the (U, S) projection from a corpus `docs`. The projection can
be later updated by merging it with another Projection via `self.merge()`.
This is the class taking care of the 'core math'; interfacing with corpora,
splitting large corpora into chunks and merging them etc. is done through
the higher-level `LsiModel` class.
"""
self.m, self.k = m, k
self.power_iters = power_iters
self.extra_dims = extra_dims
if docs is not None:
# base case decomposition: given a job `docs`, compute its decomposition,
# *in-core*.
if not use_svdlibc:
u, s = stochastic_svd(docs, k, chunksize=sys.maxsize,
num_terms=m, power_iters=self.power_iters,
extra_dims=self.extra_dims)
else:
try:
import sparsesvd
except ImportError:
raise ImportError("`sparsesvd` module requested but not found; run `easy_install sparsesvd`")
logger.info("computing sparse SVD of %s matrix" % str(docs.shape))
if not scipy.sparse.issparse(docs):
docs = matutils.corpus2csc(docs)
ut, s, vt = sparsesvd.sparsesvd(docs, k + 30) # ask for extra factors, because for some reason SVDLIBC sometimes returns fewer factors than requested
u = ut.T
del ut, vt
k = clip_spectrum(s**2, self.k)
self.u = u[:, :k].copy()
self.s = s[:k].copy()
else:
self.u, self.s = None, None
def empty_like(self):
return Projection(self.m, self.k, power_iters=self.power_iters, extra_dims=self.extra_dims)
def merge(self, other, decay=1.0):
"""
Merge this Projection with another.
The content of `other` is destroyed in the process, so pass this function a
copy of `other` if you need it further.
"""
if other.u is None:
# the other projection is empty => do nothing
return
if self.u is None:
# we are empty => result of merge is the other projection, whatever it is
self.u = other.u.copy()
self.s = other.s.copy()
return
if self.m != other.m:
raise ValueError("vector space mismatch: update is using %s features, expected %s" %
(other.m, self.m))
logger.info("merging projections: %s + %s" % (str(self.u.shape), str(other.u.shape)))
m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
# TODO Maybe keep the bases as elementary reflectors, without
# forming explicit matrices with ORGQR.
# The only operations we ever need are basis^T*basis and basis*component.
# But how to do that in scipy? And is it fast(er)?
# find component of u2 orthogonal to u1
logger.debug("constructing orthogonal component")
self.u = asfarray(self.u, 'self.u')
c = numpy.dot(self.u.T, other.u)
self.u = ascarray(self.u, 'self.u')
other.u -= numpy.dot(self.u, c)
other.u = [other.u] # do some reference magic and call qr_destroy, to save RAM
q, r = matutils.qr_destroy(other.u) # q, r = QR(component)
assert not other.u
# find the rotation that diagonalizes r
k = numpy.bmat([[numpy.diag(decay * self.s), numpy.multiply(c, other.s)],
[matutils.pad(numpy.array([]).reshape(0, 0), min(m, n2), n1), numpy.multiply(r, other.s)]])
logger.debug("computing SVD of %s dense matrix" % str(k.shape))
try:
# in numpy < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge'.
# for these early versions of numpy, catch the error and try to compute
# SVD again, but over k*k^T.
# see http://www.mail-archive.com/[email protected]/msg07224.html and
# bug ticket http://projects.scipy.org/numpy/ticket/706
# sdoering: replaced numpy's linalg.svd with scipy's linalg.svd:
u_k, s_k, _ = scipy.linalg.svd(k, full_matrices=False) # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK wrapper for partial svd/eigendecomp in numpy :( //sdoering: maybe there is one in scipy?
except scipy.linalg.LinAlgError:
logger.error("SVD(A) failed; trying SVD(A * A^T)")
u_k, s_k, _ = scipy.linalg.svd(numpy.dot(k, k.T), full_matrices=False) # if this fails too, give up with an exception
s_k = numpy.sqrt(s_k) # go back from eigen values to singular values
k = clip_spectrum(s_k**2, self.k)
u1_k, u2_k, s_k = numpy.array(u_k[:n1, :k]), numpy.array(u_k[n1:, :k]), s_k[:k]
# update & rotate current basis U = [U, U']*[U1_k, U2_k]
logger.debug("updating orthonormal basis U")
self.s = s_k
self.u = ascarray(self.u, 'self.u')
self.u = numpy.dot(self.u, u1_k)
q = ascarray(q, 'q')
q = numpy.dot(q, u2_k)
self.u += q
# make each column of U start with a non-negative number (to force canonical decomposition)
if self.u.shape[0] > 0:
for i in xrange(self.u.shape[1]):
if self.u[0, i] < 0.0:
self.u[:, i] *= -1.0
# diff = numpy.dot(self.u.T, self.u) - numpy.eye(self.u.shape[1])
# logger.info('orth error after=%f' % numpy.sum(diff * diff))
#endclass Projection
class LsiModel(interfaces.TransformationABC):
"""
Objects of this class allow building and maintaining a model for Latent
Semantic Indexing (also known as Latent Semantic Analysis).
The main methods are:
1. constructor, which initializes the projection into latent topics space,
2. the ``[]`` method, which returns representation of any input document in the
latent space,
3. `add_documents()` for incrementally updating the model with new documents.
The left singular vectors are stored in `lsi.projection.u`, singular values
in `lsi.projection.s`. Right singular vectors can be reconstructed from the output
of `lsi[training_corpus]`, if needed. See also FAQ [2]_.
Model persistency is achieved via its load/save methods.
.. [2] https://github.com/piskvorky/gensim/wiki/Recipes-&-FAQ#q4-how-do-you-output-the-u-s-vt-matrices-of-lsi
"""
def __init__(self, corpus=None, num_topics=200, id2word=None, chunksize=20000,
decay=1.0, distributed=False, onepass=True,
power_iters=P2_EXTRA_ITERS, extra_samples=P2_EXTRA_DIMS):
"""
`num_topics` is the number of requested factors (latent dimensions).
After the model has been trained, you can estimate topics for an
arbitrary, unseen document, using the ``topics = self[document]`` dictionary
notation. You can also add new training documents, with ``self.add_documents``,
so that training can be stopped and resumed at any time, and the
LSI transformation is available at any point.
If you specify a `corpus`, it will be used to train the model. See the
method `add_documents` for a description of the `chunksize` and `decay` parameters.
Turn `onepass` off to force a multi-pass stochastic algorithm.
`power_iters` and `extra_samples` affect the accuracy of the stochastic
multi-pass algorithm, which is used either internally (`onepass=True`) or
as the front-end algorithm (`onepass=False`). Increasing the number of
power iterations improves accuracy, but lowers performance. See [3]_ for
some hard numbers.
Turn on `distributed` to enable distributed computing.
Example:
>>> lsi = LsiModel(corpus, num_topics=10)
>>> print(lsi[doc_tfidf]) # project some document into LSI space
>>> lsi.add_documents(corpus2) # update LSI on additional documents
>>> print(lsi[doc_tfidf])
.. [3] http://nlp.fi.muni.cz/~xrehurek/nips/rehurek_nips.pdf
"""
self.id2word = id2word
self.num_topics = int(num_topics)
self.chunksize = int(chunksize)
self.decay = float(decay)
if distributed:
if not onepass:
logger.warning("forcing the one-pass algorithm for distributed LSA")
onepass = True
self.onepass = onepass
self.extra_samples, self.power_iters = extra_samples, power_iters
if corpus is None and self.id2word is None:
raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
else:
self.num_terms = 1 + max([-1] + self.id2word.keys())
self.docs_processed = 0
self.projection = Projection(self.num_terms, self.num_topics, power_iters=self.power_iters, extra_dims=self.extra_samples)
self.numworkers = 1
if not distributed:
logger.info("using serial LSI version on this node")
self.dispatcher = None
else:
if not onepass:
raise NotImplementedError("distributed stochastic LSA not implemented yet; "
"run either distributed one-pass, or serial randomized.")
try:
import Pyro4
dispatcher = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher')
dispatcher._pyroOneway.add("exit")
logger.debug("looking for dispatcher at %s" % str(dispatcher._pyroUri))
dispatcher.initialize(id2word=self.id2word, num_topics=num_topics,
chunksize=chunksize, decay=decay,
power_iters=self.power_iters, extra_samples=self.extra_samples,
distributed=False, onepass=onepass)
self.dispatcher = dispatcher
self.numworkers = len(dispatcher.getworkers())
logger.info("using distributed version with %i workers" % self.numworkers)
except Exception as err:
# distributed version was specifically requested, so this is an error state
logger.error("failed to initialize distributed LSI (%s)" % err)
raise RuntimeError("failed to initialize distributed LSI (%s)" % err)
if corpus is not None:
self.add_documents(corpus)
def add_documents(self, corpus, chunksize=None, decay=None):
"""
Update singular value decomposition to take into account a new
corpus of documents.
Training proceeds in chunks of `chunksize` documents at a time. The size of
`chunksize` is a tradeoff between increased speed (bigger `chunksize`)
vs. lower memory footprint (smaller `chunksize`). If the distributed mode
is on, each chunk is sent to a different worker/computer.
Setting `decay` < 1.0 causes re-orientation towards new data trends in the
input document stream, by giving less emphasis to old observations. This allows
LSA to gradually "forget" old observations (documents) and give more
preference to new ones.
"""
logger.info("updating model with new documents")
# get computation parameters; if not specified, use the ones from constructor
if chunksize is None:
chunksize = self.chunksize
if decay is None:
decay = self.decay
if not scipy.sparse.issparse(corpus):
if not self.onepass:
# we are allowed multiple passes over the input => use a faster, randomized two-pass algo
update = Projection(self.num_terms, self.num_topics, None)
update.u, update.s = stochastic_svd(corpus, self.num_topics,
num_terms=self.num_terms, chunksize=chunksize,
extra_dims=self.extra_samples, power_iters=self.power_iters)
self.projection.merge(update, decay=decay)
else:
# the one-pass algo
doc_no = 0
if self.dispatcher:
logger.info('initializing %s workers' % self.numworkers)
self.dispatcher.reset()
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info("preparing a new chunk of documents")
nnz = sum(len(doc) for doc in chunk)
# construct the job as a sparse matrix, to minimize memory overhead
# definitely avoid materializing it as a dense matrix!
logger.debug("converting corpus to csc format")
job = matutils.corpus2csc(chunk, num_docs=len(chunk), num_terms=self.num_terms, num_nnz=nnz)
del chunk
doc_no += job.shape[1]
if self.dispatcher:
# distributed version: add this job to the job queue, so workers can work on it
logger.debug("creating job #%i" % chunk_no)
self.dispatcher.putjob(job) # put job into queue; this will eventually block, because the queue has a small finite size
del job
logger.info("dispatched documents up to #%s" % doc_no)
else:
# serial version, there is only one "worker" (myself) => process the job directly
update = Projection(self.num_terms, self.num_topics, job, extra_dims=self.extra_samples, power_iters=self.power_iters)
del job
self.projection.merge(update, decay=decay)
del update
logger.info("processed documents up to #%s" % doc_no)
self.print_topics(5)
# wait for all workers to finish (distributed version only)
if self.dispatcher:
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
self.projection = self.dispatcher.getstate()
# logger.info("top topics after adding %i documents" % doc_no)
# self.print_debug(10)
else:
assert not self.dispatcher, "must be in serial mode to receive jobs"
assert self.onepass, "distributed two-pass algo not supported yet"
update = Projection(self.num_terms, self.num_topics, corpus.tocsc(), extra_dims=self.extra_samples, power_iters=self.power_iters)
self.projection.merge(update, decay=decay)
logger.info("processed sparse job of %i documents" % (corpus.shape[1]))
def __str__(self):
return "LsiModel(num_terms=%s, num_topics=%s, decay=%s, chunksize=%s)" % \
(self.num_terms, self.num_topics, self.decay, self.chunksize)
def __getitem__(self, bow, scaled=False, chunksize=512):
"""
Return latent representation, as a list of (topic_id, topic_value) 2-tuples.
This is done by folding input document into the latent topic space.
"""
assert self.projection.u is not None, "decomposition not initialized yet"
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus and chunksize:
# by default, transform `chunksize` documents at once, when called as `lsi[corpus]`.
# this chunking is completely transparent to the user, but it speeds
# up internal computations (one mat * mat multiplication, instead of
# `chunksize` smaller mat * vec multiplications).
return self._apply(bow, chunksize=chunksize)
if not is_corpus:
bow = [bow]
# convert input to scipy.sparse CSC, then do "sparse * dense = dense" multiplication
vec = matutils.corpus2csc(bow, num_terms=self.num_terms, dtype=self.projection.u.dtype)
topic_dist = (vec.T * self.projection.u[:, :self.num_topics]).T # (x^T * u).T = u^-1 * x
# # convert input to dense, then do dense * dense multiplication
# # ± same performance as above (BLAS dense * dense is better optimized than scipy.sparse), but consumes more memory
# vec = matutils.corpus2dense(bow, num_terms=self.num_terms, num_docs=len(bow))
# topic_dist = numpy.dot(self.projection.u[:, :self.num_topics].T, vec)
# # use numpy's advanced indexing to simulate sparse * dense
# # ± same speed again
# u = self.projection.u[:, :self.num_topics]
# topic_dist = numpy.empty((u.shape[1], len(bow)), dtype=u.dtype)
# for vecno, vec in enumerate(bow):
# indices, data = zip(*vec) if vec else ([], [])
# topic_dist[:, vecno] = numpy.dot(u.take(indices, axis=0).T, numpy.array(data, dtype=u.dtype))
if scaled:
topic_dist = (1.0 / self.projection.s[:self.num_topics]) * topic_dist # s^-1 * u^-1 * x
# convert a numpy array to gensim sparse vector = tuples of (feature_id, feature_weight),
# with no zero weights.
if not is_corpus:
# lsi[single_document]
result = matutils.full2sparse(topic_dist.flat)
else:
# lsi[chunk of documents]
result = matutils.Dense2Corpus(topic_dist)
return result
def show_topic(self, topicno, topn=10):
"""
Return a specified topic (=left singular vector), 0 <= `topicno` < `self.num_topics`,
as a string.
Return only the `topn` words which contribute the most to the direction
of the topic (both negative and positive).
>>> lsimodel.show_topic(10, topn=5)
[(-0.340, "category"), (0.298, "$M$"), (0.183, "algebra"), (-0.174, "functor"), (-0.168, "operator")]
"""
# size of the projection matrix can actually be smaller than `self.num_topics`,
# if there were not enough factors (real rank of input matrix smaller than
# `self.num_topics`). in that case, return an empty string
if topicno >= len(self.projection.u.T):
return ''
c = numpy.asarray(self.projection.u.T[topicno, :]).flatten()
norm = numpy.sqrt(numpy.sum(numpy.dot(c, c)))
most = numpy.abs(c).argsort()[::-1][:topn]
return [(1.0 * c[val] / norm, self.id2word[val]) for val in most]
def print_topic(self, topicno, topn=10):
"""
Return a single topic as a formatted string. See `show_topic()` for parameters.
>>> lsimodel.print_topic(10, topn=5)
'-0.340 * "category" + 0.298 * "$M$" + 0.183 * "algebra" + -0.174 * "functor" + -0.168 * "operator"'
"""
return ' + '.join(['%.3f*"%s"' % v for v in self.show_topic(topicno, topn)])
def show_topics(self, num_topics=-1, num_words=10, log=False, formatted=True):
"""
Return `num_topics` most significant topics (return all by default).
For each topic, show `num_words` most significant words (10 words by default).
The topics are returned as a list -- a list of strings if `formatted` is
True, or a list of (weight, word) 2-tuples if False.
If `log` is True, also output this result to log.
"""
shown = []
if num_topics < 0:
num_topics = self.num_topics
for i in xrange(min(num_topics, self.num_topics)):
if i < len(self.projection.s):
if formatted:
topic = self.print_topic(i, topn=num_words)
else:
topic = self.show_topic(i, topn=num_words)
shown.append(topic)
if log:
logger.info("topic #%i(%.3f): %s" %
(i, self.projection.s[i],
topic))
return shown
def print_topics(self, num_topics=5, num_words=10):
"""Alias for `show_topics()` which prints the top 5 topics to log."""
return self.show_topics(num_topics=num_topics, num_words=num_words, log=True)
def print_debug(self, num_topics=5, num_words=10):
"""
Print (to log) the most salient words of the first `num_topics` topics.
Unlike `print_topics()`, this looks for words that are significant for a
particular topic *and* not for others. This *should* result in a more
human-interpretable description of topics.
"""
# only wrap the module-level fnc
print_debug(self.id2word, self.projection.u, self.projection.s,
range(min(num_topics, len(self.projection.u.T))),
num_words=num_words)
def save(self, fname, *args, **kwargs):
"""
Save the model to file.
Large internal arrays may be stored into separate files, with `fname` as prefix.
"""
if self.projection is not None:
self.projection.save(fname + '.projection', *args, **kwargs)
super(LsiModel, self).save(fname, *args, ignore=['projection', 'dispatcher'], **kwargs)
@classmethod
def load(cls, fname, *args, **kwargs):
"""
Load a previously saved object from file (also see `save`).
Large arrays are mmap'ed back as read-only (shared memory).
"""
kwargs['mmap'] = kwargs.get('mmap', 'r')
result = super(LsiModel, cls).load(fname, *args, **kwargs)
try:
result.projection = super(LsiModel, cls).load(fname + '.projection', *args, **kwargs)
except Exception as e:
logging.warning("failed to load projection from %s: %s" % (fname + '.state', e))
return result
#endclass LsiModel
def print_debug(id2token, u, s, topics, num_words=10, num_neg=None):
if num_neg is None:
# by default, print half as many salient negative words as positive
num_neg = num_words / 2
logger.info('computing word-topic salience for %i topics' % len(topics))
topics, result = set(topics), {}
# TODO speed up by block computation
for uvecno, uvec in enumerate(u):
uvec = numpy.abs(numpy.asarray(uvec).flatten())
udiff = uvec / numpy.sqrt(numpy.sum(numpy.dot(uvec, uvec)))
for topic in topics:
result.setdefault(topic, []).append((udiff[topic], uvecno))
logger.debug("printing %i+%i salient words" % (num_words, num_neg))
for topic in sorted(iterkeys(result)):
weights = sorted(result[topic], key=lambda x: -abs(x[0]))
_, most = weights[0]
if u[most, topic] < 0.0: # the most significant word has a negative sign => flip sign of u[most]
normalize = -1.0
else:
normalize = 1.0
# order features according to salience; ignore near-zero entries in u
pos, neg = [], []
for weight, uvecno in weights:
if normalize * u[uvecno, topic] > 0.0001:
pos.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
if len(pos) >= num_words:
break
for weight, uvecno in weights:
if normalize * u[uvecno, topic] < -0.0001:
neg.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
if len(neg) >= num_neg:
break
logger.info('topic #%s(%.3f): %s, ..., %s' % (topic, s[topic], ', '.join(pos), ', '.join(neg)))
def stochastic_svd(corpus, rank, num_terms, chunksize=20000, extra_dims=None,
power_iters=0, dtype=numpy.float64, eps=1e-6):
"""
Run truncated Singular Value Decomposition (SVD) on a sparse input.
Return (U, S): the left singular vectors and the singular values of the input
data stream `corpus` [4]_. The corpus may be larger than RAM (iterator of vectors).
This may return less than the requested number of top `rank` factors, in case
the input itself is of lower rank. The `extra_dims` (oversampling) and especially
`power_iters` (power iterations) parameters affect accuracy of the decomposition.
This algorithm uses `2+power_iters` passes over the input data. In case you can only
afford a single pass, set `onepass=True` in :class:`LsiModel` and avoid using
this function directly.
The decomposition algorithm is based on
**Halko, Martinsson, Tropp. Finding structure with randomness, 2009.**
.. [4] If `corpus` is a scipy.sparse matrix instead, it is assumed the whole
corpus fits into core memory and a different (more efficient) code path is chosen.
"""
rank = int(rank)
if extra_dims is None:
samples = max(10, 2 * rank) # use more samples than requested factors, to improve accuracy
else:
samples = rank + int(extra_dims)
logger.info("using %i extra samples and %i power iterations" % (samples - rank, power_iters))
num_terms = int(num_terms)
# first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
# build Y in blocks of `chunksize` documents (much faster than going one-by-one
# and more memory friendly than processing all documents at once)
y = numpy.zeros(dtype=dtype, shape=(num_terms, samples))
logger.info("1st phase: constructing %s action matrix" % str(y.shape))
if scipy.sparse.issparse(corpus):
m, n = corpus.shape
assert num_terms == m, "mismatch in number of features: %i in sparse matrix vs. %i parameter" % (m, num_terms)
o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(y.dtype) # draw a random gaussian matrix
sparsetools.csc_matvecs(m, n, samples, corpus.indptr, corpus.indices,
corpus.data, o.ravel(), y.ravel()) # y = corpus * o
del o
# unlike numpy, scipy.sparse `astype()` copies everything, even if there is no change to dtype!
# so check for equal dtype explicitly, to avoid the extra memory footprint if possible
if y.dtype != dtype:
y = y.astype(dtype)
logger.info("orthonormalizing %s action matrix" % str(y.shape))
y = [y]
q, _ = matutils.qr_destroy(y) # orthonormalize the range
logger.debug("running %i power iterations" % power_iters)
for power_iter in xrange(power_iters):
q = corpus.T * q
q = [corpus * q]
q, _ = matutils.qr_destroy(q) # orthonormalize the range after each power iteration step
else:
num_docs = 0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i' % (chunk_no * chunksize))
# construct the chunk as a sparse matrix, to minimize memory overhead
# definitely avoid materializing it as a dense (num_terms x chunksize) matrix!
s = sum(len(doc) for doc in chunk)
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
m, n = chunk.shape
assert m == num_terms
assert n <= chunksize # the very last chunk of A is allowed to be smaller in size
num_docs += n
logger.debug("multiplying chunk * gauss")
o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(dtype) # draw a random gaussian matrix
sparsetools.csc_matvecs(m, n, samples, chunk.indptr, chunk.indices, # y = y + chunk * o
chunk.data, o.ravel(), y.ravel())
del chunk, o
y = [y]
q, _ = matutils.qr_destroy(y) # orthonormalize the range
for power_iter in xrange(power_iters):
logger.info("running power iteration #%i" % (power_iter + 1))
yold = q.copy()
q[:] = 0.0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
tmp = chunk.T * yold
tmp = chunk * tmp
del chunk
q += tmp
del yold
q = [q]
q, _ = matutils.qr_destroy(q) # orthonormalize the range
qt = q[:, :samples].T.copy()
del q
if scipy.sparse.issparse(corpus):
b = qt * corpus
logger.info("2nd phase: running dense svd on %s matrix" % str(b.shape))
u, s, vt = scipy.linalg.svd(b, full_matrices=False)
del b, vt
else:
# second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
# again, construct X incrementally, in chunks of `chunksize` documents from the streaming
# input corpus A, to avoid using O(number of documents) memory
x = numpy.zeros(shape=(qt.shape[0], qt.shape[0]), dtype=numpy.float64)
logger.info("2nd phase: constructing %s covariance matrix" % str(x.shape))
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=qt.dtype)
b = qt * chunk # dense * sparse matrix multiply
del chunk
x += numpy.dot(b, b.T) # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
del b
# now we're ready to compute decomposition of the small matrix X
logger.info("running dense decomposition on %s covariance matrix" % str(x.shape))
u, s, vt = scipy.linalg.svd(x) # could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
s = numpy.sqrt(s) # sqrt to go back from singular values of X to singular values of B = singular values of the corpus
q = qt.T.copy()
del qt
logger.info("computing the final decomposition")
keep = clip_spectrum(s**2, rank, discard=eps)
u = u[:, :keep].copy()
s = s[:keep]
u = numpy.dot(q, u)
return u.astype(dtype), s.astype(dtype)
| mit |
resmo/ansible | test/units/modules/network/fortios/test_fortios_system_geoip_override.py | 21 | 8183 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_geoip_override
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_geoip_override.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
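# Note: `connection_mock` above refers to the fixture function object rather than an
# active mock; the handler is only passed through to the module under test, whose
# schema/set/delete calls are patched per-test, so the connection itself is never used.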
def test_system_geoip_override_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_geoip_override': {
'country_id': 'test_value_3',
'description': 'test_value_4',
'name': 'default_name_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_geoip_override.fortios_system(input_data, fos_instance)
expected_data = {
'country-id': 'test_value_3',
'description': 'test_value_4',
'name': 'default_name_5'
}
set_method_mock.assert_called_with('system', 'geoip-override', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_geoip_override_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_geoip_override': {
'country_id': 'test_value_3',
'description': 'test_value_4',
'name': 'default_name_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_geoip_override.fortios_system(input_data, fos_instance)
expected_data = {
'country-id': 'test_value_3',
'description': 'test_value_4',
'name': 'default_name_5'
}
set_method_mock.assert_called_with('system', 'geoip-override', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_geoip_override_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_geoip_override': {
'country_id': 'test_value_3',
'description': 'test_value_4',
'name': 'default_name_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_geoip_override.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'geoip-override', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_geoip_override_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_geoip_override': {
'country_id': 'test_value_3',
'description': 'test_value_4',
'name': 'default_name_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_geoip_override.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'geoip-override', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_geoip_override_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_geoip_override': {
'country_id': 'test_value_3',
'description': 'test_value_4',
'name': 'default_name_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_geoip_override.fortios_system(input_data, fos_instance)
expected_data = {
'country-id': 'test_value_3',
'description': 'test_value_4',
'name': 'default_name_5'
}
set_method_mock.assert_called_with('system', 'geoip-override', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_system_geoip_override_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_geoip_override': {
'random_attribute_not_valid': 'tag',
'country_id': 'test_value_3',
'description': 'test_value_4',
'name': 'default_name_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_geoip_override.fortios_system(input_data, fos_instance)
expected_data = {
'country-id': 'test_value_3',
'description': 'test_value_4',
'name': 'default_name_5'
}
set_method_mock.assert_called_with('system', 'geoip-override', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
qrkourier/ansible | lib/ansible/modules/network/dellos9/dellos9_command.py | 27 | 7356 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <[email protected]>
# Copyright (c) 2016 Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos9_command
version_added: "2.2"
author: "Dhivya P (@dhivyap)"
short_description: Run commands on remote devices running Dell OS9
description:
- Sends arbitrary commands to a Dell OS9 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos9_config) to configure Dell OS9 devices.
extends_documentation_fragment: dellos9
options:
commands:
description:
- List of commands to send to the remote dellos9 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module does not return until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
required: false
default: null
retries:
description:
- Specifies the number of times a command should be retried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
notes:
- This module requires Dell OS9 version 9.10.0.1P13 or above.
- This module requires the ssh connection rate limit to be increased.
Use the command I(ip ssh connection-rate-limit 60)
to configure it. This can also be done via the M(dellos9_config)
module.
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
tasks:
- name: run show version on remote devices
dellos9_command:
commands: show version
provider: "{{ cli }}"
- name: run show version and check to see if output contains OS9
dellos9_command:
commands: show version
wait_for: result[0] contains OS9
provider: "{{ cli }}"
- name: run multiple commands on remote nodes
dellos9_command:
commands:
- show version
- show interfaces
provider: "{{ cli }}"
- name: run multiple commands and evaluate the output
dellos9_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains OS9
- result[1] contains Loopback
provider: "{{ cli }}"
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.dellos9 import run_commands
from ansible.module_utils.dellos9 import dellos9_argument_spec, check_args
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.netcli import Conditional
from ansible.module_utils.six import string_types
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='dellos9_command does not support running config mode '
'commands. Please use dellos9_config instead'
)
return commands
def main():
"""main entry point for module execution
"""
argument_spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(dellos9_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
}
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
signalfx/Diamond | src/collectors/xen_collector/test/testxen.py | 29 | 2970 | #!/usr/bin/python
# coding=utf-8
###############################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import Mock
from mock import patch
from diamond.collector import Collector
from xen_collector import XENCollector
###############################################################################
def run_only_if_libvirt_is_available(func):
try:
import libvirt
except ImportError:
libvirt = None
pred = lambda: libvirt is not None
return run_only(func, pred)
class TestXENCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('XENCollector', {
})
self.collector = XENCollector(config, None)
def test_import(self):
self.assertTrue(XENCollector)
@run_only_if_libvirt_is_available
@patch('os.statvfs')
@patch('libvirt.openReadOnly')
@patch.object(Collector, 'publish')
def test_centos6(self, publish_mock, libvirt_mock, os_mock):
class info:
def __init__(self, id):
self.id = id
def info(self):
if self.id == 0:
return [1, 49420888L, 49420888L, 8, 911232000000000L]
if self.id == 1:
return [1, 2097152L, 2097152L, 2, 310676150000000L]
if self.id == 2:
return [1, 2097152L, 2097152L, 2, 100375300000000L]
if self.id == 3:
return [1, 10485760L, 10485760L, 2, 335312040000000L]
if self.id == 4:
return [1, 10485760L, 10485760L, 2, 351313480000000L]
libvirt_m = Mock()
libvirt_m.getInfo.return_value = ['x86_64', 48262, 8, 1200, 2, 1, 4, 1]
libvirt_m.listDomainsID.return_value = [0, 2, 1, 4, 3]
def lookupByIdMock(id):
lookup = info(id)
return lookup
libvirt_m.lookupByID = lookupByIdMock
libvirt_mock.return_value = libvirt_m
statsvfs_mock = Mock()
statsvfs_mock.f_bavail = 74492145
statsvfs_mock.f_frsize = 4096
os_mock.return_value = statsvfs_mock
self.collector.collect()
metrics = {
'TotalCores': 8.000000,
'InstalledMem': 48262.000000,
'MemAllocated': 24576.000000,
'MemFree': 23686.000000,
'DiskFree': 297968580.000000,
'FreeCores': 0.000000,
'AllocatedCores': 8.000000,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
###############################################################################
if __name__ == "__main__":
unittest.main()
| mit |
dean/deliveru | deliveru/settings.py | 1 | 2173 | """
Django settings for deliveru project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h@c&fv&@q0f@5*b7sn0&=go#3zs07nm-o)2bic66b()nmds%s!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'deliveru',
'deliveru.user_profile',
'deliveru.order',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'deliveru.urls'
WSGI_APPLICATION = 'deliveru.wsgi.application'
AUTH_PROFILE_MODULE = 'deliveru.user_profile.UserProfile'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| mit |
allenlavoie/tensorflow | tensorflow/python/kernel_tests/summary_tensor_op_test.py | 43 | 5972 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for summary ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import summary_ops
from tensorflow.python.platform import test
class SummaryOpsTest(test.TestCase):
def _SummarySingleValue(self, s):
summ = summary_pb2.Summary()
summ.ParseFromString(s)
self.assertEqual(len(summ.value), 1)
return summ.value[0]
def _AssertNumpyEq(self, actual, expected):
self.assertTrue(np.array_equal(actual, expected))
def testTags(self):
with self.test_session() as sess:
c = constant_op.constant(1)
s1 = summary_ops.tensor_summary("s1", c)
with ops.name_scope("foo"):
s2 = summary_ops.tensor_summary("s2", c)
with ops.name_scope("zod"):
s3 = summary_ops.tensor_summary("s3", c)
s4 = summary_ops.tensor_summary("TensorSummary", c)
summ1, summ2, summ3, summ4 = sess.run([s1, s2, s3, s4])
v1 = self._SummarySingleValue(summ1)
self.assertEqual(v1.tag, "s1")
v2 = self._SummarySingleValue(summ2)
self.assertEqual(v2.tag, "foo/s2")
v3 = self._SummarySingleValue(summ3)
self.assertEqual(v3.tag, "foo/zod/s3")
v4 = self._SummarySingleValue(summ4)
self.assertEqual(v4.tag, "foo/zod/TensorSummary")
def testScalarSummary(self):
with self.test_session() as sess:
const = constant_op.constant(10.0)
summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, 10)
def testStringSummary(self):
s = six.b("foobar")
with self.test_session() as sess:
const = constant_op.constant(s)
summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, s)
def testManyScalarSummary(self):
with self.test_session() as sess:
const = array_ops.ones([5, 5, 5])
summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, np.ones([5, 5, 5]))
def testManyStringSummary(self):
strings = [[six.b("foo bar"), six.b("baz")], [six.b("zoink"), six.b("zod")]]
with self.test_session() as sess:
const = constant_op.constant(strings)
summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, strings)
def testManyBools(self):
bools = [True, True, True, False, False, False]
with self.test_session() as sess:
const = constant_op.constant(bools)
summ = summary_ops.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, bools)
def testSummaryDescriptionAndDisplayName(self):
with self.test_session() as sess:
def get_description(summary_op):
summ_str = sess.run(summary_op)
summ = summary_pb2.Summary()
summ.ParseFromString(summ_str)
return summ.value[0].metadata
const = constant_op.constant(1)
# Default case; no description or display name
simple_summary = summary_ops.tensor_summary("simple", const)
descr = get_description(simple_summary)
self.assertEqual(descr.display_name, "")
self.assertEqual(descr.summary_description, "")
# Values are provided via function args
with_values = summary_ops.tensor_summary(
"simple",
const,
display_name="my name",
summary_description="my description")
descr = get_description(with_values)
self.assertEqual(descr.display_name, "my name")
self.assertEqual(descr.summary_description, "my description")
# Values are provided via the SummaryMetadata arg
metadata = summary_pb2.SummaryMetadata()
metadata.display_name = "my name"
metadata.summary_description = "my description"
with_metadata = summary_ops.tensor_summary(
"simple", const, summary_metadata=metadata)
descr = get_description(with_metadata)
self.assertEqual(descr.display_name, "my name")
self.assertEqual(descr.summary_description, "my description")
# If both SummaryMetadata and explicit args are provided, the args win
overwrite = summary_ops.tensor_summary(
"simple",
const,
summary_metadata=metadata,
display_name="overwritten",
summary_description="overwritten")
descr = get_description(overwrite)
self.assertEqual(descr.display_name, "overwritten")
self.assertEqual(descr.summary_description, "overwritten")
if __name__ == "__main__":
test.main()
| apache-2.0 |
rkokkelk/CouchPotatoServer | libs/dateutil/relativedelta.py | 216 | 17224 | """
Copyright (c) 2003-2010 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard Python
datetime module.
"""
__license__ = "Simplified BSD"
import datetime
import calendar
from six import integer_types
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class weekday(object):
__slots__ = ["weekday", "n"]
def __init__(self, weekday, n=None):
self.weekday = weekday
self.n = n
def __call__(self, n):
if n == self.n:
return self
else:
return self.__class__(self.weekday, n)
def __eq__(self, other):
try:
if self.weekday != other.weekday or self.n != other.n:
return False
except AttributeError:
return False
return True
def __repr__(self):
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
if not self.n:
return s
else:
return "%s(%+d)" % (s, self.n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class relativedelta(object):
"""
The relativedelta type is based on the specification of the excellent
work done by M.-A. Lemburg in his mx.DateTime extension. However,
notice that this type does *NOT* implement the same algorithm as
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
There are two different ways to build a relativedelta instance. The
first one is passing it two date/datetime classes:
relativedelta(datetime1, datetime2)
And the other way is to use the following keyword arguments:
year, month, day, hour, minute, second, microsecond:
Absolute information.
years, months, weeks, days, hours, minutes, seconds, microseconds:
Relative information, may be negative.
weekday:
One of the weekday instances (MO, TU, etc). These instances may
receive a parameter N, specifying the Nth weekday, which could
be positive or negative (like MO(+1) or MO(-2)). Not specifying
it is the same as specifying +1. You can also use an integer,
where 0=MO.
leapdays:
Will add given days to the date found, if the year is a leap
year and the date found is past the 28th of February.
yearday, nlyearday:
Set the yearday or the non-leap year day (jump leap days).
These are converted to day/month/leapdays information.
Here is the behavior of operations with relativedelta:
1) Calculate the absolute year, using the 'year' argument, or the
original datetime year, if the argument is not present.
2) Add the relative 'years' argument to the absolute year.
3) Do steps 1 and 2 for month/months.
4) Calculate the absolute day, using the 'day' argument, or the
original datetime day, if the argument is not present. Then,
subtract from the day until it fits in the year and month
found after their operations.
5) Add the relative 'days' argument to the absolute day. Notice
that the 'weeks' argument is multiplied by 7 and added to
'days'.
6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
microsecond/microseconds.
7) If the 'weekday' argument is present, calculate the weekday,
with the given (wday, nth) tuple. wday is the index of the
weekday (0-6, 0=Mon), and nth is the number of weeks to add
forward or backward, depending on its signal. Notice that if
the calculated date is already Monday, for example, using
(0, 1) or (0, -1) won't change the day.
"""
def __init__(self, dt1=None, dt2=None,
years=0, months=0, days=0, leapdays=0, weeks=0,
hours=0, minutes=0, seconds=0, microseconds=0,
year=None, month=None, day=None, weekday=None,
yearday=None, nlyearday=None,
hour=None, minute=None, second=None, microsecond=None):
if dt1 and dt2:
if (not isinstance(dt1, datetime.date)) or (not isinstance(dt2, datetime.date)):
raise TypeError("relativedelta only diffs datetime/date")
if not type(dt1) == type(dt2): #isinstance(dt1, type(dt2)):
if not isinstance(dt1, datetime.datetime):
dt1 = datetime.datetime.fromordinal(dt1.toordinal())
elif not isinstance(dt2, datetime.datetime):
dt2 = datetime.datetime.fromordinal(dt2.toordinal())
self.years = 0
self.months = 0
self.days = 0
self.leapdays = 0
self.hours = 0
self.minutes = 0
self.seconds = 0
self.microseconds = 0
self.year = None
self.month = None
self.day = None
self.weekday = None
self.hour = None
self.minute = None
self.second = None
self.microsecond = None
self._has_time = 0
months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
self._set_months(months)
dtm = self.__radd__(dt2)
if dt1 < dt2:
while dt1 > dtm:
months += 1
self._set_months(months)
dtm = self.__radd__(dt2)
else:
while dt1 < dtm:
months -= 1
self._set_months(months)
dtm = self.__radd__(dt2)
delta = dt1 - dtm
self.seconds = delta.seconds+delta.days*86400
self.microseconds = delta.microseconds
else:
self.years = years
self.months = months
self.days = days+weeks*7
self.leapdays = leapdays
self.hours = hours
self.minutes = minutes
self.seconds = seconds
self.microseconds = microseconds
self.year = year
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.microsecond = microsecond
if isinstance(weekday, integer_types):
self.weekday = weekdays[weekday]
else:
self.weekday = weekday
yday = 0
if nlyearday:
yday = nlyearday
elif yearday:
yday = yearday
if yearday > 59:
self.leapdays = -1
if yday:
ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 366]
for idx, ydays in enumerate(ydayidx):
if yday <= ydays:
self.month = idx+1
if idx == 0:
self.day = yday
else:
self.day = yday-ydayidx[idx-1]
break
else:
raise ValueError("invalid year day (%d)" % yday)
self._fix()
def _fix(self):
if abs(self.microseconds) > 999999:
s = self.microseconds//abs(self.microseconds)
div, mod = divmod(self.microseconds*s, 1000000)
self.microseconds = mod*s
self.seconds += div*s
if abs(self.seconds) > 59:
s = self.seconds//abs(self.seconds)
div, mod = divmod(self.seconds*s, 60)
self.seconds = mod*s
self.minutes += div*s
if abs(self.minutes) > 59:
s = self.minutes//abs(self.minutes)
div, mod = divmod(self.minutes*s, 60)
self.minutes = mod*s
self.hours += div*s
if abs(self.hours) > 23:
s = self.hours//abs(self.hours)
div, mod = divmod(self.hours*s, 24)
self.hours = mod*s
self.days += div*s
if abs(self.months) > 11:
s = self.months//abs(self.months)
div, mod = divmod(self.months*s, 12)
self.months = mod*s
self.years += div*s
if (self.hours or self.minutes or self.seconds or self.microseconds or
self.hour is not None or self.minute is not None or
self.second is not None or self.microsecond is not None):
self._has_time = 1
else:
self._has_time = 0
def _set_months(self, months):
self.months = months
if abs(self.months) > 11:
s = self.months//abs(self.months)
div, mod = divmod(self.months*s, 12)
self.months = mod*s
self.years = div*s
else:
self.years = 0
def __add__(self, other):
if isinstance(other, relativedelta):
return relativedelta(years=other.years+self.years,
months=other.months+self.months,
days=other.days+self.days,
hours=other.hours+self.hours,
minutes=other.minutes+self.minutes,
seconds=other.seconds+self.seconds,
microseconds=other.microseconds+self.microseconds,
leapdays=other.leapdays or self.leapdays,
year=other.year or self.year,
month=other.month or self.month,
day=other.day or self.day,
weekday=other.weekday or self.weekday,
hour=other.hour or self.hour,
minute=other.minute or self.minute,
second=other.second or self.second,
microsecond=other.microsecond or self.microsecond)
if not isinstance(other, datetime.date):
raise TypeError("unsupported type for add operation")
elif self._has_time and not isinstance(other, datetime.datetime):
other = datetime.datetime.fromordinal(other.toordinal())
year = (self.year or other.year)+self.years
month = self.month or other.month
if self.months:
assert 1 <= abs(self.months) <= 12
month += self.months
if month > 12:
year += 1
month -= 12
elif month < 1:
year -= 1
month += 12
day = min(calendar.monthrange(year, month)[1],
self.day or other.day)
repl = {"year": year, "month": month, "day": day}
for attr in ["hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
repl[attr] = value
days = self.days
if self.leapdays and month > 2 and calendar.isleap(year):
days += self.leapdays
ret = (other.replace(**repl)
+ datetime.timedelta(days=days,
hours=self.hours,
minutes=self.minutes,
seconds=self.seconds,
microseconds=self.microseconds))
if self.weekday:
weekday, nth = self.weekday.weekday, self.weekday.n or 1
jumpdays = (abs(nth)-1)*7
if nth > 0:
jumpdays += (7-ret.weekday()+weekday)%7
else:
jumpdays += (ret.weekday()-weekday)%7
jumpdays *= -1
ret += datetime.timedelta(days=jumpdays)
return ret
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
return self.__neg__().__radd__(other)
def __sub__(self, other):
if not isinstance(other, relativedelta):
raise TypeError("unsupported type for sub operation")
return relativedelta(years=self.years-other.years,
months=self.months-other.months,
days=self.days-other.days,
hours=self.hours-other.hours,
minutes=self.minutes-other.minutes,
seconds=self.seconds-other.seconds,
microseconds=self.microseconds-other.microseconds,
leapdays=self.leapdays or other.leapdays,
year=self.year or other.year,
month=self.month or other.month,
day=self.day or other.day,
weekday=self.weekday or other.weekday,
hour=self.hour or other.hour,
minute=self.minute or other.minute,
second=self.second or other.second,
microsecond=self.microsecond or other.microsecond)
def __neg__(self):
return relativedelta(years=-self.years,
months=-self.months,
days=-self.days,
hours=-self.hours,
minutes=-self.minutes,
seconds=-self.seconds,
microseconds=-self.microseconds,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __bool__(self):
return not (not self.years and
not self.months and
not self.days and
not self.hours and
not self.minutes and
not self.seconds and
not self.microseconds and
not self.leapdays and
self.year is None and
self.month is None and
self.day is None and
self.weekday is None and
self.hour is None and
self.minute is None and
self.second is None and
self.microsecond is None)
def __mul__(self, other):
f = float(other)
return relativedelta(years=int(self.years*f),
months=int(self.months*f),
days=int(self.days*f),
hours=int(self.hours*f),
minutes=int(self.minutes*f),
seconds=int(self.seconds*f),
microseconds=int(self.microseconds*f),
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
__rmul__ = __mul__
def __eq__(self, other):
if not isinstance(other, relativedelta):
return False
if self.weekday or other.weekday:
if not self.weekday or not other.weekday:
return False
if self.weekday.weekday != other.weekday.weekday:
return False
n1, n2 = self.weekday.n, other.weekday.n
if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
return False
return (self.years == other.years and
self.months == other.months and
self.days == other.days and
self.hours == other.hours and
self.minutes == other.minutes and
self.seconds == other.seconds and
self.leapdays == other.leapdays and
self.year == other.year and
self.month == other.month and
self.day == other.day and
self.hour == other.hour and
self.minute == other.minute and
self.second == other.second and
self.microsecond == other.microsecond)
def __ne__(self, other):
return not self.__eq__(other)
def __div__(self, other):
return self.__mul__(1/float(other))
__truediv__ = __div__
def __repr__(self):
l = []
for attr in ["years", "months", "days", "leapdays",
"hours", "minutes", "seconds", "microseconds"]:
value = getattr(self, attr)
if value:
l.append("%s=%+d" % (attr, value))
for attr in ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
# vim:ts=4:sw=4:et
| gpl-3.0 |
vladikoff/fxa-mochitest | tests/venv/lib/python2.7/site-packages/setuptools/tests/test_sandbox.py | 342 | 2170 | """develop tests
"""
import sys
import os
import shutil
import unittest
import tempfile
import types
import pkg_resources
import setuptools.sandbox
from setuptools.sandbox import DirectorySandbox, SandboxViolation
def has_win32com():
"""
Run this to determine if the local machine has win32com, and if it
does, include additional tests.
"""
if not sys.platform.startswith('win32'):
return False
try:
mod = __import__('win32com')
except ImportError:
return False
return True
class TestSandbox(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.dir)
def test_devnull(self):
if sys.version < '2.4':
return
sandbox = DirectorySandbox(self.dir)
sandbox.run(self._file_writer(os.devnull))
def _file_writer(path):
def do_write():
f = open(path, 'w')
f.write('xxx')
f.close()
return do_write
_file_writer = staticmethod(_file_writer)
if has_win32com():
def test_win32com(self):
"""
win32com should not be prevented from caching COM interfaces
in gen_py.
"""
import win32com
gen_py = win32com.__gen_path__
target = os.path.join(gen_py, 'test_write')
sandbox = DirectorySandbox(self.dir)
try:
try:
sandbox.run(self._file_writer(target))
except SandboxViolation:
self.fail("Could not create gen_py file due to SandboxViolation")
finally:
if os.path.exists(target): os.remove(target)
def test_setup_py_with_BOM(self):
"""
It should be possible to execute a setup.py with a Byte Order Mark
"""
target = pkg_resources.resource_filename(__name__,
'script-with-bom.py')
namespace = types.ModuleType('namespace')
setuptools.sandbox.execfile(target, vars(namespace))
assert namespace.result == 'passed'
if __name__ == '__main__':
unittest.main()
| mpl-2.0 |
alfonsokim/nupic | examples/opf/experiments/anomaly/spatial/10field_few_skewed/description.py | 20 | 16550 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/experiment_generator.py'
"""
from nupic.frameworks.opf.exp_description_api import ExperimentDescriptionAPI
from nupic.frameworks.opf.exp_description_helpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.htm_prediction_model_callbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opf_utils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opf_task_driver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
'f2': dict(fieldname='f2', n=100, name='f2', type='SDRCategoryEncoder', w=21),
'f3': dict(fieldname='f3', n=100, name='f3', type='SDRCategoryEncoder', w=21),
'f4': dict(fieldname='f4', n=100, name='f4', type='SDRCategoryEncoder', w=21),
'f5': dict(fieldname='f5', n=100, name='f5', type='SDRCategoryEncoder', w=21),
'f6': dict(fieldname='f6', n=100, name='f6', type='SDRCategoryEncoder', w=21),
'f7': dict(fieldname='f7', n=100, name='f7', type='SDRCategoryEncoder', w=21),
'f8': dict(fieldname='f8', n=100, name='f8', type='SDRCategoryEncoder', w=21),
'f9': dict(fieldname='f9', n=100, name='f9', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TM and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'py',
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opf_task_driver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [htmPredictionModelControlEnableSPLearningCb, htmPredictionModelControlEnableTPLearningCb],
# 'setup' : [htmPredictionModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
ZenithDK/mopidy | tests/mpd/protocol/test_regression.py | 4 | 7958 | from __future__ import absolute_import, unicode_literals
import random
import mock
from mopidy.models import Playlist, Ref, Track
from mopidy.mpd.protocol import stored_playlists
from tests.mpd import protocol
class IssueGH17RegressionTest(protocol.BaseTestCase):
"""
The issue: http://github.com/mopidy/mopidy/issues/17
How to reproduce:
- Play a playlist where one track cannot be played
- Turn on random mode
- Press next until you get to the unplayable track
"""
def test(self):
tracks = [
Track(uri='dummy:a'),
Track(uri='dummy:b'),
Track(uri='dummy:error'),
Track(uri='dummy:d'),
Track(uri='dummy:e'),
Track(uri='dummy:f'),
]
self.audio.trigger_fake_playback_failure('dummy:error')
self.backend.library.dummy_library = tracks
self.core.tracklist.add(uris=[t.uri for t in tracks]).get()
random.seed(1) # Playlist order: abcfde
self.send_request('play')
self.assertEqual(
'dummy:a', self.core.playback.current_track.get().uri)
self.send_request('random "1"')
self.send_request('next')
self.assertEqual(
'dummy:b', self.core.playback.current_track.get().uri)
self.send_request('next')
# Should now be at track 'c', but playback fails and it skips ahead
self.assertEqual(
'dummy:f', self.core.playback.current_track.get().uri)
self.send_request('next')
self.assertEqual(
'dummy:d', self.core.playback.current_track.get().uri)
self.send_request('next')
self.assertEqual(
'dummy:e', self.core.playback.current_track.get().uri)
class IssueGH18RegressionTest(protocol.BaseTestCase):
"""
The issue: http://github.com/mopidy/mopidy/issues/18
How to reproduce:
Play, random on, next, random off, next, next.
At this point it gives the same song over and over.
"""
def test(self):
tracks = [
Track(uri='dummy:a'), Track(uri='dummy:b'), Track(uri='dummy:c'),
Track(uri='dummy:d'), Track(uri='dummy:e'), Track(uri='dummy:f'),
]
self.backend.library.dummy_library = tracks
self.core.tracklist.add(uris=[t.uri for t in tracks]).get()
random.seed(1)
self.send_request('play')
self.send_request('random "1"')
self.send_request('next')
self.send_request('random "0"')
self.send_request('next')
self.send_request('next')
tl_track_1 = self.core.playback.current_tl_track.get()
self.send_request('next')
tl_track_2 = self.core.playback.current_tl_track.get()
self.send_request('next')
tl_track_3 = self.core.playback.current_tl_track.get()
self.assertNotEqual(tl_track_1, tl_track_2)
self.assertNotEqual(tl_track_2, tl_track_3)
class IssueGH22RegressionTest(protocol.BaseTestCase):
"""
The issue: http://github.com/mopidy/mopidy/issues/22
How to reproduce:
Play, random on, remove all tracks from the current playlist (as in
"delete" each one, not "clear").
Alternatively: Play, random on, remove a random track from the current
playlist, press next until it crashes.
"""
def test(self):
tracks = [
Track(uri='dummy:a'), Track(uri='dummy:b'), Track(uri='dummy:c'),
Track(uri='dummy:d'), Track(uri='dummy:e'), Track(uri='dummy:f'),
]
self.backend.library.dummy_library = tracks
self.core.tracklist.add(uris=[t.uri for t in tracks]).get()
random.seed(1)
self.send_request('play')
self.send_request('random "1"')
self.send_request('deleteid "1"')
self.send_request('deleteid "2"')
self.send_request('deleteid "3"')
self.send_request('deleteid "4"')
self.send_request('deleteid "5"')
self.send_request('deleteid "6"')
self.send_request('status')
class IssueGH69RegressionTest(protocol.BaseTestCase):
"""
The issue: https://github.com/mopidy/mopidy/issues/69
How to reproduce:
Play track, stop, clear current playlist, load a new playlist, status.
The status response now contains "song: None".
"""
def test(self):
self.core.playlists.create('foo')
tracks = [
Track(uri='dummy:a'), Track(uri='dummy:b'), Track(uri='dummy:c'),
Track(uri='dummy:d'), Track(uri='dummy:e'), Track(uri='dummy:f'),
]
self.backend.library.dummy_library = tracks
self.core.tracklist.add(uris=[t.uri for t in tracks]).get()
self.send_request('play')
self.send_request('stop')
self.send_request('clear')
self.send_request('load "foo"')
self.assertNotInResponse('song: None')
class IssueGH113RegressionTest(protocol.BaseTestCase):
"""
The issue: https://github.com/mopidy/mopidy/issues/113
How to reproduce:
    - Have a playlist with a name containing backslashes, like
"all lart spotify:track:\w\{22\} pastes".
- Try to load the playlist with the backslashes in the playlist name
escaped.
"""
def test(self):
self.core.playlists.create(
u'all lart spotify:track:\w\{22\} pastes')
self.send_request('lsinfo "/"')
self.assertInResponse(
u'playlist: all lart spotify:track:\w\{22\} pastes')
self.send_request(
r'listplaylistinfo "all lart spotify:track:\\w\\{22\\} pastes"')
self.assertInResponse('OK')
class IssueGH137RegressionTest(protocol.BaseTestCase):
"""
The issue: https://github.com/mopidy/mopidy/issues/137
How to reproduce:
- Send "list" query with mismatching quotes
"""
def test(self):
self.send_request(
u'list Date Artist "Anita Ward" '
u'Album "This Is Remixed Hits - Mashups & Rare 12" Mixes"')
self.assertInResponse('ACK [2@0] {list} Invalid unquoted character')
class IssueGH1120RegressionTest(protocol.BaseTestCase):
"""
The issue: https://github.com/mopidy/mopidy/issues/1120
How to reproduce:
- A playlist must be in both browse results and playlists
- Call for instance ``lsinfo "/"`` to populate the cache with the
playlist name from the playlist backend.
- Call ``lsinfo "/dummy"`` to override the playlist name with the browse
name.
- Call ``lsinfo "/"`` and we now have an invalid name with ``/`` in it.
"""
@mock.patch.object(stored_playlists, '_get_last_modified')
def test(self, last_modified_mock):
last_modified_mock.return_value = '2015-08-05T22:51:06Z'
self.backend.library.dummy_browse_result = {
'dummy:/': [Ref.playlist(name='Top 100 tracks', uri='dummy:/1')],
}
self.backend.playlists.set_dummy_playlists([
Playlist(name='Top 100 tracks', uri='dummy:/1'),
])
response1 = self.send_request('lsinfo "/"')
self.send_request('lsinfo "/dummy"')
response2 = self.send_request('lsinfo "/"')
self.assertEqual(response1, response2)
class IssueGH1348RegressionTest(protocol.BaseTestCase):
"""
The issue: http://github.com/mopidy/mopidy/issues/1348
"""
def test(self):
self.backend.library.dummy_library = [Track(uri='dummy:a')]
# Create a dummy playlist and trigger population of mapping
self.send_request('playlistadd "testing1" "dummy:a"')
self.send_request('listplaylists')
# Create an other playlist which isn't in the map
self.send_request('playlistadd "testing2" "dummy:a"')
self.assertEqual(['OK'], self.send_request('rm "testing2"'))
playlists = self.backend.playlists.as_list().get()
self.assertEqual(['testing1'], [ref.name for ref in playlists])
| apache-2.0 |
sadaf2605/django | django/db/backends/mysql/client.py | 94 | 1524 | import subprocess
from django.db.backends.base.client import BaseDatabaseClient
class DatabaseClient(BaseDatabaseClient):
executable_name = 'mysql'
@classmethod
def settings_to_cmd_args(cls, settings_dict):
args = [cls.executable_name]
db = settings_dict['OPTIONS'].get('db', settings_dict['NAME'])
user = settings_dict['OPTIONS'].get('user', settings_dict['USER'])
passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD'])
host = settings_dict['OPTIONS'].get('host', settings_dict['HOST'])
port = settings_dict['OPTIONS'].get('port', settings_dict['PORT'])
cert = settings_dict['OPTIONS'].get('ssl', {}).get('ca')
defaults_file = settings_dict['OPTIONS'].get('read_default_file')
# Seems to be no good way to set sql_mode with CLI.
if defaults_file:
args += ["--defaults-file=%s" % defaults_file]
if user:
args += ["--user=%s" % user]
if passwd:
args += ["--password=%s" % passwd]
if host:
if '/' in host:
args += ["--socket=%s" % host]
else:
args += ["--host=%s" % host]
if port:
args += ["--port=%s" % port]
if cert:
args += ["--ssl-ca=%s" % cert]
if db:
args += [db]
return args
def runshell(self):
args = DatabaseClient.settings_to_cmd_args(self.connection.settings_dict)
subprocess.check_call(args)
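# --- Hedged usage sketch (not part of the original module) ---
# Shows how settings_to_cmd_args maps a Django DATABASES entry to mysql CLI
# arguments; the connection values below are made-up examples.
if __name__ == '__main__':
    example_settings = {
        'NAME': 'mydb',
        'USER': 'admin',
        'PASSWORD': 'secret',
        'HOST': '127.0.0.1',
        'PORT': '3306',
        'OPTIONS': {},
    }
    # Prints: ['mysql', '--user=admin', '--password=secret',
    #          '--host=127.0.0.1', '--port=3306', 'mydb']
    print(DatabaseClient.settings_to_cmd_args(example_settings))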
| bsd-3-clause |
indautgrp/erpnext | erpnext/accounts/doctype/tax_rule/tax_rule.py | 7 | 5732 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import cstr, cint
class IncorrectCustomerGroup(frappe.ValidationError): pass
class IncorrectSupplierType(frappe.ValidationError): pass
class ConflictingTaxRule(frappe.ValidationError): pass
class TaxRule(Document):
def __setup__(self):
self.flags.ignore_these_exceptions_in_test = [ConflictingTaxRule]
def validate(self):
self.validate_tax_template()
self.validate_date()
self.validate_filters()
self.validate_use_for_shopping_cart()
def validate_tax_template(self):
if self.tax_type== "Sales":
self.purchase_tax_template = self.supplier = self.supplier_type = None
if self.customer:
self.customer_group = None
else:
self.sales_tax_template = self.customer = self.customer_group = None
if self.supplier:
self.supplier_type = None
if not (self.sales_tax_template or self.purchase_tax_template):
frappe.throw(_("Tax Template is mandatory."))
def validate_date(self):
if self.from_date and self.to_date and self.from_date > self.to_date:
frappe.throw(_("From Date cannot be greater than To Date"))
def validate_filters(self):
filters = {
"tax_type": self.tax_type,
"customer": self.customer,
"customer_group": self.customer_group,
"supplier": self.supplier,
"supplier_type": self.supplier_type,
"billing_city": self.billing_city,
"billing_county": self.billing_county,
"billing_state": self.billing_state,
"billing_country": self.billing_country,
"shipping_city": self.shipping_city,
"shipping_county": self.shipping_county,
"shipping_state": self.shipping_state,
"shipping_country": self.shipping_country,
"company": self.company
}
conds=""
for d in filters:
if conds:
conds += " and "
conds += """ifnull({0}, '') = '{1}'""".format(d, frappe.db.escape(cstr(filters[d])))
if self.from_date and self.to_date:
conds += """ and ((from_date > '{from_date}' and from_date < '{to_date}') or
(to_date > '{from_date}' and to_date < '{to_date}') or
('{from_date}' > from_date and '{from_date}' < to_date) or
('{from_date}' = from_date and '{to_date}' = to_date))""".format(from_date=self.from_date, to_date=self.to_date)
elif self.from_date and not self.to_date:
conds += """ and to_date > '{from_date}'""".format(from_date = self.from_date)
elif self.to_date and not self.from_date:
conds += """ and from_date < '{to_date}'""".format(to_date = self.to_date)
tax_rule = frappe.db.sql("select name, priority \
from `tabTax Rule` where {0} and name != '{1}'".format(conds, self.name), as_dict=1)
if tax_rule:
if tax_rule[0].priority == self.priority:
frappe.throw(_("Tax Rule Conflicts with {0}".format(tax_rule[0].name)), ConflictingTaxRule)
def validate_use_for_shopping_cart(self):
'''If shopping cart is enabled and no tax rule exists for shopping cart, enable this one'''
if (not self.use_for_shopping_cart
and cint(frappe.db.get_single_value('Shopping Cart Settings', 'enabled'))
and not frappe.db.get_value('Tax Rule', {'use_for_shopping_cart': 1, 'name': ['!=', self.name]})):
self.use_for_shopping_cart = 1
frappe.msgprint(_("Enabling 'Use for Shopping Cart', as Shopping Cart is enabled and there should be at least one Tax Rule for Shopping Cart"))
@frappe.whitelist()
def get_party_details(party, party_type, args=None):
out = {}
if args:
billing_filters= {"name": args.get("billing_address")}
shipping_filters= {"name": args.get("shipping_address")}
else:
billing_filters= {party_type: party, "is_primary_address": 1}
shipping_filters= {party_type:party, "is_shipping_address": 1}
billing_address= frappe.get_all("Address", fields=["city", "county", "state", "country"], filters= billing_filters)
shipping_address= frappe.get_all("Address", fields=["city", "county", "state", "country"], filters= shipping_filters)
if billing_address:
out["billing_city"]= billing_address[0].city
out["billing_county"]= billing_address[0].county
out["billing_state"]= billing_address[0].state
out["billing_country"]= billing_address[0].country
if shipping_address:
out["shipping_city"]= shipping_address[0].city
out["shipping_county"]= shipping_address[0].county
out["shipping_state"]= shipping_address[0].state
out["shipping_country"]= shipping_address[0].country
return out
def get_tax_template(posting_date, args):
"""Get matching tax rule"""
args = frappe._dict(args)
conditions = ["""(from_date is null or from_date = '' or from_date <= '{0}')
and (to_date is null or to_date = '' or to_date >= '{0}')""".format(posting_date)]
for key, value in args.iteritems():
if key=="use_for_shopping_cart":
conditions.append("use_for_shopping_cart = {0}".format(1 if value else 0))
else:
conditions.append("ifnull({0}, '') in ('', '{1}')".format(key, frappe.db.escape(cstr(value))))
tax_rule = frappe.db.sql("""select * from `tabTax Rule`
where {0}""".format(" and ".join(conditions)), as_dict = True)
if not tax_rule:
return None
for rule in tax_rule:
rule.no_of_keys_matched = 0
for key in args:
if rule.get(key): rule.no_of_keys_matched += 1
rule = sorted(tax_rule, lambda b, a: cmp(a.no_of_keys_matched, b.no_of_keys_matched) or cmp(a.priority, b.priority))[0]
tax_template = rule.sales_tax_template or rule.purchase_tax_template
doctype = "{0} Taxes and Charges Template".format(rule.tax_type)
if frappe.db.get_value(doctype, tax_template, 'disabled')==1:
return None
return tax_template
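# --- Hedged illustration (not part of the original module) ---
# Sketch of the ranking used by get_tax_template above, applied to plain
# in-memory rules (rule names and values are made up): rules matching more
# of the supplied keys win, and ties are broken by the rule's priority.
def _example_pick_best_rule():
    args = {"customer": "CUST-0001", "billing_country": "India"}
    rules = [
        frappe._dict(name="Rule A", priority=1, customer="CUST-0001"),
        frappe._dict(name="Rule B", priority=2, customer="CUST-0001",
            billing_country="India"),
    ]
    for rule in rules:
        rule.no_of_keys_matched = sum(1 for key in args if rule.get(key))
    best = sorted(rules, lambda b, a: cmp(a.no_of_keys_matched, b.no_of_keys_matched)
        or cmp(a.priority, b.priority))[0]
    return best.name  # "Rule B", since it matches both supplied keys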
| gpl-3.0 |
markjenny/my_ucore_os_lab | related_info/lab7/semaphore_condition/thr-ex1.py | 48 | 1026 | #!/bin/env python
# -*- coding: utf-8 -*-
#filename: peartest.py
import threading, signal
is_exit = False
def doStress(i, cc):
global is_exit
idx = i
while not is_exit:
if (idx < 10000000):
print "thread[%d]: idx=%d"%(i, idx)
idx = idx + cc
else:
break
if is_exit:
print "receive a signal to exit, thread[%d] stop."%i
else:
print "thread[%d] complete."%i
def handler(signum, frame):
global is_exit
is_exit = True
print "receive a signal %d, is_exit = %d"%(signum, is_exit)
if __name__ == "__main__":
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
cc = 5
threads = []
for i in range(cc):
t = threading.Thread(target=doStress, args=(i,cc))
t.setDaemon(True)
threads.append(t)
t.start()
while 1:
alive = False
for i in range(cc):
alive = alive or threads[i].isAlive()
if not alive:
break
| gpl-2.0 |
jsirois/commons | src/python/twitter/common/app/modules/http.py | 14 | 3856 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import threading
from twitter.common import app, options
from twitter.common.exceptions import ExceptionalThread
from twitter.common.http.diagnostics import DiagnosticsEndpoints
from twitter.common.http.server import HttpServer
class LifecycleEndpoints(object):
@HttpServer.route('/quitquitquit', method='POST')
def quitquitquit(self):
app.quitquitquit()
@HttpServer.route('/abortabortabort', method='POST')
def abortabortabort(self):
app.abortabortabort()
class RootServer(HttpServer, app.Module):
"""
A root singleton server for all your http endpoints to bind to.
"""
OPTIONS = {
'enable':
options.Option('--enable_http',
default=False,
action='store_true',
dest='twitter_common_http_root_server_enabled',
help='Enable root http server for various subsystems, e.g. metrics exporting.'),
'disable_lifecycle':
options.Option('--http_disable_lifecycle',
default=False,
action='store_true',
dest='twitter_common_http_root_server_disable_lifecycle',
help='Disable the lifecycle commands, i.e. /quitquitquit and /abortabortabort.'),
'port':
options.Option('--http_port',
default=8888,
type='int',
metavar='PORT',
dest='twitter_common_http_root_server_port',
help='The port the root http server will be listening on.'),
'host':
options.Option('--http_host',
default='localhost',
type='string',
metavar='HOSTNAME',
dest='twitter_common_http_root_server_host',
help='The host the root http server will be listening on.'),
'framework':
options.Option('--http_framework',
default='wsgiref',
type='string',
metavar='FRAMEWORK',
dest='twitter_common_http_root_server_framework',
help='The framework that will be running the integrated http server.')
}
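  # Hedged usage note (not part of the original module): with this module
  # registered via twitter.common.app, a typical invocation might look like
  #
  #   ./my_app --enable_http --http_port=8080 --http_host=0.0.0.0
  #
  # which brings up the root server with the DiagnosticsEndpoints (and,
  # unless --http_disable_lifecycle is passed, the LifecycleEndpoints)
  # mounted.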
def __init__(self):
self._thread = None
HttpServer.__init__(self)
app.Module.__init__(self, __name__, description="Http subsystem.")
def setup_function(self):
assert self._thread is None, "Attempting to call start() after server has been started!"
options = app.get_options()
parent = self
self.mount_routes(DiagnosticsEndpoints())
if not options.twitter_common_http_root_server_disable_lifecycle:
self.mount_routes(LifecycleEndpoints())
class RootServerThread(ExceptionalThread):
def __init__(self):
super(RootServerThread, self).__init__()
self.daemon = True
def run(self):
rs = parent
rs.run(options.twitter_common_http_root_server_host,
options.twitter_common_http_root_server_port,
server=options.twitter_common_http_root_server_framework)
if options.twitter_common_http_root_server_enabled:
self._thread = RootServerThread()
self._thread.start()
| apache-2.0 |
fitzgen/servo | components/script/dom/bindings/codegen/parser/tests/test_identifier_conflict.py | 53 | 1193 | # Import the WebIDL module, so we can do isinstance checks and whatnot
import WebIDL
def WebIDLTest(parser, harness):
try:
parser.parse("""
enum Foo { "a" };
interface Foo;
""")
results = parser.finish()
harness.ok(False, "Should fail to parse")
except Exception, e:
harness.ok("Name collision" in e.message,
"Should have name collision for interface")
parser = parser.reset()
try:
parser.parse("""
dictionary Foo { long x; };
enum Foo { "a" };
""")
results = parser.finish()
harness.ok(False, "Should fail to parse")
except Exception, e:
harness.ok("Name collision" in e.message,
"Should have name collision for dictionary")
parser = parser.reset()
try:
parser.parse("""
enum Foo { "a" };
enum Foo { "b" };
""")
results = parser.finish()
harness.ok(False, "Should fail to parse")
except Exception, e:
harness.ok("Multiple unresolvable definitions" in e.message,
"Should have name collision for dictionary")
| mpl-2.0 |
pgjones/jinja | tests/test_core_tags.py | 23 | 12981 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.core_tags
~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the core tags like for and if.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import pytest
from jinja2 import Environment, TemplateSyntaxError, UndefinedError, \
DictLoader
@pytest.fixture
def env_trim():
return Environment(trim_blocks=True)
@pytest.mark.core_tags
@pytest.mark.for_loop
class TestForLoop():
def test_simple(self, env):
tmpl = env.from_string('{% for item in seq %}{{ item }}{% endfor %}')
assert tmpl.render(seq=list(range(10))) == '0123456789'
def test_else(self, env):
tmpl = env.from_string(
'{% for item in seq %}XXX{% else %}...{% endfor %}')
assert tmpl.render() == '...'
def test_empty_blocks(self, env):
tmpl = env.from_string('<{% for item in seq %}{% else %}{% endfor %}>')
assert tmpl.render() == '<>'
def test_context_vars(self, env):
slist = [42, 24]
for seq in [slist, iter(slist), reversed(slist), (_ for _ in slist)]:
tmpl = env.from_string('''{% for item in seq -%}
{{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
loop.length }}###{% endfor %}''')
one, two, _ = tmpl.render(seq=seq).split('###')
(one_index, one_index0, one_revindex, one_revindex0, one_first,
one_last, one_length) = one.split('|')
(two_index, two_index0, two_revindex, two_revindex0, two_first,
two_last, two_length) = two.split('|')
assert int(one_index) == 1 and int(two_index) == 2
assert int(one_index0) == 0 and int(two_index0) == 1
assert int(one_revindex) == 2 and int(two_revindex) == 1
assert int(one_revindex0) == 1 and int(two_revindex0) == 0
assert one_first == 'True' and two_first == 'False'
assert one_last == 'False' and two_last == 'True'
assert one_length == two_length == '2'
def test_cycling(self, env):
tmpl = env.from_string('''{% for item in seq %}{{
loop.cycle('<1>', '<2>') }}{% endfor %}{%
for item in seq %}{{ loop.cycle(*through) }}{% endfor %}''')
output = tmpl.render(seq=list(range(4)), through=('<1>', '<2>'))
assert output == '<1><2>' * 4
def test_scope(self, env):
tmpl = env.from_string('{% for item in seq %}{% endfor %}{{ item }}')
output = tmpl.render(seq=list(range(10)))
assert not output
def test_varlen(self, env):
def inner():
for item in range(5):
yield item
tmpl = env.from_string('{% for item in iter %}{{ item }}{% endfor %}')
output = tmpl.render(iter=inner())
assert output == '01234'
def test_noniter(self, env):
tmpl = env.from_string('{% for item in none %}...{% endfor %}')
pytest.raises(TypeError, tmpl.render)
def test_recursive(self, env):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
assert tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]) == '[1<[1][2]>][2<[1][2]>][3<[a]>]'
def test_recursive_depth0(self, env):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth0 }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
assert tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]) == '[0:1<[1:1][1:2]>][0:2<[1:1][1:2]>][0:3<[1:a]>]'
def test_recursive_depth(self, env):
tmpl = env.from_string('''{% for item in seq recursive -%}
[{{ loop.depth }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}''')
assert tmpl.render(seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a='a')])
]) == '[1:1<[2:1][2:2]>][1:2<[2:1][2:2]>][1:3<[2:a]>]'
def test_looploop(self, env):
tmpl = env.from_string('''{% for row in table %}
{%- set rowloop = loop -%}
{% for cell in row -%}
[{{ rowloop.index }}|{{ loop.index }}]
{%- endfor %}
{%- endfor %}''')
assert tmpl.render(table=['ab', 'cd']) == '[1|1][1|2][2|1][2|2]'
def test_reversed_bug(self, env):
tmpl = env.from_string('{% for i in items %}{{ i }}'
'{% if not loop.last %}'
',{% endif %}{% endfor %}')
assert tmpl.render(items=reversed([3, 2, 1])) == '1,2,3'
def test_loop_errors(self, env):
tmpl = env.from_string('''{% for item in [1] if loop.index
== 0 %}...{% endfor %}''')
pytest.raises(UndefinedError, tmpl.render)
tmpl = env.from_string('''{% for item in [] %}...{% else
%}{{ loop }}{% endfor %}''')
assert tmpl.render() == ''
def test_loop_filter(self, env):
tmpl = env.from_string('{% for item in range(10) if item '
'is even %}[{{ item }}]{% endfor %}')
assert tmpl.render() == '[0][2][4][6][8]'
tmpl = env.from_string('''
{%- for item in range(10) if item is even %}[{{
loop.index }}:{{ item }}]{% endfor %}''')
assert tmpl.render() == '[1:0][2:2][3:4][4:6][5:8]'
def test_loop_unassignable(self, env):
pytest.raises(TemplateSyntaxError, env.from_string,
'{% for loop in seq %}...{% endfor %}')
def test_scoped_special_var(self, env):
t = env.from_string(
'{% for s in seq %}[{{ loop.first }}{% for c in s %}'
'|{{ loop.first }}{% endfor %}]{% endfor %}')
assert t.render(seq=('ab', 'cd')) \
== '[True|True|False][False|True|False]'
def test_scoped_loop_var(self, env):
t = env.from_string('{% for x in seq %}{{ loop.first }}'
'{% for y in seq %}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalse'
t = env.from_string('{% for x in seq %}{% for y in seq %}'
'{{ loop.first }}{% endfor %}{% endfor %}')
assert t.render(seq='ab') == 'TrueFalseTrueFalse'
def test_recursive_empty_loop_iter(self, env):
t = env.from_string('''
{%- for item in foo recursive -%}{%- endfor -%}
''')
assert t.render(dict(foo=[])) == ''
def test_call_in_loop(self, env):
t = env.from_string('''
{%- macro do_something() -%}
[{{ caller() }}]
{%- endmacro %}
{%- for i in [1, 2, 3] %}
{%- call do_something() -%}
{{ i }}
{%- endcall %}
{%- endfor -%}
''')
assert t.render() == '[1][2][3]'
def test_scoping_bug(self, env):
t = env.from_string('''
{%- for item in foo %}...{{ item }}...{% endfor %}
{%- macro item(a) %}...{{ a }}...{% endmacro %}
{{- item(2) -}}
''')
assert t.render(foo=(1,)) == '...1......2...'
def test_unpacking(self, env):
tmpl = env.from_string('{% for a, b, c in [[1, 2, 3]] %}'
'{{ a }}|{{ b }}|{{ c }}{% endfor %}')
assert tmpl.render() == '1|2|3'
@pytest.mark.core_tags
@pytest.mark.if_condition
class TestIfCondition():
def test_simple(self, env):
tmpl = env.from_string('''{% if true %}...{% endif %}''')
assert tmpl.render() == '...'
def test_elif(self, env):
tmpl = env.from_string('''{% if false %}XXX{% elif true
%}...{% else %}XXX{% endif %}''')
assert tmpl.render() == '...'
def test_else(self, env):
tmpl = env.from_string('{% if false %}XXX{% else %}...{% endif %}')
assert tmpl.render() == '...'
def test_empty(self, env):
tmpl = env.from_string('[{% if true %}{% else %}{% endif %}]')
assert tmpl.render() == '[]'
def test_complete(self, env):
tmpl = env.from_string('{% if a %}A{% elif b %}B{% elif c == d %}'
'C{% else %}D{% endif %}')
assert tmpl.render(a=0, b=False, c=42, d=42.0) == 'C'
def test_no_scope(self, env):
tmpl = env.from_string(
'{% if a %}{% set foo = 1 %}{% endif %}{{ foo }}')
assert tmpl.render(a=True) == '1'
tmpl = env.from_string(
'{% if true %}{% set foo = 1 %}{% endif %}{{ foo }}')
assert tmpl.render() == '1'
@pytest.mark.core_tags
@pytest.mark.macros
class TestMacros():
def test_simple(self, env_trim):
tmpl = env_trim.from_string('''\
{% macro say_hello(name) %}Hello {{ name }}!{% endmacro %}
{{ say_hello('Peter') }}''')
assert tmpl.render() == 'Hello Peter!'
def test_scoping(self, env_trim):
tmpl = env_trim.from_string('''\
{% macro level1(data1) %}
{% macro level2(data2) %}{{ data1 }}|{{ data2 }}{% endmacro %}
{{ level2('bar') }}{% endmacro %}
{{ level1('foo') }}''')
assert tmpl.render() == 'foo|bar'
def test_arguments(self, env_trim):
tmpl = env_trim.from_string('''\
{% macro m(a, b, c='c', d='d') %}{{ a }}|{{ b }}|{{ c }}|{{ d }}{% endmacro %}
{{ m() }}|{{ m('a') }}|{{ m('a', 'b') }}|{{ m(1, 2, 3) }}''')
assert tmpl.render() == '||c|d|a||c|d|a|b|c|d|1|2|3|d'
def test_arguments_defaults_nonsense(self, env_trim):
pytest.raises(TemplateSyntaxError, env_trim.from_string, '''\
{% macro m(a, b=1, c) %}a={{ a }}, b={{ b }}, c={{ c }}{% endmacro %}''')
def test_caller_defaults_nonsense(self, env_trim):
pytest.raises(TemplateSyntaxError, env_trim.from_string, '''\
{% macro a() %}{{ caller() }}{% endmacro %}
{% call(x, y=1, z) a() %}{% endcall %}''')
def test_varargs(self, env_trim):
tmpl = env_trim.from_string('''\
{% macro test() %}{{ varargs|join('|') }}{% endmacro %}\
{{ test(1, 2, 3) }}''')
assert tmpl.render() == '1|2|3'
def test_simple_call(self, env_trim):
tmpl = env_trim.from_string('''\
{% macro test() %}[[{{ caller() }}]]{% endmacro %}\
{% call test() %}data{% endcall %}''')
assert tmpl.render() == '[[data]]'
def test_complex_call(self, env_trim):
tmpl = env_trim.from_string('''\
{% macro test() %}[[{{ caller('data') }}]]{% endmacro %}\
{% call(data) test() %}{{ data }}{% endcall %}''')
assert tmpl.render() == '[[data]]'
def test_caller_undefined(self, env_trim):
tmpl = env_trim.from_string('''\
{% set caller = 42 %}\
{% macro test() %}{{ caller is not defined }}{% endmacro %}\
{{ test() }}''')
assert tmpl.render() == 'True'
def test_include(self, env_trim):
env_trim = Environment(
loader=DictLoader({
'include': '{% macro test(foo) %}[{{ foo }}]{% endmacro %}'
})
)
tmpl = env_trim.from_string(
'{% from "include" import test %}{{ test("foo") }}')
assert tmpl.render() == '[foo]'
def test_macro_api(self, env_trim):
tmpl = env_trim.from_string(
'{% macro foo(a, b) %}{% endmacro %}'
'{% macro bar() %}{{ varargs }}{{ kwargs }}{% endmacro %}'
'{% macro baz() %}{{ caller() }}{% endmacro %}')
assert tmpl.module.foo.arguments == ('a', 'b')
assert tmpl.module.foo.defaults == ()
assert tmpl.module.foo.name == 'foo'
assert not tmpl.module.foo.caller
assert not tmpl.module.foo.catch_kwargs
assert not tmpl.module.foo.catch_varargs
assert tmpl.module.bar.arguments == ()
assert tmpl.module.bar.defaults == ()
assert not tmpl.module.bar.caller
assert tmpl.module.bar.catch_kwargs
assert tmpl.module.bar.catch_varargs
assert tmpl.module.baz.caller
def test_callself(self, env_trim):
tmpl = env_trim.from_string('{% macro foo(x) %}{{ x }}{% if x > 1 %}|'
'{{ foo(x - 1) }}{% endif %}{% endmacro %}'
'{{ foo(5) }}')
assert tmpl.render() == '5|4|3|2|1'
@pytest.mark.core_tags
@pytest.mark.set
class TestSet():
def test_normal(self, env_trim):
tmpl = env_trim.from_string('{% set foo = 1 %}{{ foo }}')
assert tmpl.render() == '1'
assert tmpl.module.foo == 1
def test_block(self, env_trim):
tmpl = env_trim.from_string('{% set foo %}42{% endset %}{{ foo }}')
assert tmpl.render() == '42'
assert tmpl.module.foo == u'42'
| bsd-3-clause |
fengbaicanhe/intellij-community | python/helpers/profiler/thrift/transport/TTwisted.py | 97 | 10563 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import struct
from cStringIO import StringIO
from zope.interface import implements, Interface, Attribute
from twisted.internet.protocol import ServerFactory, ClientFactory, \
connectionDone
from twisted.internet import defer
from twisted.internet.threads import deferToThread
from twisted.protocols import basic
from twisted.web import server, resource, http
from thrift.transport import TTransport
class TMessageSenderTransport(TTransport.TTransportBase):
def __init__(self):
self.__wbuf = StringIO()
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
msg = self.__wbuf.getvalue()
self.__wbuf = StringIO()
return self.sendMessage(msg)
def sendMessage(self, message):
raise NotImplementedError
class TCallbackTransport(TMessageSenderTransport):
def __init__(self, func):
TMessageSenderTransport.__init__(self)
self.func = func
def sendMessage(self, message):
return self.func(message)
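# --- Hedged illustration (not part of the original module) ---
# TCallbackTransport simply buffers write() calls and hands the complete
# message to the supplied callable on flush():
#
#   >>> sent = []
#   >>> t = TCallbackTransport(sent.append)
#   >>> t.write('hello ')
#   >>> t.write('world')
#   >>> t.flush()
#   >>> sent
#   ['hello world']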
class ThriftClientProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self._client_class = client_class
self._iprot_factory = iprot_factory
if oprot_factory is None:
self._oprot_factory = iprot_factory
else:
self._oprot_factory = oprot_factory
self.recv_map = {}
self.started = defer.Deferred()
def dispatch(self, msg):
self.sendString(msg)
def connectionMade(self):
tmo = TCallbackTransport(self.dispatch)
self.client = self._client_class(tmo, self._oprot_factory)
self.started.callback(self.client)
def connectionLost(self, reason=connectionDone):
for k, v in self.client._reqs.iteritems():
tex = TTransport.TTransportException(
type=TTransport.TTransportException.END_OF_FILE,
message='Connection closed')
v.errback(tex)
def stringReceived(self, frame):
tr = TTransport.TMemoryBuffer(frame)
iprot = self._iprot_factory.getProtocol(tr)
(fname, mtype, rseqid) = iprot.readMessageBegin()
try:
method = self.recv_map[fname]
except KeyError:
method = getattr(self.client, 'recv_' + fname)
self.recv_map[fname] = method
method(iprot, mtype, rseqid)
class ThriftSASLClientProtocol(ThriftClientProtocol):
START = 1
OK = 2
BAD = 3
ERROR = 4
COMPLETE = 5
MAX_LENGTH = 2 ** 31 - 1
def __init__(self, client_class, iprot_factory, oprot_factory=None,
host=None, service=None, mechanism='GSSAPI', **sasl_kwargs):
"""
host: the name of the server, from a SASL perspective
service: the name of the server's service, from a SASL perspective
mechanism: the name of the preferred mechanism to use
All other kwargs will be passed to the puresasl.client.SASLClient
constructor.
"""
from puresasl.client import SASLClient
        self.SASLClient = SASLClient
ThriftClientProtocol.__init__(self, client_class, iprot_factory, oprot_factory)
self._sasl_negotiation_deferred = None
self._sasl_negotiation_status = None
self.client = None
if host is not None:
self.createSASLClient(host, service, mechanism, **sasl_kwargs)
def createSASLClient(self, host, service, mechanism, **kwargs):
self.sasl = self.SASLClient(host, service, mechanism, **kwargs)
def dispatch(self, msg):
encoded = self.sasl.wrap(msg)
len_and_encoded = ''.join((struct.pack('!i', len(encoded)), encoded))
ThriftClientProtocol.dispatch(self, len_and_encoded)
@defer.inlineCallbacks
def connectionMade(self):
self._sendSASLMessage(self.START, self.sasl.mechanism)
initial_message = yield deferToThread(self.sasl.process)
self._sendSASLMessage(self.OK, initial_message)
while True:
status, challenge = yield self._receiveSASLMessage()
if status == self.OK:
response = yield deferToThread(self.sasl.process, challenge)
self._sendSASLMessage(self.OK, response)
elif status == self.COMPLETE:
if not self.sasl.complete:
msg = "The server erroneously indicated that SASL " \
"negotiation was complete"
raise TTransport.TTransportException(msg, message=msg)
else:
break
else:
msg = "Bad SASL negotiation status: %d (%s)" % (status, challenge)
raise TTransport.TTransportException(msg, message=msg)
self._sasl_negotiation_deferred = None
ThriftClientProtocol.connectionMade(self)
def _sendSASLMessage(self, status, body):
if body is None:
body = ""
header = struct.pack(">BI", status, len(body))
self.transport.write(header + body)
def _receiveSASLMessage(self):
self._sasl_negotiation_deferred = defer.Deferred()
self._sasl_negotiation_status = None
return self._sasl_negotiation_deferred
def connectionLost(self, reason=connectionDone):
if self.client:
ThriftClientProtocol.connectionLost(self, reason)
def dataReceived(self, data):
if self._sasl_negotiation_deferred:
# we got a sasl challenge in the format (status, length, challenge)
# save the status, let IntNStringReceiver piece the challenge data together
self._sasl_negotiation_status, = struct.unpack("B", data[0])
ThriftClientProtocol.dataReceived(self, data[1:])
else:
# normal frame, let IntNStringReceiver piece it together
ThriftClientProtocol.dataReceived(self, data)
def stringReceived(self, frame):
if self._sasl_negotiation_deferred:
# the frame is just a SASL challenge
response = (self._sasl_negotiation_status, frame)
self._sasl_negotiation_deferred.callback(response)
else:
# there's a second 4 byte length prefix inside the frame
decoded_frame = self.sasl.unwrap(frame[4:])
ThriftClientProtocol.stringReceived(self, decoded_frame)
class ThriftServerProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def dispatch(self, msg):
self.sendString(msg)
def processError(self, error):
self.transport.loseConnection()
def processOk(self, _, tmo):
msg = tmo.getvalue()
if len(msg) > 0:
self.dispatch(msg)
def stringReceived(self, frame):
tmi = TTransport.TMemoryBuffer(frame)
tmo = TTransport.TMemoryBuffer()
iprot = self.factory.iprot_factory.getProtocol(tmi)
oprot = self.factory.oprot_factory.getProtocol(tmo)
d = self.factory.processor.process(iprot, oprot)
d.addCallbacks(self.processOk, self.processError,
callbackArgs=(tmo,))
class IThriftServerFactory(Interface):
processor = Attribute("Thrift processor")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class IThriftClientFactory(Interface):
client_class = Attribute("Thrift client class")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class ThriftServerFactory(ServerFactory):
implements(IThriftServerFactory)
protocol = ThriftServerProtocol
def __init__(self, processor, iprot_factory, oprot_factory=None):
self.processor = processor
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
class ThriftClientFactory(ClientFactory):
implements(IThriftClientFactory)
protocol = ThriftClientProtocol
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self.client_class = client_class
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
def buildProtocol(self, addr):
p = self.protocol(self.client_class, self.iprot_factory,
self.oprot_factory)
p.factory = self
return p
class ThriftResource(resource.Resource):
allowedMethods = ('POST',)
def __init__(self, processor, inputProtocolFactory,
outputProtocolFactory=None):
resource.Resource.__init__(self)
self.inputProtocolFactory = inputProtocolFactory
if outputProtocolFactory is None:
self.outputProtocolFactory = inputProtocolFactory
else:
self.outputProtocolFactory = outputProtocolFactory
self.processor = processor
def getChild(self, path, request):
return self
def _cbProcess(self, _, request, tmo):
msg = tmo.getvalue()
request.setResponseCode(http.OK)
request.setHeader("content-type", "application/x-thrift")
request.write(msg)
request.finish()
def render_POST(self, request):
request.content.seek(0, 0)
data = request.content.read()
tmi = TTransport.TMemoryBuffer(data)
tmo = TTransport.TMemoryBuffer()
iprot = self.inputProtocolFactory.getProtocol(tmi)
oprot = self.outputProtocolFactory.getProtocol(tmo)
d = self.processor.process(iprot, oprot)
d.addCallback(self._cbProcess, request, tmo)
return server.NOT_DONE_YET
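# --- Hedged usage sketch (not part of the original module) ---
# One plausible way to expose a Thrift service over TCP with the classes
# above; `MyService` and `handler` stand in for generated Thrift code and a
# user-supplied implementation, so this is illustrative only:
#
#   from twisted.internet import reactor
#   from thrift.protocol import TBinaryProtocol
#
#   pfactory = TBinaryProtocol.TBinaryProtocolFactory()
#   processor = MyService.Processor(handler)
#   reactor.listenTCP(9090, ThriftServerFactory(processor, pfactory))
#   reactor.run()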
| apache-2.0 |
LUTAN/tensorflow | tensorflow/contrib/slim/python/slim/nets/inception_v2_test.py | 111 | 11157 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.slim.python.slim import model_analyzer
from tensorflow.contrib.slim.python.slim.nets import inception_v2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InceptionV2Test(test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, end_points = inception_v2.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
mixed_5c, end_points = inception_v2.inception_v2_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV2/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = [
'Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
'Mixed_4e', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Conv2d_1a_7x7',
'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3'
]
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = [
'Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c'
]
for index, endpoint in enumerate(endpoints):
with ops.Graph().as_default():
inputs = random_ops.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception_v2.inception_v2_base(
inputs, final_endpoint=endpoint)
self.assertTrue(
out_tensor.op.name.startswith('InceptionV2/' + endpoint))
self.assertItemsEqual(endpoints[:index + 1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v2.inception_v2_base(
inputs, final_endpoint='Mixed_5c')
endpoints_shapes = {
'Mixed_3b': [batch_size, 28, 28, 256],
'Mixed_3c': [batch_size, 28, 28, 320],
'Mixed_4a': [batch_size, 14, 14, 576],
'Mixed_4b': [batch_size, 14, 14, 576],
'Mixed_4c': [batch_size, 14, 14, 576],
'Mixed_4d': [batch_size, 14, 14, 576],
'Mixed_4e': [batch_size, 14, 14, 576],
'Mixed_5a': [batch_size, 7, 7, 1024],
'Mixed_5b': [batch_size, 7, 7, 1024],
'Mixed_5c': [batch_size, 7, 7, 1024],
'Conv2d_1a_7x7': [batch_size, 112, 112, 64],
'MaxPool_2a_3x3': [batch_size, 56, 56, 64],
'Conv2d_2b_1x1': [batch_size, 56, 56, 64],
'Conv2d_2c_3x3': [batch_size, 56, 56, 192],
'MaxPool_3a_3x3': [batch_size, 28, 28, 192]
}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = random_ops.random_uniform((batch_size, height, width, 3))
with arg_scope(inception_v2.inception_v2_arg_scope()):
inception_v2.inception_v2_base(inputs)
total_params, _ = model_analyzer.analyze_vars(
variables_lib.get_model_variables())
self.assertAlmostEqual(10173112, total_params)
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v2.inception_v2(inputs, num_classes)
endpoint_keys = [
key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')
]
_, end_points_with_multiplier = inception_v2.inception_v2(
inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
_, end_points = inception_v2.inception_v2(inputs, num_classes)
endpoint_keys = [
key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')
]
_, end_points_with_multiplier = inception_v2.inception_v2(
inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception_v2.inception_v2(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = inception_v2.inception_v2(inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
num_classes = 1000
inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, end_points = inception_v2.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
ops.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = array_ops.placeholder(
dtypes.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception_v2.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
variables.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testUnknownBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = array_ops.placeholder(dtypes.float32, (None, height, width, 3))
logits, _ = inception_v2.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
images = random_ops.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
logits, _ = inception_v2.inception_v2(
eval_inputs, num_classes, is_training=False)
predictions = math_ops.argmax(logits, 1)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = random_ops.random_uniform(
(train_batch_size, height, width, 3))
inception_v2.inception_v2(train_inputs, num_classes)
eval_inputs = random_ops.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception_v2.inception_v2(eval_inputs, num_classes, reuse=True)
predictions = math_ops.argmax(logits, 1)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = random_ops.random_uniform([1, 224, 224, 3])
logits, _ = inception_v2.inception_v2(
images, num_classes=num_classes, spatial_squeeze=False)
with self.test_session() as sess:
variables.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
test.main()
| apache-2.0 |
japeto/Vigtech-Services | env/lib/python2.7/site-packages/pip/_vendor/requests/models.py | 410 | 29176 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
json_dumps = json.dumps
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
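    # --- Hedged illustration (not part of the original module) ---
    # _encode_params above accepts dicts, lists of 2-tuples, strings and
    # file-like objects, e.g. (made-up values):
    #   RequestEncodingMixin._encode_params([('q', 'python'), ('page', 2)])
    #   -> 'q=python&page=2'
    # while strings and objects with a .read() method are returned unchanged.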
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
else:
fdata = fp.read()
rf = RequestField(name=k, data=fdata,
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None,
json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/kennethreitz/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(
to_native_string(url, 'utf8')))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
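        # Illustrative note (not from the original source): the IDNA step above
        # turns an internationalized hostname into its ASCII punycode form,
        # e.g. u'm\xfcnchen.de'.encode('idna') gives 'xn--mnchen-3ya.de', so
        # the reconstructed netloc is always plain ASCII.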
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
if json is not None:
content_type = 'application/json'
body = json_dumps(json)
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data and json is None:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
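    # Hedged note (not part of the original file): iterable data that is not a
    # string, list, tuple or dict is treated as a streamed body above; when its
    # length cannot be determined, the request goes out with
    # 'Transfer-Encoding: chunked' instead of a Content-Length header, e.g. a
    # generator such as (chunk for chunk in [b'part1', b'part2']).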
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
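    # Illustrative note (not from the original file): credentials embedded in
    # the URL are picked up by prepare_auth above, e.g. preparing
    # 'http://user:secret@example.com/' (a hypothetical URL) produces the same
    # Authorization header as passing auth=HTTPBasicAuth('user', 'secret').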
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
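    # Illustrative note (not from the original file): because the 'Cookie'
    # header is not regenerated once set (see the docstring above), re-preparing
    # cookies on a PreparedRequest `p` requires removing the header first, e.g.
    #   del p.headers['Cookie']                      # hypothetical object `p`
    #   p.prepare_cookies({'session': 'abc'})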
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanant versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
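    # Illustrative usage sketch (assumption, not from the original file): stream
    # a large download to disk without holding it all in memory, e.g.
    #   r = requests.get(url, stream=True)           # hypothetical call
    #   with open('out.bin', 'wb') as fh:
    #       for chunk in r.iter_content(chunk_size=8192):
    #           fh.write(chunk)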
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
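    # Illustrative usage sketch (assumption, not from the original file):
    # consume a line-oriented streaming endpoint, e.g.
    #   r = requests.get(url, stream=True)           # hypothetical call
    #   for line in r.iter_lines():
    #       if line:                                 # filter out keep-alive newlines
    #           handle(line)                         # hypothetical handler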
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
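    # Illustrative note (assumption, not from the original file): when the
    # declared charset is known to be wrong, set the encoding before reading,
    # e.g.
    #   r.encoding = 'utf-8'                         # hypothetical override
    #   body = r.text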
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return json.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return json.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
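    # Illustrative note (assumption, not from the original file): for a response
    # header such as
    #   Link: <https://api.example.com/items?page=2>; rel="next"
    # the property above returns something like
    #   {'next': {'url': 'https://api.example.com/items?page=2', 'rel': 'next'}}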
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
| lgpl-3.0 |
hnakamur/django-admin2 | djadmin2/tests/test_core.py | 2 | 2454 | from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.contrib.auth.models import Group, User
from django.contrib.sites.models import Site
import djadmin2
from ..types import ModelAdmin2
from ..core import Admin2
class SmallThing(models.Model):
pass
APP_LABEL, APP_VERBOSE_NAME = 'app_one_label', 'App One Verbose Name'
class Admin2Test(TestCase):
def setUp(self):
self.admin2 = Admin2()
def test_register(self):
self.admin2.register(SmallThing)
self.assertTrue(isinstance(self.admin2.registry[SmallThing], ModelAdmin2))
def test_register_error(self):
self.admin2.register(SmallThing)
self.assertRaises(ImproperlyConfigured, self.admin2.register, SmallThing)
def test_deregister(self):
self.admin2.register(SmallThing)
self.admin2.deregister(SmallThing)
self.assertTrue(SmallThing not in self.admin2.registry)
def test_deregister_error(self):
self.assertRaises(ImproperlyConfigured, self.admin2.deregister, SmallThing)
def test_register_app_verbose_name(self):
self.admin2.register_app_verbose_name(APP_LABEL, APP_VERBOSE_NAME)
self.assertEquals(
self.admin2.app_verbose_names[APP_LABEL],
APP_VERBOSE_NAME
)
def test_register_app_verbose_name_error(self):
self.admin2.register_app_verbose_name(APP_LABEL, APP_VERBOSE_NAME)
self.assertRaises(
ImproperlyConfigured,
self.admin2.register_app_verbose_name,
APP_LABEL,
APP_VERBOSE_NAME
)
def test_deregister_app_verbose_name(self):
self.admin2.register_app_verbose_name(APP_LABEL, APP_VERBOSE_NAME)
self.admin2.deregister_app_verbose_name(APP_LABEL)
self.assertTrue(APP_LABEL not in self.admin2.app_verbose_names)
def test_deregister_app_verbose_name_error(self):
self.assertRaises(
ImproperlyConfigured,
self.admin2.deregister_app_verbose_name,
APP_LABEL
)
def test_get_urls(self):
self.admin2.register(SmallThing)
self.assertEquals(8, len(self.admin2.get_urls()))
def test_default_entries(self):
expected_default_models = (User, Group, Site)
for model in expected_default_models:
self.assertTrue(isinstance(djadmin2.default.registry[model], ModelAdmin2))
| bsd-3-clause |
fvpolpeta/devide | modules/writers/pngWRT.py | 7 | 3025 | # $Id$
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
import wx # needs this for wx.OPEN, we need to make this constant available
# elsewhere
class pngWRT(ScriptedConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
# call parent constructor
ModuleBase.__init__(self, module_manager)
# ctor for this specific mixin
# FilenameViewModuleMixin.__init__(self)
self._shiftScale = vtk.vtkImageShiftScale()
self._shiftScale.SetOutputScalarTypeToUnsignedShort()
module_utils.setup_vtk_object_progress(
self, self._shiftScale,
'Converting input to unsigned short.')
self._writer = vtk.vtkPNGWriter()
self._writer.SetFileDimensionality(3)
self._writer.SetInput(self._shiftScale.GetOutput())
module_utils.setup_vtk_object_progress(
self, self._writer, 'Writing PNG file(s)')
self._config.filePattern = '%d.png'
configList = [
('File pattern:', 'filePattern', 'base:str', 'filebrowser',
'Filenames will be built with this. See module help.',
{'fileMode' : wx.OPEN,
'fileMask' :
'PNG files (*.png)|*.png|All files (*.*)|*.*'})]
ScriptedConfigModuleMixin.__init__(
self, configList,
{'Module (self)' : self,
'vtkPNGWriter' : self._writer})
self.sync_module_logic_with_config()
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
ScriptedConfigModuleMixin.close(self)
ModuleBase.close(self)
# get rid of our reference
del self._writer
def get_input_descriptions(self):
return ('vtkImageData',)
def set_input(self, idx, input_stream):
self._shiftScale.SetInput(input_stream)
def get_output_descriptions(self):
return ()
def get_output(self, idx):
raise Exception
def logic_to_config(self):
self._config.filePattern = self._writer.GetFilePattern()
def config_to_logic(self):
self._writer.SetFilePattern(self._config.filePattern)
def execute_module(self):
if len(self._writer.GetFilePattern()) and self._shiftScale.GetInput():
inp = self._shiftScale.GetInput()
inp.Update()
minv,maxv = inp.GetScalarRange()
self._shiftScale.SetShift(-minv)
self._shiftScale.SetScale(65535 / (maxv - minv))
self._shiftScale.Update()
self._writer.Write()
self._module_manager.setProgress(
100.0, "vtkPNGWriter: Writing PNG file(s). [DONE]")
| bsd-3-clause |
askulkarni2/ansible | test/units/playbook/test_taggable.py | 293 | 4452 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.playbook.taggable import Taggable
from units.mock.loader import DictDataLoader
class TaggableTestObj(Taggable):
def __init__(self):
self._loader = DictDataLoader({})
self.tags = []
class TestTaggable(unittest.TestCase):
def assert_evaluate_equal(self, test_value, tags, only_tags, skip_tags):
taggable_obj = TaggableTestObj()
taggable_obj.tags = tags
evaluate = taggable_obj.evaluate_tags(only_tags, skip_tags, {})
self.assertEqual(test_value, evaluate)
def test_evaluate_tags_tag_in_only_tags(self):
self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag1'], [])
def test_evaluate_tags_tag_in_skip_tags(self):
self.assert_evaluate_equal(False, ['tag1', 'tag2'], [], ['tag1'])
def test_evaluate_tags_special_always_in_object_tags(self):
self.assert_evaluate_equal(True, ['tag', 'always'], ['random'], [])
def test_evaluate_tags_tag_in_skip_tags_special_always_in_object_tags(self):
self.assert_evaluate_equal(False, ['tag', 'always'], ['random'], ['tag'])
def test_evaluate_tags_special_always_in_skip_tags_and_always_in_tags(self):
self.assert_evaluate_equal(False, ['tag', 'always'], [], ['always'])
def test_evaluate_tags_special_tagged_in_only_tags_and_object_tagged(self):
self.assert_evaluate_equal(True, ['tag'], ['tagged'], [])
def test_evaluate_tags_special_tagged_in_only_tags_and_object_untagged(self):
self.assert_evaluate_equal(False, [], ['tagged'], [])
def test_evaluate_tags_special_tagged_in_skip_tags_and_object_tagged(self):
self.assert_evaluate_equal(False, ['tag'], [], ['tagged'])
def test_evaluate_tags_special_tagged_in_skip_tags_and_object_untagged(self):
self.assert_evaluate_equal(True, [], [], ['tagged'])
def test_evaluate_tags_special_untagged_in_only_tags_and_object_tagged(self):
self.assert_evaluate_equal(False, ['tag'], ['untagged'], [])
def test_evaluate_tags_special_untagged_in_only_tags_and_object_untagged(self):
self.assert_evaluate_equal(True, [], ['untagged'], [])
def test_evaluate_tags_special_untagged_in_skip_tags_and_object_tagged(self):
self.assert_evaluate_equal(True, ['tag'], [], ['untagged'])
def test_evaluate_tags_special_untagged_in_skip_tags_and_object_untagged(self):
self.assert_evaluate_equal(False, [], [], ['untagged'])
def test_evaluate_tags_special_all_in_only_tags(self):
self.assert_evaluate_equal(True, ['tag'], ['all'], ['untagged'])
def test_evaluate_tags_special_all_in_skip_tags(self):
self.assert_evaluate_equal(False, ['tag'], ['tag'], ['all'])
def test_evaluate_tags_special_all_in_only_tags_and_special_all_in_skip_tags(self):
self.assert_evaluate_equal(False, ['tag'], ['all'], ['all'])
def test_evaluate_tags_special_all_in_skip_tags_and_always_in_object_tags(self):
self.assert_evaluate_equal(True, ['tag', 'always'], [], ['all'])
def test_evaluate_tags_special_all_in_skip_tags_and_special_always_in_skip_tags_and_always_in_object_tags(self):
self.assert_evaluate_equal(False, ['tag', 'always'], [], ['all', 'always'])
def test_evaluate_tags_accepts_lists(self):
self.assert_evaluate_equal(True, ['tag1', 'tag2'], ['tag2'], [])
def test_evaluate_tags_accepts_strings(self):
self.assert_evaluate_equal(True, 'tag1,tag2', ['tag2'], [])
def test_evaluate_tags_with_repeated_tags(self):
self.assert_evaluate_equal(False, ['tag', 'tag'], [], ['tag'])
| gpl-3.0 |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/OpenGL/GL/NV/occlusion_query.py | 4 | 3576 | '''OpenGL extension NV.occlusion_query
This module customises the behaviour of the
OpenGL.raw.GL.NV.occlusion_query to provide a more
Python-friendly API
Overview (from the spec)
The HP_occlusion_test extension defines a mechanism whereby an
application can query the visibility of an object, where "visible"
means that at least one pixel passes the depth and stencil tests.
The HP extension has two major shortcomings.
- It returns the result as a simple GL_TRUE/GL_FALSE result, when in
fact it is often useful to know exactly how many pixels passed.
- It provides only a simple "stop-and-wait" model for using multiple
queries. The application begins an occlusion test and ends it;
then, at some later point, it asks for the result, at which point
the driver must stop and wait until the result from the previous
test is back before the application can even begin the next one.
This is a very simple model, but its performance is mediocre when
an application wishes to perform many queries, and it eliminates
most of the opportunites for parallelism between the CPU and GPU.
This extension solves both of those problems. It returns as its
result the number of pixels that pass, and it provides an interface
conceptually similar to that of NV_fence that allows applications to
issue many occlusion queries before asking for the result of any one.
As a result, they can overlap the time it takes for the occlusion
query results to be returned with other, more useful work, such as
rendering other parts of the scene or performing other computations
on the CPU.
There are many situations where a pixel count, rather than a boolean
result, is useful.
- If the visibility test is an object bounding box being used to
decide whether to skip the object, sometimes it can be acceptable,
and beneficial to performance, to skip an object if less than some
threshold number of pixels could be visible.
- Knowing the number of pixels visible in the bounding box may also
help decide what level of detail a model should be drawn with. If
only a few pixels are visible, a low-detail model may be
acceptable. In general, this allows level-of-detail mechanisms to
be slightly less ad hoc.
- "Depth peeling" techniques, such as order-independent transparency,
would typically like to know when to stop rendering more layers; it
is difficult to come up with a way to determine a priori how many
layers to use. A boolean count allows applications to stop when
more layers will not affect the image at all, but this will likely
be unacceptable for performance, with minimal gains to image
quality. Instead, it makes more sense to stop rendering when the
number of pixels goes below a threshold; this should provide better
results than any of these other algorithms.
- Occlusion queries can be used as a replacement for glReadPixels of
the depth buffer to determine whether, say, a light source is
visible for the purposes of a lens flare effect or a halo to
simulate glare. Pixel counts allow you to compute the percentage
of the light source that is visible, and the brightness of these
effects can be modulated accordingly.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/occlusion_query.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.occlusion_query import *
### END AUTOGENERATED SECTION | mit |
Universal-Model-Converter/UMC3.0a | dev tests and files/tests/font_test.py | 1 | 4937 | """
font.py -- Displays FPS in OpenGL using TrueType fonts.
Copyright (c) 2002. Nelson Rush. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from OpenGL.GL import *
from OpenGL.GLU import *
from pygame.locals import *
from pygame.display import *
from pygame.event import *
from pygame.key import *
from pygame.mouse import *
from pygame.font import *
from pygame.image import *
from pygame.time import *
from pygame import *
import sys
class Engine:
def __init__(self,w,h):
init()
display.init()
display.set_mode([w,h], DOUBLEBUF|OPENGL|HWPALETTE|HWSURFACE)
self.initgl()
self.resize(w,h)
mouse.set_visible(0)
self.w = w
self.h = h
font.init()
if not font.get_init():
print 'Could not render font.'
sys.exit(0)
self.font = font.Font('font.ttf',18)
self.char = []
for c in range(256):
self.char.append(self.CreateCharacter(chr(c)))
self.char = tuple(self.char)
self.lw = self.char[ord('0')][1]
self.lh = self.char[ord('0')][2]
self.angle = 0.0
self.frames = self.t = self.t_start = self.fps = 0
def initgl(self):
glClearColor(0.0, 0.0, 0.0, 0.0)
glClearDepth(1.0)
glEnable(GL_DEPTH_TEST)
glDepthFunc(GL_LEQUAL)
glShadeModel(GL_SMOOTH)
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
def resize(self,w,h):
if h == 0: h = 1
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, float(w) / float(h), 0.5, 150.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def CreateCharacter(self, s):
try:
letter_render = self.font.render(s, 1, (255,255,255), (0,0,0))
letter = image.tostring(letter_render, 'RGBA', 1)
letter_w, letter_h = letter_render.get_size()
except:
letter = None
letter_w = 0
letter_h = 0
return (letter, letter_w, letter_h)
def textView(self):
glViewport(0,0,self.w,self.h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0.0, self.w - 1.0, 0.0, self.h - 1.0, -1.0, 1.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def Print(self,s,x,y):
s = str(s)
i = 0
lx = 0
length = len(s)
self.textView()
glPushMatrix()
while i < length:
glRasterPos2i(x + lx, y)
ch = self.char[ ord( s[i] ) ]
glDrawPixels(ch[1], ch[2], GL_RGBA, GL_UNSIGNED_BYTE, ch[0])
lx += ch[1]
i += 1
glPopMatrix()
def DrawFPS(self, x, y):
self.t = time.get_ticks()
self.frames += 1
if self.t - self.t_start > 1000:
self.fps = self.frames * 1000 / (self.t - self.t_start)
self.t_start = self.t
self.frames = 0
self.Print(self.fps, x, y)
def draw(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
self.resize(self.w,self.h)
glTranslatef(0.0, 0.0, -6.0)
glPushMatrix()
glRotatef(self.angle, 0.0, 1.0, 0.0)
glBegin(GL_TRIANGLES)
glColor3f(0.0, 0.0, 1.0)
glVertex3f(0.0, 1.0, 0.0)
glVertex3f(-1.0, -1.0, 0.0)
glVertex3f(1.0, -1.0, 0.0)
glEnd()
glPopMatrix()
self.DrawFPS(self.w - (self.lw * 3), self.h - self.lh)
self.angle += 1.0
def run(self):
while 1:
e = event.poll()
k = key.get_pressed()
if e.type == KEYDOWN and e.key == K_ESCAPE: break
self.draw()
display.flip()
if __name__ == '__main__':
engine = Engine(640,480)
engine.run()
font.quit()
display.quit() | mit |
jack51706/viper | modules/pehash/pehasher.py | 3 | 4059 | #!/usr/bin/python
from __future__ import division
import sys
import bz2
import string
import hashlib
try:
import pefile
HAVE_PEFILE = True
except ImportError:
HAVE_PEFILE = False
try:
import bitstring
HAVE_BITSTRING = True
except ImportError:
HAVE_BITSTRING = False
from viper.common.out import *
def calculate_pehash(file_path=None):
if not HAVE_PEFILE:
        # calculate_pehash is a module-level function, so report the error via
        # the printer imported from viper.common.out rather than self.log
        print_error("Missing dependency, install pefile (`pip install pefile`)")
return ''
if not HAVE_BITSTRING:
        print_error("Missing dependency, install bitstring (`pip install bitstring`)")
return ''
if not file_path:
return ''
try:
exe = pefile.PE(file_path)
#image characteristics
img_chars = bitstring.BitArray(hex(exe.FILE_HEADER.Characteristics))
#pad to 16 bits
img_chars = bitstring.BitArray(bytes=img_chars.tobytes())
if img_chars.len == 16:
img_chars_xor = img_chars[0:7] ^ img_chars[8:15]
else:
img_chars_xor = img_chars[0:7]
#start to build pehash
pehash_bin = bitstring.BitArray(img_chars_xor)
#subsystem -
sub_chars = bitstring.BitArray(hex(exe.FILE_HEADER.Machine))
#pad to 16 bits
sub_chars = bitstring.BitArray(bytes=sub_chars.tobytes())
sub_chars_xor = sub_chars[0:7] ^ sub_chars[8:15]
pehash_bin.append(sub_chars_xor)
#Stack Commit Size
stk_size = bitstring.BitArray(hex(exe.OPTIONAL_HEADER.SizeOfStackCommit))
stk_size_bits = string.zfill(stk_size.bin, 32)
#now xor the bits
stk_size = bitstring.BitArray(bin=stk_size_bits)
stk_size_xor = stk_size[8:15] ^ stk_size[16:23] ^ stk_size[24:31]
#pad to 8 bits
stk_size_xor = bitstring.BitArray(bytes=stk_size_xor.tobytes())
pehash_bin.append(stk_size_xor)
#Heap Commit Size
hp_size = bitstring.BitArray(hex(exe.OPTIONAL_HEADER.SizeOfHeapCommit))
hp_size_bits = string.zfill(hp_size.bin, 32)
#now xor the bits
hp_size = bitstring.BitArray(bin=hp_size_bits)
hp_size_xor = hp_size[8:15] ^ hp_size[16:23] ^ hp_size[24:31]
#pad to 8 bits
hp_size_xor = bitstring.BitArray(bytes=hp_size_xor.tobytes())
pehash_bin.append(hp_size_xor)
#Section chars
for section in exe.sections:
            #virtual address
sect_va = bitstring.BitArray(hex(section.VirtualAddress))
sect_va = bitstring.BitArray(bytes=sect_va.tobytes())
pehash_bin.append(sect_va)
#rawsize
sect_rs = bitstring.BitArray(hex(section.SizeOfRawData))
sect_rs = bitstring.BitArray(bytes=sect_rs.tobytes())
sect_rs_bits = string.zfill(sect_rs.bin, 32)
sect_rs = bitstring.BitArray(bin=sect_rs_bits)
sect_rs = bitstring.BitArray(bytes=sect_rs.tobytes())
sect_rs_bits = sect_rs[8:31]
pehash_bin.append(sect_rs_bits)
#section chars
sect_chars = bitstring.BitArray(hex(section.Characteristics))
sect_chars = bitstring.BitArray(bytes=sect_chars.tobytes())
sect_chars_xor = sect_chars[16:23] ^ sect_chars[24:31]
pehash_bin.append(sect_chars_xor)
            #entropy calculation
address = section.VirtualAddress
size = section.SizeOfRawData
raw = exe.write()[address+size:]
if size == 0:
kolmog = bitstring.BitArray(float=1, length=32)
pehash_bin.append(kolmog[0:7])
continue
bz2_raw = bz2.compress(raw)
bz2_size = len(bz2_raw)
#k = round(bz2_size / size, 5)
k = bz2_size / size
kolmog = bitstring.BitArray(float=k, length=32)
pehash_bin.append(kolmog[0:7])
m = hashlib.sha1()
m.update(pehash_bin.tobytes())
return str(m.hexdigest())
except:
return ''
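def _compression_ratio(data):
    """Hedged helper, not part of the original module: illustrates the bz2
    compression-ratio estimate computed for each PE section above. Higher
    ratios mean less compressible (more random-looking) section data."""
    if not data:
        return 1.0
    # true division is in effect via the __future__ import at the top
    return len(bz2.compress(data)) / len(data)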
| bsd-3-clause |
Perferom/android_external_chromium_org | tools/checkperms/checkperms.py | 27 | 15170 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure files have the right permissions.
Some developers have broken SCM configurations that flip the svn:executable
permission on for no good reason. Unix developers who run ls --color will then
see .cc files in green and get confused.
- For file extensions that must be executable, add it to EXECUTABLE_EXTENSIONS.
- For file extensions that must not be executable, add it to
  NON_EXECUTABLE_EXTENSIONS.
- To ignore all the files inside a directory, add it to IGNORED_PATHS.
- For file base names with an ambiguous state that should not be checked for a
  shebang, add them to IGNORED_FILENAMES.
Any file not matching the above will be opened and looked if it has a shebang
or an ELF header. If this does not match the executable bit on the file, the
file will be flagged.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and all
file paths should be only lowercase.
"""
import logging
import optparse
import os
import stat
import string
import subprocess
import sys
#### USER EDITABLE SECTION STARTS HERE ####
# Files with these extensions must have executable bit set.
#
# Case-sensitive.
EXECUTABLE_EXTENSIONS = (
'bat',
'dll',
'dylib',
'exe',
)
# These files must have executable bit set.
#
# Case-insensitive, lower-case only.
EXECUTABLE_PATHS = (
'chrome/test/data/extensions/uitest/plugins/plugin.plugin/contents/'
'macos/testnetscapeplugin',
'chrome/test/data/extensions/uitest/plugins_private/plugin.plugin/contents/'
'macos/testnetscapeplugin',
)
# These files must not have the executable bit set. This is mainly a performance
# optimization as these files are not checked for shebang. The list was
# partially generated from:
# git ls-files | grep "\\." | sed 's/.*\.//' | sort | uniq -c | sort -b -g
#
# Case-sensitive.
NON_EXECUTABLE_EXTENSIONS = (
'1',
'3ds',
'S',
'am',
'applescript',
'asm',
'c',
'cc',
'cfg',
'chromium',
'cpp',
'crx',
'cs',
'css',
'cur',
'def',
'der',
'expected',
'gif',
'grd',
'gyp',
'gypi',
'h',
'hh',
'htm',
'html',
'hyph',
'ico',
'idl',
'java',
'jpg',
'js',
'json',
'm',
'm4',
'mm',
'mms',
'mock-http-headers',
'nexe',
'nmf',
'onc',
'pat',
'patch',
'pdf',
'pem',
'plist',
'png',
'proto',
'rc',
'rfx',
'rgs',
'rules',
'spec',
'sql',
'srpc',
'svg',
'tcl',
'test',
'tga',
'txt',
'vcproj',
'vsprops',
'webm',
'word',
'xib',
'xml',
'xtb',
'zip',
)
# These files must not have executable bit set.
#
# Case-insensitive, lower-case only.
NON_EXECUTABLE_PATHS = (
'build/android/tests/symbolize/liba.so',
'build/android/tests/symbolize/libb.so',
'chrome/installer/mac/sign_app.sh.in',
'chrome/installer/mac/sign_versioned_dir.sh.in',
'chrome/test/data/components/ihfokbkgjpifnbbojhneepfflplebdkc/'
'ihfokbkgjpifnbbojhneepfflplebdkc_1/a_changing_binary_file',
'chrome/test/data/components/ihfokbkgjpifnbbojhneepfflplebdkc/'
'ihfokbkgjpifnbbojhneepfflplebdkc_2/a_changing_binary_file',
'chrome/test/data/extensions/uitest/plugins/plugin32.so',
'chrome/test/data/extensions/uitest/plugins/plugin64.so',
'chrome/test/data/extensions/uitest/plugins_private/plugin32.so',
'chrome/test/data/extensions/uitest/plugins_private/plugin64.so',
'courgette/testdata/elf-32-1',
'courgette/testdata/elf-32-2',
'courgette/testdata/elf-64',
)
# File names that are always whitelisted. (These are mostly autoconf spew.)
#
# Case-sensitive.
IGNORED_FILENAMES = (
'config.guess',
'config.sub',
'configure',
'depcomp',
'install-sh',
'missing',
'mkinstalldirs',
'naclsdk',
'scons',
)
# File paths starting with one of these will be ignored as well.
# Please consider fixing your file permissions, rather than adding to this list.
#
# Case-insensitive, lower-case only.
IGNORED_PATHS = (
'native_client_sdk/src/build_tools/sdk_tools/third_party/fancy_urllib/'
'__init__.py',
'out/',
# TODO(maruel): Fix these.
'third_party/android_testrunner/',
'third_party/bintrees/',
'third_party/closure_linter/',
'third_party/devscripts/licensecheck.pl.vanilla',
'third_party/hyphen/',
'third_party/jemalloc/',
'third_party/lcov-1.9/contrib/galaxy/conglomerate_functions.pl',
'third_party/lcov-1.9/contrib/galaxy/gen_makefile.sh',
'third_party/lcov/contrib/galaxy/conglomerate_functions.pl',
'third_party/lcov/contrib/galaxy/gen_makefile.sh',
'third_party/libevent/autogen.sh',
'third_party/libevent/test/test.sh',
'third_party/libxml/linux/xml2-config',
'third_party/libxml/src/ltmain.sh',
'third_party/mesa/',
'third_party/protobuf/',
'third_party/python_gflags/gflags.py',
'third_party/sqlite/',
'third_party/talloc/script/mksyms.sh',
'third_party/tcmalloc/',
'third_party/tlslite/setup.py',
)
#### USER EDITABLE SECTION ENDS HERE ####
assert set(EXECUTABLE_EXTENSIONS) & set(NON_EXECUTABLE_EXTENSIONS) == set()
assert set(EXECUTABLE_PATHS) & set(NON_EXECUTABLE_PATHS) == set()
VALID_CHARS = set(string.ascii_lowercase + string.digits + '/-_.')
for paths in (EXECUTABLE_PATHS, NON_EXECUTABLE_PATHS, IGNORED_PATHS):
assert all([set(path).issubset(VALID_CHARS) for path in paths])
def capture(cmd, cwd):
"""Returns the output of a command.
Ignores the error code or stderr.
"""
logging.debug('%s; cwd=%s' % (' '.join(cmd), cwd))
env = os.environ.copy()
env['LANGUAGE'] = 'en_US.UTF-8'
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env)
return p.communicate()[0]
def get_svn_info(dir_path):
"""Returns svn meta-data for a svn checkout."""
if not os.path.isdir(dir_path):
return {}
out = capture(['svn', 'info', '.', '--non-interactive'], dir_path)
return dict(l.split(': ', 1) for l in out.splitlines() if l)
def get_svn_url(dir_path):
return get_svn_info(dir_path).get('URL')
def get_svn_root(dir_path):
"""Returns the svn checkout root or None."""
svn_url = get_svn_url(dir_path)
if not svn_url:
return None
logging.info('svn url: %s' % svn_url)
while True:
parent = os.path.dirname(dir_path)
if parent == dir_path:
return None
svn_url = svn_url.rsplit('/', 1)[0]
if svn_url != get_svn_url(parent):
return dir_path
dir_path = parent
def get_git_root(dir_path):
"""Returns the git checkout root or None."""
root = capture(['git', 'rev-parse', '--show-toplevel'], dir_path).strip()
if root:
return root
def is_ignored(rel_path):
"""Returns True if rel_path is in our whitelist of files to ignore."""
rel_path = rel_path.lower()
return (
os.path.basename(rel_path) in IGNORED_FILENAMES or
rel_path.lower().startswith(IGNORED_PATHS))
def must_be_executable(rel_path):
"""The file name represents a file type that must have the executable bit
set.
"""
return (os.path.splitext(rel_path)[1][1:] in EXECUTABLE_EXTENSIONS or
rel_path.lower() in EXECUTABLE_PATHS)
def must_not_be_executable(rel_path):
"""The file name represents a file type that must not have the executable
bit set.
"""
return (os.path.splitext(rel_path)[1][1:] in NON_EXECUTABLE_EXTENSIONS or
rel_path.lower() in NON_EXECUTABLE_PATHS)
def has_executable_bit(full_path):
"""Returns if any executable bit is set."""
permission = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
return bool(permission & os.stat(full_path).st_mode)
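# Illustrative note (not part of the original script): the mask above is
# stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH == 0o111, so a file with mode
# 0644 reports False while one with mode 0755 reports True.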
def has_shebang_or_is_elf(full_path):
"""Returns if the file starts with #!/ or is an ELF binary.
full_path is the absolute path to the file.
"""
with open(full_path, 'rb') as f:
data = f.read(4)
return (data[:3] == '#!/', data == '\x7fELF')
def check_file(root_path, rel_path, bare_output):
"""Checks the permissions of the file whose path is root_path + rel_path and
returns an error if it is inconsistent.
It is assumed that the file is not ignored by is_ignored().
If the file name is matched with must_be_executable() or
must_not_be_executable(), only its executable bit is checked.
Otherwise, the first few bytes of the file are read to verify if it has a
shebang or ELF header and compares this with the executable bit on the file.
"""
full_path = os.path.join(root_path, rel_path)
try:
bit = has_executable_bit(full_path)
except OSError:
# It's faster to catch exception than call os.path.islink(). Chromium
# tree happens to have invalid symlinks under
# third_party/openssl/openssl/test/.
return None
if must_be_executable(rel_path):
if not bit:
if bare_output:
return full_path
return '%s: Must have executable bit set' % full_path
return
if must_not_be_executable(rel_path):
if bit:
if bare_output:
return full_path
return '%s: Must not have executable bit set' % full_path
return
# For the others, it depends on the file header.
(shebang, elf) = has_shebang_or_is_elf(full_path)
if bit != (shebang or elf):
if bare_output:
return full_path
if bit:
return '%s: Has executable bit but not shebang or ELF header' % full_path
if shebang:
return '%s: Has shebang but not executable bit' % full_path
return '%s: Has ELF header but not executable bit' % full_path
def check_files(root, files, bare_output):
errors = []
for rel_path in files:
if is_ignored(rel_path):
continue
error = check_file(root, rel_path, bare_output)
if error:
errors.append(error)
return errors
class ApiBase(object):
def __init__(self, root_dir, bare_output):
self.root_dir = root_dir
self.bare_output = bare_output
self.count = 0
self.count_read_header = 0
def check_file(self, rel_path):
logging.debug('check_file(%s)' % rel_path)
self.count += 1
if (not must_be_executable(rel_path) and
not must_not_be_executable(rel_path)):
self.count_read_header += 1
return check_file(self.root_dir, rel_path, self.bare_output)
def check_dir(self, rel_path):
return self.check(rel_path)
def check(self, start_dir):
"""Check the files in start_dir, recursively check its subdirectories."""
errors = []
items = self.list_dir(start_dir)
logging.info('check(%s) -> %d' % (start_dir, len(items)))
for item in items:
full_path = os.path.join(self.root_dir, start_dir, item)
rel_path = full_path[len(self.root_dir) + 1:]
if is_ignored(rel_path):
continue
if os.path.isdir(full_path):
# Depth first.
errors.extend(self.check_dir(rel_path))
else:
error = self.check_file(rel_path)
if error:
errors.append(error)
return errors
def list_dir(self, start_dir):
"""Lists all the files and directory inside start_dir."""
return sorted(
x for x in os.listdir(os.path.join(self.root_dir, start_dir))
if not x.startswith('.')
)
class ApiSvnQuick(ApiBase):
"""Returns all files in svn-versioned directories, independent of the fact if
they are versionned.
Uses svn info in each directory to determine which directories should be
crawled.
"""
def __init__(self, *args):
super(ApiSvnQuick, self).__init__(*args)
self.url = get_svn_url(self.root_dir)
def check_dir(self, rel_path):
url = self.url + '/' + rel_path
if get_svn_url(os.path.join(self.root_dir, rel_path)) != url:
return []
return super(ApiSvnQuick, self).check_dir(rel_path)
class ApiAllFilesAtOnceBase(ApiBase):
_files = None
def list_dir(self, start_dir):
"""Lists all the files and directory inside start_dir."""
if self._files is None:
self._files = sorted(self._get_all_files())
if not self.bare_output:
print 'Found %s files' % len(self._files)
start_dir = start_dir[len(self.root_dir) + 1:]
return [
x[len(start_dir):] for x in self._files if x.startswith(start_dir)
]
def _get_all_files(self):
"""Lists all the files and directory inside self._root_dir."""
raise NotImplementedError()
class ApiSvn(ApiAllFilesAtOnceBase):
"""Returns all the subversion controlled files.
Warning: svn ls is abnormally slow.
"""
def _get_all_files(self):
cmd = ['svn', 'ls', '--non-interactive', '--recursive']
return (
x for x in capture(cmd, self.root_dir).splitlines()
if not x.endswith(os.path.sep))
class ApiGit(ApiAllFilesAtOnceBase):
def _get_all_files(self):
return capture(['git', 'ls-files'], cwd=self.root_dir).splitlines()
def get_scm(dir_path, bare):
"""Returns a properly configured ApiBase instance."""
cwd = os.getcwd()
root = get_svn_root(dir_path or cwd)
if root:
if not bare:
print('Found subversion checkout at %s' % root)
return ApiSvnQuick(dir_path or root, bare)
root = get_git_root(dir_path or cwd)
if root:
if not bare:
print('Found git repository at %s' % root)
return ApiGit(dir_path or root, bare)
# Returns a non-scm aware checker.
if not bare:
print('Failed to determine the SCM for %s' % dir_path)
return ApiBase(dir_path or cwd, bare)
def main():
usage = """Usage: python %prog [--root <root>] [tocheck]
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python %prog
python %prog --root /path/to/source chrome"""
parser = optparse.OptionParser(usage=usage)
parser.add_option(
'--root',
help='Specifies the repository root. This defaults '
'to the checkout repository root')
parser.add_option(
'-v', '--verbose', action='count', default=0, help='Print debug logging')
parser.add_option(
'--bare',
action='store_true',
default=False,
help='Prints the bare filename triggering the checks')
parser.add_option(
'--file', action='append', dest='files',
help='Specifics a list of files to check the permissions of. Only these '
'files will be checked')
options, args = parser.parse_args()
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(len(levels) - 1, options.verbose)])
if len(args) > 1:
parser.error('Too many arguments used')
if options.root:
options.root = os.path.abspath(options.root)
if options.files:
errors = check_files(options.root, options.files, options.bare)
print '\n'.join(errors)
return bool(errors)
api = get_scm(options.root, options.bare)
if args:
start_dir = args[0]
else:
start_dir = api.root_dir
errors = api.check(start_dir)
if not options.bare:
    print 'Processed %s files, %d files were tested for shebang/ELF header' % (
api.count, api.count_read_header)
if errors:
if not options.bare:
print '\nFAILED\n'
print '\n'.join(errors)
return 1
if not options.bare:
print '\nSUCCESS\n'
return 0
if '__main__' == __name__:
sys.exit(main())
| bsd-3-clause |
gaddman/ansible | lib/ansible/modules/windows/win_command.py | 28 | 3934 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_command
short_description: Executes a command on a remote Windows node
version_added: 2.2
description:
- The C(win_command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($env:HOME) and operations
like C("<"), C(">"), C("|"), and C(";") will not work (use the M(win_shell)
module if you need these features).
- For non-Windows targets, use the M(command) module instead.
options:
free_form:
description:
- The C(win_command) module takes a free form command to run.
- There is no parameter actually named 'free form'. See the examples!
required: yes
creates:
description:
- A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
type: path
removes:
description:
- A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
type: path
chdir:
description:
- Set the specified path as the current working directory before executing a command.
type: path
stdin:
description:
- Set the stdin of the command directly to the specified value.
version_added: '2.5'
notes:
- If you want to run a command through a shell (say you are using C(<),
C(>), C(|), etc), you actually want the M(win_shell) module instead. The
C(win_command) module is much more secure as it's not affected by the user's
environment.
- C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not
exist, use this.
- For non-Windows targets, use the M(command) module instead.
author:
- Matt Davis (@nitzmahone)
'''
EXAMPLES = r'''
- name: Save the result of 'whoami' in 'whoami_out'
win_command: whoami
register: whoami_out
- name: Run command that only runs if folder exists and runs from a specific folder
win_command: wbadmin -backupTarget:C:\backup\
args:
chdir: C:\somedir\
creates: C:\backup\
- name: Run an executable and send data to the stdin for the executable
win_command: powershell.exe -
args:
stdin: Write-Host test
'''
RETURN = r'''
msg:
description: changed
returned: always
type: boolean
sample: True
start:
description: The command execution start time
returned: always
type: string
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time
returned: always
type: string
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time
returned: always
type: string
sample: '0:00:00.325771'
stdout:
description: The command standard output
returned: always
type: string
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error
returned: always
type: string
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: string
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
'''
| gpl-3.0 |
gokuale/weblate | weblate/trans/tests/test_dictionary.py | 11 | 7785 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for dictionary manipulations.
"""
from weblate.trans.tests.test_views import ViewTestCase
from weblate.trans.models import Dictionary
from django.core.urlresolvers import reverse
from weblate.trans.tests.utils import get_test_file
TEST_TBX = get_test_file('terms.tbx')
TEST_CSV = get_test_file('terms.csv')
TEST_CSV_HEADER = get_test_file('terms-header.csv')
TEST_PO = get_test_file('terms.po')
class DictionaryTest(ViewTestCase):
'''
Testing of dictionary manipulations.
'''
def get_url(self, url):
return reverse(url, kwargs={
'lang': 'cs',
'project': self.subproject.project.slug,
})
def import_file(self, filename, **kwargs):
with open(filename) as handle:
params = {'file': handle}
params.update(kwargs)
return self.client.post(
self.get_url('upload_dictionary'),
params
)
def test_import(self):
'''
Test for importing of TBX into glossary.
'''
show_url = self.get_url('show_dictionary')
# Import file
response = self.import_file(TEST_TBX)
# Check correct response
self.assertRedirects(response, show_url)
# Check number of imported objects
self.assertEqual(Dictionary.objects.count(), 164)
# Check they are shown
response = self.client.get(show_url)
self.assertContains(response, u'podpůrná vrstva')
# Change single word
word = Dictionary.objects.get(target=u'podpůrná vrstva')
word.target = u'zkouška sirén'
word.save()
        # Import file again with overwriting
response = self.import_file(TEST_TBX, method='overwrite')
# Check number of imported objects
self.assertEqual(Dictionary.objects.count(), 164)
# Check entry got overwritten
response = self.client.get(show_url)
self.assertContains(response, u'podpůrná vrstva')
# Change single word
word = Dictionary.objects.get(target=u'podpůrná vrstva')
word.target = u'zkouška sirén'
word.save()
# Import file again with adding
response = self.import_file(TEST_TBX, method='add')
# Check number of imported objects
self.assertEqual(Dictionary.objects.count(), 165)
def test_import_csv(self):
# Import file
response = self.import_file(TEST_CSV)
# Check correct response
self.assertRedirects(response, self.get_url('show_dictionary'))
response = self.client.get(self.get_url('show_dictionary'))
# Check number of imported objects
self.assertEqual(Dictionary.objects.count(), 164)
def test_import_csv_header(self):
# Import file
response = self.import_file(TEST_CSV_HEADER)
# Check correct response
self.assertRedirects(response, self.get_url('show_dictionary'))
# Check number of imported objects
self.assertEqual(Dictionary.objects.count(), 164)
def test_import_po(self):
# Import file
response = self.import_file(TEST_PO)
# Check correct response
self.assertRedirects(response, self.get_url('show_dictionary'))
# Check number of imported objects
self.assertEqual(Dictionary.objects.count(), 164)
def test_edit(self):
'''
Test for manually adding words to glossary.
'''
show_url = self.get_url('show_dictionary')
edit_url = self.get_url('edit_dictionary')
delete_url = self.get_url('delete_dictionary')
# Add word
response = self.client.post(
show_url,
{'source': 'source', 'target': u'překlad'}
)
# Check correct response
self.assertRedirects(response, show_url)
# Check number of objects
self.assertEqual(Dictionary.objects.count(), 1)
dict_id = Dictionary.objects.all()[0].id
dict_id_url = '?id=%d' % dict_id
# Check they are shown
response = self.client.get(show_url)
self.assertContains(response, u'překlad')
# Edit page
response = self.client.get(edit_url + dict_id_url)
self.assertContains(response, u'překlad')
# Edit translation
response = self.client.post(
edit_url + dict_id_url,
{'source': 'src', 'target': u'přkld'}
)
self.assertRedirects(response, show_url)
# Check they are shown
response = self.client.get(show_url)
self.assertContains(response, u'přkld')
# Test deleting
response = self.client.post(delete_url, {'id': dict_id})
self.assertRedirects(response, show_url)
# Check number of objects
self.assertEqual(Dictionary.objects.count(), 0)
def test_download_csv(self):
'''
        Test for downloading CSV file.
'''
# Import test data
self.import_file(TEST_TBX)
response = self.client.get(
self.get_url('download_dictionary'),
{'format': 'csv'}
)
self.assertContains(
response,
u'addon,doplněk'
)
def test_download_tbx(self):
'''
Test for downloading TBX file.
'''
# Import test data
self.import_file(TEST_TBX)
response = self.client.get(
self.get_url('download_dictionary'),
{'format': 'tbx'}
)
self.assertContains(
response,
u'<term>website</term>'
)
self.assertContains(
response,
u'<term>webové stránky</term>'
)
def test_download_po(self):
'''
Test for downloading PO file.
'''
# Import test data
self.import_file(TEST_TBX)
response = self.client.get(
self.get_url('download_dictionary'),
{'format': 'po'}
)
self.assertContains(
response,
u'msgid "wizard"\nmsgstr "průvodce"'
)
def test_list(self):
'''
Test for listing dictionaries.
'''
self.import_file(TEST_TBX)
# List dictionaries
response = self.client.get(reverse(
'show_dictionaries',
kwargs=self.kw_project
))
self.assertContains(response, 'Czech')
self.assertContains(response, 'Italian')
dict_url = self.get_url('show_dictionary')
# List all words
response = self.client.get(dict_url)
self.assertContains(response, 'Czech')
self.assertContains(response, '1 / 7')
self.assertContains(response, u'datový tok')
# Filtering by letter
response = self.client.get(dict_url, {'letter': 'b'})
self.assertContains(response, 'Czech')
self.assertContains(response, '1 / 1')
self.assertContains(response, u'datový tok')
| gpl-3.0 |
tomkun/stem | stem/util/term.py | 7 | 3027 | # Copyright 2011-2013, Damian Johnson
# See LICENSE for licensing information
"""
Utilities for working with the terminal.
**Module Overview:**
::
format - wrap text with ANSI for the given colors or attributes
.. data:: Color (enum)
.. data:: BgColor (enum)
Enumerations for foreground or background terminal color.
=========== ===========
Color Description
=========== ===========
**BLACK** black color
**BLUE** blue color
**CYAN** cyan color
**GREEN** green color
**MAGENTA** magenta color
**RED** red color
**WHITE** white color
**YELLOW** yellow color
=========== ===========
.. data:: Attr (enum)
Enumerations of terminal text attributes.
============= ===========
Attr Description
============= ===========
**BOLD** heavy typeface
**HILIGHT** inverted foreground and background
**UNDERLINE** underlined text
============= ===========
"""
import stem.util.enum
import stem.util.str_tools
TERM_COLORS = ("BLACK", "RED", "GREEN", "YELLOW", "BLUE", "MAGENTA", "CYAN", "WHITE")
Color = stem.util.enum.Enum(*TERM_COLORS)
BgColor = stem.util.enum.Enum(*["BG_" + color for color in TERM_COLORS])
Attr = stem.util.enum.Enum("BOLD", "UNDERLINE", "HILIGHT")
# mappings of terminal attribute enums to their ANSI escape encoding
FG_ENCODING = dict([(list(Color)[i], str(30 + i)) for i in range(8)])
BG_ENCODING = dict([(list(BgColor)[i], str(40 + i)) for i in range(8)])
ATTR_ENCODING = {Attr.BOLD: "1", Attr.UNDERLINE: "4", Attr.HILIGHT: "7"}
CSI = "\x1B[%sm"
RESET = CSI % "0"
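# Illustrative note (derived from the definitions above): these mappings
# resolve enum values to their ANSI codes, e.g. FG_ENCODING[Color.RED] is
# "31", BG_ENCODING[BgColor.BG_RED] is "41" and ATTR_ENCODING[Attr.BOLD] is
# "1", so CSI % "31;1" produces "\x1B[31;1m".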
def format(msg, *attr):
"""
Simple terminal text formatting using `ANSI escape sequences
<https://secure.wikimedia.org/wikipedia/en/wiki/ANSI_escape_code#CSI_codes>`_.
The following are some toolkits providing similar capabilities:
* `django.utils.termcolors <https://code.djangoproject.com/browser/django/trunk/django/utils/termcolors.py>`_
* `termcolor <http://pypi.python.org/pypi/termcolor>`_
* `colorama <http://pypi.python.org/pypi/colorama>`_
:param str msg: string to be formatted
:param str attr: text attributes, this can be :data:`~stem.util.term.Color`, :data:`~stem.util.term.BgColor`, or :data:`~stem.util.term.Attr` enums
and are case insensitive (so strings like "red" are fine)
:returns: **str** wrapped with ANSI escape encodings, starting with the given
attributes and ending with a reset
"""
# if we have reset sequences in the message then apply our attributes
# after each of them
if RESET in msg:
return "".join([format(comp, *attr) for comp in msg.split(RESET)])
encodings = []
for text_attr in attr:
text_attr, encoding = stem.util.str_tools._to_camel_case(text_attr), None
encoding = FG_ENCODING.get(text_attr, encoding)
encoding = BG_ENCODING.get(text_attr, encoding)
encoding = ATTR_ENCODING.get(text_attr, encoding)
if encoding:
encodings.append(encoding)
if encodings:
return (CSI % ";".join(encodings)) + msg + RESET
else:
return msg
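# Illustrative usage sketch (not part of the original module):
#
#   from stem.util import term
#   print(term.format("hello world", term.Color.RED, term.Attr.BOLD))
#
# would emit "\x1B[31;1mhello world\x1B[0m" - bold red text followed by a
# reset sequence.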
| lgpl-3.0 |
GiovanniConserva/TestDeploy | venv/Lib/encodings/utf_16_le.py | 860 | 1037 | """ Python 'utf-16-le' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_le_encode
def decode(input, errors='strict'):
return codecs.utf_16_le_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_le_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_le_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_le_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_le_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-le',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
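# Illustrative behaviour sketch (not part of the original module): once the
# encodings package registers this entry, the codec is reachable by name:
#
#   'ab'.encode('utf-16-le')            # -> b'a\x00b\x00'
#   b'a\x00b\x00'.decode('utf-16-le')   # -> 'ab'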
| bsd-3-clause |
google/hyou | test/worksheet_test.py | 1 | 2592 | # Copyright 2015 Google Inc. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import unittest
import hyou.api
import hyou.collection
import hyou.util
import http_mocks
class WorksheetReadOnlyTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = hyou.api.API(
http_mocks.ReplayHttp('unittest-sheets.json'),
discovery=False)
def setUp(self):
self.collection = hyou.collection.Collection(self.api)
self.spreadsheet = self.collection[
'18OLN5A2SSKAeYLXw4SnZxU1yRJnMdf_ZCjc0D2UdhX8']
self.worksheet1 = self.spreadsheet['Sheet1']
def test_title(self):
self.assertEqual('Sheet1', self.worksheet1.title)
def test_rows(self):
self.assertEqual(2, self.worksheet1.rows)
def test_cols(self):
self.assertEqual(5, self.worksheet1.cols)
def test_repr(self):
self.assertEqual(str('Worksheet(key=0)'), repr(self.worksheet1))
def test_view(self):
self.worksheet1.view(start_row=3)
self.worksheet1.view(end_row=-1)
self.worksheet1.view(start_row=1, end_row=0)
self.worksheet1.view(start_col=6)
self.worksheet1.view(end_col=-1)
self.worksheet1.view(start_col=1, end_col=0)
class WorksheetReadWriteTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = hyou.api.API(
http_mocks.ReplayHttp('unittest-sheets.json'),
discovery=False)
def setUp(self):
self.collection = hyou.collection.Collection(self.api)
self.spreadsheet = self.collection[
'1z5eYrVoLP-RUWdzeqUShRc2VPFX0SUCTlHMmUS0K8Lo']
self.worksheet1 = self.spreadsheet['Sheet1']
def test_set_title(self):
self.worksheet1.title = 'Sheet1'
def test_set_size(self):
self.worksheet1.set_size(2, 5)
def test_set_rows(self):
self.worksheet1.rows = 2
def test_set_cols(self):
self.worksheet1.cols = 5
| apache-2.0 |
Nuclearfossil/ATF | Test/FunctionalTests/CircuitEditorTestScripts/EditSaveCloseAndReopen.py | 10 | 3356 | #Copyright (c) 2014 Sony Computer Entertainment America LLC. See License.txt.
import sys
sys.path.append("./CommonTestScripts")
import System
import Test
import CircuitEditorUtil
doc = atfDocService.OpenNewDocument(editor)
CircuitEditorUtil.SetGlobals(schemaLoader, Schema)
modules = []
annotations = []
connections = []
print "Adding annotations"
comment = editingContext.Insert[Annotation](DomNode(Schema.annotationType.Type), 300, 100)
editingContext.SetProperty(comment.DomNode, Schema.annotationType.textAttribute, "I am a comment")
comment2 = editingContext.Insert[Annotation](DomNode(Schema.annotationType.Type), 400, 100)
editingContext.SetProperty(comment2.DomNode, Schema.annotationType.textAttribute, "!@#$%^&*()_+<>/.,;[]\\")
print "Adding modules"
btn = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("buttonType", "benjamin button"), 100, 100)
light = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("lightType", "lights out"), 200, 100)
sound = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("soundType", "like a lion in zion"), 100, 200)
speaker = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("speakerType", "speakeazy"), 200, 200)
btn2 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("buttonType", "btn2"), 100, 300)
btn3 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("buttonType", "btn3"), 100, 400)
andObj = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("andType", "andONE"), 200, 300)
orObj = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("orType", "orca"), 200, 400)
light2 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("lightType", "light2"), 300, 300)
light3 = editingContext.Insert[Module](CircuitEditorUtil.CreateModuleNode("lightType", "light3"), 300, 400)
print "Adding connections"
btnToLight = editingContext.Connect(btn, btn.Type.Outputs[0], light, light.Type.Inputs[0], None)
soundToSpeaker = editingContext.Connect(sound, sound.Type.Outputs[0], speaker, speaker.Type.Inputs[0], None)
btn2ToAnd = editingContext.Connect(btn2, btn2.Type.Outputs[0], andObj, andObj.Type.Inputs[0], None)
btn2ToOr = editingContext.Connect(btn2, btn2.Type.Outputs[0], orObj, orObj.Type.Inputs[0], None)
btn3ToAnd = editingContext.Connect(btn3, btn3.Type.Outputs[0], andObj, andObj.Type.Inputs[0], None)
btn3ToOr = editingContext.Connect(btn3, btn3.Type.Outputs[0], orObj, orObj.Type.Inputs[0], None)
btn2ToAnd = editingContext.Connect(btn2, btn2.Type.Outputs[0], andObj, andObj.Type.Inputs[0], None)
andToLight2 = editingContext.Connect(andObj, andObj.Type.Outputs[0], light2, light2.Type.Inputs[0], None)
orToLight3 = editingContext.Connect(orObj, orObj.Type.Outputs[0], light3, light3.Type.Inputs[0], None)
for annotation in circuitContainer.Annotations:
annotations.append(annotation)
for module in circuitContainer.Elements:
modules.append(module)
for connection in circuitContainer.Wires:
connections.append(connection)
filePath = Test.GetNewFilePath("EditAndSave.circuit")
atfFile.SaveAs(doc,Uri(filePath) )
Test.True(File.Exists(filePath), "Verify file saved")
atfFile.Close(doc)
docNew = atfFile.OpenExistingDocument(editor, Uri(filePath))
CircuitEditorUtil.VerifyCircuit(circuitContainer, modules, annotations, connections)
print Test.SUCCESS
| apache-2.0 |
nealtodd/wagtail | wagtail/embeds/finders/embedly.py | 16 | 2010 | from wagtail.embeds.exceptions import EmbedException, EmbedNotFoundException
from .base import EmbedFinder
class EmbedlyException(EmbedException):
pass
class AccessDeniedEmbedlyException(EmbedlyException):
pass
class EmbedlyFinder(EmbedFinder):
key = None
def __init__(self, key=None):
if key:
self.key = key
def get_key(self):
return self.key
def accept(self, url):
# We don't really know what embedly supports so accept everything
return True
def find_embed(self, url, max_width=None, key=None):
from embedly import Embedly
# Get embedly key
if key is None:
key = self.get_key()
# Get embedly client
client = Embedly(key=key)
# Call embedly
if max_width is not None:
oembed = client.oembed(url, maxwidth=max_width, better=False)
else:
oembed = client.oembed(url, better=False)
# Check for error
if oembed.get('error'):
if oembed['error_code'] in [401, 403]:
raise AccessDeniedEmbedlyException
elif oembed['error_code'] == 404:
raise EmbedNotFoundException
else:
raise EmbedlyException
# Convert photos into HTML
if oembed['type'] == 'photo':
html = '<img src="%s" alt="">' % (oembed['url'], )
else:
html = oembed.get('html')
# Return embed as a dict
return {
'title': oembed['title'] if 'title' in oembed else '',
'author_name': oembed['author_name'] if 'author_name' in oembed else '',
'provider_name': oembed['provider_name'] if 'provider_name' in oembed else '',
'type': oembed['type'],
'thumbnail_url': oembed.get('thumbnail_url'),
'width': oembed.get('width'),
'height': oembed.get('height'),
'html': html,
}
embed_finder_class = EmbedlyFinder
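# Illustrative usage sketch (the key and URL below are placeholders, not
# values from this source):
#
#   finder = EmbedlyFinder(key='my-embedly-api-key')
#   url = 'https://example.com/some-video'
#   if finder.accept(url):
#       embed = finder.find_embed(url, max_width=640)
#       # embed is a dict with 'title', 'type', 'html', 'width', 'height', ...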
| bsd-3-clause |
BeATz-UnKNoWN/python-for-android | python-modules/twisted/twisted/test/proto_helpers.py | 56 | 15308 | # -*- test-case-name: twisted.test.test_stringtransport -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Assorted functionality which is commonly useful when writing unit tests.
"""
from StringIO import StringIO
from zope.interface import implements
from twisted.internet.interfaces import ITransport, IConsumer, IPushProducer,\
IConnector
from twisted.internet.interfaces import IReactorTCP, IReactorSSL, IReactorUNIX
from twisted.internet.interfaces import IListeningPort
from twisted.protocols import basic
from twisted.internet import protocol, error, address
from twisted.internet.address import IPv4Address, UNIXAddress
class AccumulatingProtocol(protocol.Protocol):
"""
L{AccumulatingProtocol} is an L{IProtocol} implementation which collects
the data delivered to it and can fire a Deferred when it is connected or
disconnected.
@ivar made: A flag indicating whether C{connectionMade} has been called.
@ivar data: A string giving all the data passed to C{dataReceived}.
@ivar closed: A flag indicating whether C{connectionLost} has been called.
@ivar closedReason: The value of the I{reason} parameter passed to
C{connectionLost}.
@ivar closedDeferred: If set to a L{Deferred}, this will be fired when
C{connectionLost} is called.
"""
made = closed = 0
closedReason = None
closedDeferred = None
data = ""
factory = None
def connectionMade(self):
self.made = 1
if (self.factory is not None and
self.factory.protocolConnectionMade is not None):
d = self.factory.protocolConnectionMade
self.factory.protocolConnectionMade = None
d.callback(self)
def dataReceived(self, data):
self.data += data
def connectionLost(self, reason):
self.closed = 1
self.closedReason = reason
if self.closedDeferred is not None:
d, self.closedDeferred = self.closedDeferred, None
d.callback(None)
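# Illustrative usage sketch in a test (not part of the original module):
#
#   proto = AccumulatingProtocol()
#   proto.makeConnection(StringTransport())
#   proto.dataReceived("some bytes")
#   assert proto.made == 1 and proto.data == "some bytes"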
class LineSendingProtocol(basic.LineReceiver):
lostConn = False
def __init__(self, lines, start = True):
self.lines = lines[:]
self.response = []
self.start = start
def connectionMade(self):
if self.start:
map(self.sendLine, self.lines)
def lineReceived(self, line):
if not self.start:
map(self.sendLine, self.lines)
self.lines = []
self.response.append(line)
def connectionLost(self, reason):
self.lostConn = True
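# Illustrative usage sketch (the lines below are placeholders): with
# start=True the given lines are sent as soon as the connection is made and
# any lines received back are collected in .response:
#
#   proto = LineSendingProtocol(["HELO example.com", "QUIT"])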
class FakeDatagramTransport:
noAddr = object()
def __init__(self):
self.written = []
def write(self, packet, addr=noAddr):
self.written.append((packet, addr))
class StringTransport:
"""
A transport implementation which buffers data in memory and keeps track of
its other state without providing any behavior.
L{StringTransport} has a number of attributes which are not part of any of
the interfaces it claims to implement. These attributes are provided for
testing purposes. Implementation code should not use any of these
attributes; they are not provided by other transports.
@ivar disconnecting: A C{bool} which is C{False} until L{loseConnection} is
called, then C{True}.
@ivar producer: If a producer is currently registered, C{producer} is a
reference to it. Otherwise, C{None}.
@ivar streaming: If a producer is currently registered, C{streaming} refers
to the value of the second parameter passed to C{registerProducer}.
@ivar hostAddr: C{None} or an object which will be returned as the host
address of this transport. If C{None}, a nasty tuple will be returned
instead.
@ivar peerAddr: C{None} or an object which will be returned as the peer
address of this transport. If C{None}, a nasty tuple will be returned
instead.
@ivar producerState: The state of this L{StringTransport} in its capacity
as an L{IPushProducer}. One of C{'producing'}, C{'paused'}, or
C{'stopped'}.
@ivar io: A L{StringIO} which holds the data which has been written to this
transport since the last call to L{clear}. Use L{value} instead of
accessing this directly.
"""
implements(ITransport, IConsumer, IPushProducer)
disconnecting = False
producer = None
streaming = None
hostAddr = None
peerAddr = None
producerState = 'producing'
def __init__(self, hostAddress=None, peerAddress=None):
self.clear()
if hostAddress is not None:
self.hostAddr = hostAddress
if peerAddress is not None:
self.peerAddr = peerAddress
self.connected = True
def clear(self):
"""
Discard all data written to this transport so far.
This is not a transport method. It is intended for tests. Do not use
it in implementation code.
"""
self.io = StringIO()
def value(self):
"""
Retrieve all data which has been buffered by this transport.
This is not a transport method. It is intended for tests. Do not use
it in implementation code.
@return: A C{str} giving all data written to this transport since the
last call to L{clear}.
@rtype: C{str}
"""
return self.io.getvalue()
# ITransport
def write(self, data):
if isinstance(data, unicode): # no, really, I mean it
raise TypeError("Data must not be unicode")
self.io.write(data)
def writeSequence(self, data):
self.io.write(''.join(data))
def loseConnection(self):
"""
Close the connection. Does nothing besides toggle the C{disconnecting}
instance variable to C{True}.
"""
self.disconnecting = True
def getPeer(self):
if self.peerAddr is None:
return address.IPv4Address('TCP', '192.168.1.1', 54321)
return self.peerAddr
def getHost(self):
if self.hostAddr is None:
return address.IPv4Address('TCP', '10.0.0.1', 12345)
return self.hostAddr
# IConsumer
def registerProducer(self, producer, streaming):
if self.producer is not None:
raise RuntimeError("Cannot register two producers")
self.producer = producer
self.streaming = streaming
def unregisterProducer(self):
if self.producer is None:
raise RuntimeError(
"Cannot unregister a producer unless one is registered")
self.producer = None
self.streaming = None
# IPushProducer
def _checkState(self):
if self.disconnecting:
raise RuntimeError(
"Cannot resume producing after loseConnection")
if self.producerState == 'stopped':
raise RuntimeError("Cannot resume a stopped producer")
def pauseProducing(self):
self._checkState()
self.producerState = 'paused'
def stopProducing(self):
self.producerState = 'stopped'
def resumeProducing(self):
self._checkState()
self.producerState = 'producing'
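# Illustrative usage sketch in a test (not part of the original module):
#
#   transport = StringTransport()
#   transport.write("GET / HTTP/1.1\r\n")
#   assert transport.value() == "GET / HTTP/1.1\r\n"
#   transport.clear()
#   assert transport.value() == ""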
class StringTransportWithDisconnection(StringTransport):
def loseConnection(self):
if self.connected:
self.connected = False
self.protocol.connectionLost(error.ConnectionDone("Bye."))
class StringIOWithoutClosing(StringIO):
"""
A StringIO that can't be closed.
"""
def close(self):
"""
Do nothing.
"""
class _FakePort(object):
"""
A fake L{IListeningPort} to be used in tests.
@ivar _hostAddress: The L{IAddress} this L{IListeningPort} is pretending
to be listening on.
"""
implements(IListeningPort)
def __init__(self, hostAddress):
"""
@param hostAddress: An L{IAddress} this L{IListeningPort} should
pretend to be listening on.
"""
self._hostAddress = hostAddress
def startListening(self):
"""
Fake L{IListeningPort.startListening} that doesn't do anything.
"""
def stopListening(self):
"""
Fake L{IListeningPort.stopListening} that doesn't do anything.
"""
def getHost(self):
"""
Fake L{IListeningPort.getHost} that returns our L{IAddress}.
"""
return self._hostAddress
class _FakeConnector(object):
"""
A fake L{IConnector} that allows us to inspect if it has been told to stop
connecting.
@ivar stoppedConnecting: has this connector's
L{FakeConnector.stopConnecting} method been invoked yet?
@ivar _address: An L{IAddress} provider that represents our destination.
"""
implements(IConnector)
stoppedConnecting = False
def __init__(self, address):
"""
@param address: An L{IAddress} provider that represents this
connector's destination.
"""
self._address = address
def stopConnecting(self):
"""
Implement L{IConnector.stopConnecting} and set
L{FakeConnector.stoppedConnecting} to C{True}
"""
self.stoppedConnecting = True
def disconnect(self):
"""
Implement L{IConnector.disconnect} as a no-op.
"""
def connect(self):
"""
Implement L{IConnector.connect} as a no-op.
"""
def getDestination(self):
"""
Implement L{IConnector.getDestination} to return the C{address} passed
to C{__init__}.
"""
return self._address
class MemoryReactor(object):
"""
A fake reactor to be used in tests. This reactor doesn't actually do
much that's useful yet. It accepts TCP connection setup attempts, but
they will never succeed.
@ivar tcpClients: a list that keeps track of connection attempts (ie, calls
to C{connectTCP}).
@type tcpClients: C{list}
@ivar tcpServers: a list that keeps track of server listen attempts (ie, calls
to C{listenTCP}).
@type tcpServers: C{list}
@ivar sslClients: a list that keeps track of connection attempts (ie,
calls to C{connectSSL}).
@type sslClients: C{list}
@ivar sslServers: a list that keeps track of server listen attempts (ie,
calls to C{listenSSL}).
@type sslServers: C{list}
@ivar unixClients: a list that keeps track of connection attempts (ie,
calls to C{connectUNIX}).
@type unixClients: C{list}
@ivar unixServers: a list that keeps track of server listen attempts (ie,
calls to C{listenUNIX}).
@type unixServers: C{list}
"""
implements(IReactorTCP, IReactorSSL, IReactorUNIX)
def __init__(self):
"""
Initialize the tracking lists.
"""
self.tcpClients = []
self.tcpServers = []
self.sslClients = []
self.sslServers = []
self.unixClients = []
self.unixServers = []
def listenTCP(self, port, factory, backlog=50, interface=''):
"""
Fake L{reactor.listenTCP}, that logs the call and returns an
L{IListeningPort}.
"""
self.tcpServers.append((port, factory, backlog, interface))
return _FakePort(IPv4Address('TCP', '0.0.0.0', port))
def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
"""
Fake L{reactor.connectTCP}, that logs the call and returns an
L{IConnector}.
"""
self.tcpClients.append((host, port, factory, timeout, bindAddress))
return _FakeConnector(IPv4Address('TCP', host, port))
def listenSSL(self, port, factory, contextFactory,
backlog=50, interface=''):
"""
Fake L{reactor.listenSSL}, that logs the call and returns an
L{IListeningPort}.
"""
self.sslServers.append((port, factory, contextFactory,
backlog, interface))
return _FakePort(IPv4Address('TCP', '0.0.0.0', port))
def connectSSL(self, host, port, factory, contextFactory,
timeout=30, bindAddress=None):
"""
Fake L{reactor.connectSSL}, that logs the call and returns an
L{IConnector}.
"""
self.sslClients.append((host, port, factory, contextFactory,
timeout, bindAddress))
return _FakeConnector(IPv4Address('TCP', host, port))
def listenUNIX(self, address, factory,
backlog=50, mode=0666, wantPID=0):
"""
Fake L{reactor.listenUNIX}, that logs the call and returns an
L{IListeningPort}.
"""
self.unixServers.append((address, factory, backlog, mode, wantPID))
return _FakePort(UNIXAddress(address))
def connectUNIX(self, address, factory, timeout=30, checkPID=0):
"""
Fake L{reactor.connectUNIX}, that logs the call and returns an
L{IConnector}.
"""
self.unixClients.append((address, factory, timeout, checkPID))
return _FakeConnector(UNIXAddress(address))
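# Illustrative usage sketch in a test (clientFactory is a placeholder):
#
#   reactor = MemoryReactor()
#   reactor.connectTCP("example.com", 80, clientFactory)
#   host, port, factory, timeout, bindAddress = reactor.tcpClients[0]
#   assert (host, port) == ("example.com", 80)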
class RaisingMemoryReactor(object):
"""
A fake reactor to be used in tests. It accepts TCP connection setup
attempts, but they will fail.
@ivar _listenException: An instance of an L{Exception}
@ivar _connectException: An instance of an L{Exception}
"""
implements(IReactorTCP, IReactorSSL, IReactorUNIX)
def __init__(self, listenException=None, connectException=None):
"""
@param listenException: An instance of an L{Exception} to raise when any
C{listen} method is called.
@param connectException: An instance of an L{Exception} to raise when
any C{connect} method is called.
"""
self._listenException = listenException
self._connectException = connectException
def listenTCP(self, port, factory, backlog=50, interface=''):
"""
Fake L{reactor.listenTCP}, that raises L{self._listenException}.
"""
raise self._listenException
def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
"""
Fake L{reactor.connectTCP}, that raises L{self._connectException}.
"""
raise self._connectException
def listenSSL(self, port, factory, contextFactory,
backlog=50, interface=''):
"""
Fake L{reactor.listenSSL}, that raises L{self._listenException}.
"""
raise self._listenException
def connectSSL(self, host, port, factory, contextFactory,
timeout=30, bindAddress=None):
"""
Fake L{reactor.connectSSL}, that raises L{self._connectException}.
"""
raise self._connectException
def listenUNIX(self, address, factory,
backlog=50, mode=0666, wantPID=0):
"""
Fake L{reactor.listenUNIX}, that raises L{self._listenException}.
"""
raise self._listenException
def connectUNIX(self, address, factory, timeout=30, checkPID=0):
"""
Fake L{reactor.connectUNIX}, that raises L{self._connectException}.
"""
raise self._connectException
| apache-2.0 |
S01780/python-social-auth | social/tests/backends/test_foursquare.py | 92 | 4108 | import json
from social.tests.backends.oauth import OAuth2Test
class FoursquareOAuth2Test(OAuth2Test):
backend_path = 'social.backends.foursquare.FoursquareOAuth2'
user_data_url = 'https://api.foursquare.com/v2/users/self'
expected_username = 'FooBar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'notifications': [{
'item': {
'unreadCount': 0
},
'type': 'notificationTray'
}],
'meta': {
'errorType': 'deprecated',
'code': 200,
'errorDetail': 'Please provide an API version to avoid future '
'errors.See http://bit.ly/vywCav'
},
'response': {
'user': {
'photo': 'https://is0.4sqi.net/userpix_thumbs/'
'BYKIT01VN4T4BISN.jpg',
'pings': False,
'homeCity': 'Foo, Bar',
'id': '1010101',
'badges': {
'count': 0,
'items': []
},
'friends': {
'count': 1,
'groups': [{
'count': 0,
'items': [],
'type': 'friends',
'name': 'Mutual friends'
}, {
'count': 1,
'items': [{
'bio': '',
'gender': 'male',
'firstName': 'Baz',
'relationship': 'friend',
'photo': 'https://is0.4sqi.net/userpix_thumbs/'
'BYKIT01VN4T4BISN.jpg',
'lists': {
'groups': [{
'count': 1,
'items': [],
'type': 'created'
}]
},
'homeCity': 'Baz, Qux',
'lastName': 'Qux',
'tips': {
'count': 0
},
'id': '10101010'
}],
'type': 'others',
'name': 'Other friends'
}]
},
'referralId': 'u-1010101',
'tips': {
'count': 0
},
'type': 'user',
'todos': {
'count': 0
},
'bio': '',
'relationship': 'self',
'lists': {
'groups': [{
'count': 1,
'items': [],
'type': 'created'
}]
},
'photos': {
'count': 0,
'items': []
},
'checkinPings': 'off',
'scores': {
'max': 0,
'checkinsCount': 0,
'goal': 50,
'recent': 0
},
'checkins': {
'count': 0
},
'firstName': 'Foo',
'gender': 'male',
'contact': {
'email': '[email protected]'
},
'lastName': 'Bar',
'following': {
'count': 0
},
'requests': {
'count': 0
},
'mayorships': {
'count': 0,
'items': []
}
}
}
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
| bsd-3-clause |
adamginsburg/APEX_CMZ_H2CO | reduction/individual_cube_making.py | 2 | 2041 | import socket
import inspect, os
dirpath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
if 'eso-macbook' in socket.gethostname():
execfile(os.path.join(dirpath,'run_pipeline_cyg.py'))
elif 'cleese' in socket.gethostname():
execfile(os.path.join(dirpath,'run_pipeline_cleese.py'))
else:
raise ValueError("Machine {0} not recognized.".format(socket.gethostname()))
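# Note: subprocess, time, log, CalledProcessError and make_apex_cubes are
# assumed to be provided by the run_pipeline_*.py script execfile'd above;
# they are not imported in this file.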
try:
label = "_v"+subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).strip()
except CalledProcessError:
label = ""
logfile = ("".join([time.strftime("apexcmzpipeline{0}_%y_%m_%d_%H:%M:%S"),".log"])).format(label)
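# Illustrative example: with label "_vabc1234" this yields a file name such
# as "apexcmzpipeline_vabc1234_14_07_01_16:30:00.log".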
with log.log_to_file(logfile):
for dataset in make_apex_cubes.datasets_2014:
mapnames = make_apex_cubes.datasets_2014[dataset]
ds = os.path.split(dataset)[1]
for mapname in mapnames:
make_apex_cubes.build_cube_2014(mapname,
datapath=make_apex_cubes.april2014path,
outpath=make_apex_cubes.april2014path,
lowhigh='low', pca_clean=True,
pcakwargs={}, datasets=[ds])
for dataset in make_apex_cubes.datasets_2013:
make_apex_cubes.build_cube_2013(datapath=make_apex_cubes.june2013datapath,
outpath=make_apex_cubes.june2013path,
lowhigh='low', pca_clean=True,
pcakwargs={}, datasets=[dataset],
extra_suffix=dataset[-5:])
for dataset in make_apex_cubes.datasets_ao:
make_apex_cubes.build_cube_ao(window='high', datasets=[dataset],
datapath=make_apex_cubes.aorawpath,
outpath=make_apex_cubes.aopath,
timewise_pca=True, pca_clean=True,
freq=True,)
| bsd-3-clause |