repo_name | path | copies | size | content | license
---|---|---|---|---|---|
sbusso/rethinkdb | external/v8_3.30.33.16/tools/testrunner/objects/output.py | 105 | 2443 |
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import signal
from ..local import utils
class Output(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.exit_code and not (0x3FFFFF00 & self.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.timed_out:
return False
return (self.exit_code < 0 and
self.exit_code != -signal.SIGABRT)
def HasTimedOut(self):
return self.timed_out
def Pack(self):
return [self.exit_code, self.timed_out, self.stdout, self.stderr]
@staticmethod
def Unpack(packed):
# For the order of the fields, refer to Pack() above.
return Output(packed[0], packed[1], packed[2], packed[3])
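# Illustrative usage (hypothetical, not part of the upstream V8 file):
# Pack() and Unpack() are inverses, which is what lets a result be shipped
# between processes as a plain list, e.g.
#
#   out = Output(exit_code=0, timed_out=False, stdout="ok", stderr="")
#   assert Output.Unpack(out.Pack()).exit_code == out.exit_code
#
# On POSIX, HasCrashed() reports a crash for any negative exit code other
# than -signal.SIGABRT, except when the run timed out (which ends with
# -signal.SIGTERM and is reported via HasTimedOut() instead).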
| agpl-3.0
yongshengwang/hue | desktop/core/ext-py/Django-1.6.10/build/lib/django/test/_doctest.py | 110 | 103163 |
# This is a slightly modified version of the doctest.py that shipped with Python 2.5
# It incorporates changes that have been submitted to the Python ticket tracker
# as ticket #1521051. These changes allow for a DoctestRunner and Doctest base
# class to be specified when constructing a DoctestSuite.
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters ([email protected]).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
import warnings
warnings.warn(
"The django.test._doctest module is deprecated; "
"use the doctest module from the Python standard library instead.",
PendingDeprecationWarning)
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from django.utils import six
from django.utils.six.moves import StringIO, xrange
if sys.platform.startswith('java'):
# On Jython, isclass() reports some modules as classes. Patch it.
def patch_isclass(isclass):
def patched_isclass(obj):
return isclass(obj) and hasattr(obj, '__module__')
return patched_isclass
inspect.isclass = patch_isclass(inspect.isclass)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
# Create a new flag unless `name` is already known.
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
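# Illustrative note (not part of the original module): each registration
# takes the next unused bit, so the calls below yield
# DONT_ACCEPT_TRUE_FOR_1 == 1, DONT_ACCEPT_BLANKLINE == 2, ...,
# REPORT_ONLY_FIRST_FAILURE == 512, and a hypothetical eleventh flag would
# be 1 << 10 == 1024.  Flags can therefore be OR-ed together without
# colliding.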
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
SKIP |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
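# Hypothetical example (not part of the original module): if the module
# under test did `from __future__ import division`, its globals map the
# name 'division' to __future__.division, so the returned flags include
# __future__.division.compiler_flag and compiled doctest examples see the
# same division semantics as the module itself.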
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, six.string_types):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
if module_relative:
package = _normalize_module(package, 3)
filename = _module_relative_path(package, filename)
if hasattr(package, '__loader__'):
if hasattr(package.__loader__, 'get_data'):
file_contents = package.__loader__.get_data(filename)
# get_data() opens files as 'rb', so one must do the equivalent
# conversion as universal newlines would do.
return file_contents.replace(os.linesep, '\n'), filename
with open(filename) as fp:
return fp.read(), filename
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning every
non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
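# Illustrative behaviour (not part of the original module): only lines that
# are completely empty are left untouched, e.g.
#
#   _indent('a\n\nb\n')  ==  '    a\n\n    b\n'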
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
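# A few more illustrative cases (not part of the original module):
#
#   >>> _ellipsis_match('a...e', 'abcde')
#   True
#   >>> _ellipsis_match('...end', 'the very end')
#   True
#   >>> _ellipsis_match('a...z', 'abc')
#   False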
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
self.__debugger_used = False
pdb.Pdb.__init__(self, stdout=out)
def set_trace(self, frame=None):
self.__debugger_used = True
if frame is None:
frame = sys._getframe().f_back
pdb.Pdb.set_trace(self, frame)
def set_continue(self):
# Calling set_continue unconditionally would break unit test
# coverage reporting, as Bdb.set_continue calls sys.settrace(None).
if self.__debugger_used:
pdb.Pdb.set_continue(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError('Expected a module: %r' % module)
if path.startswith('/'):
raise ValueError('Module-relative files may not have absolute paths')
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
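# Hypothetical usage (not part of the original module): for a package whose
# __file__ is '/site-packages/pkg/__init__.py',
# _module_relative_path(pkg, 'tests/output.txt') returns
# '/site-packages/pkg/tests/output.txt' (joined with os.path.join), while an
# absolute path such as '/etc/passwd' raises ValueError.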
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
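# Illustrative note (not part of the original module): the constructor
# normalises trailing newlines, e.g.
#
#   Example('1 + 1', '2').source == '1 + 1\n'
#   Example('1 + 1', '2').want   == '2\n'
#   Example('1 + 1', '').want    == ''      # an empty `want` stays empty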
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, six.string_types), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
# This lets us sort tests by name:
def _cmpkey(self):
return (self.name, self.filename, self.lineno, id(self))
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp(self._cmpkey(), other._cmpkey())
def __lt__(self, other):
return self._cmpkey() < other._cmpkey()
def __le__(self, other):
return self._cmpkey() <= other._cmpkey()
def __gt__(self, other):
return self._cmpkey() > other._cmpkey()
def __ge__(self, other):
return self._cmpkey() >= other._cmpkey()
def __eq__(self, other):
return self._cmpkey() == other._cmpkey()
def __ne__(self, other):
return self._cmpkey() != other._cmpkey()
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value if its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
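# Rough illustration (not part of the original module) of the shape that
# parse() returns: prose and Example objects alternate and the prose is
# kept verbatim, e.g. for
#
#   "Intro\n\n>>> 2 + 2\n4\n\nOutro\n"
#
# the result is ['Intro\n\n', <Example source='2 + 2\n' want='4\n'>,
# '\nOutro\n'].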
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
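# Illustrative directive (not part of the original module): an example line
# such as
#
#   >>> print(list(range(20)))    # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
#   [0, 1, ..., 19]
#
# makes _find_options return {ELLIPSIS: True, NORMALIZE_WHITESPACE: True};
# a name without a leading '+' or '-', or one that is not registered,
# raises ValueError.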
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
# Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
return tests
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is six.get_function_globals(object)
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, six.string_types):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, six.string_types)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, six.string_types):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, six.string_types):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.__func__
if inspect.isfunction(obj): obj = six.get_function_code(obj)
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> for test in tests:
... print(runner.run(test))
(0, 2)
(0, 1)
(0, 2)
(0, 2)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
(0, 7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
First, an output function (`out`) can be passed to
`DocTestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# If 'SKIP' is set, then skip this example.
if self.optionflags & SKIP:
continue
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Doctest and Py3 issue:
# If the current example that we wish to run is going to fail
# because it expects a leading u"", then use an alternate displayhook
original_displayhook = sys.displayhook
if six.PY3:
# only set alternate displayhook if Python 3.x or after
lines = []
def py3_displayhook(value):
if value is None:
# None should not be considered at all
return original_displayhook(value)
# Collect the repr output in one variable
s = repr(value)
# Strip b"" and u"" prefixes from the repr and expected output
# TODO: better way of stripping the prefixes?
expected = example.want
expected = expected.strip() # be wary of newlines
s = s.replace("u", "")
s = s.replace("b", "")
expected = expected.replace("u", "")
expected = expected.replace("b", "")
# single quote vs. double quote should not matter
# default all quote marks to double quote
s = s.replace("'", '"')
expected = expected.replace("'", '"')
# In case of multi-line expected result
lines.append(s)
# let them match
if s == expected: # be wary of false positives here
# they should be the same, print expected value
sys.stdout.write("%s\n" % example.want.strip())
# multi-line expected output, doctest uses loop
elif len(expected.split("\n")) == len(lines):
if "\n".join(lines) == expected:
sys.stdout.write("%s\n" % example.want.strip())
else:
sys.stdout.write("%s\n" % repr(value))
elif len(expected.split("\n")) != len(lines):
# we are not done looping yet, do not print anything!
pass
else:
sys.stdout.write("%s\n" % repr(value))
sys.displayhook = py3_displayhook
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
six.exec_(compile(example.source, filename, "single",
compileflags, 1), test.globs)
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
finally:
# restore the original displayhook
sys.displayhook = original_displayhook
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
# Python 3.1 requires seek after truncate
self._fakeout.seek(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_msg = traceback.format_exception_only(*exception[:2])[-1]
if six.PY3:
# module name will be in group(1) and the expected
# exception message will be in group(2)
m = re.match(r'(.*)\.(\w+:.+\s)', exc_msg)
# make sure there's a match
if m != None:
f_name = m.group(1)
# check to see if m.group(1) contains the module name
if f_name == exception[0].__module__:
# strip the module name from exc_msg
exc_msg = m.group(2)
if not quiet:
got += _exception_traceback(exception)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'[^:]*:', example.exc_msg)
m2 = re.match(r'[^:]*:', exc_msg)
if m1 and m2 and check(m1.group(0), m2.group(0),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exception)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return failures, tries
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(True)
else:
return self.save_linecache_getlines(filename, module_globals)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print("%d items had no tests:" % len(notests))
notests.sort()
for thing in notests:
print(" %s" % thing)
if passed:
print("%d items passed all tests:" % len(passed))
passed.sort()
for thing, count in passed:
print(" %3d tests in %s" % (count, thing))
if failed:
print(self.DIVIDER)
print("%d items had failures:" % len(failed))
failed.sort()
for thing, (f, t) in failed:
print(" %3d of %3d in %s" % (f, t, thing))
if verbose:
print("%d tests in % d items" % (len(self._name2ft), totalt))
print("%d passed and %d failed." % (totalt - totalf, totalf))
if totalf:
print("***Test Failed*** %d failures." % totalf)
elif verbose:
print("Test passed.")
return totalf, totalt
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
print("*** DocTestRunner.merge: '" + name + "' in both" \
" testers; summing outcomes.")
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
class OutputChecker:
"""
A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `DocTestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
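# Illustrative matches (not part of the original module), with the named
# flag passed in `optionflags`:
#
#   OutputChecker().check_output('1  2\n3\n', '1 2 3\n', NORMALIZE_WHITESPACE)
#       -> True
#   OutputChecker().check_output('[0, ..., 9]\n',
#                                '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n', ELLIPSIS)
#       -> True
#   OutputChecker().check_output('1\n', 'True\n', 0)
#       -> True   (expected '1' accepts 'True' unless DONT_ACCEPT_TRUE_FOR_1)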
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
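# Illustrative fallback formatting (not part of the original module): with
# none of the REPORT_*DIFF flags set, a mismatch where want is '1\n' and got
# is '2\n' is rendered as
#
#   Expected:
#       1
#   Got:
#       2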
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException as e:
... failure = e
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure as e:
... failure = e
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
(0, 1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
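# A minimal usage sketch for DebugRunner, assuming a throwaway doctest string
# built inline: the runner raises DocTestFailure / UnexpectedException instead
# of recording failures, which makes post-mortem debugging straightforward.
def _example_debug_runner():
    test = DocTestParser().get_doctest('>>> 1 + 1\n2\n', {}, 'example',
                                       'example.py', 0)
    runner = DebugRunner(verbose=False)
    # run() returns (#failures, #tries); for this passing example, (0, 1).
    return runner.run(test)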
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, report=True,
optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See doctest.__doc__ for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
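# A minimal sketch of calling testmod() directly, assuming it is invoked from a
# script so that __main__ is the module under test; the ELLIPSIS flag here is
# illustrative only.
def _example_testmod_usage():
    failures, tries = testmod(verbose=False, optionflags=ELLIPSIS)
    return failures, tries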
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser(),
encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg "encoding" specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
text, filename = _load_testfile(filename, package, module_relative)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
if encoding is not None:
text = text.decode(encoding)
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
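# A minimal sketch of testfile(), assuming a hypothetical text file
# "example_docs.txt" located relative to the calling module's directory.
def _example_testfile_usage():
    return testfile('example_docs.txt',
                    optionflags=NORMALIZE_WHITESPACE | REPORT_NDIFF)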
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
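# A minimal sketch of run_docstring_examples(): check a single object's
# docstring examples in isolation.  The nested function is illustrative only.
def _example_run_docstring_examples():
    def sample():
        """
        >>> 2 + 2
        4
        """
    run_docstring_examples(sample, {}, verbose=False, name='sample')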
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.optionflags = optionflags
self.testfinder = DocTestFinder()
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print("Running string %s" % name)
(f,t) = self.testrunner.run(test)
if self.verbose:
print("%s of %s examples failed in string %s" % (f, t, name))
return (f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return (f,t)
def rundict(self, d, name, module=None):
import new
m = new.module(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import new
m = new.module(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> old = _unittest_reportflags
>>> set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> import doctest
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None, runner=DocTestRunner):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
self._dt_runner = runner
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = self._dt_runner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException as e:
... failure = e
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure as e:
... failure = e
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test)
self.tearDown()
def id(self):
return self._dt_test.name
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
test_class=DocTestCase, **options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
if globs is None:
globs = module.__dict__
if not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
raise ValueError(module, "has no tests")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(test_class(test, **options))
return suite
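# A minimal sketch of DocTestSuite() feeding into a unittest runner; the
# module name "mypackage.mymodule" is a hypothetical placeholder.
def _example_doctest_suite():
    suite = DocTestSuite('mypackage.mymodule',
                         optionflags=REPORT_ONLY_FIRST_FAILURE)
    return unittest.TextTestRunner(verbosity=2).run(suite)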
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(),
encoding=None, **options):
if globs is None:
globs = {}
else:
globs = globs.copy()
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
doc, path = _load_testfile(path, package, module_relative)
if "__file__" not in globs:
globs["__file__"] = path
# Find the file and read it.
name = os.path.basename(path)
# If an encoding is specified, use it to convert the file to unicode
if encoding is not None:
doc = doc.decode(encoding)
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
encoding
An encoding that will be used to convert the files to unicode.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
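# A minimal sketch of DocFileSuite() collecting several doctest files; the
# paths are hypothetical, module-relative ones.
def _example_doc_file_suite():
    return DocFileSuite('docs/usage.txt', 'docs/advanced.txt',
                        optionflags=ELLIPSIS)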
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print(script_from_examples(text))
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
return '\n'.join(output)
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used. As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
with open(srcfilename, 'w') as fp:
fp.write(src)
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print(sys.exc_info()[1])
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
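# A minimal sketch of debug(): drop into pdb for one docstring's examples.
# The module and object names are hypothetical; pm=True post-mortems the
# first exception instead of stepping through from the start.
def _example_debug_usage():
    debug('mypackage.mymodule', 'mypackage.mymodule.some_function', pm=True)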
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print(t.get())
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print(x.get())
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print('foo\n\nbar\n')
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print(range(1000)) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print(list(xrange(30))) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
r = unittest.TextTestRunner()
r.run(DocTestSuite())
if __name__ == "__main__":
_test()
|
apache-2.0
|
kirca/odoo
|
openerpcommand/call.py
|
16
|
1241
|
"""
Call an arbitrary model's method.
"""
import ast
import os
import pprint
import sys
import time
import xmlrpclib
import client
class Call(client.Client):
"""\
Call an arbitrary model's method.
Example:
> oe call res.users.read '[1, 3]' '[]' -u 1 -p admin
"""
# TODO The above docstring is completely borked in the
# --help message.
command_name = 'call'
def __init__(self, subparsers=None):
super(Call, self).__init__(subparsers)
self.parser.add_argument('call', metavar='MODEL.METHOD',
help='the model and the method to call, using the '
'<model>.<method> format.')
self.parser.add_argument('args', metavar='ARGUMENT',
nargs='+',
help='the argument for the method call, must be '
'`ast.literal_eval` compatible. Can be repeated.')
def work(self):
try:
model, method = self.args.call.rsplit('.', 1)
        except ValueError:
print "Invalid syntax `%s` must have the form <model>.<method>."
sys.exit(1)
args = tuple(map(ast.literal_eval, self.args.args)) if self.args.args else ()
x = self.execute(model, method, *args)
pprint.pprint(x, indent=4)
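# A minimal sketch, assuming the docstring example above: this mirrors how
# work() turns the raw CLI strings into Python values with ast.literal_eval
# before passing them to execute().
def _example_parse_args():
    raw_args = ["[1, 3]", "[]"]
    return tuple(map(ast.literal_eval, raw_args))  # -> ([1, 3], [])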
|
agpl-3.0
|
cuemacro/findatapy
|
findatapy/util/dataconstants.py
|
1
|
11166
|
__author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
DataConstants
Has various constants required for the findatapy project. These have been defined as static variables.
"""
import os
import keyring
def path_join(folder, file):
if 's3://' in folder:
if folder[-1] == '/':
return folder + file
else:
return folder + '/' + file
else:
return os.path.join(folder, file)
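# Illustrative behaviour of path_join (the values below are hypothetical):
#   path_join('s3://bucket/folder', 'file.parquet') -> 's3://bucket/folder/file.parquet'
#   path_join('/tmp/data', 'file.parquet')          -> '/tmp/data/file.parquet' (on POSIX)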
def key_store(service_name):
key = None
    # this will fail on some cloud notebook platforms, so wrap it in a try/except block
try:
key = keyring.get_password(service_name, os.getlogin())
except:
pass
# set the keys by running set_api_keys.py file!
# if key is None:
# key = input("Please enter the %s API key: " % service_name)
#
# keyring.set_password(service_name, os.getlogin(), key)
return key
class DataConstants(object):
###### SHOULD AUTODETECT FOLDER
root_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__))).replace('\\', '/')
    temp_folder = path_join(root_folder, "temp")
###### FOR FUTURE VERSIONS (which include caching)
# Folders for holding market data
folder_historic_CSV = "x:/"
folder_time_series_data = "x:/"
# Usually the data folder where we want to store market data (eg. '.../test/*.parquet')
# or 'arctic'
default_data_engine = None
###### FOR DATABASE (Arctic/MongoDB)
db_server = '127.0.0.1'
db_port = '27017'
db_username = None
db_password = None
###### FOR TEMPORARY IN-MEMORY CACHE (Redis)
db_cache_server = '127.0.0.1'
db_cache_port = '6379'
write_cache_engine = 'redis' # 'redis' or 'no_cache' means we don't use cache
use_cache_compression = True
parquet_compression = 'gzip' # 'gzip' or 'snappy'
aws_region = None
###### FOR ALIAS TICKERS
# Config file for time series categories
config_root_folder = path_join(root_folder, "conf")
time_series_categories_fields = \
path_join(config_root_folder, "time_series_categories_fields.csv")
# We can have multiple tickers files (separated by ";")
time_series_tickers_list = path_join(config_root_folder, "time_series_tickers_list.csv") +";" + \
path_join(config_root_folder, "fx_vol_tickers.csv")+";" + \
path_join(config_root_folder, "fx_forwards_tickers.csv")+";" + \
path_join(config_root_folder, "base_depos_tickers_list.csv")+";"
time_series_fields_list = path_join(config_root_folder, "time_series_fields_list.csv")
# Config file for long term econ data
all_econ_tickers = path_join(config_root_folder, "all_econ_tickers.csv")
econ_country_codes = path_join(config_root_folder, "econ_country_codes.csv")
econ_country_groups = path_join(config_root_folder, "econ_country_groups.csv")
holidays_parquet_table = path_join(config_root_folder, "holidays_table.parquet")
# For events filtering
events_category = 'events'
events_category_dt = 'events_dt'
# Ignore these columns when doing smart grouping
drop_cols_smart_tickers_grouping = ['level_0']
###### FOR CURRENT VERSION
# which marketdatagenerator type to use?
# note - marketdatagenerator currently implemented
# cachedmarketdatagenerator is only for proprietary version at present
default_market_data_generator = "marketdatagenerator"
    # In Python, threading does not offer true parallelisation, but it can be useful when downloading data, because
    # a lot of the time is spent waiting on data. The multiprocessing library addresses this by spawning new Python
    # instances, but this has greater overhead (so it may only be advisable when downloading very long time series)
# "thread" or "multiprocessing" (experimental!) library to use when downloading data
market_thread_technique = "thread"
multiprocessing_library = 'multiprocess' # 'multiprocessing_on_dill' or 'multiprocess' or 'multiprocessing'
# How many threads to use for loading external data (don't do too many on slow machines!)
# also some data sources will complain if you start too many parallel threads to call data!
# for some data providers might get better performance from 1 thread only!
market_thread_no = { 'quandl' : 4,
'bloomberg' : 4,
'yahoo' : 1, # yfinance already threads requests, so don't do it twice!
'other' : 4,
'dukascopy' : 8,
'fxcm' : 4}
# Seconds for timeout
timeout_downloader = {'dukascopy' : 120}
# Dukascopy specific settings
dukascopy_retries = 20
dukascopy_mini_timeout_seconds = 10
dukascopy_multithreading = True # Can get rejected connections when threading with Dukascopy
dukascopy_try_time = 0 # Usually values of 0-1/8-1/4-1 are reasonable
# smaller values => quicker retry, but don't want to poll server too much
    # We can override the thread count and drop back to a single thread for certain market data downloads, as we can have issues with
    # quite large daily datasets from Bloomberg (and other data vendors) when doing multi-threading, so we can override and use
    # single threading on these (and also split them into several chunks)
#
override_multi_threading_for_categories = []
# These fields should always be converted to numbers (for every data vendor in MarketDataGenerator)
always_numeric_column = ['close', 'open', 'high', 'low', 'tot']
# These fields will be forcibly be converted to datetime64 (only for Bloomberg)
always_date_columns = ['release-date-time-full', 'last-tradeable-day',
'futures-chain-last-trade-dates', 'first-notice-date', 'first-tradeable-day',
'cal-non-settle-dates', 'first-revision-date', 'release-dt']
default_time_units = 'us' # 'ns' or 'ms' too
# These are string/object fields which do not need to be converted
always_str_fields = ['futures-chain-tickers']
# Dataframe chunk size
chunk_size_mb = 500
# Log config file
logging_conf = path_join(config_root_folder, "logging.conf")
####### Bloomberg settings
bbg_server = "localhost" # needs changing if you use Bloomberg Server API
bbg_server_port = 8194
# These fields are BDS style fields to be downloaded using Bloomberg's Reference Data interface
# You may need to add to this list
bbg_ref_fields = {'release-date-time-full' : 'ECO_FUTURE_RELEASE_DATE_LIST',
'last-tradeable-day' : 'LAST_TRADEABLE_DT',
'futures-chain-tickers' : 'FUT_CHAIN',
'futures-chain-last-trade-dates' :'FUT_CHAIN_LAST_TRADE_DATES',
'first-notice-date' : 'FUT_NOTICE_FIRST',
'first-tradeable-day' : 'FUT_FIRST_TRADE_DT',
'cal-non-settle-dates': 'CALENDAR_NON_SETTLEMENT_DATES'
}
    # Depending on whether the ticker field includes specific keywords, apply a particular BBG override (keywords must be all lowercase)
bbg_keyword_dict_override = {
'RELEASE_STAGE_OVERRIDE' : {'A' : ['gdp', 'advance'],
'F' : ['gdp', 'final'],
'P' : ['gdp', 'preliminary']}
}
####### Dukascopy settings
dukascopy_base_url = "https://www.dukascopy.com/datafeed/"
dukascopy_write_temp_tick_disk = False
####### FXCM settings
fxcm_base_url = 'https://tickdata.fxcorporate.com/'
fxcm_write_temp_tick_disk = False
####### Quandl settings
quandl_api_key = key_store("Quandl")
####### Alpha Vantage settings
alpha_vantage_api_key = key_store("AlphaVantage")
####### FXCM API (contact FXCM to get this)
fxcm_api_key = "x"
####### Eikon settings
eikon_api_key = key_store("Eikon")
####### Twitter settings (you need to set these up on Twitter)
TWITTER_APP_KEY = key_store("Twitter App Key")
TWITTER_APP_SECRET = key_store("Twitter App Secret")
TWITTER_OAUTH_TOKEN = key_store("Twitter OAUTH token")
TWITTER_OAUTH_TOKEN_SECRET = key_store("Twitter OAUTH token Secret")
####### FRED (Federal Reserve of St Louis data) settings
fred_api_key = key_store("FRED")
####### FX vol fields
# Default download for FX vol surfaces etc.
# types of quotation on vol surface
# ATM, 25d riskies, 10d riskies, 25d strangles/butterflies, 10d strangles/butterflies
fx_vol_part = ["V", "25R", "10R", "25B", "10B"]
# Deltas quoted, eg 10d and 25d
fx_vol_delta = [10, 25]
# All the tenors on our vol surface
fx_vol_tenor = ["ON", "1W", "2W", "3W", "1M", "2M", "3M", "4M", "6M", "9M", "1Y", "2Y", "3Y", "5Y"]
# Which base depo currencies are available?
base_depos_currencies = ['EUR', 'GBP', 'AUD', 'NZD', 'USD', 'CAD', 'CHF', 'NOK', 'SEK', 'JPY']
# Tenors available for base depos
base_depos_tenor = ["ON", "TN", "SN", "1W", "2W", "3W", "1M", "2M", "3M", "4M", "6M", "9M", "1Y", "2Y", "3Y", "5Y"]
### FX forwards total return index construction
# All the tenors on our forwards
fx_forwards_tenor = ["ON", "TN", "SN", "1W", "2W", "3W", "1M", "2M", "3M", "4M", "6M", "9M", "1Y", "2Y", "3Y", "5Y"]
override_fields = {}
### What data environments are there
default_data_environment = 'backtest'
possible_data_environment = ['backtest', 'prod']
# Overwrite field variables with those listed in DataCred or user provided dictionary override_fields
def __init__(self, override_fields={}):
try:
from findatapy.util.datacred import DataCred
cred_keys = DataCred.__dict__.keys()
for k in DataConstants.__dict__.keys():
if k in cred_keys and '__' not in k:
setattr(DataConstants, k, getattr(DataCred, k))
except:
pass
# Store overrided fields
if override_fields == {}:
override_fields = DataConstants.override_fields
else:
DataConstants.override_fields = override_fields
for k in override_fields.keys():
if '__' not in k:
setattr(DataConstants, k, override_fields[k])
@staticmethod
def reset_api_key(service_name, api_key):
keyring.set_password(service_name, os.getlogin(), api_key)
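# A minimal usage sketch (the key/value below are hypothetical): constructing
# DataConstants with an override dictionary writes those entries back onto the
# class attributes via __init__.
#   constants = DataConstants(override_fields={'db_server': '10.0.0.5'})
#   DataConstants.db_server  # -> '10.0.0.5'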
|
apache-2.0
|
zerkrx/zerkbox
|
lib/youtube_dl/extractor/radiojavan.py
|
43
|
2220
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
str_to_int,
)
class RadioJavanIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?radiojavan\.com/videos/video/(?P<id>[^/]+)/?'
_TEST = {
'url': 'http://www.radiojavan.com/videos/video/chaartaar-ashoobam',
'md5': 'e85208ffa3ca8b83534fca9fe19af95b',
'info_dict': {
'id': 'chaartaar-ashoobam',
'ext': 'mp4',
'title': 'Chaartaar - Ashoobam',
'thumbnail': r're:^https?://.*\.jpe?g$',
'upload_date': '20150215',
'view_count': int,
'like_count': int,
'dislike_count': int,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
formats = [{
'url': 'https://media.rdjavan.com/media/music_video/%s' % video_path,
'format_id': '%sp' % height,
'height': int(height),
} for height, video_path in re.findall(r"RJ\.video(\d+)p\s*=\s*'/?([^']+)'", webpage)]
self._sort_formats(formats)
title = self._og_search_title(webpage)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(self._search_regex(
r'class="date_added">Date added: ([^<]+)<',
webpage, 'upload date', fatal=False))
view_count = str_to_int(self._search_regex(
r'class="views">Plays: ([\d,]+)',
webpage, 'view count', fatal=False))
like_count = str_to_int(self._search_regex(
r'class="rating">([\d,]+) likes',
webpage, 'like count', fatal=False))
dislike_count = str_to_int(self._search_regex(
r'class="rating">([\d,]+) dislikes',
webpage, 'dislike count', fatal=False))
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'upload_date': upload_date,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'formats': formats,
}
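# Note (illustrative): the regex above matches page assignments of the form
# RJ.video<height>p = '<path>' and yields one format entry per height, e.g. a
# 480p match becomes {'format_id': '480p', 'height': 480, 'url': ...}.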
|
gpl-3.0
|
mitchelljkotler/django
|
tests/test_runner/test_debug_sql.py
|
210
|
4048
|
import sys
import unittest
from django.db import connection
from django.test import TestCase
from django.test.runner import DiscoverRunner
from django.utils import six
from django.utils.encoding import force_text
from .models import Person
@unittest.skipUnless(connection.vendor == 'sqlite', 'Only run on sqlite so we can check output SQL.')
class TestDebugSQL(unittest.TestCase):
class PassingTest(TestCase):
def runTest(self):
Person.objects.filter(first_name='pass').count()
class FailingTest(TestCase):
def runTest(self):
Person.objects.filter(first_name='fail').count()
self.fail()
class ErrorTest(TestCase):
def runTest(self):
Person.objects.filter(first_name='error').count()
raise Exception
def _test_output(self, verbosity):
runner = DiscoverRunner(debug_sql=True, verbosity=0)
suite = runner.test_suite()
suite.addTest(self.FailingTest())
suite.addTest(self.ErrorTest())
suite.addTest(self.PassingTest())
old_config = runner.setup_databases()
stream = six.StringIO()
resultclass = runner.get_resultclass()
runner.test_runner(
verbosity=verbosity,
stream=stream,
resultclass=resultclass,
).run(suite)
runner.teardown_databases(old_config)
if six.PY2:
stream.buflist = [force_text(x) for x in stream.buflist]
return stream.getvalue()
def test_output_normal(self):
full_output = self._test_output(1)
for output in self.expected_outputs:
self.assertIn(output, full_output)
for output in self.verbose_expected_outputs:
self.assertNotIn(output, full_output)
def test_output_verbose(self):
full_output = self._test_output(2)
for output in self.expected_outputs:
self.assertIn(output, full_output)
for output in self.verbose_expected_outputs:
self.assertIn(output, full_output)
if six.PY3:
expected_outputs = [
('''QUERY = 'SELECT COUNT(%s) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = %s' '''
'''- PARAMS = ('*', 'error');'''),
('''QUERY = 'SELECT COUNT(%s) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = %s' '''
'''- PARAMS = ('*', 'fail');'''),
]
else:
expected_outputs = [
('''QUERY = u'SELECT COUNT(%s) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = %s' '''
'''- PARAMS = (u'*', u'error');'''),
('''QUERY = u'SELECT COUNT(%s) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = %s' '''
'''- PARAMS = (u'*', u'fail');'''),
]
verbose_expected_outputs = [
# Output format changed in Python 3.5+
x.format('' if sys.version_info < (3, 5) else 'TestDebugSQL.') for x in [
'runTest (test_runner.test_debug_sql.{}FailingTest) ... FAIL',
'runTest (test_runner.test_debug_sql.{}ErrorTest) ... ERROR',
'runTest (test_runner.test_debug_sql.{}PassingTest) ... ok',
]
]
if six.PY3:
verbose_expected_outputs += [
('''QUERY = 'SELECT COUNT(%s) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = %s' '''
'''- PARAMS = ('*', 'pass');'''),
]
else:
verbose_expected_outputs += [
('''QUERY = u'SELECT COUNT(%s) AS "__count" '''
'''FROM "test_runner_person" WHERE '''
'''"test_runner_person"."first_name" = %s' '''
'''- PARAMS = (u'*', u'pass');'''),
]
|
bsd-3-clause
|
cccfran/sympy
|
sympy/physics/unitsystems/systems/natural.py
|
91
|
1785
|
# -*- coding: utf-8 -*-
"""
Natural unit system.
The natural system comes from "setting c = 1, hbar = 1". From the computer
point of view it means that we use velocity and action instead of length and
time. Moreover instead of mass we use energy.
"""
from __future__ import division
from sympy.physics.unitsystems.dimensions import Dimension, DimensionSystem
from sympy.physics.unitsystems.units import Unit, Constant, UnitSystem
from sympy.physics.unitsystems.prefixes import PREFIXES, prefix_unit
# base dimensions
action = Dimension(name="action", symbol="A", length=2, mass=1, time=-1)
energy = Dimension(name="energy", symbol="E", length=2, mass=1, time=-2)
velocity = Dimension(name="velocity", symbol="V", length=1, time=-1)
# derived dimensions
length = Dimension(name="length", symbol="L", length=1)
mass = Dimension(name="mass", symbol="M", mass=1)
time = Dimension(name="time", symbol="T", time=1)
acceleration = Dimension(name="acceleration", length=1, time=-2)
momentum = Dimension(name="momentum", mass=1, length=1, time=-1)
force = Dimension(name="force", symbol="F", mass=1, length=1, time=-2)
power = Dimension(name="power", length=2, mass=1, time=-3)
frequency = Dimension(name="frequency", symbol="f", time=-1)
dims = (length, mass, time, momentum, force, energy, power, frequency)
# dimension system
natural_dim = DimensionSystem(base=(action, energy, velocity), dims=dims,
name="Natural system")
# base units
hbar = Constant(action, factor=1.05457266e-34, abbrev="hbar")
eV = Unit(energy, factor=1.60219e-19, abbrev="eV")
c = Constant(velocity, factor=299792458, abbrev="c")
units = prefix_unit(eV, PREFIXES)
# unit system
natural = UnitSystem(base=(hbar, eV, c), units=units, name="Natural system")
|
bsd-3-clause
|
Deledrius/korman
|
korman/exporter/animation.py
|
1
|
39175
|
# This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
import itertools
import math
import mathutils
from PyHSPlasma import *
import weakref
from . import utils
class AnimationConverter:
def __init__(self, exporter):
self._exporter = weakref.ref(exporter)
self._bl_fps = bpy.context.scene.render.fps
def _convert_frame_time(self, frame_num):
return frame_num / self._bl_fps
def convert_object_animations(self, bo, so):
if not bo.plasma_object.has_animation_data:
return
def fetch_animation_data(id_data):
if id_data is not None:
if id_data.animation_data is not None:
action = id_data.animation_data.action
return action, getattr(action, "fcurves", None)
return None, None
# TODO: At some point, we should consider supporting NLA stuff.
# But for now, this seems sufficient.
obj_action, obj_fcurves = fetch_animation_data(bo)
data_action, data_fcurves = fetch_animation_data(bo.data)
# We're basically just going to throw all the FCurves at the controller converter (read: wall)
# and see what sticks. PlasmaMAX has some nice animation channel stuff that allows for some
# form of separation, but Blender's NLA editor is way confusing and appears to not work with
# things that aren't the typical position, rotation, scale animations.
applicators = []
if isinstance(bo.data, bpy.types.Camera):
applicators.append(self._convert_camera_animation(bo, so, obj_fcurves, data_fcurves))
else:
applicators.append(self._convert_transform_animation(bo.name, obj_fcurves, bo.matrix_basis))
if bo.plasma_modifiers.soundemit.enabled:
applicators.extend(self._convert_sound_volume_animation(bo.name, obj_fcurves, bo.plasma_modifiers.soundemit))
if isinstance(bo.data, bpy.types.Lamp):
lamp = bo.data
applicators.extend(self._convert_lamp_color_animation(bo.name, data_fcurves, lamp))
if isinstance(lamp, bpy.types.SpotLamp):
applicators.extend(self._convert_spot_lamp_animation(bo.name, data_fcurves, lamp))
if isinstance(lamp, bpy.types.PointLamp):
applicators.extend(self._convert_omni_lamp_animation(bo.name, data_fcurves, lamp))
# Check to make sure we have some valid animation applicators before proceeding.
if not any(applicators):
return
# There is a race condition in the client with animation loading. It expects for modifiers
# to be listed on the SceneObject in a specific order. D'OH! So, always use these funcs.
agmod, agmaster = self.get_anigraph_objects(bo, so)
atcanim = self._mgr.find_create_object(plATCAnim, so=so)
# Add the animation data to the ATC
for i in applicators:
if i is not None:
atcanim.addApplicator(i)
agmod.channelName = bo.name
agmaster.addPrivateAnim(atcanim.key)
# This was previously part of the Animation Modifier, however, there can be lots of animations
# Therefore we move it here.
def get_ranges(*args, **kwargs):
index = kwargs.get("index", 0)
for i in args:
if i is not None:
yield i.frame_range[index]
atcanim.name = "(Entire Animation)"
atcanim.start = self._convert_frame_time(min(get_ranges(obj_action, data_action, index=0)))
atcanim.end = self._convert_frame_time(max(get_ranges(obj_action, data_action, index=1)))
# Marker points
if obj_action is not None:
for marker in obj_action.pose_markers:
atcanim.setMarker(marker.name, self._convert_frame_time(marker.frame))
# Fixme? Not sure if we really need to expose this...
atcanim.easeInMin = 1.0
atcanim.easeInMax = 1.0
atcanim.easeInLength = 1.0
atcanim.easeOutMin = 1.0
atcanim.easeOutMax = 1.0
atcanim.easeOutLength = 1.0
def _convert_camera_animation(self, bo, so, obj_fcurves, data_fcurves):
if data_fcurves:
# The hard part about this crap is that FOV animations are not stored in ATC Animations
# instead, FOV animation keyframes are held inside of the camera modifier. Cyan's solution
# in PlasmaMAX appears to be for any xform keyframe, add two messages to the camera modifier
# representing the FOV at that point. Makes more sense to me to use each FOV keyframe instead
fov_fcurve = next((i for i in data_fcurves if i.data_path == "plasma_camera.settings.fov"), None)
if fov_fcurve:
# NOTE: this is another critically important key ordering in the SceneObject modifier
# list. CameraModifier calls into AGMasterMod code that assumes the AGModifier
# is already available. Should probably consider adding some code to libHSPlasma
# to order the SceneObject modifier key vector at some point.
anim_key = self.get_animation_key(bo)
camera = self._mgr.find_create_object(plCameraModifier, so=so)
cam_key = camera.key
aspect, fps = (3.0 / 4.0), self._bl_fps
degrees = math.degrees
fov_fcurve.update()
# Seeing as how we're transforming the data entirely, we'll just use the fcurve itself
# instead of our other animation helpers. But ugh does this mess look like sloppy C.
keyframes = fov_fcurve.keyframe_points
num_keyframes = len(keyframes)
has_fov_anim = bool(num_keyframes)
i = 0
while i < num_keyframes:
this_keyframe = keyframes[i]
next_keyframe = keyframes[0] if i+1 == num_keyframes else keyframes[i+1]
# So remember, these are messages. When we hit a keyframe, we're dispatching a message
# representing the NEXT desired FOV.
this_frame_time = this_keyframe.co[0] / fps
next_frame_num, next_frame_value = next_keyframe.co
next_frame_time = next_frame_num / fps
# This message is held on the camera modifier and sent to the animation... It calls
# back when the animation reaches the keyframe time, causing the FOV message to be sent.
cb_msg = plEventCallbackMsg()
cb_msg.event = kTime
cb_msg.eventTime = this_frame_time
cb_msg.index = i
cb_msg.repeats = -1
cb_msg.addReceiver(cam_key)
anim_msg = plAnimCmdMsg()
anim_msg.animName = "(Entire Animation)"
anim_msg.time = this_frame_time
anim_msg.sender = anim_key
anim_msg.addReceiver(anim_key)
anim_msg.addCallback(cb_msg)
anim_msg.setCmd(plAnimCmdMsg.kAddCallbacks, True)
camera.addMessage(anim_msg, anim_key)
                    # This is the message that actually changes the FOV. Interestingly, it is sent at
# export-time and while playing the game, the camera modifier just steals its
# parameters and passes them to the brain. Can't make this stuff up.
cam_msg = plCameraMsg()
cam_msg.addReceiver(cam_key)
cam_msg.setCmd(plCameraMsg.kAddFOVKeyFrame, True)
cam_config = cam_msg.config
cam_config.accel = next_frame_time # Yassss...
cam_config.fovW = degrees(next_frame_value)
cam_config.fovH = degrees(next_frame_value * aspect)
camera.addFOVInstruction(cam_msg)
i += 1
else:
has_fov_anim = False
else:
has_fov_anim = False
# If we exported any FOV animation at all, then we need to ensure there is an applicator
# returned from here... At bare minimum, we'll need the applicator with an empty
# CompoundController. This should be sufficient to keep CWE from crashing...
applicator = self._convert_transform_animation(bo.name, obj_fcurves, bo.matrix_basis, allow_empty=has_fov_anim)
camera = locals().get("camera", self._mgr.find_create_object(plCameraModifier, so=so))
camera.animated = applicator is not None
return applicator
def _convert_lamp_color_animation(self, name, fcurves, lamp):
if not fcurves:
return None
energy_curve = next((i for i in fcurves if i.data_path == "energy" and i.keyframe_points), None)
color_curves = sorted((i for i in fcurves if i.data_path == "color" and i.keyframe_points), key=lambda x: x.array_index)
        if energy_curve is None and not color_curves:
return None
elif lamp.use_only_shadow:
self._exporter().report.warn("Cannot animate Lamp color because this lamp only casts shadows", indent=3)
return None
elif not lamp.use_specular and not lamp.use_diffuse:
self._exporter().report.warn("Cannot animate Lamp color because neither Diffuse nor Specular are enabled", indent=3)
return None
# OK Specular is easy. We just toss out the color as a point3.
color_keyframes, color_bez = self._process_keyframes(color_curves, convert=lambda x: x * -1.0 if lamp.use_negative else None)
if color_keyframes and lamp.use_specular:
channel = plPointControllerChannel()
channel.controller = self._make_point3_controller(color_curves, color_keyframes, color_bez, lamp.color)
applicator = plLightSpecularApplicator()
applicator.channelName = name
applicator.channel = channel
yield applicator
# Hey, look, it's a third way to process FCurves. YAY!
def convert_diffuse_animation(color, energy):
if lamp.use_negative:
return { key: (0.0 - value) * energy[0] for key, value in color.items() }
else:
return { key: value * energy[0] for key, value in color.items() }
diffuse_defaults = { "color": lamp.color, "energy": lamp.energy }
diffuse_fcurves = color_curves + [energy_curve,]
diffuse_keyframes = self._process_fcurves(diffuse_fcurves, convert_diffuse_animation, diffuse_defaults)
if not diffuse_keyframes:
return None
# Whew.
channel = plPointControllerChannel()
channel.controller = self._make_point3_controller([], diffuse_keyframes, False, [])
applicator = plLightDiffuseApplicator()
applicator.channelName = name
applicator.channel = channel
yield applicator
def _convert_omni_lamp_animation(self, name, fcurves, lamp):
if not fcurves:
return None
energy_fcurve = next((i for i in fcurves if i.data_path == "energy"), None)
distance_fcurve = next((i for i in fcurves if i.data_path == "distance"), None)
if energy_fcurve is None and distance_fcurve is None:
return None
light_converter = self._exporter().light
intensity, atten_end = light_converter.convert_attenuation(lamp)
# All types allow animating cutoff
if distance_fcurve is not None:
channel = plScalarControllerChannel()
channel.controller = self.make_scalar_leaf_controller(distance_fcurve,
lambda x: x if lamp.use_sphere else x * 2)
applicator = plOmniCutoffApplicator()
applicator.channelName = name
applicator.channel = channel
yield applicator
falloff = lamp.falloff_type
if falloff == "CONSTANT":
if energy_fcurve is not None:
                self._exporter().report.warn("Constant attenuation cannot be animated in Plasma", indent=3)
elif falloff == "INVERSE_LINEAR":
def convert_linear_atten(distance, energy):
intens = abs(energy[0])
atten_end = distance[0] if lamp.use_sphere else distance[0] * 2
return light_converter.convert_attenuation_linear(intens, atten_end)
keyframes = self._process_fcurves([distance_fcurve, energy_fcurve], convert_linear_atten,
{"distance": lamp.distance, "energy": lamp.energy})
if keyframes:
channel = plScalarControllerChannel()
channel.controller = self._make_scalar_leaf_controller(keyframes, False)
applicator = plOmniApplicator()
applicator.channelName = name
applicator.channel = channel
yield applicator
elif falloff == "INVERSE_SQUARE":
if self._mgr.getVer() >= pvMoul:
def convert_quadratic_atten(distance, energy):
intens = abs(energy[0])
atten_end = distance[0] if lamp.use_sphere else distance[0] * 2
return light_converter.convert_attenuation_quadratic(intens, atten_end)
keyframes = self._process_fcurves([distance_fcurve, energy_fcurve], convert_quadratic_atten,
{"distance": lamp.distance, "energy": lamp.energy})
if keyframes:
channel = plScalarControllerChannel()
channel.controller = self._make_scalar_leaf_controller(keyframes, False)
applicator = plOmniSqApplicator()
applicator.channelName = name
applicator.channel = channel
yield applicator
else:
self._exporter().report.port("Lamp Falloff '{}' animations only partially supported for this version of Plasma", falloff, indent=3)
else:
self._exporter().report.warn("Lamp Falloff '{}' animations are not supported".format(falloff), ident=3)
def _convert_sound_volume_animation(self, name, fcurves, soundemit):
if not fcurves:
return None
def convert_volume(value):
if value == 0.0:
return 0.0
else:
return math.log10(value) * 20.0
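        # convert_volume maps Blender's linear volume to what appears to be a decibel scale:
        # 1.0 -> 0.0 dB, 0.5 -> roughly -6.02 dB, and 0.0 is passed through as silence rather
        # than negative infinity.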
for sound in soundemit.sounds:
path = "{}.volume".format(sound.path_from_id())
fcurve = next((i for i in fcurves if i.data_path == path and i.keyframe_points), None)
if fcurve is None:
continue
for i in soundemit.get_sound_indices(sound=sound):
applicator = plSoundVolumeApplicator()
applicator.channelName = name
applicator.index = i
# libHSPlasma assumes a channel is not shared among applicators...
# so yes, we must convert the same animation data again and again.
channel = plScalarControllerChannel()
channel.controller = self.make_scalar_leaf_controller(fcurve, convert=convert_volume)
applicator.channel = channel
yield applicator
def _convert_spot_lamp_animation(self, name, fcurves, lamp):
if not fcurves:
return None
blend_fcurve = next((i for i in fcurves if i.data_path == "spot_blend"), None)
size_fcurve = next((i for i in fcurves if i.data_path == "spot_size"), None)
if blend_fcurve is None and size_fcurve is None:
return None
# Spot Outer is just the size keyframes...
if size_fcurve is not None:
channel = plScalarControllerChannel()
channel.controller = self.make_scalar_leaf_controller(size_fcurve, lambda x: math.degrees(x))
applicator = plSpotOuterApplicator()
applicator.channelName = name
applicator.channel = channel
yield applicator
# Spot inner must be calculated...
def convert_spot_inner(spot_blend, spot_size):
            # Clamp the blend to a small minimum so the inner cone never collapses onto the outer cone.
            blend = max(0.001, spot_blend[0])
size = spot_size[0]
value = size - (blend * size)
return math.degrees(value)
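        # Rough example of convert_spot_inner (values are illustrative): with spot_blend 0.15 and
        # a spot_size of 45 degrees (supplied in radians), the inner cone works out to
        # 45 - 0.15 * 45 = 38.25 degrees.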
defaults = { "spot_blend": lamp.spot_blend, "spot_size": lamp.spot_size }
keyframes = self._process_fcurves([blend_fcurve, size_fcurve], convert_spot_inner, defaults)
if keyframes:
channel = plScalarControllerChannel()
channel.controller = self._make_scalar_leaf_controller(keyframes, False)
applicator = plSpotInnerApplicator()
applicator.channelName = name
applicator.channel = channel
yield applicator
def _convert_transform_animation(self, name, fcurves, xform, allow_empty=False):
tm = self.convert_transform_controller(fcurves, xform, allow_empty)
if tm is None and not allow_empty:
return None
applicator = plMatrixChannelApplicator()
applicator.enabled = True
applicator.channelName = name
channel = plMatrixControllerChannel()
channel.controller = tm
applicator.channel = channel
channel.affine = utils.affine_parts(xform)
return applicator
def convert_transform_controller(self, fcurves, xform, allow_empty=False):
if not fcurves and not allow_empty:
return None
pos = self.make_pos_controller(fcurves, xform)
rot = self.make_rot_controller(fcurves, xform)
scale = self.make_scale_controller(fcurves, xform)
if pos is None and rot is None and scale is None:
if not allow_empty:
return None
tm = plCompoundController()
tm.X = pos
tm.Y = rot
tm.Z = scale
return tm
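    # Plasma's transform compound controller reuses the X/Y/Z sub-controller slots for position,
    # rotation, and scale respectively, which is why the assignments above look a little unusual.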
def get_anigraph_keys(self, bo=None, so=None):
mod = self._mgr.find_create_key(plAGModifier, so=so, bl=bo)
master = self._mgr.find_create_key(plAGMasterMod, so=so, bl=bo)
return mod, master
def get_anigraph_objects(self, bo=None, so=None):
mod = self._mgr.find_create_object(plAGModifier, so=so, bl=bo)
master = self._mgr.find_create_object(plAGMasterMod, so=so, bl=bo)
return mod, master
def get_animation_key(self, bo, so=None):
# we might be controlling more than one animation. isn't that cute?
# https://www.youtube.com/watch?v=hspNaoxzNbs
# (but obviously this is not wrong...)
group_mod = bo.plasma_modifiers.animation_group
if group_mod.enabled:
return self._mgr.find_create_key(plMsgForwarder, bl=bo, so=so, name=group_mod.key_name)
else:
return self.get_anigraph_keys(bo, so)[1]
def make_matrix44_controller(self, fcurves, pos_path, scale_path, pos_default, scale_default):
def convert_matrix_keyframe(**kwargs):
pos = kwargs.get(pos_path)
scale = kwargs.get(scale_path)
# Since only some position curves may be supplied, construct dict with all positions
allpos = dict(enumerate(pos_default))
allscale = dict(enumerate(scale_default))
allpos.update(pos)
allscale.update(scale)
matrix = hsMatrix44()
# Note: scale and pos are dicts, so we can't unpack
matrix.setTranslate(hsVector3(allpos[0], allpos[1], allpos[2]))
matrix.setScale(hsVector3(allscale[0], allscale[1], allscale[2]))
return matrix
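        # A sketch of what the inner converter sees (the paths are whatever pos_path/scale_path
        # name): kwargs arrive keyed by data path, each value a dict of {array_index: value}.
        # Components that are not keyed on a frame are filled from the FCurve or from
        # pos_default/scale_default by _process_fcurves, with the dicts above as a final safety net.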
fcurves = [i for i in fcurves if i.data_path == pos_path or i.data_path == scale_path]
if not fcurves:
return None
default_values = { pos_path: pos_default, scale_path: scale_default }
keyframes = self._process_fcurves(fcurves, convert_matrix_keyframe, default_values)
if not keyframes:
return None
# Now we make the controller
return self._make_matrix44_controller(keyframes)
def make_pos_controller(self, fcurves, default_xform, convert=None):
pos_curves = [i for i in fcurves if i.data_path == "location" and i.keyframe_points]
keyframes, bez_chans = self._process_keyframes(pos_curves, convert)
if not keyframes:
return None
# At one point, I had some... insanity here to try to crush bezier channels and hand off to
# blah blah blah... As it turns out, point3 keyframe's tangents are vector3s :)
ctrl = self._make_point3_controller(pos_curves, keyframes, bez_chans, default_xform.to_translation())
return ctrl
def make_rot_controller(self, fcurves, default_xform, convert=None):
# TODO: support rotation_quaternion
rot_curves = [i for i in fcurves if i.data_path == "rotation_euler" and i.keyframe_points]
keyframes, bez_chans = self._process_keyframes(rot_curves, convert=None)
if not keyframes:
return None
# Ugh. Unfortunately, it appears Blender's default interpolation is bezier. So who knows if
# many users will actually see the benefit here? Makes me sad.
if bez_chans:
ctrl = self._make_scalar_compound_controller(rot_curves, keyframes, bez_chans, default_xform.to_euler())
else:
ctrl = self._make_quat_controller(rot_curves, keyframes, default_xform.to_euler())
return ctrl
def make_scale_controller(self, fcurves, default_xform, convert=None):
scale_curves = [i for i in fcurves if i.data_path == "scale" and i.keyframe_points]
keyframes, bez_chans = self._process_keyframes(scale_curves, convert)
if not keyframes:
return None
# There is no such thing as a compound scale controller... in Plasma, anyway.
ctrl = self._make_scale_value_controller(scale_curves, keyframes, bez_chans, default_xform)
return ctrl
def make_scalar_leaf_controller(self, fcurve, convert=None):
keyframes, bezier = self._process_fcurve(fcurve, convert)
if not keyframes:
return None
ctrl = self._make_scalar_leaf_controller(keyframes, bezier)
return ctrl
def _make_matrix44_controller(self, keyframes):
ctrl = plLeafController()
keyframe_type = hsKeyFrame.kMatrix44KeyFrame
exported_frames = []
for keyframe in keyframes:
exported = hsMatrix44Key()
exported.frame = keyframe.frame_num
exported.frameTime = keyframe.frame_time
exported.type = keyframe_type
exported.value = keyframe.value
exported_frames.append(exported)
ctrl.keys = (exported_frames, keyframe_type)
return ctrl
def _make_point3_controller(self, fcurves, keyframes, bezier, default_xform):
ctrl = plLeafController()
subctrls = ("X", "Y", "Z")
keyframe_type = hsKeyFrame.kBezPoint3KeyFrame if bezier else hsKeyFrame.kPoint3KeyFrame
exported_frames = []
ctrl_fcurves = { i.array_index: i for i in fcurves }
for keyframe in keyframes:
exported = hsPoint3Key()
exported.frame = keyframe.frame_num
exported.frameTime = keyframe.frame_time
exported.type = keyframe_type
in_tan = hsVector3()
out_tan = hsVector3()
value = hsVector3()
for i, subctrl in enumerate(subctrls):
fval = keyframe.values.get(i, None)
if fval is not None:
setattr(value, subctrl, fval)
setattr(in_tan, subctrl, keyframe.in_tans[i])
setattr(out_tan, subctrl, keyframe.out_tans[i])
else:
try:
setattr(value, subctrl, ctrl_fcurves[i].evaluate(keyframe.frame_num_blender))
except KeyError:
setattr(value, subctrl, default_xform[i])
setattr(in_tan, subctrl, 0.0)
setattr(out_tan, subctrl, 0.0)
exported.inTan = in_tan
exported.outTan = out_tan
exported.value = value
exported_frames.append(exported)
ctrl.keys = (exported_frames, keyframe_type)
return ctrl
def _make_quat_controller(self, fcurves, keyframes, default_xform):
ctrl = plLeafController()
keyframe_type = hsKeyFrame.kQuatKeyFrame
exported_frames = []
ctrl_fcurves = { i.array_index: i for i in fcurves }
for keyframe in keyframes:
exported = hsQuatKey()
exported.frame = keyframe.frame_num
exported.frameTime = keyframe.frame_time
exported.type = keyframe_type
# NOTE: quat keyframes don't do bezier nonsense
value = mathutils.Euler()
for i in range(3):
fval = keyframe.values.get(i, None)
if fval is not None:
value[i] = fval
else:
try:
value[i] = ctrl_fcurves[i].evaluate(keyframe.frame_num_blender)
except KeyError:
value[i] = default_xform[i]
quat = value.to_quaternion()
exported.value = utils.quaternion(quat)
exported_frames.append(exported)
ctrl.keys = (exported_frames, keyframe_type)
return ctrl
def _make_scalar_compound_controller(self, fcurves, keyframes, bez_chans, default_xform):
ctrl = plCompoundController()
subctrls = ("X", "Y", "Z")
for i in subctrls:
setattr(ctrl, i, plLeafController())
exported_frames = ([], [], [])
ctrl_fcurves = { i.array_index: i for i in fcurves }
for keyframe in keyframes:
for i, subctrl in enumerate(subctrls):
fval = keyframe.values.get(i, None)
if fval is not None:
keyframe_type = hsKeyFrame.kBezScalarKeyFrame if i in bez_chans else hsKeyFrame.kScalarKeyFrame
exported = hsScalarKey()
exported.frame = keyframe.frame_num
exported.frameTime = keyframe.frame_time
exported.inTan = keyframe.in_tans[i]
exported.outTan = keyframe.out_tans[i]
exported.type = keyframe_type
exported.value = fval
exported_frames[i].append(exported)
for i, subctrl in enumerate(subctrls):
my_keyframes = exported_frames[i]
# ensure this controller has at least ONE keyframe
if not my_keyframes:
hack_frame = hsScalarKey()
hack_frame.frame = 0
hack_frame.frameTime = 0.0
hack_frame.type = hsKeyFrame.kScalarKeyFrame
hack_frame.value = default_xform[i]
my_keyframes.append(hack_frame)
getattr(ctrl, subctrl).keys = (my_keyframes, my_keyframes[0].type)
return ctrl
def _make_scalar_leaf_controller(self, keyframes, bezier):
ctrl = plLeafController()
keyframe_type = hsKeyFrame.kBezScalarKeyFrame if bezier else hsKeyFrame.kScalarKeyFrame
exported_frames = []
for keyframe in keyframes:
exported = hsScalarKey()
exported.frame = keyframe.frame_num
exported.frameTime = keyframe.frame_time
exported.inTan = keyframe.in_tan
exported.outTan = keyframe.out_tan
exported.type = keyframe_type
exported.value = keyframe.value
exported_frames.append(exported)
ctrl.keys = (exported_frames, keyframe_type)
return ctrl
def _make_scale_value_controller(self, fcurves, keyframes, bez_chans, default_xform):
subctrls = ("X", "Y", "Z")
keyframe_type = hsKeyFrame.kBezScaleKeyFrame if bez_chans else hsKeyFrame.kScaleKeyFrame
exported_frames = []
ctrl_fcurves = { i.array_index: i for i in fcurves }
default_scale = default_xform.to_scale()
unit_quat = default_xform.to_quaternion()
unit_quat.normalize()
unit_quat = utils.quaternion(unit_quat)
for keyframe in keyframes:
exported = hsScaleKey()
exported.frame = keyframe.frame_num
exported.frameTime = keyframe.frame_time
exported.type = keyframe_type
in_tan = hsVector3()
out_tan = hsVector3()
value = hsVector3()
for i, subctrl in enumerate(subctrls):
fval = keyframe.values.get(i, None)
if fval is not None:
setattr(value, subctrl, fval)
setattr(in_tan, subctrl, keyframe.in_tans[i])
setattr(out_tan, subctrl, keyframe.out_tans[i])
else:
try:
setattr(value, subctrl, ctrl_fcurves[i].evaluate(keyframe.frame_num_blender))
except KeyError:
setattr(value, subctrl, default_scale[i])
setattr(in_tan, subctrl, 0.0)
setattr(out_tan, subctrl, 0.0)
exported.inTan = in_tan
exported.outTan = out_tan
exported.value = (value, unit_quat)
exported_frames.append(exported)
ctrl = plLeafController()
ctrl.keys = (exported_frames, keyframe_type)
return ctrl
def _process_fcurve(self, fcurve, convert=None):
"""Like _process_keyframes, but for one fcurve"""
keyframe_data = type("KeyFrameData", (), {})
fps = self._bl_fps
pi = math.pi
keyframes = {}
bezier = False
fcurve.update()
for fkey in fcurve.keyframe_points:
keyframe = keyframe_data()
frame_num, value = fkey.co
if fps == 30.0:
keyframe.frame_num = int(frame_num)
else:
keyframe.frame_num = int(frame_num * (30.0 / fps))
keyframe.frame_time = frame_num / fps
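            # For Bezier keys, each handle is collapsed into a scalar tangent: the slope between
            # the key and its handle, with additional 1/fps and 1/(2*pi) scaling that appears to
            # match the tangent units Plasma expects.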
if fkey.interpolation == "BEZIER":
keyframe.in_tan = -(value - fkey.handle_left[1]) / (frame_num - fkey.handle_left[0]) / fps / (2 * pi)
keyframe.out_tan = (value - fkey.handle_right[1]) / (frame_num - fkey.handle_right[0]) / fps / (2 * pi)
bezier = True
else:
keyframe.in_tan = 0.0
keyframe.out_tan = 0.0
keyframe.value = value if convert is None else convert(value)
keyframes[frame_num] = keyframe
final_keyframes = [keyframes[i] for i in sorted(keyframes)]
return (final_keyframes, bezier)
def _process_fcurves(self, fcurves, convert, defaults=None):
"""Processes FCurves of different data sets and converts them into a single list of keyframes.
This should be used when multiple Blender fields map to a single Plasma option."""
class KeyFrameData:
def __init__(self):
self.values = {}
fps = self._bl_fps
pi = math.pi
        # NOTE: This code assumes that any multichannel FCurves will have all of their channels
        # represented. This seems fairly safe given my experiments with Lamp colors...
grouped_fcurves = {}
for fcurve in fcurves:
if fcurve is None:
continue
fcurve.update()
if fcurve.data_path in grouped_fcurves:
grouped_fcurves[fcurve.data_path][fcurve.array_index] = fcurve
else:
grouped_fcurves[fcurve.data_path] = { fcurve.array_index: fcurve }
# Default values for channels that are not animated
for key, value in defaults.items():
if key not in grouped_fcurves:
if hasattr(value, "__len__"):
grouped_fcurves[key] = value
else:
grouped_fcurves[key] = [value,]
# Assemble a dict { PlasmaFrameNum: { FCurveDataPath: KeyFrame } }
keyframe_points = {}
for fcurve in fcurves:
if fcurve is None:
continue
for keyframe in fcurve.keyframe_points:
frame_num_blender, value = keyframe.co
frame_num = int(frame_num_blender * (30.0 / fps))
                # This is a temporary keyframe, so we're not going to worry about converting everything.
                # Only the frame number is converted to Plasma time so we can merge any rounded dupes.
entry, data = keyframe_points.get(frame_num), None
if entry is None:
entry = {}
keyframe_points[frame_num] = entry
else:
data = entry.get(fcurve.data_path)
if data is None:
data = KeyFrameData()
data.frame_num = frame_num
data.frame_num_blender = frame_num_blender
entry[fcurve.data_path] = data
data.values[fcurve.array_index] = value
# Now, we loop through our assembled keyframes and interpolate any missing data using the FCurves
fcurve_chans = { key: len(value) for key, value in grouped_fcurves.items() }
expected_values = sum(fcurve_chans.values())
all_chans = frozenset(grouped_fcurves.keys())
# We will also do the final convert here as well...
final_keyframes = []
for frame_num in sorted(keyframe_points.copy().keys()):
keyframes = keyframe_points[frame_num]
frame_num_blender = next(iter(keyframes.values())).frame_num_blender
# If any data_paths are missing, init a dummy
missing_channels = all_chans - frozenset(keyframes.keys())
for chan in missing_channels:
dummy = KeyFrameData()
dummy.frame_num = frame_num
dummy.frame_num_blender = frame_num_blender
keyframes[chan] = dummy
# Ensure all values are filled out.
num_values = sum(map(len, (i.values for i in keyframes.values())))
if num_values != expected_values:
for chan, sorted_fcurves in grouped_fcurves.items():
chan_keyframes = keyframes[chan]
chan_values = fcurve_chans[chan]
if len(chan_keyframes.values) == chan_values:
continue
for i in range(chan_values):
if i not in chan_keyframes.values:
try:
fcurve = grouped_fcurves[chan][i]
                            except (KeyError, IndexError):
chan_keyframes.values[i] = defaults[chan]
else:
if isinstance(fcurve, bpy.types.FCurve):
chan_keyframes.values[i] = fcurve.evaluate(chan_keyframes.frame_num_blender)
else:
# it's actually a default value!
chan_keyframes.values[i] = fcurve
# All values are calculated! Now we convert the disparate key data into a single keyframe.
kwargs = { data_path: keyframe.values for data_path, keyframe in keyframes.items() }
final_keyframe = KeyFrameData()
final_keyframe.frame_num = frame_num
final_keyframe.frame_num_blender = frame_num_blender
final_keyframe.frame_time = frame_num / fps
value = convert(**kwargs)
if hasattr(value, "__len__"):
final_keyframe.in_tans = [0.0] * len(value)
final_keyframe.out_tans = [0.0] * len(value)
final_keyframe.values = value
else:
final_keyframe.in_tan = 0.0
final_keyframe.out_tan = 0.0
final_keyframe.value = value
final_keyframes.append(final_keyframe)
return final_keyframes
def _process_keyframes(self, fcurves, convert=None):
"""Groups all FCurves for the same frame together"""
keyframe_data = type("KeyFrameData", (), {})
fps = self._bl_fps
pi = math.pi
keyframes = {}
bez_chans = set()
for fcurve in fcurves:
fcurve.update()
for fkey in fcurve.keyframe_points:
frame_num, value = fkey.co
keyframe = keyframes.get(frame_num, None)
if keyframe is None:
keyframe = keyframe_data()
if fps == 30.0:
# hope you don't have a frame 29.9 and frame 30.0...
keyframe.frame_num = int(frame_num)
else:
keyframe.frame_num = int(frame_num * (30.0 / fps))
keyframe.frame_num_blender = frame_num
keyframe.frame_time = frame_num / fps
keyframe.in_tans = {}
keyframe.out_tans = {}
keyframe.values = {}
keyframes[frame_num] = keyframe
idx = fcurve.array_index
keyframe.values[idx] = value if convert is None else convert(value)
# Calculate the bezier interpolation nonsense
if fkey.interpolation == "BEZIER":
keyframe.in_tans[idx] = -(value - fkey.handle_left[1]) / (frame_num - fkey.handle_left[0]) / fps / (2 * pi)
keyframe.out_tans[idx] = (value - fkey.handle_right[1]) / (frame_num - fkey.handle_right[0]) / fps / (2 * pi)
bez_chans.add(idx)
else:
keyframe.in_tans[idx] = 0.0
keyframe.out_tans[idx] = 0.0
# Return the keyframes in a sequence sorted by frame number
final_keyframes = [keyframes[i] for i in sorted(keyframes)]
return (final_keyframes, bez_chans)
@property
def _mgr(self):
return self._exporter().mgr
|
gpl-3.0
|
fbossy/SickRage
|
lib/chardet/gb2312prober.py
|
53
|
1690
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312_SM_MODEL
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
super(GB2312Prober, self).__init__()
self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
self.distribution_analyzer = GB2312DistributionAnalysis()
self.reset()
@property
def charset_name(self):
return "GB2312"
|
gpl-3.0
|
schleichdi2/OpenNfr_E2_Gui-6.0
|
lib/python/Plugins/Extensions/Infopanel/bootvideo.py
|
2
|
8876
|
from boxbranding import getMachineBrand, getMachineName
from os import path
from Components.Console import Console
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.ConfigList import ConfigListScreen, ConfigList
from Components.config import ConfigSubsection, ConfigYesNo, config, ConfigSelection, ConfigText, ConfigNumber, ConfigSet, ConfigLocations, NoSave, ConfigClock, ConfigInteger, ConfigBoolean, ConfigPassword, ConfigIP, ConfigSlider, ConfigSelectionNumber, getConfigListEntry, KEY_LEFT, KEY_RIGHT, configfile
from Components.Sources.StaticText import StaticText
from Plugins.Extensions.Infopanel.outofflash import MoveVideos_int, MoveVideos
from Components.MenuList import MenuList
from enigma import *
from Tools.LoadPixmap import LoadPixmap
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from glob import glob
import os
config.bootvideo = ConfigSubsection()
config.bootvideo.booting = ConfigText(default = "no Bootvideo")
class PanelList(MenuList):
if (getDesktop(0).size().width() == 1920):
def __init__(self, list, font0 = 32, font1 = 24, itemHeight = 50, enableWrapAround = True):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
self.l.setFont(0, gFont("Regular", font0))
self.l.setFont(1, gFont("Regular", font1))
self.l.setItemHeight(itemHeight)
else:
def __init__(self, list, font0 = 24, font1 = 16, itemHeight = 50, enableWrapAround = True):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
self.l.setFont(0, gFont("Regular", font0))
self.l.setFont(1, gFont("Regular", font1))
self.l.setItemHeight(itemHeight)
def MenuEntryItem(entry):
if (getDesktop(0).size().width() == 1920):
res = [entry]
res.append(MultiContentEntryPixmapAlphaTest(pos=(0, 5), size=(100, 40), png=entry[0])) # png vorn
res.append(MultiContentEntryText(pos=(110, 5), size=(690, 40), font=0, text=entry[1])) # menupunkt
return res
else:
res = [entry]
res.append(MultiContentEntryPixmapAlphaTest(pos=(0, 5), size=(100, 40), png=entry[0])) # png vorn
res.append(MultiContentEntryText(pos=(110, 10), size=(440, 40), font=0, text=entry[1])) # menupunkt
return res
def InfoEntryComponent(file):
png = LoadPixmap("/usr/lib/enigma2/python/Plugins/Extensions/Infopanel/pics/" + file + ".png")
    if png is None:
png = LoadPixmap("/usr/lib/enigma2/python/Plugins/Extensions/Infopanel/pics/default.png")
res = (png)
return res
class BootvideoSetupScreen(Screen):
skin = """<screen name="BootvideoSetupScreen" position="center,center" size="950,520" title="BootvideoSetupScreen">
<ePixmap pixmap="/usr/lib/enigma2/python/Plugins/Extensions/Infopanel/pics/redlogo.png" position="0,380" size="950,84" alphatest="on" zPosition="1" />
<ePixmap pixmap="/usr/lib/enigma2/python/Plugins/Extensions/Infopanel/pics/alliance.png" position="670,255" size="100,67" alphatest="on" zPosition="1" />
<ePixmap pixmap="/usr/lib/enigma2/python/Plugins/Extensions/Infopanel/pics/opennfr_info.png" position="510,11" size="550,354" alphatest="on" zPosition="1" />
<widget source="global.CurrentTime" render="Label" position="450, 340" size="500,24" font="Regular;20" foregroundColor="white" halign="right" transparent="1" zPosition="5">
<convert type="ClockToText">>Format%H:%M:%S</convert>
</widget>
<eLabel backgroundColor="un56c856" position="0,330" size="950,1" zPosition="0" />
<widget name="Mlist" position="10,10" size="480,300" zPosition="1" scrollbarMode="showOnDemand" backgroundColor="un251e1f20" transparent="1" />
<widget name="label1" position="10,340" size="490,25" font="Regular;20" transparent="1" foregroundColor="#f2e000" halign="left" />
<ePixmap pixmap="skin_default/buttons/red.png" position="10,480" size="30,30" alphatest="blend" />
<ePixmap pixmap="skin_default/buttons/green.png" position="190,480" size="30,30" alphatest="blend" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="370,480" size="30,30" alphatest="blend" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="550,480" size="30,30" alphatest="blend" />
<ePixmap pixmap="skin_default/buttons/key_info.png" position="735,480" size="30,30" alphatest="blend" />
<widget source="key_red" render="Label" position="45,482" size="140,24" zPosition="1" font="Regular;20" halign="left" backgroundColor="black" transparent="1" />
<widget source="key_green" render="Label" position="225,483" size="140,24" zPosition="1" font="Regular;20" halign="left" backgroundColor="black" transparent="1" />
<widget source="key_yellow" render="Label" position="405,483" size="140,24" zPosition="1" font="Regular;20" halign="left" backgroundColor="black" transparent="1" />
<widget source="key_blue" render="Label" position="590,483" size="140,24" zPosition="1" font="Regular;20" halign="left" backgroundColor="black" transparent="1" />
<widget source="key_info" render="Label" position="775,483" size="140,24" zPosition="1" font="Regular;20" halign="left" backgroundColor="black" transparent="1" />
<widget source="session.VideoPicture" render="Pig" position="510,11" size="420,236" backgroundColor="transparent" zPosition="2" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
self.session = session
Screen.setTitle(self, _("BootvideoSetupScreen"))
self.Console = Console()
self.onShown.append(self.setWindowTitle)
aktbootvideo = config.bootvideo.booting.value
self.oldbmcService = self.session.nav.getCurrentlyPlayingServiceReference()
self["label1"] = Label(_("now Using Bootvideo: %s") % aktbootvideo)
self["key_red"] = StaticText(_("Exit"))
self["key_green"] = StaticText(_("Save"))
self["key_blue"] = StaticText(_("Back2Flash"))
self["key_yellow"] = StaticText(_("Outsourcing"))
self["key_info"] = StaticText(_("preview"))
vpath = "/usr/share/enigma2/bootvideos/"
if not os.path.exists(vpath):
bootvideo = []
else:
uvideo=[]
uvideo = os.listdir(vpath)
bootvideo = []
for xvideo in uvideo:
if xvideo.endswith(".mp4"):
bootvideo.append(xvideo)
elif xvideo.endswith(".mkv"):
bootvideo.append(xvideo)
elif xvideo.endswith(".mpeg"):
bootvideo.append(xvideo)
self.list = []
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ColorActions", "MenuActions", "EPGSelectActions"],
{
"cancel": self.Exit,
"exit": self.Exit,
"red": self.Exit,
"ok": self.ok,
"green": self.ok,
"blue": self.KeyBlue,
"yellow": self.KeyYellow,
"info": self.KeyInfo,
}, 1)
self.Mlist = []
self.Mlist.append(MenuEntryItem((InfoEntryComponent('no Bootvideo'), _("no Bootvideo"), 'nobootvideo')))
for video in bootvideo:
self.Mlist.append(MenuEntryItem((InfoEntryComponent('%s' % video), _('%s' % video), '%s' % video)))
self.onChangedEntry = []
if (getDesktop(0).size().width() == 1920):
self["Mlist"] = PanelList([], font0=36, font1=28, itemHeight=50)
else:
self["Mlist"] = PanelList([])
self["Mlist"].l.setList(self.Mlist)
self["Mlist"].onSelectionChanged.append(self.selectionChanged)
def KeyInfo(self):
self.session.nav.stopService()
menu = self['Mlist'].getCurrent()[2]
menu1 = list(menu)[7]
os.system('gst-launch-1.0 playbin uri=file:///usr/share/enigma2/bootvideos/%s' % menu1)
self.session.nav.playService(self.oldbmcService)
def KeyYellow(self):
self.session.open(MoveVideos)
def KeyBlue(self):
self.session.open(MoveVideos_int)
def setWindowTitle(self):
self.setTitle('%s' % (_('Bootvideo Setup')))
def getCurrentEntry(self):
if self['Mlist'].l.getCurrentSelection():
selection = self['Mlist'].l.getCurrentSelection()[0]
if (selection[0] is not None):
return selection[0]
def selectionChanged(self):
item = self.getCurrentEntry()
def Exit(self):
self.close()
def ok(self):
menu = self['Mlist'].getCurrent()[2]
menu1 = list(menu)[7]
config.bootvideo.booting.value = menu1
config.bootvideo.booting.save()
configfile.save()
self.close()
|
gpl-2.0
|
i386x/doit
|
tests/test_support/test_app/test_config.py
|
1
|
21039
|
# -*- coding: utf-8 -*-
#! \file ./tests/test_support/test_app/test_config.py
#! \author Jiří Kučera, <[email protected]>
#! \stamp 2016-12-13 15:57:50 (UTC+01:00, DST+00:00)
#! \project DoIt!: Tools and Libraries for Building DSLs
#! \license MIT
#! \version 0.0.0
#! \fdesc @pyfile.docstr
#
"""\
Configuration files maintaining tests.\
"""
__license__ = """\
Copyright (c) 2014 - 2017 Jiří Kučera.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.\
"""
import os
import unittest
from ...common import OPEN_FAIL, DataBuffer, make_rwopen, OpenContext, \
ModuleContext
from doit.support.app.application import EXIT_SUCCESS, EXIT_FAILURE
from doit.support.app.config import \
NL, COMM, ITEM, \
load_config, \
set_item, get_item, del_item, \
merge_items, config_to_kvmap, \
save_config, \
set_config_option, unset_config_option
cfgA = """\
pgen.quite = 1
# Default format directory:
pgen.fmtdir = ../../fmt
ignored line
= another ignored line
test = 0
path =
"""
cfgA_d = [
(ITEM, 'pgen.quite', "1"),
(NL, ""),
(COMM, "# Default format directory:"),
(ITEM, 'pgen.fmtdir', "../../fmt"),
(NL, ""),
(COMM, "ignored line"),
(COMM, "= another ignored line"),
(ITEM, 'test', "0"),
(ITEM, 'path', "")
]
cfgA_m = {
'pgen.quite': 0,
'pgen.fmtdir': 3,
'test': 7,
'path': 8
}
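# cfgA_d is the parsed form of cfgA: one (kind, ...) tuple per line, with unparseable lines kept
# as comments. cfgA_m then maps each item key to its index in cfgA_d, e.g. 'test' -> 7 points at
# (ITEM, 'test', "0").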
class OsModuleMock(ModuleContext):
__slots__ = [
'__old_exists', '__old_isfile'
]
def __init__(self, env):
ModuleContext.__init__(self, env)
self.save()
#-def
def save(self):
self.__old_exists = os.path.exists
self.__old_isfile = os.path.isfile
#-def
def replace(self, env):
os.path.exists = (lambda p: env[0])
os.path.isfile = (lambda p: env[1])
#-def
def restore(self):
os.path.exists = self.__old_exists
os.path.isfile = self.__old_isfile
#-def
#-class
class ConfigCommandMock(object):
__slots__ = [ 'stream', 'name' ]
def __init__(self, name):
self.stream = ""
self.name = name
#-def
def wout(self, s):
self.stream += s
#-def
werr = wout
def get_name(self):
return self.name
#-def
#-class
class TestLoadConfigCase(unittest.TestCase):
def test_load_config(self):
with OpenContext(0, cfgA, False):
d, m = load_config("f")
self.assertEqual(d, cfgA_d)
self.assertEqual(m, cfgA_m)
#-def
def test_load_config_empty(self):
with OpenContext(0, "", False):
d, m = load_config("f")
self.assertEqual(d, [])
self.assertEqual(m, {})
#-def
def test_load_config_error(self):
with OpenContext(OPEN_FAIL, cfgA_d, False):
d, m = load_config("f")
self.assertIsNone(d)
self.assertIsNone(m)
#-def
#-class
class TestSetItemCase(unittest.TestCase):
def setUp(self):
self.dA = [
(COMM, "# Comment."),
(ITEM, 'a.b', "cd")
]
self.mA = {
'a.b': 1
}
#-def
def test_set_item(self):
set_item(self.dA, self.mA, 'a.b', "ef")
self.assertEqual(self.dA, [
(COMM, "# Comment."),
(ITEM, 'a.b', "ef")
])
self.assertEqual(self.mA, {'a.b': 1})
set_item(self.dA, self.mA, 'x.y', "zzz")
self.assertEqual(self.dA, [
(COMM, "# Comment."),
(ITEM, 'a.b', "ef"),
(ITEM, 'x.y', "zzz")
])
self.assertEqual(self.mA, {'a.b': 1, 'x.y': 2})
set_item(self.dA, self.mA, 'x.y', "acc")
self.assertEqual(self.dA, [
(COMM, "# Comment."),
(ITEM, 'a.b', "ef"),
(ITEM, 'x.y', "acc")
])
self.assertEqual(self.mA, {'a.b': 1, 'x.y': 2})
#-def
#-class
class TestGetItemCase(unittest.TestCase):
def setUp(self):
self.dA = [
(ITEM, 'a', "asdf"),
(ITEM, 'gh', "1258"),
(NL, ""),
(COMM, "#"),
(ITEM, '56', "#4$")
]
self.mA = {'a': 0, 'gh': 1, '56': 4}
#-def
def test_get_item(self):
self.assertEqual(get_item(self.dA, self.mA, 'gh'), "1258")
self.assertEqual(get_item(self.dA, self.mA, 'a'), "asdf")
self.assertIsNone(get_item(self.dA, self.mA, 'b.c'))
self.assertEqual(get_item(self.dA, self.mA, '56'), "#4$")
#-def
#-class
class TestDelItemCase(unittest.TestCase):
def setUp(self):
self.dA = [
(NL, ""),
(COMM, "#"),
(ITEM, 'a.b', "1"),
(NL, ""),
(ITEM, 'c.d', "23"),
(ITEM, 'e2', "uth"),
(ITEM, 'e2', "hut"),
(COMM, "# $")
]
self.mA = {'a.b': 2, 'c.d': 4, 'e2': 6}
#-def
def test_del_item(self):
self.assertEqual(get_item(self.dA, self.mA, 'c.d'), "23")
del_item(self.dA, self.mA, 'c.d')
self.assertEqual(self.dA, [
(NL, ""),
(COMM, "#"),
(ITEM, 'a.b', "1"),
(NL, ""),
(COMM, "#c.d = 23"),
(ITEM, 'e2', "uth"),
(ITEM, 'e2', "hut"),
(COMM, "# $")
])
self.assertEqual(self.mA, {'a.b': 2, 'e2': 6})
self.assertIsNone(get_item(self.dA, self.mA, 'c.d'))
del_item(self.dA, self.mA, 'R')
self.assertEqual(self.dA, [
(NL, ""),
(COMM, "#"),
(ITEM, 'a.b', "1"),
(NL, ""),
(COMM, "#c.d = 23"),
(ITEM, 'e2', "uth"),
(ITEM, 'e2', "hut"),
(COMM, "# $")
])
self.assertEqual(self.mA, {'a.b': 2, 'e2': 6})
del_item(self.dA, self.mA, 'a.b')
self.assertEqual(self.dA, [
(NL, ""),
(COMM, "#"),
(COMM, "#a.b = 1"),
(NL, ""),
(COMM, "#c.d = 23"),
(ITEM, 'e2', "uth"),
(ITEM, 'e2', "hut"),
(COMM, "# $")
])
self.assertEqual(self.mA, {'e2': 6})
del_item(self.dA, self.mA, 'e2')
self.assertEqual(self.dA, [
(NL, ""),
(COMM, "#"),
(COMM, "#a.b = 1"),
(NL, ""),
(COMM, "#c.d = 23"),
(COMM, "#e2 = uth"),
(COMM, "#e2 = hut"),
(COMM, "# $")
])
self.assertEqual(self.mA, {})
#-def
#-class
class TestMergeItemsCase(unittest.TestCase):
def setUp(self):
self.data_1 = [
(COMM, "# 1."),
(ITEM, 'x', "yy"),
(NL, ""),
(ITEM, 'y', "123"),
(NL, "")
]
self.data_2 = [
(NL, ""),
(ITEM, 'x', "25l"),
(COMM, "= []"),
(ITEM, 'a', "iuo"),
(NL, ""),
(COMM, "# $.")
]
self.data_12 = [
(COMM, "# 1."),
(ITEM, 'x', "yy"),
(NL, ""),
(ITEM, 'y', "123"),
(NL, ""),
(NL, ""),
(ITEM, 'x', "25l"),
(COMM, "= []"),
(ITEM, 'a', "iuo"),
(NL, ""),
(COMM, "# $.")
]
self.data_map_12 = {
'x': 6, 'y': 3, 'a': 8
}
#-def
def test_merge_items(self):
r = merge_items(self.data_1, self.data_2)
self.assertEqual(r[0], self.data_12)
self.assertEqual(r[1], self.data_map_12)
#-def
#-class
class TestConfigToKvMapCase(unittest.TestCase):
def setUp(self):
self.data = [
(COMM, "# 1."),
(ITEM, 'x', "yy"),
(NL, ""),
(ITEM, 'y', "123"),
(NL, ""),
(NL, ""),
(ITEM, 'x', "25l"),
(COMM, "= []"),
(ITEM, 'a', "iuo"),
(NL, ""),
(COMM, "# $.")
]
self.data_map = {
'x': 6, 'y': 3, 'a': 8
}
self.kvmap = {
'x': "25l", 'y': "123", 'a': "iuo"
}
#-def
def test_config_to_kvmap(self):
r = config_to_kvmap(self.data, self.data_map)
self.assertEqual(r, self.kvmap)
#-def
#-class
class TestSaveConfigCase(unittest.TestCase):
def test_save_config(self):
b = DataBuffer()
with OpenContext(0, cfgA, False):
d, m = load_config("fgh")
del_item(d, m, 'pgen.fmtdir')
set_item(d, m, 'pgen.quite', "0")
set_item(d, m, 'pgen.fmtdir', "doit/meta/fmt ")
with OpenContext(0, b, False):
r, m = save_config("cfg", d)
self.assertTrue(r)
self.assertEqual(m, "")
self.assertEqual(b.data,
"pgen.quite = 0\n" \
"\n" \
"# Default format directory:\n" \
"#pgen.fmtdir = ../../fmt\n" \
"\n" \
"ignored line\n" \
"= another ignored line\n" \
"test = 0\n" \
"path =\n" \
"pgen.fmtdir = doit/meta/fmt\n"
)
#-def
def test_save_config_empty(self):
b = DataBuffer()
with OpenContext(0, b, False):
r, m = save_config("cfg", [])
self.assertTrue(r)
self.assertEqual(m, "")
self.assertEqual(b.data, "")
#-def
def test_save_config_fail(self):
b = DataBuffer()
with OpenContext(0, cfgA, False):
d, m = load_config("fgh")
del_item(d, m, 'pgen.fmtdir')
set_item(d, m, 'pgen.quite', "0")
set_item(d, m, 'pgen.fmtdir', "doit/meta/fmt ")
with OpenContext(OPEN_FAIL, b, False):
r, m = save_config("cfg", d)
self.assertFalse(r)
self.assertEqual(m, "Can't write to <cfg>")
#-def
#-class
class TestSetConfigOptionCase(unittest.TestCase):
def test_set_config_option_fails1(self):
c = ConfigCommandMock('config')
b = {
'r': OPEN_FAIL,
'w': OPEN_FAIL
}
d = {
'r': "x = y",
'w': DataBuffer()
}
e = (True, True)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = set_config_option(c, "conf", 'x', "z", True)
self.assertEqual(xc, EXIT_FAILURE)
self.assertEqual(c.stream, "config: File <conf> cannot be opened.\n")
#-def
def test_set_config_option_fails2(self):
c = ConfigCommandMock('config')
b = {
'r': OPEN_FAIL,
'w': OPEN_FAIL
}
d = {
'r': "x = y",
'w': DataBuffer()
}
e = (False, True)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = set_config_option(c, "conf", 'x', "z", True)
self.assertEqual(xc, EXIT_FAILURE)
self.assertEqual(c.stream, "config: Can't write to <conf>.\n")
#-def
def test_set_config_option_fails3(self):
c = ConfigCommandMock('config')
b = {
'r': OPEN_FAIL,
'w': OPEN_FAIL
}
d = {
'r': "x = y",
'w': DataBuffer()
}
e = (True, False)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = set_config_option(c, "conf", 'x', "z", True)
self.assertEqual(xc, EXIT_FAILURE)
self.assertEqual(c.stream, "config: Can't write to <conf>.\n")
#-def
def test_set_config_option_fails4(self):
c = ConfigCommandMock('config')
b = {
'r': OPEN_FAIL,
'w': OPEN_FAIL
}
d = {
'r': "x = y",
'w': DataBuffer()
}
e = (False, False)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = set_config_option(c, "conf", 'x', "z", True)
self.assertEqual(xc, EXIT_FAILURE)
self.assertEqual(c.stream, "config: Can't write to <conf>.\n")
#-def
def test_set_config_option_noconfig1(self):
c = ConfigCommandMock('config')
b = {
'r': OPEN_FAIL,
'w': 0
}
d = {
'r': "x = y\n1 = 2",
'w': DataBuffer()
}
e = (False, False)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = set_config_option(c, "conf", 'x', "z", True)
self.assertEqual(xc, EXIT_SUCCESS)
self.assertEqual(c.stream,
"config: Option 'x' in <conf> has been updated.\n"
)
self.assertEqual(d['w'].data, "x = z\n")
#-def
def test_set_config_option_noconfig2(self):
c = ConfigCommandMock('config')
b = {
'r': OPEN_FAIL,
'w': 0
}
d = {
'r': "x = y\n1 = 2",
'w': DataBuffer()
}
e = (False, True)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = set_config_option(c, "conf", 'x', "z", True)
self.assertEqual(xc, EXIT_SUCCESS)
self.assertEqual(c.stream,
"config: Option 'x' in <conf> has been updated.\n"
)
self.assertEqual(d['w'].data, "x = z\n")
#-def
def test_set_config_option_noconfig3(self):
c = ConfigCommandMock('config')
b = {
'r': OPEN_FAIL,
'w': 0
}
d = {
'r': "x = y\n1 = 2",
'w': DataBuffer()
}
e = (True, False)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = set_config_option(c, "conf", 'x', "z", True)
self.assertEqual(xc, EXIT_SUCCESS)
self.assertEqual(c.stream,
"config: Option 'x' in <conf> has been updated.\n"
)
self.assertEqual(d['w'].data, "x = z\n")
#-def
def test_set_config_option_save_fails1(self):
c = ConfigCommandMock('config')
b = {
'r': 0,
'w': OPEN_FAIL
}
d = {
'r': "x = y\n1 = 2",
'w': DataBuffer()
}
e = (True, True)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = set_config_option(c, "conf", 'x', "z", True)
self.assertEqual(xc, EXIT_FAILURE)
self.assertEqual(c.stream, "config: Can't write to <conf>.\n")
#-def
def test_set_config_option_save_succeed1(self):
c = ConfigCommandMock('config')
b = {
'r': 0,
'w': 0
}
d = {
'r': "x = y\n1 = 2",
'w': DataBuffer()
}
e = (True, True)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = set_config_option(c, "conf", 'x', "z", True)
self.assertEqual(xc, EXIT_SUCCESS)
self.assertEqual(c.stream,
"config: Option 'x' in <conf> has been updated.\n"
)
self.assertEqual(d['w'].data, "x = z\n1 = 2\n")
#-def
def test_set_config_option_save_succeed2(self):
c = ConfigCommandMock('config')
b = {
'r': 0,
'w': 0
}
d = {
'r': "x = y\n1 = 2",
'w': DataBuffer()
}
e = (True, True)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = set_config_option(c, "conf", 'x', "z", False)
self.assertEqual(xc, EXIT_SUCCESS)
self.assertEqual(c.stream, "")
self.assertEqual(d['w'].data, "x = z\n1 = 2\n")
#-def
#-class
class TestUnsetConfigOptionCase(unittest.TestCase):
def test_unset_config_option_fail1(self):
c = ConfigCommandMock('config')
b = {
'r': OPEN_FAIL,
'w': 0
}
d = {
'r': "x = y",
'w': DataBuffer()
}
e = (True, True)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = unset_config_option(c, "conf", 'x', True)
self.assertEqual(xc, EXIT_FAILURE)
self.assertEqual(c.stream, "config: File <conf> cannot be opened.\n")
self.assertEqual(d['w'].data, "")
#-def
def test_unset_config_option_noconfig1(self):
c = ConfigCommandMock('config')
b = {
'r': OPEN_FAIL,
'w': 0
}
d = {
'r': "x = y",
'w': DataBuffer()
}
e = (False, True)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = unset_config_option(c, "conf", 'x', True)
self.assertEqual(xc, EXIT_SUCCESS)
self.assertEqual(c.stream, "")
self.assertEqual(d['w'].data, "")
#-def
def test_unset_config_option_noconfig2(self):
c = ConfigCommandMock('config')
b = {
'r': OPEN_FAIL,
'w': 0
}
d = {
'r': "x = y",
'w': DataBuffer()
}
e = (True, False)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = unset_config_option(c, "conf", 'x', True)
self.assertEqual(xc, EXIT_SUCCESS)
self.assertEqual(c.stream, "")
self.assertEqual(d['w'].data, "")
#-def
def test_unset_config_option_noconfig3(self):
c = ConfigCommandMock('config')
b = {
'r': OPEN_FAIL,
'w': 0
}
d = {
'r': "x = y",
'w': DataBuffer()
}
e = (False, False)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = unset_config_option(c, "conf", 'x', True)
self.assertEqual(xc, EXIT_SUCCESS)
self.assertEqual(c.stream, "")
self.assertEqual(d['w'].data, "")
#-def
def test_unset_config_option_savefail1(self):
c = ConfigCommandMock('config')
b = {
'r': 0,
'w': OPEN_FAIL
}
d = {
'r': "x = y\n1 = 2\nx = 3",
'w': DataBuffer()
}
e = (True, True)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = unset_config_option(c, "conf", 'x', True)
self.assertEqual(xc, EXIT_FAILURE)
self.assertEqual(c.stream, "config: Can't write to <conf>.\n")
self.assertEqual(d['w'].data, "")
#-def
def test_unset_config_option_ok1(self):
c = ConfigCommandMock('config')
b = {
'r': 0,
'w': 0
}
d = {
'r': "x = y\n1 = 2\nx = 3",
'w': DataBuffer()
}
e = (True, True)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = unset_config_option(c, "conf", 'x', True)
self.assertEqual(xc, EXIT_SUCCESS)
self.assertEqual(c.stream,
"config: Option 'x' in <conf> has been unset.\n"
)
self.assertEqual(d['w'].data, "#x = y\n1 = 2\n#x = 3\n")
#-def
def test_unset_config_option_ok2(self):
c = ConfigCommandMock('config')
b = {
'r': 0,
'w': 0
}
d = {
'r': "x = y\n1 = 2\nx = 3",
'w': DataBuffer()
}
e = (True, True)
with OsModuleMock(e), OpenContext(b, d, False, make_rwopen):
xc = unset_config_option(c, "conf", 'x', False)
self.assertEqual(xc, EXIT_SUCCESS)
self.assertEqual(c.stream, "")
self.assertEqual(d['w'].data, "#x = y\n1 = 2\n#x = 3\n")
#-def
#-class
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestLoadConfigCase))
suite.addTest(unittest.makeSuite(TestSetItemCase))
suite.addTest(unittest.makeSuite(TestGetItemCase))
suite.addTest(unittest.makeSuite(TestDelItemCase))
suite.addTest(unittest.makeSuite(TestMergeItemsCase))
suite.addTest(unittest.makeSuite(TestConfigToKvMapCase))
suite.addTest(unittest.makeSuite(TestSaveConfigCase))
suite.addTest(unittest.makeSuite(TestSetConfigOptionCase))
suite.addTest(unittest.makeSuite(TestUnsetConfigOptionCase))
return suite
#-def
|
mit
|
tonk/ansible
|
test/lib/ansible_test/_internal/cloud/tower.py
|
7
|
7417
|
"""Tower plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
from ..util import (
display,
ApplicationError,
is_shippable,
SubprocessError,
ConfigParser,
)
from ..util_common import (
run_command,
)
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..core_ci import (
AnsibleCoreCI,
)
class TowerCloudProvider(CloudProvider):
"""Tower cloud provider plugin. Sets up cloud resources before delegation."""
def __init__(self, args):
"""
:type args: TestConfig
"""
super(TowerCloudProvider, self).__init__(args)
self.aci = None
self.version = ''
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
aci = get_tower_aci(self.args)
if os.path.isfile(aci.ci_key):
return
if is_shippable():
return
super(TowerCloudProvider, self).filter(targets, exclude)
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(TowerCloudProvider, self).setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def check_tower_version(self, fallback=None):
"""Check the Tower version being tested and determine the correct CLI version to use.
:type fallback: str | None
"""
tower_cli_version_map = {
'3.1.5': '3.1.8',
'3.2.3': '3.3.0',
'3.3.5': '3.3.3',
'3.4.3': '3.3.3',
'3.6.3': '3.3.8',
}
cli_version = tower_cli_version_map.get(self.version, fallback)
if not cli_version:
raise ApplicationError('Mapping to ansible-tower-cli version required for Tower version: %s' % self.version)
self._set_cloud_config('tower_cli_version', cli_version)
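        # Example of the mapping above: testing Tower 3.2.3 installs ansible-tower-cli 3.3.0,
        # while an unmapped Tower version with no fallback raises ApplicationError instead of
        # guessing.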
def cleanup(self):
"""Clean up the cloud resource and any temporary configuration files after tests complete."""
# cleanup on success or failure is not yet supported due to how cleanup is called
if self.aci and self.args.remote_terminate == 'always':
self.aci.stop()
super(TowerCloudProvider, self).cleanup()
def _setup_static(self):
config = TowerConfig.parse(self.config_static_path)
self.version = config.version
self.check_tower_version()
def _setup_dynamic(self):
"""Request Tower credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
# temporary solution to allow version selection
self.version = os.environ.get('TOWER_VERSION', '3.6.3')
self.check_tower_version(os.environ.get('TOWER_CLI_VERSION'))
aci = get_tower_aci(self.args, self.version)
aci.start()
aci.wait()
connection = aci.get()
config = self._read_config_template()
if not self.args.explain:
self.aci = aci
values = dict(
VERSION=self.version,
HOST=connection.hostname,
USERNAME=connection.username,
PASSWORD=connection.password,
)
display.sensitive.add(values['PASSWORD'])
config = self._populate_config_template(config, values)
self._write_config(config)
class TowerCloudEnvironment(CloudEnvironment):
"""Tower cloud environment plugin. Updates integration test environment after delegation."""
def setup(self):
"""Setup which should be done once per environment instead of once per test target."""
self.setup_cli()
self.disable_pendo()
def setup_cli(self):
"""Install the correct Tower CLI for the version of Tower being tested."""
tower_cli_version = self._get_cloud_config('tower_cli_version')
display.info('Installing Tower CLI version: %s' % tower_cli_version)
cmd = self.args.pip_command + ['install', '--disable-pip-version-check', 'ansible-tower-cli==%s' % tower_cli_version]
run_command(self.args, cmd)
cmd = ['tower-cli', 'config', 'verify_ssl', 'false']
run_command(self.args, cmd, capture=True)
def disable_pendo(self):
"""Disable Pendo tracking."""
        display.info('Disabling Pendo tracking')
config = TowerConfig.parse(self.config_path)
# tower-cli does not recognize TOWER_ environment variables
cmd = ['tower-cli', 'setting', 'modify', 'PENDO_TRACKING_STATE', 'off',
'-h', config.host, '-u', config.username, '-p', config.password]
attempts = 60
while True:
attempts -= 1
try:
run_command(self.args, cmd, capture=True)
return
except SubprocessError as ex:
if not attempts:
raise ApplicationError('Timed out trying to disable Pendo tracking:\n%s' % ex)
time.sleep(5)
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
config = TowerConfig.parse(self.config_path)
env_vars = config.environment
ansible_vars = dict((key.lower(), value) for key, value in env_vars.items())
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
class TowerConfig:
"""Tower settings."""
def __init__(self, values):
self.version = values.get('version')
self.host = values.get('host')
self.username = values.get('username')
self.password = values.get('password')
if self.password:
display.sensitive.add(self.password)
@property
def environment(self):
"""Tower settings as environment variables.
:rtype: dict[str, str]
"""
env = dict(
TOWER_VERSION=self.version,
TOWER_HOST=self.host,
TOWER_USERNAME=self.username,
TOWER_PASSWORD=self.password,
)
return env
@staticmethod
def parse(path):
"""
:type path: str
:rtype: TowerConfig
"""
parser = ConfigParser()
parser.read(path)
keys = (
'version',
'host',
'username',
'password',
)
values = dict((k, parser.get('default', k)) for k in keys)
config = TowerConfig(values)
missing = [k for k in keys if not values.get(k)]
if missing:
raise ApplicationError('Missing or empty Tower configuration value(s): %s' % ', '.join(missing))
return config
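    # Expected file layout (a sketch; the values are illustrative, the 'default' section name is
    # what parse() reads):
    #
    #   [default]
    #   version = 3.6.3
    #   host = tower.example.com
    #   username = admin
    #   password = example-password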
def get_tower_aci(args, version=None):
"""
:type args: EnvironmentConfig
:type version: str | None
:rtype: AnsibleCoreCI
"""
if version:
persist = True
else:
version = ''
persist = False
return AnsibleCoreCI(args, 'tower', version, persist=persist, stage=args.remote_stage, provider=args.remote_provider)
|
gpl-3.0
|
t1g0r/npyscreen
|
npyscreen/wgmultiselecttree.py
|
15
|
1474
|
from . import wgmultilinetree as multilinetree
from . import wgcheckbox as checkbox
import weakref
import curses.ascii  # curses.ascii.SP is used in set_up_handlers below
class MultiSelectTree(multilinetree.SelectOneTree):
_contained_widgets = checkbox.Checkbox
def set_up_handlers(self):
super(MultiSelectTree, self).set_up_handlers()
self.handlers.update({
ord("x"): self.h_select_toggle,
curses.ascii.SP: self.h_select_toggle,
ord("X"): self.h_select,
"^U": self.h_select_none,
})
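        # Key bindings in plain terms: "x" and the space bar toggle the item under the cursor,
        # "X" invokes the inherited h_select, and ^U clears the current selection entirely.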
def h_select_none(self, input):
self.value = []
def h_select_toggle(self, input):
try:
working_with = weakref.proxy(self.values[self.cursor_line])
except TypeError:
working_with = self.values[self.cursor_line]
if working_with in self.value:
self.value.remove(working_with)
else:
self.value.append(working_with)
def h_set_filtered_to_selected(self, ch):
self.value = self.get_filtered_values()
def h_select_exit(self, ch):
try:
working_with = weakref.proxy(self.values[self.cursor_line])
except TypeError:
working_with = self.values[self.cursor_line]
if not working_with in self.value:
self.value.append(working_with)
if self.return_exit:
self.editing = False
self.how_exited=True
|
bsd-2-clause
|
chenchiyuan/gym
|
applications/weixin/models/menus.py
|
3
|
2795
|
# -*- coding: utf-8 -*-
# __author__ = chenchiyuan
from __future__ import division, unicode_literals, print_function
from collections import OrderedDict
from applications.weixin.models.apps import App
from applications.weixin.models.rules import Rule
from libs.models.mixins import QuerysetMixin
from libs.models.models import SingletonModel
from django.db import models
class SubscribeItem(SingletonModel, QuerysetMixin):
class Meta:
app_label = "weixin"
db_table = "weixin_subscribe"
verbose_name = verbose_name_plural = u"微信关注回复"
app = models.ForeignKey(App, verbose_name=u"app")
rule = models.ForeignKey(Rule, verbose_name=u"对应关键字")
def __unicode__(self):
        return unicode(self.id)
class MenuItem(models.Model, QuerysetMixin):
class Meta:
app_label = "weixin"
db_table = "weixin_menu"
verbose_name = verbose_name_plural = u"微信菜单"
main = models.CharField(u"主菜单", max_length=64)
secondary = models.CharField(u"二级菜单", max_length=64, blank=True, null=True)
app = models.ForeignKey(App, verbose_name=u"app")
rule = models.ForeignKey(Rule, verbose_name=u"对应关键字", blank=True, null=True)
link = models.CharField(u"链接", max_length=128, blank=True, null=True)
def __unicode__(self):
        return unicode(self.id)
@classmethod
def get_menus_by_app(cls, app):
menus = cls.filter_by_queries(app=app)
menu_dict = OrderedDict()
for menu in menus:
if not menu.main in menu_dict:
menu_dict[menu.main] = []
menu_dict[menu.main].append(menu)
top_three_menus = menu_dict.items()[:3]
result = {
"button": []
}
for menu_main, menu_items in top_three_menus:
if len(menu_items) == 1:
# 一个的时候需要判断下
result_item = menu_items[0].to_button()
result['button'].append(result_item)
else:
sub_buttons = []
for menu_item in menu_items:
sub_buttons.append(menu_item.to_button())
result['button'].append({
"name": menu_main,
"sub_button": sub_buttons
})
return result
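        # Shape of the returned payload (values are illustrative), matching the WeChat custom
        # menu API: at most three top-level buttons, each either a single view/click button or a
        # {"name": ..., "sub_button": [...]} group, e.g.
        #
        #   {"button": [
        #       {"type": "view", "name": u"Home", "url": "http://example.com"},
        #       {"name": u"More", "sub_button": [
        #           {"type": "click", "name": u"Help", "key": u"42"}]}]}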
@property
def name(self):
if self.secondary:
return self.secondary
return self.main
def to_button(self):
if self.link:
return {
"type": "view",
"name": self.name,
"url": self.link
}
else:
return {
"type": "click",
"name": self.name,
"key": unicode(self.rule_id)
}
|
bsd-3-clause
|
yencarnacion/jaikuengine
|
.google_appengine/lib/PyAMF-0.6.1/pyamf/adapters/_google_appengine_ext_db.py
|
27
|
10846
|
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Google App Engine adapter module.
Sets up basic type mapping and class mappings for using the Datastore API
in Google App Engine.
@see: U{Datastore API on Google App Engine<http://
code.google.com/appengine/docs/python/datastore>}
@since: 0.3.1
"""
from google.appengine.ext import db
from google.appengine.ext.db import polymodel
import datetime
import pyamf
from pyamf.adapters import util
class ModelStub(object):
"""
This class represents a C{db.Model} or C{db.Expando} class as the typed
object is being read from the AMF stream. Once the attributes have been
read from the stream and through the magic of Python, the instance of this
class will be converted into the correct type.
@ivar klass: The referenced class either C{db.Model} or C{db.Expando}.
This is used so we can proxy some of the method calls during decoding.
@type klass: C{db.Model} or C{db.Expando}
@see: L{DataStoreClassAlias.applyAttributes}
"""
def __init__(self, klass):
self.klass = klass
def properties(self):
return self.klass.properties()
def dynamic_properties(self):
return []
class GAEReferenceCollection(dict):
"""
This helper class holds a dict of klass to key/objects loaded from the
Datastore.
@since: 0.4.1
"""
def _getClass(self, klass):
if not issubclass(klass, (db.Model, db.Expando)):
raise TypeError('expected db.Model/db.Expando class, got %s' % (klass,))
return self.setdefault(klass, {})
def getClassKey(self, klass, key):
"""
Return an instance based on klass/key.
If an instance cannot be found then C{KeyError} is raised.
@param klass: The class of the instance.
@param key: The key of the instance.
@return: The instance linked to the C{klass}/C{key}.
@rtype: Instance of L{klass}.
"""
d = self._getClass(klass)
return d[key]
def addClassKey(self, klass, key, obj):
"""
Adds an object to the collection, based on klass and key.
@param klass: The class of the object.
@param key: The datastore key of the object.
@param obj: The loaded instance from the datastore.
"""
d = self._getClass(klass)
d[key] = obj
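    # Usage sketch with a hypothetical ``Author`` model: addClassKey(Author, key, obj) caches the
    # loaded entity, getClassKey(Author, key) returns it, and a cache miss surfaces as a KeyError
    # so callers can fall back to the datastore.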
class DataStoreClassAlias(pyamf.ClassAlias):
"""
This class contains all the business logic to interact with Google's
Datastore API's. Any C{db.Model} or C{db.Expando} classes will use this
class alias for encoding/decoding.
We also add a number of indexes to the encoder context to aggressively
decrease the number of Datastore API's that we need to complete.
"""
# The name of the attribute used to represent the key
KEY_ATTR = '_key'
def _compile_base_class(self, klass):
if klass in (db.Model, polymodel.PolyModel):
return
pyamf.ClassAlias._compile_base_class(self, klass)
def getCustomProperties(self):
props = [self.KEY_ATTR]
self.reference_properties = {}
self.properties = {}
reverse_props = []
for name, prop in self.klass.properties().iteritems():
self.properties[name] = prop
props.append(name)
if isinstance(prop, db.ReferenceProperty):
self.reference_properties[name] = prop
if issubclass(self.klass, polymodel.PolyModel):
del self.properties['_class']
props.remove('_class')
# check if the property is a defined as a collection_name. These types
# of properties are read-only and the datastore freaks out if you
# attempt to meddle with it. We delete the attribute entirely ..
for name, value in self.klass.__dict__.iteritems():
if isinstance(value, db._ReverseReferenceProperty):
reverse_props.append(name)
self.encodable_properties.update(self.properties.keys())
self.decodable_properties.update(self.properties.keys())
self.readonly_attrs.update(reverse_props)
if not self.reference_properties:
self.reference_properties = None
if not self.properties:
self.properties = None
self.no_key_attr = self.KEY_ATTR in self.exclude_attrs
def getEncodableAttributes(self, obj, codec=None):
attrs = pyamf.ClassAlias.getEncodableAttributes(self, obj, codec=codec)
gae_objects = getGAEObjects(codec.context) if codec else None
if self.reference_properties and gae_objects:
for name, prop in self.reference_properties.iteritems():
klass = prop.reference_class
key = prop.get_value_for_datastore(obj)
if not key:
continue
try:
attrs[name] = gae_objects.getClassKey(klass, key)
except KeyError:
ref_obj = getattr(obj, name)
gae_objects.addClassKey(klass, key, ref_obj)
attrs[name] = ref_obj
for k in attrs.keys()[:]:
if k.startswith('_'):
del attrs[k]
for attr in obj.dynamic_properties():
attrs[attr] = getattr(obj, attr)
if not self.no_key_attr:
attrs[self.KEY_ATTR] = str(obj.key()) if obj.is_saved() else None
return attrs
def createInstance(self, codec=None):
return ModelStub(self.klass)
def getDecodableAttributes(self, obj, attrs, codec=None):
key = attrs.setdefault(self.KEY_ATTR, None)
attrs = pyamf.ClassAlias.getDecodableAttributes(self, obj, attrs, codec=codec)
del attrs[self.KEY_ATTR]
new_obj = None
# attempt to load the object from the datastore if KEY_ATTR exists.
if key and codec:
new_obj = loadInstanceFromDatastore(self.klass, key, codec)
# clean up the stub
if isinstance(obj, ModelStub) and hasattr(obj, 'klass'):
del obj.klass
if new_obj:
obj.__dict__ = new_obj.__dict__.copy()
obj.__class__ = self.klass
apply_init = True
if self.properties:
for k in [k for k in attrs.keys() if k in self.properties.keys()]:
prop = self.properties[k]
v = attrs[k]
if isinstance(prop, db.FloatProperty) and isinstance(v, (int, long)):
attrs[k] = float(v)
elif isinstance(prop, db.IntegerProperty) and isinstance(v, float):
x = long(v)
# only convert the type if there is no mantissa - otherwise
# let the chips fall where they may
if x == v:
attrs[k] = x
elif isinstance(prop, db.ListProperty) and v is None:
attrs[k] = []
elif isinstance(v, datetime.datetime):
# Date/Time Property fields expect specific types of data
# whereas PyAMF only decodes into datetime.datetime objects.
if isinstance(prop, db.DateProperty):
attrs[k] = v.date()
elif isinstance(prop, db.TimeProperty):
attrs[k] = v.time()
if new_obj is None and isinstance(v, ModelStub) and prop.required and k in self.reference_properties:
apply_init = False
del attrs[k]
# If the object does not exist in the datastore, we must fire the
# class constructor. This sets internal attributes that pyamf has
# no business messing with ..
if new_obj is None and apply_init is True:
obj.__init__(**attrs)
return attrs
def getGAEObjects(context):
"""
Returns a reference to the C{gae_objects} on the context. If it doesn't
exist then it is created.
@param context: The context to load the C{gae_objects} index from.
@return: The C{gae_objects} index reference.
@rtype: Instance of L{GAEReferenceCollection}
@since: 0.4.1
"""
return context.extra.setdefault('gae_objects', GAEReferenceCollection())
def loadInstanceFromDatastore(klass, key, codec=None):
"""
Attempt to load an instance from the datastore, based on C{klass}
and C{key}. We create an index on the codec's context (if it exists)
so we can check that first before accessing the datastore.
@param klass: The class that will be loaded from the datastore.
@type klass: Sub-class of C{db.Model} or C{db.Expando}
@param key: The key which is used to uniquely identify the instance in the
datastore.
@type key: C{str}
@param codec: The codec to reference the C{gae_objects} index. If
        supplied, the codec must have a context attribute.
@return: The loaded instance from the datastore.
@rtype: Instance of C{klass}.
@since: 0.4.1
"""
if not issubclass(klass, (db.Model, db.Expando)):
raise TypeError('expected db.Model/db.Expando class, got %s' % (klass,))
if not isinstance(key, basestring):
        raise TypeError('string expected for key, got %s' % (repr(key),))
key = str(key)
if codec is None:
return klass.get(key)
gae_objects = getGAEObjects(codec.context)
try:
return gae_objects.getClassKey(klass, key)
except KeyError:
pass
obj = klass.get(key)
gae_objects.addClassKey(klass, key, obj)
return obj
def writeGAEObject(obj, encoder=None):
"""
The GAE Datastore creates new instances of objects for each get request.
This is a problem for PyAMF as it uses the id(obj) of the object to do
reference checking.
We could just ignore the problem, but the objects are conceptually the
same so the effort should be made to attempt to resolve references for a
given object graph.
We create a new map on the encoder context object which contains a dict of
C{object.__class__: {key1: object1, key2: object2, .., keyn: objectn}}. We
use the datastore key to do the reference checking.
@since: 0.4.1
"""
if not obj.is_saved():
encoder.writeObject(obj)
return
context = encoder.context
kls = obj.__class__
s = obj.key()
gae_objects = getGAEObjects(context)
try:
referenced_object = gae_objects.getClassKey(kls, s)
except KeyError:
referenced_object = obj
gae_objects.addClassKey(kls, s, obj)
encoder.writeObject(referenced_object)
# initialise the module here: hook into pyamf
pyamf.register_alias_type(DataStoreClassAlias, db.Model)
pyamf.add_type(db.Query, util.to_list)
pyamf.add_type(db.Model, writeGAEObject)
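# Illustrative sketch, not part of the original adapter: it spells out the
# check-then-add pattern that GAEReferenceCollection and loadInstanceFromDatastore
# implement above. `model_class` stands in for any db.Model/db.Expando subclass.
def _example_reference_cache(model_class, key):
    refs = GAEReferenceCollection()
    try:
        return refs.getClassKey(model_class, key)  # cache hit: no datastore call
    except KeyError:
        obj = model_class.get(key)  # cache miss: a single datastore fetch
        refs.addClassKey(model_class, key, obj)
        return obj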
|
apache-2.0
|
teamotrinidad/plugin.video.cheguevara
|
commonfunctionsdummy.py
|
40
|
17612
|
'''
Parsedom for XBMC plugins
Copyright (C) 2010-2011 Tobias Ussing And Henrik Mosgaard Jensen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
import urllib
import urllib2
import re
import io
import inspect
import time
import HTMLParser
#import chardet
import json
version = u"2.5.1"
plugin = u"CommonFunctions-" + version
print plugin
USERAGENT = u"Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:16.0.1) Gecko/20121011 Firefox/16.0.1"
if hasattr(sys.modules["__main__"], "xbmc"):
xbmc = sys.modules["__main__"].xbmc
else:
import xbmc
if hasattr(sys.modules["__main__"], "xbmcgui"):
xbmcgui = sys.modules["__main__"].xbmcgui
else:
import xbmcgui
if hasattr(sys.modules["__main__"], "dbg"):
dbg = sys.modules["__main__"].dbg
else:
dbg = False
if hasattr(sys.modules["__main__"], "dbglevel"):
dbglevel = sys.modules["__main__"].dbglevel
else:
dbglevel = 3
if hasattr(sys.modules["__main__"], "opener"):
urllib2.install_opener(sys.modules["__main__"].opener)
# This function raises a keyboard for user input
def getUserInput(title=u"Input", default=u"", hidden=False):
log("", 5)
result = None
    # Fix for when this function is called with default=None
if not default:
default = u""
keyboard = xbmc.Keyboard(default, title)
keyboard.setHiddenInput(hidden)
keyboard.doModal()
if keyboard.isConfirmed():
result = keyboard.getText()
log(repr(result), 5)
return result
# This function raises a keyboard numpad for user input
def getUserInputNumbers(title=u"Input", default=u""):
log("", 5)
result = None
    # Fix for when this function is called with default=None
if not default:
default = u""
keyboard = xbmcgui.Dialog()
result = keyboard.numeric(0, title, default)
log(repr(result), 5)
return str(result)
def getXBMCVersion():
log("", 3)
version = xbmc.getInfoLabel( "System.BuildVersion" )
log(version, 3)
for key in ["-", " "]:
        if version.find(key) > -1:
version = version[:version.find(key)]
version = float(version)
log(repr(version))
return version
# Converts the request url passed on by xbmc to the plugin into a dict of key-value pairs
def getParameters(parameterString):
log("", 5)
commands = {}
if getXBMCVersion() >= 12.0:
parameterString = urllib.unquote_plus(parameterString)
splitCommands = parameterString[parameterString.find('?') + 1:].split('&')
for command in splitCommands:
if (len(command) > 0):
splitCommand = command.split('=')
key = splitCommand[0]
try:
value = splitCommand[1].encode("utf-8")
except:
log("Error utf-8 encoding argument value: " + repr(splitCommand[1]))
value = splitCommand[1]
commands[key] = value
log(repr(commands), 5)
return commands
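# Illustrative usage sketch (the plugin URL is made up): everything after '?' is
# split into a dict of utf-8 encoded key/value strings.
def _example_getParameters():
    params = getParameters("plugin://plugin.video.example/?mode=play&id=42")
    return params.get("mode"), params.get("id")  # ('play', '42')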
def replaceHTMLCodes(txt):
log(repr(txt), 5)
# Fix missing ; in &#<number>;
txt = re.sub("(&#[0-9]+)([^;^0-9]+)", "\\1;\\2", makeUTF8(txt))
txt = HTMLParser.HTMLParser().unescape(txt)
    txt = txt.replace("&amp;", "&")
log(repr(txt), 5)
return txt
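# Illustrative usage sketch (the input string is made up): named and numeric
# entities are unescaped, and a missing ';' after &#<number> is repaired first.
def _example_replaceHTMLCodes():
    return replaceHTMLCodes("caf&#233 &amp; bar")  # u'caf\xe9 & bar'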
def stripTags(html):
log(repr(html), 5)
sub_start = html.find("<")
sub_end = html.find(">")
while sub_start < sub_end and sub_start > -1:
html = html.replace(html[sub_start:sub_end + 1], "").strip()
sub_start = html.find("<")
sub_end = html.find(">")
log(repr(html), 5)
return html
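# Illustrative usage sketch (the markup is made up): every <...> tag is removed,
# leaving only the text content.
def _example_stripTags():
    return stripTags("<p>Hello <b>world</b></p>")  # 'Hello world'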
def _getDOMContent(html, name, match, ret): # Cleanup
log("match: " + match, 3)
endstr = u"</" + name # + ">"
start = html.find(match)
end = html.find(endstr, start)
pos = html.find("<" + name, start + 1 )
log(str(start) + " < " + str(end) + ", pos = " + str(pos) + ", endpos: " + str(end), 8)
while pos < end and pos != -1: # Ignore too early </endstr> return
tend = html.find(endstr, end + len(endstr))
if tend != -1:
end = tend
pos = html.find("<" + name, pos + 1)
log("loop: " + str(start) + " < " + str(end) + " pos = " + str(pos), 8)
log("start: %s, len: %s, end: %s" % (start, len(match), end), 3)
if start == -1 and end == -1:
result = u""
elif start > -1 and end > -1:
result = html[start + len(match):end]
elif end > -1:
result = html[:end]
elif start > -1:
result = html[start + len(match):]
if ret:
endstr = html[end:html.find(">", html.find(endstr)) + 1]
result = match + result + endstr
log("done result length: " + str(len(result)), 3)
return result
def _getDOMAttributes(match, name, ret):
log("", 3)
lst = re.compile('<' + name + '.*?' + ret + '=([\'"].[^>]*?[\'"])>', re.M | re.S).findall(match)
if len(lst) == 0:
lst = re.compile('<' + name + '.*?' + ret + '=(.[^>]*?)>', re.M | re.S).findall(match)
ret = []
for tmp in lst:
cont_char = tmp[0]
if cont_char in "'\"":
log("Using %s as quotation mark" % cont_char, 3)
# Limit down to next variable.
if tmp.find('=' + cont_char, tmp.find(cont_char, 1)) > -1:
tmp = tmp[:tmp.find('=' + cont_char, tmp.find(cont_char, 1))]
# Limit to the last quotation mark
if tmp.rfind(cont_char, 1) > -1:
tmp = tmp[1:tmp.rfind(cont_char)]
else:
log("No quotation mark found", 3)
if tmp.find(" ") > 0:
tmp = tmp[:tmp.find(" ")]
elif tmp.find("/") > 0:
tmp = tmp[:tmp.find("/")]
elif tmp.find(">") > 0:
tmp = tmp[:tmp.find(">")]
ret.append(tmp.strip())
log("Done: " + repr(ret), 3)
return ret
def _getDOMElements(item, name, attrs):
log("", 3)
lst = []
for key in attrs:
lst2 = re.compile('(<' + name + '[^>]*?(?:' + key + '=[\'"]' + attrs[key] + '[\'"].*?>))', re.M | re.S).findall(item)
if len(lst2) == 0 and attrs[key].find(" ") == -1: # Try matching without quotation marks
lst2 = re.compile('(<' + name + '[^>]*?(?:' + key + '=' + attrs[key] + '.*?>))', re.M | re.S).findall(item)
if len(lst) == 0:
log("Setting main list " + repr(lst2), 5)
lst = lst2
lst2 = []
else:
log("Setting new list " + repr(lst2), 5)
test = range(len(lst))
test.reverse()
for i in test: # Delete anything missing from the next list.
if not lst[i] in lst2:
log("Purging mismatch " + str(len(lst)) + " - " + repr(lst[i]), 3)
del(lst[i])
if len(lst) == 0 and attrs == {}:
log("No list found, trying to match on name only", 3)
lst = re.compile('(<' + name + '>)', re.M | re.S).findall(item)
if len(lst) == 0:
lst = re.compile('(<' + name + ' .*?>)', re.M | re.S).findall(item)
log("Done: " + str(type(lst)), 3)
return lst
def parseDOM(html, name=u"", attrs={}, ret=False):
log("Name: " + repr(name) + " - Attrs:" + repr(attrs) + " - Ret: " + repr(ret) + " - HTML: " + str(type(html)), 3)
if isinstance(name, str): # Should be handled
try:
name = name #.decode("utf-8")
except:
log("Couldn't decode name binary string: " + repr(name))
if isinstance(html, str):
try:
html = [html.decode("utf-8")] # Replace with chardet thingy
except:
log("Couldn't decode html binary string. Data length: " + repr(len(html)))
html = [html]
elif isinstance(html, unicode):
html = [html]
elif not isinstance(html, list):
log("Input isn't list or string/unicode.")
return u""
if not name.strip():
log("Missing tag name")
return u""
ret_lst = []
for item in html:
temp_item = re.compile('(<[^>]*?\n[^>]*?>)').findall(item)
for match in temp_item:
item = item.replace(match, match.replace("\n", " "))
lst = _getDOMElements(item, name, attrs)
if isinstance(ret, str):
log("Getting attribute %s content for %s matches " % (ret, len(lst) ), 3)
lst2 = []
for match in lst:
lst2 += _getDOMAttributes(match, name, ret)
lst = lst2
else:
log("Getting element content for %s matches " % len(lst), 3)
lst2 = []
for match in lst:
log("Getting element content for %s" % match, 4)
temp = _getDOMContent(item, name, match, ret).strip()
item = item[item.find(temp, item.find(match)) + len(temp):]
lst2.append(temp)
lst = lst2
ret_lst += lst
log("Done: " + repr(ret_lst), 3)
return ret_lst
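# Illustrative usage sketch (the HTML fragment is made up): return the href
# attribute of every <a> element whose class attribute equals "title".
def _example_parseDOM():
    html = '<a class="title" href="/watch?v=1">One</a>'
    return parseDOM(html, "a", attrs={"class": "title"}, ret="href")  # [u'/watch?v=1']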
def extractJS(data, function=False, variable=False, match=False, evaluate=False, values=False):
log("")
scripts = parseDOM(data, "script")
if len(scripts) == 0:
log("Couldn't find any script tags. Assuming javascript file was given.")
scripts = [data]
lst = []
log("Extracting", 4)
for script in scripts:
tmp_lst = []
if function:
tmp_lst = re.compile(function + '\(.*?\).*?;', re.M | re.S).findall(script)
elif variable:
tmp_lst = re.compile(variable + '[ ]+=.*?;', re.M | re.S).findall(script)
else:
tmp_lst = [script]
if len(tmp_lst) > 0:
log("Found: " + repr(tmp_lst), 4)
lst += tmp_lst
else:
log("Found nothing on: " + script, 4)
test = range(0, len(lst))
test.reverse()
for i in test:
if match and lst[i].find(match) == -1:
log("Removing item: " + repr(lst[i]), 10)
del lst[i]
else:
log("Cleaning item: " + repr(lst[i]), 4)
            if lst[i][0] == u"\n":
                lst[i] = lst[i][1:]
            if lst[i][-1] == u"\n":
                lst[i] = lst[i][:-1]
lst[i] = lst[i].strip()
if values or evaluate:
for i in range(0, len(lst)):
log("Getting values %s" % lst[i])
if function:
if evaluate: # include the ( ) for evaluation
data = re.compile("(\(.*?\))", re.M | re.S).findall(lst[i])
else:
data = re.compile("\((.*?)\)", re.M | re.S).findall(lst[i])
elif variable:
tlst = re.compile(variable +".*?=.*?;", re.M | re.S).findall(lst[i])
data = []
for tmp in tlst: # This breaks for some stuff. "ad_tag": "http://ad-emea.doubleclick.net/N4061/pfadx/com.ytpwatch.entertainment/main_563326'' # ends early, must end with }
cont_char = tmp[0]
cont_char = tmp[tmp.find("=") + 1:].strip()
cont_char = cont_char[0]
if cont_char in "'\"":
log("Using %s as quotation mark" % cont_char, 1)
tmp = tmp[tmp.find(cont_char) + 1:tmp.rfind(cont_char)]
else:
log("No quotation mark found", 1)
tmp = tmp[tmp.find("=") + 1: tmp.rfind(";")]
tmp = tmp.strip()
if len(tmp) > 0:
data.append(tmp)
else:
log("ERROR: Don't know what to extract values from")
log("Values extracted: %s" % repr(data))
if len(data) > 0:
lst[i] = data[0]
if evaluate:
for i in range(0, len(lst)):
log("Evaluating %s" % lst[i])
data = lst[i].strip()
try:
try:
lst[i] = json.loads(data)
except:
log("Couldn't json.loads, trying eval")
lst[i] = eval(data)
except:
log("Couldn't eval: %s from %s" % (repr(data), repr(lst[i])))
log("Done: " + str(len(lst)))
return lst
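# Illustrative usage sketch (the page fragment is made up): pull the value assigned
# to a javascript variable out of a script tag and evaluate it into a Python object.
def _example_extractJS():
    page = '<script>var config = {"quality": "hd"};</script>'
    return extractJS(page, variable="config", evaluate=True)  # [{u'quality': u'hd'}]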
def fetchPage(params={}):
get = params.get
link = get("link")
ret_obj = {}
if get("post_data"):
log("called for : " + repr(params['link']))
else:
log("called for : " + repr(params))
if not link or int(get("error", "0")) > 2:
log("giving up")
ret_obj["status"] = 500
return ret_obj
if get("post_data"):
if get("hide_post_data"):
log("Posting data", 2)
else:
log("Posting data: " + urllib.urlencode(get("post_data")), 2)
request = urllib2.Request(link, urllib.urlencode(get("post_data")))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
else:
log("Got request", 2)
request = urllib2.Request(link)
if get("headers"):
for head in get("headers"):
request.add_header(head[0], head[1])
request.add_header('User-Agent', USERAGENT)
if get("cookie"):
request.add_header('Cookie', get("cookie"))
if get("refering"):
request.add_header('Referer', get("refering"))
try:
log("connecting to server...", 1)
con = urllib2.urlopen(request)
ret_obj["header"] = con.info()
ret_obj["new_url"] = con.geturl()
if get("no-content", "false") == u"false" or get("no-content", "false") == "false":
inputdata = con.read()
#data_type = chardet.detect(inputdata)
#inputdata = inputdata.decode(data_type["encoding"])
ret_obj["content"] = inputdata.decode("utf-8")
con.close()
log("Done")
ret_obj["status"] = 200
return ret_obj
except urllib2.HTTPError, e:
err = str(e)
log("HTTPError : " + err)
log("HTTPError - Headers: " + str(e.headers) + " - Content: " + e.fp.read())
params["error"] = str(int(get("error", "0")) + 1)
ret = fetchPage(params)
if not "content" in ret and e.fp:
ret["content"] = e.fp.read()
return ret
ret_obj["status"] = 500
return ret_obj
except urllib2.URLError, e:
err = str(e)
log("URLError : " + err)
time.sleep(3)
params["error"] = str(int(get("error", "0")) + 1)
ret_obj = fetchPage(params)
return ret_obj
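# Illustrative usage sketch (the URL is made up): a plain GET request; on success
# "content" holds the utf-8 decoded body and "status" is 200.
def _example_fetchPage():
    result = fetchPage({"link": "http://www.example.com/"})
    if result.get("status") == 200:
        return result["content"]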
def getCookieInfoAsHTML():
log("", 5)
if hasattr(sys.modules["__main__"], "cookiejar"):
cookiejar = sys.modules["__main__"].cookiejar
cookie = repr(cookiejar)
cookie = cookie.replace("<_LWPCookieJar.LWPCookieJar[", "")
cookie = cookie.replace("), Cookie(version=0,", "></cookie><cookie ")
cookie = cookie.replace(")]>", "></cookie>")
cookie = cookie.replace("Cookie(version=0,", "<cookie ")
cookie = cookie.replace(", ", " ")
log(repr(cookie), 5)
return cookie
log("Found no cookie", 5)
return ""
# This function implements a horrible hack related to python 2.4's terrible unicode handling.
def makeAscii(data):
log(repr(data), 5)
#if sys.hexversion >= 0x02050000:
# return data
try:
return data.encode('ascii', "ignore")
except:
log("Hit except on : " + repr(data))
s = u""
for i in data:
try:
i.encode("ascii", "ignore")
except:
log("Can't convert character", 4)
continue
else:
s += i
log(repr(s), 5)
return s
# This function handles stupid utf handling in python.
def makeUTF8(data):
log(repr(data), 5)
return data
try:
return data.decode('utf8', 'xmlcharrefreplace') # was 'ignore'
except:
log("Hit except on : " + repr(data))
s = u""
for i in data:
try:
i.decode("utf8", "xmlcharrefreplace")
except:
log("Can't convert character", 4)
continue
else:
s += i
log(repr(s), 5)
return s
def openFile(filepath, options=u"r"):
log(repr(filepath) + " - " + repr(options))
if options.find("b") == -1: # Toggle binary mode on failure
alternate = options + u"b"
else:
alternate = options.replace(u"b", u"")
try:
log("Trying normal: %s" % options)
return io.open(filepath, options)
except:
log("Fallback to binary: %s" % alternate)
return io.open(filepath, alternate)
def log(description, level=0):
if dbg and dbglevel > level:
try:
xbmc.log((u"[%s] %s : '%s'" % (plugin, inspect.stack()[1][3], description)).decode("utf-8"), xbmc.LOGNOTICE)
except:
xbmc.log(u"FALLBACK [%s] %s : '%s'" % (plugin, inspect.stack()[1][3], repr(description)), xbmc.LOGNOTICE)
|
gpl-2.0
|
timj/scons
|
test/CacheDir/debug.py
|
5
|
5305
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the --cache-debug option to see if it prints the expected messages.
Note that we don't check for the "race condition" message when someone
else's build populates the CacheDir with a file in between the time we
decide to build it because it doesn't exist in the CacheDir, and the time our
build of the file completes and we push it out.
"""
import TestSCons
test = TestSCons.TestSCons(match=TestSCons.match_re)
test.subdir('cache', 'src')
cache = test.workpath('cache')
debug_out = test.workpath('cache-debug.out')
test.write(['src', 'SConstruct'], """\
CacheDir(r'%(cache)s')
SConscript('SConscript')
""" % locals())
test.write(['src', 'SConscript'], """\
def cat(env, source, target):
target = str(target[0])
open('cat.out', 'ab').write(target + "\\n")
f = open(target, "wb")
for src in source:
f.write(open(str(src), "rb").read())
f.close()
env = Environment(BUILDERS={'Cat':Builder(action=cat)})
env.Cat('aaa.out', 'aaa.in')
env.Cat('bbb.out', 'bbb.in')
env.Cat('ccc.out', 'ccc.in')
env.Cat('all', ['aaa.out', 'bbb.out', 'ccc.out'])
""")
test.write(['src', 'aaa.in'], "aaa.in\n")
test.write(['src', 'bbb.in'], "bbb.in\n")
test.write(['src', 'ccc.in'], "ccc.in\n")
# Test for messages about files not being in CacheDir, with -n (don't
# actually build or push) and sending the message to a file.
expect = \
r"""cat\(\["aaa.out"\], \["aaa.in"\]\)
cat\(\["bbb.out"\], \["bbb.in"\]\)
cat\(\["ccc.out"\], \["ccc.in"\]\)
cat\(\["all"\], \["aaa.out", "bbb.out", "ccc.out"\]\)
"""
test.run(chdir='src',
arguments='-n -Q --cache-debug=%s .' % debug_out,
stdout=expect)
expect = \
r"""CacheRetrieve\(aaa.out\): [0-9a-fA-F]+ not in cache
CacheRetrieve\(bbb.out\): [0-9a-fA-F]+ not in cache
CacheRetrieve\(ccc.out\): [0-9a-fA-F]+ not in cache
CacheRetrieve\(all\): [0-9a-fA-F]+ not in cache
"""
test.must_match(debug_out, expect, mode='r')
# Test for messages about actually pushing to the cache, without -n
# and to standard output.
expect = \
r"""CacheRetrieve\(aaa.out\): [0-9a-fA-F]+ not in cache
cat\(\["aaa.out"\], \["aaa.in"\]\)
CachePush\(aaa.out\): pushing to [0-9a-fA-F]+
CacheRetrieve\(bbb.out\): [0-9a-fA-F]+ not in cache
cat\(\["bbb.out"\], \["bbb.in"\]\)
CachePush\(bbb.out\): pushing to [0-9a-fA-F]+
CacheRetrieve\(ccc.out\): [0-9a-fA-F]+ not in cache
cat\(\["ccc.out"\], \["ccc.in"\]\)
CachePush\(ccc.out\): pushing to [0-9a-fA-F]+
CacheRetrieve\(all\): [0-9a-fA-F]+ not in cache
cat\(\["all"\], \["aaa.out", "bbb.out", "ccc.out"\]\)
CachePush\(all\): pushing to [0-9a-fA-F]+
"""
test.run(chdir='src',
arguments='-Q --cache-debug=- .',
stdout=expect)
# Clean up the local targets.
test.run(chdir='src', arguments='-c --cache-debug=%s .' % debug_out)
test.unlink(['src', 'cat.out'])
# Test for messages about retrieving files from CacheDir, with -n
# and sending the messages to standard output.
expect = \
r"""Retrieved `aaa.out' from cache
CacheRetrieve\(aaa.out\): retrieving from [0-9a-fA-F]+
Retrieved `bbb.out' from cache
CacheRetrieve\(bbb.out\): retrieving from [0-9a-fA-F]+
Retrieved `ccc.out' from cache
CacheRetrieve\(ccc.out\): retrieving from [0-9a-fA-F]+
Retrieved `all' from cache
CacheRetrieve\(all\): retrieving from [0-9a-fA-F]+
"""
test.run(chdir='src',
arguments='-n -Q --cache-debug=- .',
stdout=expect)
# And finally test for message about retrieving file from CacheDir
# *without* -n and sending the message to a file.
expect = \
r"""Retrieved `aaa.out' from cache
Retrieved `bbb.out' from cache
Retrieved `ccc.out' from cache
Retrieved `all' from cache
"""
test.run(chdir='src',
arguments='-Q --cache-debug=%s .' % debug_out,
stdout=expect)
expect = \
r"""CacheRetrieve\(aaa.out\): retrieving from [0-9a-fA-F]+
CacheRetrieve\(bbb.out\): retrieving from [0-9a-fA-F]+
CacheRetrieve\(ccc.out\): retrieving from [0-9a-fA-F]+
CacheRetrieve\(all\): retrieving from [0-9a-fA-F]+
"""
test.must_match(debug_out, expect, mode='r')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
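# For context (illustrative, not exercised by this test script): outside the test
# harness the behaviour checked above comes from a two-line SConstruct plus the
# --cache-debug flag. The cache path below is an example.
#
#   CacheDir('/path/to/shared/cache')
#   SConscript('SConscript')
#
#   scons -Q --cache-debug=-            # print CacheRetrieve/CachePush lines to stdout
#   scons -Q --cache-debug=cache.log    # or send them to a file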
|
mit
|
SysTheron/adhocracy
|
src/adhocracy/lib/tiles/__init__.py
|
2
|
1325
|
import logging
from adhocracy import model
import badge_tiles as badge
import comment_tiles as comment
import decision_tiles as decision
import delegation_tiles as delegation
import event_tiles as event
import instance_tiles as instance
import milestone_tiles as milestone
import page_tiles as page
import poll_tiles as poll
import proposal_tiles as proposal
import revision_tiles as revision
import selection_tiles as selection
import tag_tiles as tag
import text_tiles as text
import user_tiles as user
log = logging.getLogger(__name__)
def dispatch_row_with_comments(entity):
if isinstance(entity, model.Comment):
return comment.row(entity)
return dispatch_row(entity)
def dispatch_row(entity):
if isinstance(entity, model.User):
return user.row(entity)
elif isinstance(entity, model.Instance):
return instance.row(entity)
elif isinstance(entity, model.Proposal):
return proposal.row(entity)
elif isinstance(entity, model.Milestone):
return milestone.row(entity)
elif isinstance(entity, model.Page):
if entity.function != model.Page.DESCRIPTION:
return page.row(entity)
elif isinstance(entity, model.Tag):
return tag.row(entity)
else:
pass
#log.warn("WARNING: Cannot render %s!" % repr(entity))
|
agpl-3.0
|
openstack/watcher
|
watcher/applier/manager.py
|
2
|
1446
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
# Copyright (c) 2016 Intel Corp
#
# Authors: Jean-Emile DARTOIS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from watcher.applier.messaging import trigger
from watcher.common import service_manager
from watcher import conf
CONF = conf.CONF
class ApplierManager(service_manager.ServiceManager):
@property
def service_name(self):
return 'watcher-applier'
@property
def api_version(self):
return '1.0'
@property
def publisher_id(self):
return CONF.watcher_applier.publisher_id
@property
def conductor_topic(self):
return CONF.watcher_applier.conductor_topic
@property
def notification_topics(self):
return []
@property
def conductor_endpoints(self):
return [trigger.TriggerActionPlan]
@property
def notification_endpoints(self):
return []
|
apache-2.0
|
jonathanmeier5/teamstore
|
tests/dashboard/test_product.py
|
7
|
5663
|
from __future__ import unicode_literals
import pytest
from django import forms
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_text
from mock import Mock
from saleor.dashboard.product.forms import (ProductClassForm,
ProductClassSelectorForm,
ProductForm)
from saleor.product.models import (Product, ProductAttribute, ProductClass,
ProductVariant)
@pytest.mark.integration
@pytest.mark.django_db
def test_stock_record_update_works(admin_client, product_in_stock):
variant = product_in_stock.variants.get()
stock = variant.stock.order_by('-quantity_allocated').first()
quantity = stock.quantity
quantity_allocated = stock.quantity_allocated
url = reverse(
'dashboard:product-stock-update', kwargs={
'product_pk': product_in_stock.pk,
'stock_pk': stock.pk})
admin_client.post(url, {
'variant': stock.variant_id, 'location': stock.location.id,
'cost_price': stock.cost_price.net,
'quantity': quantity + 5})
new_stock = variant.stock.get(pk=stock.pk)
assert new_stock.quantity == quantity + 5
assert new_stock.quantity_allocated == quantity_allocated
def test_valid_product_class_form(color_attribute, size_attribute):
data = {'name': "Testing Class",
'product_attributes': [color_attribute.pk],
'variant_attributes': [size_attribute.pk],
'has_variants': True}
form = ProductClassForm(data)
assert form.is_valid()
# Don't allow same attribute in both fields
data['variant_attributes'] = [color_attribute.pk, size_attribute.pk]
data['product_attributes'] = [size_attribute.pk]
form = ProductClassForm(data)
assert not form.is_valid()
def test_variantless_product_class_form(color_attribute, size_attribute):
data = {'name': "Testing Class",
'product_attributes': [color_attribute.pk],
'variant_attributes': [],
'has_variants': False}
form = ProductClassForm(data)
assert form.is_valid()
# Don't allow variant attributes when no variants
data = {'name': "Testing Class",
'product_attributes': [color_attribute.pk],
'variant_attributes': [size_attribute.pk],
'has_variants': False}
form = ProductClassForm(data)
assert not form.is_valid()
def test_edit_used_product_class(db):
product_class = ProductClass.objects.create(name='New class',
has_variants=True)
product = Product.objects.create(
name='Test product', price=10, product_class=product_class)
ProductVariant.objects.create(product=product, sku='1234')
# When all products have only one variant you can change
# has_variants to false
assert product.variants.all().count() == 1
data = {'name': product_class.name,
'product_attributes': product_class.product_attributes.all(),
'variant_attributes': product_class.variant_attributes.all(),
'has_variants': False}
form = ProductClassForm(data, instance=product_class)
assert form.is_valid()
data = {'name': product_class.name,
'product_attributes': product_class.product_attributes.all(),
'variant_attributes': product_class.variant_attributes.all(),
'has_variants': True}
form = ProductClassForm(data, instance=product_class)
assert form.is_valid()
# Test has_variants validator which prevents turning off when product
# has multiple variants
ProductVariant.objects.create(product=product, sku='12345')
assert product.variants.all().count() == 2
data = {'name': product_class.name,
'product_attributes': product_class.product_attributes.all(),
'variant_attributes': product_class.variant_attributes.all(),
'has_variants': False}
form = ProductClassForm(data, instance=product_class)
assert not form.is_valid()
assert 'has_variants' in form.errors.keys()
def test_product_selector_form():
items = [Mock() for pk
in range(ProductClassSelectorForm.MAX_RADIO_SELECT_ITEMS)]
form_radio = ProductClassSelectorForm(product_classes=items)
assert isinstance(form_radio.fields['product_cls'].widget,
forms.widgets.RadioSelect)
items.append(Mock())
form_select = ProductClassSelectorForm(product_classes=items)
assert isinstance(form_select.fields['product_cls'].widget,
forms.widgets.Select)
def test_change_attributes_in_product_form(db, product_in_stock,
color_attribute):
product = product_in_stock
product_class = product.product_class
text_attribute = ProductAttribute.objects.create(slug='author',
name='Author')
product_class.product_attributes.add(text_attribute)
color_value = color_attribute.values.first()
new_author = 'Main Tester'
new_color = color_value.pk
data = {'name': product.name,
'price': product.price.gross,
'categories': [c.pk for c in product.categories.all()],
'description': 'description',
'attribute-author': new_author,
'attribute-color': new_color}
form = ProductForm(data, instance=product)
assert form.is_valid()
product = form.save()
assert product.get_attribute(color_attribute.pk) == smart_text(new_color)
assert product.get_attribute(text_attribute.pk) == new_author
|
bsd-3-clause
|
twisted/mantissa
|
xmantissa/test/test_signup.py
|
1
|
8937
|
from twisted.trial import unittest
from axiom import store, userbase
from axiom.item import Item
from axiom.attributes import inmemory, integer
from axiom.plugins import mantissacmd
from xmantissa import signup, offering
from xmantissa.plugins import free_signup
from xmantissa.product import Product, Installation
class SignupCreationTestCase(unittest.TestCase):
def setUp(self):
self.store = store.Store()
self.ls = userbase.LoginSystem(store=self.store)
self.admin = self.ls.addAccount(u'admin', u'localhost', None,
internal=True, verified=True)
self.substore = self.admin.avatars.open()
self.sc = signup.SignupConfiguration(store=self.substore)
def _installTestOffering(self):
io = offering.InstalledOffering(
store=self.store,
offeringName=u"mantissa",
application=None)
def createFreeSignup(self, itemClass, url=u'signup', prompt=u'Sign Up!'):
"""
A utility method to ensure that the same arguments are always used to
create signup mechanisms, since these are the arguments that are going
to be coming from the admin form.
"""
product = Product(store=self.store, types=[])
return self.sc.createSignup(
u'testuser@localhost',
itemClass,
{'prefixURL': url},
product,
u'Blank Email Template', prompt)
def testCreateFreeSignups(self):
self._installTestOffering()
for signupMechanismPlugin in [free_signup.freeTicket,
free_signup.userInfo]:
self.createFreeSignup(signupMechanismPlugin.itemClass)
def test_usernameAvailability(self):
"""
Test that the usernames which ought to be available are and that those
which aren't are not:
Only syntactically valid localparts are allowed. Localparts which are
already assigned are not allowed.
Only domains which are actually served by this mantissa instance are
allowed.
"""
signup = self.createFreeSignup(free_signup.userInfo.itemClass)
# Allowed: unused localpart, same domain as the administrator created
# by setUp.
self.failUnless(signup.usernameAvailable(u'alice', u'localhost')[0])
# Not allowed: unused localpart, unknown domain.
self.failIf(signup.usernameAvailable(u'alice', u'example.com')[0])
# Not allowed: used localpart, same domain as the administrator created
# by setUp.
self.failIf(signup.usernameAvailable(u'admin', u'localhost')[0])
self.assertEquals(signup.usernameAvailable(u'fjones', u'localhost'),
[True, u'Username already taken'])
signup.createUser(
realName=u"Frank Jones",
username=u'fjones',
domain=u'localhost',
password=u'asdf',
emailAddress=u'[email protected]')
self.assertEquals(signup.usernameAvailable(u'fjones', u'localhost'),
[False, u'Username already taken'])
ss = self.ls.accountByAddress(u"fjones", u"localhost").avatars.open()
self.assertEquals(ss.query(Installation).count(), 1)
def testUserInfoSignupValidation2(self):
"""
Ensure that invalid characters aren't allowed in usernames, that
usernames are parsable as the local part of an email address and that
usernames shorter than two characters are invalid.
"""
signup = self.createFreeSignup(free_signup.userInfo.itemClass)
self.assertEquals(signup.usernameAvailable(u'foo bar', u'localhost'),
[False, u"Username contains invalid character: ' '"])
self.assertEquals(signup.usernameAvailable(u'foo@bar', u'localhost'),
[False, u"Username contains invalid character: '@'"])
# '~' is not expressly forbidden by the validator in usernameAvailable,
# yet it is rejected by parseAddress (in xmantissa.smtp).
self.assertEquals(signup.usernameAvailable(u'fo~o', u'127.0.0.1'),
[False, u"Username fails to parse"])
self.assertEquals(signup.usernameAvailable(u'f', u'localhost'),
[False, u"Username too short"])
def test_userInfoSignupUserInfo(self):
"""
Check that C{createUser} creates a L{signup.UserInfo} item with its
C{realName} attribute set.
"""
freeSignup = self.createFreeSignup(free_signup.userInfo.itemClass)
freeSignup.createUser(
u'Frank Jones', u'fjones', u'divmod.com',
u'asdf', u'[email protected]')
account = self.ls.accountByAddress(u'fjones', u'divmod.com')
substore = account.avatars.open()
userInfos = list(substore.query(signup.UserInfo))
self.assertEqual(len(userInfos), 1)
userInfo = userInfos[0]
self.assertEqual(userInfo.realName, u'Frank Jones')
def test_userInfoCreatedBeforeProductInstalled(self):
"""
L{UserInfoSignup.createUser} should create a L{UserInfo} item B{before} it
calls L{Product.installProductOn}.
"""
class StubProduct(Item):
"""
L{Product}-alike which records the existing L{UserInfo} items in
the store when it is installed.
"""
required_axiom_attribute_garbage = integer(
doc="""
mandatory Item attribute.
""")
userInfos = inmemory()
def installProductOn(self, substore):
"""
Find all the L{UserInfo} items in the given store and remember
them.
"""
self.userInfos = list(substore.query(signup.UserInfo))
product = StubProduct(store=self.store)
freeSignup = self.createFreeSignup(free_signup.userInfo.itemClass)
freeSignup.product = product
freeSignup.createUser(
u'Frank Jones', u'fjones', u'example.com',
u'password', u'[email protected]')
self.assertEqual(len(product.userInfos), 1)
def test_userInfoLoginMethods(self):
"""
Check that C{createUser} creates only two L{LoginMethod}s on the
account.
"""
username, domain = u'fjones', u'divmod.com'
signup = self.createFreeSignup(free_signup.userInfo.itemClass)
signup.createUser(u'Frank Jones', username, domain, u'asdf',
u'[email protected]')
account = self.ls.accountByAddress(username, domain)
query = list(
self.store.query(userbase.LoginMethod,
userbase.LoginMethod.account == account,
sort=userbase.LoginMethod.internal.ascending))
self.assertEquals(len(query), 2)
self.assertEquals(query[0].internal, False)
self.assertEquals(query[0].verified, False)
self.assertEquals(query[0].localpart, u'fj')
self.assertEquals(query[0].domain, u'example.com')
self.assertEquals(query[1].internal, True)
self.assertEquals(query[1].verified, True)
self.assertEquals(query[1].localpart, username)
self.assertEquals(query[1].domain, domain)
def test_freeSignupsList(self):
"""
Test that if we produce 3 different publicly accessible signups, we get
information about all of them back.
"""
for i, signupMechanismPlugin in enumerate(
[free_signup.freeTicket,
free_signup.userInfo]):
self.createFreeSignup(signupMechanismPlugin.itemClass,
url=u'signup%d' % (i+1,),
prompt=u"Sign Up %d" % (i+1,))
x = list(signup._getPublicSignupInfo(self.store))
x.sort()
self.assertEquals(x, [(u'Sign Up 1', u'/signup1'),
(u'Sign Up 2', u'/signup2')])
class ValidatingSignupFormTests(unittest.TestCase):
"""
Tests for L{ValidatingSignupForm}.
"""
def test_getInitialArguments(self):
"""
L{ValidatingSignupForm.getInitialArguments} should return a tuple
consisting of a unicode string giving the domain name for which this
form will allow signup.
"""
domain = u"example.com"
siteStore = store.Store(filesdir=self.mktemp())
mantissacmd.Mantissa().installSite(siteStore, domain, u"", False)
login = siteStore.findUnique(userbase.LoginSystem)
login.addAccount(u"alice", domain, u"password", internal=True)
userInfo = signup.UserInfoSignup(store=siteStore, prefixURL=u"opaque")
form = signup.ValidatingSignupForm(userInfo)
self.assertEqual(form.getInitialArguments(), (domain,))
|
mit
|
translate/translate
|
translate/lang/test_hy.py
|
2
|
1448
|
from translate.lang import factory
def test_punctranslate():
"""Tests that we can translate punctuation."""
language = factory.getlanguage("hy")
assert language.punctranslate("") == ""
assert language.punctranslate("abc efg") == "abc efg"
assert language.punctranslate("abc efg.") == "abc efg։"
assert language.punctranslate("abc efg. hij.") == "abc efg։ hij։"
assert language.punctranslate("abc efg!") == "abc efg՜"
assert language.punctranslate("Delete file: %s") == "Delete file՝ %s"
# TODO: Find out exactly how questions work
def test_sentences():
"""Tests basic functionality of sentence segmentation."""
language = factory.getlanguage("hy")
sentences = language.sentences("")
assert sentences == []
sentences = language.sentences(
"Արխիվն արդեն գոյություն ունի։ Դուք ցանկանու՞մ եք կրկին գրել այն։"
)
assert sentences == [
"Արխիվն արդեն գոյություն ունի։",
"Դուք ցանկանու՞մ եք կրկին գրել այն։",
]
sentences = language.sentences(
"Արխիվն արդեն գոյություն ունի։ դուք ցանկանու՞մ եք կրկին գրել այն։"
)
assert sentences == [
"Արխիվն արդեն գոյություն ունի։ դուք ցանկանու՞մ եք կրկին գրել այն։"
]
|
gpl-2.0
|
EvanK/ansible
|
lib/ansible/modules/remote_management/oneview/oneview_san_manager.py
|
146
|
7717
|
#!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_san_manager
short_description: Manage OneView SAN Manager resources
description:
- Provides an interface to manage SAN Manager resources. Can create, update, or delete.
version_added: "2.4"
requirements:
- hpOneView >= 3.1.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
state:
description:
- Indicates the desired state for the Uplink Set resource.
- C(present) ensures data properties are compliant with OneView.
- C(absent) removes the resource from OneView, if it exists.
- C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
default: present
choices: [present, absent, connection_information_set]
data:
description:
- List with SAN Manager properties.
required: true
extends_documentation_fragment:
- oneview
- oneview.validateetag
'''
EXAMPLES = '''
- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: present
data:
providerDisplayName: Brocade Network Advisor
connectionInfo:
- name: Host
value: 172.18.15.1
- name: Port
value: 5989
- name: Username
value: username
- name: Password
value: password
- name: UseSsl
value: true
delegate_to: localhost
- name: Ensure a Device Manager for the Cisco SAN Provider is present
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: present
data:
name: 172.18.20.1
providerDisplayName: Cisco
connectionInfo:
- name: Host
value: 172.18.20.1
- name: SnmpPort
value: 161
- name: SnmpUserName
value: admin
- name: SnmpAuthLevel
value: authnopriv
- name: SnmpAuthProtocol
value: sha
- name: SnmpAuthString
value: password
delegate_to: localhost
- name: Sets the SAN Manager connection information
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: connection_information_set
data:
connectionInfo:
- name: Host
value: '172.18.15.1'
- name: Port
value: '5989'
- name: Username
value: 'username'
- name: Password
value: 'password'
- name: UseSsl
value: true
delegate_to: localhost
- name: Refreshes the SAN Manager
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: present
data:
name: 172.18.15.1
refreshState: RefreshPending
delegate_to: localhost
- name: Delete the SAN Manager recently created
oneview_san_manager:
config: /etc/oneview/oneview_config.json
state: absent
data:
name: '172.18.15.1'
delegate_to: localhost
'''
RETURN = '''
san_manager:
description: Has the OneView facts about the SAN Manager.
returned: On state 'present'. Can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError
class SanManagerModule(OneViewModuleBase):
MSG_CREATED = 'SAN Manager created successfully.'
MSG_UPDATED = 'SAN Manager updated successfully.'
MSG_DELETED = 'SAN Manager deleted successfully.'
MSG_ALREADY_PRESENT = 'SAN Manager is already present.'
MSG_ALREADY_ABSENT = 'SAN Manager is already absent.'
MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found."
argument_spec = dict(
state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']),
data=dict(type='dict', required=True)
)
def __init__(self):
super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True)
self.resource_client = self.oneview_client.san_managers
def execute_module(self):
if self.data.get('connectionInfo'):
for connection_hash in self.data.get('connectionInfo'):
if connection_hash.get('name') == 'Host':
resource_name = connection_hash.get('value')
elif self.data.get('name'):
resource_name = self.data.get('name')
else:
msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. '
msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.'
raise OneViewModuleValueError(msg.format())
resource = self.resource_client.get_by_name(resource_name)
if self.state == 'present':
changed, msg, san_manager = self._present(resource)
return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
elif self.state == 'absent':
return self.resource_absent(resource, method='remove')
elif self.state == 'connection_information_set':
changed, msg, san_manager = self._connection_information_set(resource)
return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
def _present(self, resource):
if not resource:
provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data))
return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri)
else:
merged_data = resource.copy()
merged_data.update(self.data)
# Remove 'connectionInfo' from comparison, since it is not possible to validate it.
resource.pop('connectionInfo', None)
merged_data.pop('connectionInfo', None)
if self.compare(resource, merged_data):
return False, self.MSG_ALREADY_PRESENT, resource
else:
updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
return True, self.MSG_UPDATED, updated_san_manager
def _connection_information_set(self, resource):
if not resource:
return self._present(resource)
else:
merged_data = resource.copy()
merged_data.update(self.data)
merged_data.pop('refreshState', None)
if not self.data.get('connectionInfo', None):
raise OneViewModuleValueError('A connectionInfo field is required for this operation.')
updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
return True, self.MSG_UPDATED, updated_san_manager
def _get_provider_uri_by_display_name(self, data):
display_name = data.get('providerDisplayName')
provider_uri = self.resource_client.get_provider_uri(display_name)
if not provider_uri:
raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name))
return provider_uri
def main():
SanManagerModule().run()
if __name__ == '__main__':
main()
|
gpl-3.0
|
realsaiko/odoo
|
addons/report_intrastat/__init__.py
|
377
|
1079
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_intrastat
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
laborautonomo/poedit
|
deps/boost/tools/build/v2/tools/midl.py
|
18
|
5657
|
# Copyright (c) 2005 Alexey Pakhunov.
# Copyright (c) 2011 Juraj Ivancic
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
# http://www.boost.org/LICENSE_1_0.txt)
# Microsoft Interface Definition Language (MIDL) related routines
from b2.build import scanner, type
from b2.build.toolset import flags
from b2.build.feature import feature
from b2.manager import get_manager
from b2.tools import builtin, common
from b2.util import regex
def init():
pass
type.register('IDL', ['idl'])
# A type library (.tlb) is generated by MIDL compiler and can be included
# to resources of an application (.rc). In order to be found by a resource
# compiler its target type should be derived from 'H' - otherwise
# the property '<implicit-dependency>' will be ignored.
type.register('MSTYPELIB', 'tlb', 'H')
# Register scanner for MIDL files
class MidlScanner(scanner.Scanner):
def __init__ (self, includes=[]):
scanner.Scanner.__init__(self)
self.includes = includes
# List of quoted strings
re_strings = "[ \t]*\"([^\"]*)\"([ \t]*,[ \t]*\"([^\"]*)\")*[ \t]*" ;
# 'import' and 'importlib' directives
self.re_import = "import" + re_strings + "[ \t]*;" ;
self.re_importlib = "importlib[ \t]*[(]" + re_strings + "[)][ \t]*;" ;
# C preprocessor 'include' directive
self.re_include_angle = "#[ \t]*include[ \t]*<(.*)>" ;
self.re_include_quoted = "#[ \t]*include[ \t]*\"(.*)\"" ;
def pattern():
# Match '#include', 'import' and 'importlib' directives
return "((#[ \t]*include|import(lib)?).+(<(.*)>|\"(.*)\").+)"
def process(self, target, matches, binding):
included_angle = regex.transform(matches, self.re_include_angle)
included_quoted = regex.transform(matches, self.re_include_quoted)
imported = regex.transform(matches, self.re_import, [1, 3])
imported_tlbs = regex.transform(matches, self.re_importlib, [1, 3])
# CONSIDER: the new scoping rule seem to defeat "on target" variables.
g = bjam.call('get-target-variable', target, 'HDRGRIST')
b = os.path.normalize_path(os.path.dirname(binding))
# Attach binding of including file to included targets.
# When target is directly created from virtual target
# this extra information is unnecessary. But in other
# cases, it allows to distinguish between two headers of the
# same name included from different places.
g2 = g + "#" + b
g = "<" + g + ">"
g2 = "<" + g2 + ">"
included_angle = [ g + x for x in included_angle ]
included_quoted = [ g + x for x in included_quoted ]
imported = [ g + x for x in imported ]
imported_tlbs = [ g + x for x in imported_tlbs ]
all = included_angle + included_quoted + imported
bjam.call('INCLUDES', [target], all)
bjam.call('DEPENDS', [target], imported_tlbs)
bjam.call('NOCARE', all + imported_tlbs)
engine.set_target_variable(included_angle , 'SEARCH', ungrist(self.includes))
engine.set_target_variable(included_quoted, 'SEARCH', b + ungrist(self.includes))
engine.set_target_variable(imported , 'SEARCH', b + ungrist(self.includes))
engine.set_target_variable(imported_tlbs , 'SEARCH', b + ungrist(self.includes))
get_manager().scanners().propagate(type.get_scanner('CPP', PropertySet(self.includes)), included_angle + included_quoted)
get_manager().scanners().propagate(self, imported)
scanner.register(MidlScanner, 'include')
type.set_scanner('IDL', MidlScanner)
# Command line options
feature('midl-stubless-proxy', ['yes', 'no'], ['propagated'] )
feature('midl-robust', ['yes', 'no'], ['propagated'] )
flags('midl.compile.idl', 'MIDLFLAGS', ['<midl-stubless-proxy>yes'], ['/Oicf' ])
flags('midl.compile.idl', 'MIDLFLAGS', ['<midl-stubless-proxy>no' ], ['/Oic' ])
flags('midl.compile.idl', 'MIDLFLAGS', ['<midl-robust>yes' ], ['/robust' ])
flags('midl.compile.idl', 'MIDLFLAGS', ['<midl-robust>no' ], ['/no_robust'])
# Architecture-specific options
architecture_x86 = ['<architecture>' , '<architecture>x86']
address_model_32 = ['<address-model>', '<address-model>32']
address_model_64 = ['<address-model>', '<address-model>64']
flags('midl.compile.idl', 'MIDLFLAGS', [ar + '/' + m for ar in architecture_x86 for m in address_model_32 ], ['/win32'])
flags('midl.compile.idl', 'MIDLFLAGS', [ar + '/<address-model>64' for ar in architecture_x86], ['/x64'])
flags('midl.compile.idl', 'MIDLFLAGS', ['<architecture>ia64/' + m for m in address_model_64], ['/ia64'])
flags('midl.compile.idl', 'DEFINES', [], ['<define>'])
flags('midl.compile.idl', 'UNDEFS', [], ['<undef>'])
flags('midl.compile.idl', 'INCLUDES', [], ['<include>'])
builtin.register_c_compiler('midl.compile.idl', ['IDL'], ['MSTYPELIB', 'H', 'C(%_i)', 'C(%_proxy)', 'C(%_dlldata)'], [])
# MIDL does not always generate '%_proxy.c' and '%_dlldata.c'. This behavior
# depends on contents of the source IDL file. Calling TOUCH_FILE below ensures
# that both files will be created so bjam will not try to recreate them
# constantly.
get_manager().engine().register_action(
'midl.compile.idl',
'''midl /nologo @"@($(<[1]:W).rsp:E=
"$(>:W)"
-D$(DEFINES)
"-I$(INCLUDES)"
-U$(UNDEFS)
$(MIDLFLAGS)
/tlb "$(<[1]:W)"
/h "$(<[2]:W)"
/iid "$(<[3]:W)"
/proxy "$(<[4]:W)"
/dlldata "$(<[5]:W)")"
{touch} "$(<[4]:W)"
{touch} "$(<[5]:W)"'''.format(touch=common.file_creation_command()))
|
mit
|
linsalrob/EdwardsLab
|
crAssphage/NCBI_SRA_Submission.py
|
1
|
6447
|
"""
Submit my data to the SRA.
"""
import os
import sys
import argparse
from roblib import bcolors
__author__ = 'Rob Edwards'
our_data = {
'library_strategy' : 'AMPLICON',
'library_source' : 'METAGENOMIC',
'library_selection' : 'PCR',
'library_layout' : 'SINGLE',
'platform' : 'capillary',
'instrument_model' : 'AB 3730 Genetic Analyzer'
}
def read_sra_data(ifile='sra_ids_experiments.txt', verbose=False):
"""
Read the SRA experiment ID that we generated with an SQL command
"""
sra_data = {}
# library_strategy,library_source,library_selection,library_layout,platform,instrument_model,platform_parameters
with open(ifile, 'r') as f:
for l in f:
p = l.rstrip("\n").split("\t")
sra_data[p[0]] = {}
if 'PAIRED' in p[4]:
p[4] = 'PAIRED'
if 'SINGLE' in p[4]:
p[4] = 'SINGLE'
if 'unspecified' in p[6]:
p[6] = 'Illumina HiSeq 1000'
for i,j in enumerate(['library_strategy', 'library_source', 'library_selection', 'library_layout', 'platform', 'instrument_model', 'platform_parameters']):
sra_data[p[0]][j]=p[i+1] # the first element is the id :)
return sra_data
def parse_file(ifile, ofile, sra_data, out_dir, verbose=False):
"""
Parse the file and create a new metadata file.
Writes everything to the directory SRA_Submission, including a set of fasta files,
one per biosample.
"""
cols = {'bioproject_accession' : 0, 'biosample_accession' : 1, 'library_ID' : 2,
'title' : 3, 'library_strategy' : 4, 'library_source' : 5, 'library_selection' : 6,
'library_layout' : 7, 'platform' : 8, 'instrument_model' : 9, 'design_description' : 10,
'filetype' : 11, 'filename' : 12, 'filename2' : 13,
'filename3' : 14, 'filename4' : 15, 'assembly' : 16}
if os.path.exists(out_dir):
sys.stderr.write(f"{bcolors.FAIL}ERROR: {out_dir} exists. Not overwriting\n")
sys.exit(-1)
os.mkdir(out_dir)
volume = {}
linecount = 1
filecount = 0
if ".tsv" in ofile:
ofile = ofile.replace(".tsv", "")
out = open(f"{out_dir}/{ofile}.{filecount}.tsv", 'w')
out.write("bioproject_accession\tbiosample_accession\tlibrary_ID\ttitle\tlibrary_strategy\tlibrary_source\tlibrary_selection\tlibrary_layout\tplatform\tinstrument_model\tdesign_description\tfiletype\tfilename\tfilename2\tfilename3\tfilename4\tassembly\n")
os.mkdir(os.path.join(out_dir, str(filecount)))
increment_filecount = False
with open(ifile, 'r') as f:
for l in f:
if l.startswith("BioProject_Accession"):
continue
# this is to ensure that we write the fasta sequences to the correct subdir
# for sequences that we are not processing further
if increment_filecount:
filecount+=1
increment_filecount = False
out.close()
out = open(f"{out_dir}/{ofile}.{filecount}.tsv", 'w')
out.write("bioproject_accession\tbiosample_accession\tlibrary_ID\ttitle\tlibrary_strategy\tlibrary_source\tlibrary_selection\tlibrary_layout\tplatform\tinstrument_model\tdesign_description\tfiletype\tfilename\tfilename2\tfilename3\tfilename4\tassembly\n")
os.mkdir(os.path.join(out_dir, str(filecount)))
p = l.rstrip("\n").split("\t")
# important columns:
# bioproject_accession: 0
# biosample_accession: 1
# sequence_ID: 2
# sample name: 3
# title: 188
# src: 200
# sra_id: 199
# sequence: 234
# write the sequence out
# note that we do this before we process the line because we need
# all sequences, but we don't process all of them if we have already
# seen the biosample ID before
subdir = str(filecount)
if p[1] in volume:
subdir = str(volume[p[1]])
fao = open(os.path.join(out_dir, subdir, f"{p[1]}.fasta"), 'a')
fao.write(f">{p[2]}\n{p[234]}\n")
fao.close()
if p[1] in volume:
continue
volume[p[1]] = filecount
linecount+=1
if linecount > 999:
linecount = 1
increment_filecount = True
row = [p[0], p[1], p[3], p[188], "", "", "", "", "", "", "", "fastq", f"{p[1]}.fasta", "", "", "", ""]
if 'SRA' == p[200]:
if not p[199]:
sys.stderr.write(f"{bcolors.FAIL}FATAL: for {p[2]} src is SRA but there is no SRA ID{bcolors.ENDC}\n")
sys.exit(-1)
if p[199] not in sra_data:
sys.stderr.write(f"{bcolors.FAIL}FATAL: for {p[2]} SRA ID {p[199]} is not in the sra data{bcolors.ENDC}\n")
sys.exit(-1)
for k in ['library_strategy', 'library_source', 'library_selection', 'library_layout', 'platform', 'instrument_model']:
if k not in sra_data[p[199]]:
sys.stderr.write(f"{bcolors.FAIL}FATAL: {k} not in SRA data for {p[199]}\n")
continue
row[cols[k]] = sra_data[p[199]][k]
row[cols['design_description']] = f"Extracted from SRA run {p[199]} using Gretel version 0.0.8 (https://github.com/SamStudio8/gretel)"
else:
for k in our_data:
row[cols[k]] = our_data[k]
row[cols['design_description']] = f"PCR amplified from a raw environmental sample and seqeunced using Sanger sequencing. "
out.write("{}\n".format("\t".join(row)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=' ')
parser.add_argument('-f', help='all_data biosamples file', required=True)
parser.add_argument('-o', help='file to write', required=True)
parser.add_argument('-d', help='directory to write to', required=True)
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
sra_data = read_sra_data('sra_ids_experiments.txt', args.v)
parse_file(args.f, args.o, sra_data, args.d, args.v)
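# Illustrative invocation (file and directory names are hypothetical):
#   python NCBI_SRA_Submission.py -f all_data_biosamples.tsv -o sra_metadata.tsv -d SRA_Submission -v
# This reads sra_ids_experiments.txt from the working directory, writes chunked
# metadata files SRA_Submission/sra_metadata.<n>.tsv of up to 999 biosamples each,
# and writes one fasta file per biosample into the matching numbered subdirectory.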
|
mit
|
aptana/Pydev
|
bundles/org.python.pydev.jython/Lib/encodings/cp1253.py
|
9
|
5558
|
""" Python Character Mapping Codec generated from 'CP1253.TXT' with gencodec.py.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x20ac, # EURO SIGN
0x0081: None, # UNDEFINED
0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x0083: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x2020, # DAGGER
0x0087: 0x2021, # DOUBLE DAGGER
0x0088: None, # UNDEFINED
0x0089: 0x2030, # PER MILLE SIGN
0x008a: None, # UNDEFINED
0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x008c: None, # UNDEFINED
0x008d: None, # UNDEFINED
0x008e: None, # UNDEFINED
0x008f: None, # UNDEFINED
0x0090: None, # UNDEFINED
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: None, # UNDEFINED
0x0099: 0x2122, # TRADE MARK SIGN
0x009a: None, # UNDEFINED
0x009b: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x009c: None, # UNDEFINED
0x009d: None, # UNDEFINED
0x009e: None, # UNDEFINED
0x009f: None, # UNDEFINED
0x00a1: 0x0385, # GREEK DIALYTIKA TONOS
0x00a2: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00aa: None, # UNDEFINED
0x00af: 0x2015, # HORIZONTAL BAR
0x00b4: 0x0384, # GREEK TONOS
0x00b8: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00b9: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00ba: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00bc: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00be: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00bf: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00c0: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
0x00c1: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x00c2: 0x0392, # GREEK CAPITAL LETTER BETA
0x00c3: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00c4: 0x0394, # GREEK CAPITAL LETTER DELTA
0x00c5: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x00c6: 0x0396, # GREEK CAPITAL LETTER ZETA
0x00c7: 0x0397, # GREEK CAPITAL LETTER ETA
0x00c8: 0x0398, # GREEK CAPITAL LETTER THETA
0x00c9: 0x0399, # GREEK CAPITAL LETTER IOTA
0x00ca: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x00cb: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x00cc: 0x039c, # GREEK CAPITAL LETTER MU
0x00cd: 0x039d, # GREEK CAPITAL LETTER NU
0x00ce: 0x039e, # GREEK CAPITAL LETTER XI
0x00cf: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x00d0: 0x03a0, # GREEK CAPITAL LETTER PI
0x00d1: 0x03a1, # GREEK CAPITAL LETTER RHO
0x00d2: None, # UNDEFINED
0x00d3: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00d4: 0x03a4, # GREEK CAPITAL LETTER TAU
0x00d5: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x00d6: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00d7: 0x03a7, # GREEK CAPITAL LETTER CHI
0x00d8: 0x03a8, # GREEK CAPITAL LETTER PSI
0x00d9: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00da: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00db: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00dc: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00dd: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00de: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00df: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00e0: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
0x00e1: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e2: 0x03b2, # GREEK SMALL LETTER BETA
0x00e3: 0x03b3, # GREEK SMALL LETTER GAMMA
0x00e4: 0x03b4, # GREEK SMALL LETTER DELTA
0x00e5: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00e6: 0x03b6, # GREEK SMALL LETTER ZETA
0x00e7: 0x03b7, # GREEK SMALL LETTER ETA
0x00e8: 0x03b8, # GREEK SMALL LETTER THETA
0x00e9: 0x03b9, # GREEK SMALL LETTER IOTA
0x00ea: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00eb: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00ec: 0x03bc, # GREEK SMALL LETTER MU
0x00ed: 0x03bd, # GREEK SMALL LETTER NU
0x00ee: 0x03be, # GREEK SMALL LETTER XI
0x00ef: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00f0: 0x03c0, # GREEK SMALL LETTER PI
0x00f1: 0x03c1, # GREEK SMALL LETTER RHO
0x00f2: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00f3: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00f4: 0x03c4, # GREEK SMALL LETTER TAU
0x00f5: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00f6: 0x03c6, # GREEK SMALL LETTER PHI
0x00f7: 0x03c7, # GREEK SMALL LETTER CHI
0x00f8: 0x03c8, # GREEK SMALL LETTER PSI
0x00f9: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00fa: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00fb: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00fc: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00fd: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00fe: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00ff: None, # UNDEFINED
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
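# Illustrative round trip (not part of the generated module, shown only to
# clarify how the maps above are used by the Codec class):
#
#   >>> Codec().decode('\xe1')      # 0xe1 -> GREEK SMALL LETTER ALPHA
#   (u'\u03b1', 1)
#   >>> Codec().encode(u'\u03b1')
#   ('\xe1', 1)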
|
epl-1.0
|
NickDaly/GemRB-MultipleConfigs
|
gemrb/GUIScripts/bg2/GUIOPT7.py
|
5
|
4063
|
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#sound options
import GemRB
SoundWindow = 0
TextAreaControl = 0
def OnLoad():
global SoundWindow, TextAreaControl
GemRB.LoadWindowPack("GUIOPT", 640, 480)
SoundWindow = GemRB.LoadWindow(7)
TextAreaControl = SoundWindow.GetControl(14)
AmbientButton = SoundWindow.GetControl(16)
AmbientSlider = SoundWindow.GetControl(1)
SoundEffectsButton = SoundWindow.GetControl(17)
SoundEffectsSlider = SoundWindow.GetControl(2)
DialogueButton = SoundWindow.GetControl(18)
DialogueSlider = SoundWindow.GetControl(3)
MusicButton = SoundWindow.GetControl(19)
MusicSlider = SoundWindow.GetControl(4)
MoviesButton = SoundWindow.GetControl(20)
MoviesSlider = SoundWindow.GetControl(22)
EnvironmentalButton = SoundWindow.GetControl(28)
EnvironmentalButtonB = SoundWindow.GetControl(26)
CharacterSoundButton = SoundWindow.GetControl(13)
OkButton = SoundWindow.GetControl(24)
CancelButton = SoundWindow.GetControl(25)
TextAreaControl.SetText(18040)
CharacterSoundButton.SetText(17778)
OkButton.SetText(11973)
CancelButton.SetText(13727)
AmbientButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, AmbientPress)
AmbientSlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, AmbientPress)
AmbientSlider.SetVarAssoc("Volume Ambients",10)
SoundEffectsButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, SoundEffectsPress)
SoundEffectsSlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, SoundEffectsPress)
SoundEffectsSlider.SetVarAssoc("Volume SFX",10)
DialogueButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, DialoguePress)
DialogueSlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, DialoguePress)
DialogueSlider.SetVarAssoc("Volume Voices",10)
MusicButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, MusicPress)
MusicSlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, MusicPress)
MusicSlider.SetVarAssoc("Volume Music",10)
MoviesButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, MoviesPress)
MoviesSlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, MoviesPress)
MoviesSlider.SetVarAssoc("Volume Movie",10)
EnvironmentalButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, EnvironmentalPress)
EnvironmentalButtonB.SetEvent(IE_GUI_BUTTON_ON_PRESS, EnvironmentalPress)
EnvironmentalButtonB.SetFlags(IE_GUI_BUTTON_CHECKBOX,OP_OR)
EnvironmentalButtonB.SetVarAssoc("Environmental Audio",1)
CharacterSoundButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, CharacterSoundPress)
OkButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, OkPress)
OkButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
CancelButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, CancelPress)
CancelButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
SoundWindow.SetVisible(WINDOW_VISIBLE)
return
def AmbientPress():
TextAreaControl.SetText(18008)
GemRB.UpdateAmbientsVolume ()
return
def SoundEffectsPress():
TextAreaControl.SetText(18009)
return
def DialoguePress():
TextAreaControl.SetText(18010)
return
def MusicPress():
TextAreaControl.SetText(18011)
GemRB.UpdateMusicVolume ()
return
def MoviesPress():
TextAreaControl.SetText(18012)
return
def EnvironmentalPress():
TextAreaControl.SetText(18022)
return
def CharacterSoundPress():
if SoundWindow:
SoundWindow.Unload()
GemRB.SetNextScript("GUIOPT12")
return
def OkPress():
if SoundWindow:
SoundWindow.Unload()
GemRB.SetNextScript("StartOpt")
return
def CancelPress():
if SoundWindow:
SoundWindow.Unload()
GemRB.SetNextScript("StartOpt")
return
|
gpl-2.0
|
zakuro9715/lettuce
|
tests/integration/lib/Django-1.3/tests/modeltests/invalid_models/models.py
|
30
|
26190
|
"""
26. Invalid models
This example exists purely to point out errors in models.
"""
from django.contrib.contenttypes import generic
from django.db import models
class FieldErrors(models.Model):
charfield = models.CharField()
charfield2 = models.CharField(max_length=-1)
charfield3 = models.CharField(max_length="bad")
decimalfield = models.DecimalField()
decimalfield2 = models.DecimalField(max_digits=-1, decimal_places=-1)
decimalfield3 = models.DecimalField(max_digits="bad", decimal_places="bad")
decimalfield4 = models.DecimalField(max_digits=9, decimal_places=10)
decimalfield5 = models.DecimalField(max_digits=10, decimal_places=10)
filefield = models.FileField()
choices = models.CharField(max_length=10, choices='bad')
choices2 = models.CharField(max_length=10, choices=[(1,2,3),(1,2,3)])
index = models.CharField(max_length=10, db_index='bad')
field_ = models.CharField(max_length=10)
nullbool = models.BooleanField(null=True)
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash1 = models.CharField(max_length=10)
clash2 = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Clash1(models.Model):
src_safe = models.CharField(max_length=10)
foreign = models.ForeignKey(Target)
m2m = models.ManyToManyField(Target)
class Clash2(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, related_name='id')
foreign_2 = models.ForeignKey(Target, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
class Target2(models.Model):
clash3 = models.CharField(max_length=10)
foreign_tgt = models.ForeignKey(Target)
clashforeign_set = models.ForeignKey(Target)
m2m_tgt = models.ManyToManyField(Target)
clashm2m_set = models.ManyToManyField(Target)
class Clash3(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target2, related_name='foreign_tgt')
foreign_2 = models.ForeignKey(Target2, related_name='m2m_tgt')
m2m_1 = models.ManyToManyField(Target2, related_name='foreign_tgt')
m2m_2 = models.ManyToManyField(Target2, related_name='m2m_tgt')
class ClashForeign(models.Model):
foreign = models.ForeignKey(Target2)
class ClashM2M(models.Model):
m2m = models.ManyToManyField(Target2)
class SelfClashForeign(models.Model):
src_safe = models.CharField(max_length=10)
selfclashforeign = models.CharField(max_length=10)
selfclashforeign_set = models.ForeignKey("SelfClashForeign")
foreign_1 = models.ForeignKey("SelfClashForeign", related_name='id')
foreign_2 = models.ForeignKey("SelfClashForeign", related_name='src_safe')
class ValidM2M(models.Model):
src_safe = models.CharField(max_length=10)
validm2m = models.CharField(max_length=10)
# M2M fields are symmetrical by default. Symmetrical M2M fields
# on self don't require a related accessor, so many potential
# clashes are avoided.
validm2m_set = models.ManyToManyField("self")
m2m_1 = models.ManyToManyField("self", related_name='id')
m2m_2 = models.ManyToManyField("self", related_name='src_safe')
m2m_3 = models.ManyToManyField('self')
m2m_4 = models.ManyToManyField('self')
class SelfClashM2M(models.Model):
src_safe = models.CharField(max_length=10)
selfclashm2m = models.CharField(max_length=10)
# Non-symmetrical M2M fields _do_ have related accessors, so
# there is potential for clashes.
selfclashm2m_set = models.ManyToManyField("self", symmetrical=False)
m2m_1 = models.ManyToManyField("self", related_name='id', symmetrical=False)
m2m_2 = models.ManyToManyField("self", related_name='src_safe', symmetrical=False)
m2m_3 = models.ManyToManyField('self', symmetrical=False)
m2m_4 = models.ManyToManyField('self', symmetrical=False)
class Model(models.Model):
"But it's valid to call a model Model."
year = models.PositiveIntegerField() #1960
make = models.CharField(max_length=10) #Aston Martin
name = models.CharField(max_length=10) #DB 4 GT
class Car(models.Model):
colour = models.CharField(max_length=5)
model = models.ForeignKey(Model)
class MissingRelations(models.Model):
rel1 = models.ForeignKey("Rel1")
rel2 = models.ManyToManyField("Rel2")
class MissingManualM2MModel(models.Model):
name = models.CharField(max_length=5)
missing_m2m = models.ManyToManyField(Model, through="MissingM2MModel")
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
name = models.CharField(max_length=5)
primary = models.ManyToManyField(Person, through="Membership", related_name="primary")
secondary = models.ManyToManyField(Person, through="Membership", related_name="secondary")
tertiary = models.ManyToManyField(Person, through="RelationshipDoubleFK", related_name="tertiary")
class GroupTwo(models.Model):
name = models.CharField(max_length=5)
primary = models.ManyToManyField(Person, through="Membership")
secondary = models.ManyToManyField(Group, through="MembershipMissingFK")
class Membership(models.Model):
person = models.ForeignKey(Person)
group = models.ForeignKey(Group)
not_default_or_null = models.CharField(max_length=5)
class MembershipMissingFK(models.Model):
person = models.ForeignKey(Person)
class PersonSelfRefM2M(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="Relationship")
too_many_friends = models.ManyToManyField('self', through="RelationshipTripleFK")
class PersonSelfRefM2MExplicit(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="ExplicitRelationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set")
date_added = models.DateTimeField()
class ExplicitRelationship(models.Model):
first = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_to_set")
date_added = models.DateTimeField()
class RelationshipTripleFK(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set_2")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set_2")
third = models.ForeignKey(PersonSelfRefM2M, related_name="too_many_by_far")
date_added = models.DateTimeField()
class RelationshipDoubleFK(models.Model):
first = models.ForeignKey(Person, related_name="first_related_name")
second = models.ForeignKey(Person, related_name="second_related_name")
third = models.ForeignKey(Group, related_name="rel_to_set")
date_added = models.DateTimeField()
class AbstractModel(models.Model):
name = models.CharField(max_length=10)
class Meta:
abstract = True
class AbstractRelationModel(models.Model):
fk1 = models.ForeignKey('AbstractModel')
fk2 = models.ManyToManyField('AbstractModel')
class UniqueM2M(models.Model):
""" Model to test for unique ManyToManyFields, which are invalid. """
unique_people = models.ManyToManyField(Person, unique=True)
class NonUniqueFKTarget1(models.Model):
""" Model to test for non-unique FK target in yet-to-be-defined model: expect an error """
tgt = models.ForeignKey('FKTarget', to_field='bad')
class UniqueFKTarget1(models.Model):
""" Model to test for unique FK target in yet-to-be-defined model: expect no error """
tgt = models.ForeignKey('FKTarget', to_field='good')
class FKTarget(models.Model):
bad = models.IntegerField()
good = models.IntegerField(unique=True)
class NonUniqueFKTarget2(models.Model):
""" Model to test for non-unique FK target in previously seen model: expect an error """
tgt = models.ForeignKey(FKTarget, to_field='bad')
class UniqueFKTarget2(models.Model):
""" Model to test for unique FK target in previously seen model: expect no error """
tgt = models.ForeignKey(FKTarget, to_field='good')
class NonExistingOrderingWithSingleUnderscore(models.Model):
class Meta:
ordering = ("does_not_exist",)
class InvalidSetNull(models.Model):
fk = models.ForeignKey('self', on_delete=models.SET_NULL)
class InvalidSetDefault(models.Model):
fk = models.ForeignKey('self', on_delete=models.SET_DEFAULT)
class Tag(models.Model):
name = models.CharField("name", max_length=20)
class TaggedObject(models.Model):
object_id = models.PositiveIntegerField("Object ID")
tag = models.ForeignKey(Tag)
content_object = generic.GenericForeignKey()
class UserTaggedObject(models.Model):
object_tag = models.ForeignKey(TaggedObject)
class ArticleAttachment(models.Model):
tags = generic.GenericRelation(TaggedObject)
user_tags = generic.GenericRelation(UserTaggedObject)
model_errors = """invalid_models.fielderrors: "charfield": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "charfield2": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "charfield3": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield2": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield2": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield3": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield3": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield4": DecimalFields require a "max_digits" attribute value that is greater than the value of the "decimal_places" attribute.
invalid_models.fielderrors: "decimalfield5": DecimalFields require a "max_digits" attribute value that is greater than the value of the "decimal_places" attribute.
invalid_models.fielderrors: "filefield": FileFields require an "upload_to" attribute.
invalid_models.fielderrors: "choices": "choices" should be iterable (e.g., a tuple or list).
invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples.
invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples.
invalid_models.fielderrors: "index": "db_index" should be either None, True or False.
invalid_models.fielderrors: "field_": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.
invalid_models.fielderrors: "nullbool": BooleanFields do not accept null values. Use a NullBooleanField instead.
invalid_models.clash1: Accessor for field 'foreign' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Accessor for field 'foreign' clashes with related m2m field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Reverse query name for field 'foreign' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Accessor for m2m field 'm2m' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash1: Accessor for m2m field 'm2m' clashes with related field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash1: Reverse query name for m2m field 'm2m' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash2: Accessor for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Accessor for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Accessor for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash2: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Accessor for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Accessor for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Accessor for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Accessor for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Accessor for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clashforeign: Accessor for field 'foreign' clashes with field 'Target2.clashforeign_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clashm2m: Accessor for m2m field 'm2m' clashes with m2m field 'Target2.clashm2m_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.selfclashforeign: Accessor for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign_set'. Add a related_name argument to the definition for 'selfclashforeign_set'.
invalid_models.selfclashforeign: Reverse query name for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign'. Add a related_name argument to the definition for 'selfclashforeign_set'.
invalid_models.selfclashforeign: Accessor for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.selfclashforeign: Reverse query name for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.selfclashforeign: Accessor for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.selfclashforeign: Reverse query name for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'selfclashm2m_set' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_3' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_4' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.missingrelations: 'rel1' has a relation with model Rel1, which has either not been installed or is abstract.
invalid_models.missingrelations: 'rel2' has an m2m relation with model Rel2, which has either not been installed or is abstract.
invalid_models.grouptwo: 'primary' is a manually-defined m2m relation through model Membership, which does not have foreign keys to Person and GroupTwo
invalid_models.grouptwo: 'secondary' is a manually-defined m2m relation through model MembershipMissingFK, which does not have foreign keys to Group and GroupTwo
invalid_models.missingmanualm2mmodel: 'missing_m2m' specifies an m2m relation through model MissingM2MModel, which has not been installed
invalid_models.group: The model Group has two manually-defined m2m relations through the model Membership, which is not permitted. Please consider using an extra field on your intermediary model instead.
invalid_models.group: Intermediary model RelationshipDoubleFK has more than one foreign key to Person, which is ambiguous and is not permitted.
invalid_models.personselfrefm2m: Many-to-many fields with intermediate tables cannot be symmetrical.
invalid_models.personselfrefm2m: Intermediary model RelationshipTripleFK has more than two foreign keys to PersonSelfRefM2M, which is ambiguous and is not permitted.
invalid_models.personselfrefm2mexplicit: Many-to-many fields with intermediate tables cannot be symmetrical.
invalid_models.abstractrelationmodel: 'fk1' has a relation with model AbstractModel, which has either not been installed or is abstract.
invalid_models.abstractrelationmodel: 'fk2' has an m2m relation with model AbstractModel, which has either not been installed or is abstract.
invalid_models.uniquem2m: ManyToManyFields cannot be unique. Remove the unique argument on 'unique_people'.
invalid_models.nonuniquefktarget1: Field 'bad' under model 'FKTarget' must have a unique=True constraint.
invalid_models.nonuniquefktarget2: Field 'bad' under model 'FKTarget' must have a unique=True constraint.
invalid_models.nonexistingorderingwithsingleunderscore: "ordering" refers to "does_not_exist", a field that doesn't exist.
invalid_models.invalidsetnull: 'fk' specifies on_delete=SET_NULL, but cannot be null.
invalid_models.invalidsetdefault: 'fk' specifies on_delete=SET_DEFAULT, but has no default value.
invalid_models.articleattachment: Model 'UserTaggedObject' must have a GenericForeignKey in order to create a GenericRelation that points to it.
"""
|
gpl-3.0
|
dwrpayne/zulip
|
zerver/management/commands/bankrupt_users.py
|
7
|
1427
|
from __future__ import absolute_import
from __future__ import print_function
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_update_message_flags
from zerver.models import UserProfile, Message, get_user_profile_by_email
class Command(BaseCommand):
help = """Bankrupt one or many users."""
def add_arguments(self, parser):
parser.add_argument('emails', metavar='<email>', type=str, nargs='+',
help='email address to bankrupt')
def handle(self, *args, **options):
for email in options['emails']:
try:
user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
print("e-mail %s doesn't exist in the system, skipping" % (email,))
continue
do_update_message_flags(user_profile, "add", "read", None, True)
messages = Message.objects.filter(
usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
old_pointer = user_profile.pointer
new_pointer = messages[0].id
user_profile.pointer = new_pointer
user_profile.save(update_fields=["pointer"])
print("%s: %d => %d" % (email, old_pointer, new_pointer))
else:
print("%s has no messages, can't bankrupt!" % (email,))
|
apache-2.0
|
sbalde/edx-platform
|
lms/djangoapps/verify_student/migrations/0002_auto__add_field_softwaresecurephotoverification_window.py
|
114
|
6711
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SoftwareSecurePhotoVerification.window'
db.add_column('verify_student_softwaresecurephotoverification', 'window',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['reverification.MidcourseReverificationWindow'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SoftwareSecurePhotoVerification.window'
db.delete_column('verify_student_softwaresecurephotoverification', 'window_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reverification.midcoursereverificationwindow': {
'Meta': {'object_name': 'MidcourseReverificationWindow'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'verify_student.softwaresecurephotoverification': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'default': "'<function uuid4 at 0x21d4398>'", 'max_length': '255', 'db_index': 'True'}),
'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'window': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reverification.MidcourseReverificationWindow']", 'null': 'True'})
}
}
complete_apps = ['verify_student']
|
agpl-3.0
|
jaredjennings/snowy
|
wsgi/snowy/snowy/lib/piston/models.py
|
11
|
4896
|
import urllib, time, urlparse
# Django imports
from django.db.models.signals import post_save, post_delete
from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from django.core.mail import send_mail, mail_admins
# Piston imports
from managers import TokenManager, ConsumerManager, ResourceManager
from signals import consumer_post_save, consumer_post_delete
KEY_SIZE = 18
SECRET_SIZE = 32
VERIFIER_SIZE = 10
CONSUMER_STATES = (
('pending', 'Pending'),
('accepted', 'Accepted'),
('canceled', 'Canceled'),
('rejected', 'Rejected')
)
def generate_random(length=SECRET_SIZE):
return User.objects.make_random_password(length=length)
class Nonce(models.Model):
token_key = models.CharField(max_length=KEY_SIZE)
consumer_key = models.CharField(max_length=KEY_SIZE)
key = models.CharField(max_length=255)
def __unicode__(self):
return u"Nonce %s for %s" % (self.key, self.consumer_key)
admin.site.register(Nonce)
class Consumer(models.Model):
name = models.CharField(max_length=255)
description = models.TextField()
key = models.CharField(max_length=KEY_SIZE)
secret = models.CharField(max_length=SECRET_SIZE)
status = models.CharField(max_length=16, choices=CONSUMER_STATES, default='pending')
user = models.ForeignKey(User, null=True, blank=True, related_name='consumers')
objects = ConsumerManager()
def __unicode__(self):
return u"Consumer %s with key %s" % (self.name, self.key)
def generate_random_codes(self):
"""
Used to generate random key/secret pairings. Use this after you've
added the other data in place of save().
c = Consumer()
c.name = "My consumer"
c.description = "An app that makes ponies from the API."
c.user = some_user_object
c.generate_random_codes()
"""
key = User.objects.make_random_password(length=KEY_SIZE)
secret = generate_random(SECRET_SIZE)
while Consumer.objects.filter(key__exact=key, secret__exact=secret).count():
secret = generate_random(SECRET_SIZE)
self.key = key
self.secret = secret
self.save()
admin.site.register(Consumer)
class Token(models.Model):
REQUEST = 1
ACCESS = 2
TOKEN_TYPES = ((REQUEST, u'Request'), (ACCESS, u'Access'))
key = models.CharField(max_length=KEY_SIZE)
secret = models.CharField(max_length=SECRET_SIZE)
verifier = models.CharField(max_length=VERIFIER_SIZE)
token_type = models.IntegerField(choices=TOKEN_TYPES)
timestamp = models.IntegerField(default=long(time.time()))
is_approved = models.BooleanField(default=False)
user = models.ForeignKey(User, null=True, blank=True, related_name='tokens')
consumer = models.ForeignKey(Consumer)
callback = models.CharField(max_length=255, null=True, blank=True)
callback_confirmed = models.BooleanField(default=False)
objects = TokenManager()
def __unicode__(self):
return u"%s Token %s for %s" % (self.get_token_type_display(), self.key, self.consumer)
def to_string(self, only_key=False):
token_dict = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
'oauth_callback_confirmed': 'true',
}
if self.verifier:
token_dict.update({ 'oauth_verifier': self.verifier })
if only_key:
del token_dict['oauth_token_secret']
return urllib.urlencode(token_dict)
def generate_random_codes(self):
key = User.objects.make_random_password(length=KEY_SIZE)
secret = generate_random(SECRET_SIZE)
while Token.objects.filter(key__exact=key, secret__exact=secret).count():
secret = generate_random(SECRET_SIZE)
self.key = key
self.secret = secret
self.save()
# -- OAuth 1.0a stuff
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
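    # Illustrative behaviour (values are hypothetical): with callback
    # 'https://example.com/cb?state=1' and verifier 'abc123', get_callback_url()
    # returns 'https://example.com/cb?state=1&oauth_verifier=abc123'.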
def set_callback(self, callback):
if callback != "oob": # out of band, says "we can't do this!"
self.callback = callback
self.callback_confirmed = True
self.save()
admin.site.register(Token)
# Attach our signals
post_save.connect(consumer_post_save, sender=Consumer)
post_delete.connect(consumer_post_delete, sender=Consumer)
|
agpl-3.0
|
hachreak/invenio-demosite
|
invenio_demosite/base/recordext/functions/get_number_of_reviews.py
|
7
|
1073
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio Demosite.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio Demosite is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio Demosite is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
def get_number_of_reviews(recid):
"""
Returns number of reviews for given record.
:param recid:
:return: Number of reviews
"""
from invenio.legacy.webcomment.adminlib import get_nb_reviews
if recid:
return get_nb_reviews(recid)
|
gpl-2.0
|
Teaonly/easyLearning.js
|
TensorExpress/aten/src/ATen/gen.py
|
1
|
15896
|
import argparse
import os
import yaml
from collections import OrderedDict
import cwrap_parser
import nn_parse
import native_parse
import preprocess_declarations
import function_wrapper
import copy_wrapper
from code_template import CodeTemplate
# This file is the top-level entry point for code generation in ATen.
# It takes an arbitrary number of arguments specifying metadata files to
# process (.cwrap, .yaml and .h) and outputs a number generated header
# and cpp files in ATen/ (see invocations of 'write' for each file that
# is written.) It is invoked from cmake; look for the 'cwrap_files'
# variable for an up-to-date list of files which are passed.
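# Illustrative invocation (file names are hypothetical, not taken from the build):
#   python gen.py --source-path aten/src/ATen --output-dir build/ATen \
#       Declarations.cwrap nn.yaml native_functions.yaml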
parser = argparse.ArgumentParser(description='Generate ATen source files')
parser.add_argument('files', help='cwrap files', nargs='+')
parser.add_argument(
'-s',
'--source-path',
help='path to source directory for ATen',
default='.')
parser.add_argument(
'-o',
'--output-dependencies',
help='output a list of dependencies into the given file and exit')
parser.add_argument(
'-n',
'--no-cuda',
action='store_true',
help='disable generation of cuda files')
parser.add_argument(
'-d', '--output-dir', help='output directory', default='ATen')
options = parser.parse_args()
if options.output_dir is not None and not os.path.exists(options.output_dir):
os.makedirs(options.output_dir)
class FileManager(object):
def __init__(self):
self.filenames = set()
self.outputs_written = False
self.undeclared_files = []
def will_write(self, filename):
filename = '{}/{}'.format(options.output_dir, filename)
if self.outputs_written:
raise Exception("'will_write' can only be called before " +
"the call to write_outputs, refactor so outputs are registered " +
"before running the generators")
self.filenames.add(filename)
def _write_if_changed(self, filename, contents):
try:
with open(filename, 'r') as f:
old_contents = f.read()
except IOError:
old_contents = None
if contents != old_contents:
with open(filename, 'w') as f:
f.write(contents)
def write_outputs(self, filename):
"""Write a file containing the list of all outputs which are
generated by this script."""
self._write_if_changed(
filename,
''.join(name + ";" for name in sorted(self.filenames)))
self.outputs_written = True
def write(self, filename, s):
filename = '{}/{}'.format(options.output_dir, filename)
self._write_if_changed(filename, s)
if filename not in self.filenames:
self.undeclared_files.append(filename)
else:
self.filenames.remove(filename)
def check_all_files_written(self):
if len(self.undeclared_files) > 0:
raise Exception(
"trying to write files {} which are not ".format(self.undeclared_files) +
"in the list of outputs this script produces. " +
"use will_write to add them.")
if len(self.filenames) > 0:
raise Exception("Outputs declared with 'will_write' were " +
"never written: {}".format(self.filenames))
TEMPLATE_PATH = options.source_path + "/templates"
GENERATOR_DERIVED = CodeTemplate.from_file(
TEMPLATE_PATH + "/GeneratorDerived.h")
STORAGE_DERIVED_CPP = CodeTemplate.from_file(
TEMPLATE_PATH + "/StorageDerived.cpp")
STORAGE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/StorageDerived.h")
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h")
TYPE_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.cpp")
TENSOR_DERIVED_CPP = CodeTemplate.from_file(
TEMPLATE_PATH + "/TensorDerived.cpp")
TENSOR_SPARSE_CPP = CodeTemplate.from_file(
TEMPLATE_PATH + "/TensorSparse.cpp")
TENSOR_DENSE_CPP = CodeTemplate.from_file(
TEMPLATE_PATH + "/TensorDense.cpp")
TENSOR_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorDerived.h")
TENSOR_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Tensor.h")
TENSOR_METHODS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorMethods.h")
FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Functions.h")
NATIVE_FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/NativeFunctions.h")
file_manager = FileManager()
generators = {
'CPUGenerator.h': {
'name': 'CPU',
'th_generator': 'THGenerator * generator;',
'header': 'TH/TH.h',
},
'CUDAGenerator.h': {
'name': 'CUDA',
'th_generator': 'THCGenerator * generator;',
'header': 'THC/THC.h'
},
}
backends = ['CPU']
if not options.no_cuda:
backends.append('CUDA')
densities = ['Dense', 'Sparse']
# scalar_name, c_type, accreal, th_scalar_type, is_floating_type
scalar_types = [
('Byte', 'uint8_t', 'Long', 'uint8_t', False),
('Char', 'int8_t', 'Long', 'int8_t', False),
('Double', 'double', 'Double', 'double', True),
('Float', 'float', 'Double', 'float', True),
('Int', 'int', 'Long', 'int32_t', False),
('Long', 'int64_t', 'Long', 'int64_t', False),
('Short', 'int16_t', 'Long', 'int16_t', False),
('Half', 'Half', 'Double', 'THHalf', True),
]
# shared environment for non-derived base classes Type.h Tensor.h Storage.h
top_env = {
'type_registrations': [],
'type_headers': [],
'type_method_declarations': [],
'type_method_definitions': [],
'type_method_inline_definitions': [],
'tensor_method_declarations': [],
'tensor_method_definitions': [],
'function_declarations': [],
'function_definitions': [],
'type_ids': [],
'native_function_declarations': [],
}
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def postprocess_output_declarations(output_declarations):
# ensure each return has a name associated with it
for decl in output_declarations:
has_named_ret = False
for n, ret in enumerate(decl.returns):
if 'name' not in ret:
assert not has_named_ret
if decl.inplace:
ret['name'] = 'self'
elif len(decl.returns) == 1:
ret['name'] = 'result'
else:
ret['name'] = 'result' + str(n)
else:
has_named_ret = True
def remove_key_if_none(dictionary, key):
if key in dictionary.keys() and dictionary[key] is None:
del dictionary[key]
return dictionary
return [remove_key_if_none(decl._asdict(), 'buffers')
for decl in output_declarations]
def format_yaml(data):
if options.output_dependencies:
# yaml formatting is slow so don't do it if we will ditch it.
return ""
noalias_dumper = yaml.dumper.SafeDumper
noalias_dumper.ignore_aliases = lambda self, data: True
# Support serializing OrderedDict
noalias_dumper.add_representer(OrderedDict, dict_representer)
return yaml.dump(data, default_flow_style=False, Dumper=noalias_dumper)
def generate_storage_type_and_tensor(backend, density, scalar_type, declarations):
scalar_name, c_type, accreal, th_scalar_type, is_floating_type = scalar_type
env = {}
density_tag = 'Sparse' if density == 'Sparse' else ''
th_density_tag = 'S' if density == 'Sparse' else ''
env['Density'] = density
env['ScalarName'] = scalar_name
env['ScalarType'] = c_type
env['THScalarType'] = th_scalar_type
env['AccScalarName'] = accreal
env['isFloatingType'] = is_floating_type
env['isIntegralType'] = not is_floating_type
env['Storage'] = "{}{}Storage".format(backend, scalar_name)
env['Type'] = "{}{}{}Type".format(density_tag, backend, scalar_name)
env['Tensor'] = "{}{}{}Tensor".format(density_tag, backend, scalar_name)
env['DenseTensor'] = "{}{}Tensor".format(backend, scalar_name)
env['SparseTensor'] = "Sparse{}{}Tensor".format(backend, scalar_name)
env['Backend'] = density_tag + backend
env['DenseBackend'] = backend
# used for generating switch logic for external functions
tag = density_tag + backend + scalar_name
env['TypeID'] = 'TypeID::' + tag
top_env['type_ids'].append(tag + ',')
if backend == 'CUDA':
env['th_headers'] = [
'#include <THC/THC.h>',
'#include <THCUNN/THCUNN.h>',
'#undef THNN_',
'#undef THCIndexTensor_',
'#include <THCS/THCS.h>',
'#undef THCIndexTensor_',
]
env['extra_cuda_headers'] = ['#include <ATen/cuda/CUDAHalf.cuh>']
sname = '' if scalar_name == "Float" else scalar_name
env['THType'] = 'Cuda{}'.format(sname)
env['THStorage'] = 'THCuda{}Storage'.format(sname)
if density == 'Dense':
env['THTensor'] = 'THCuda{}Tensor'.format(sname)
else:
env['THTensor'] = 'THCS{}Tensor'.format(scalar_name)
env['THIndexTensor'] = 'THCudaLongTensor'
env['state'] = ['context->thc_state']
env['isCUDA'] = 'true'
env['storage_device'] = 'return storage->device;'
env['Generator'] = 'CUDAGenerator'
else:
env['th_headers'] = [
'#include <TH/TH.h>',
'#include <THNN/THNN.h>',
'#undef THNN_',
'#include <THS/THS.h>',
]
env['extra_cuda_headers'] = []
env['THType'] = scalar_name
env['THStorage'] = "TH{}Storage".format(scalar_name)
env['THTensor'] = 'TH{}{}Tensor'.format(th_density_tag, scalar_name)
env['THIndexTensor'] = 'THLongTensor'
env['state'] = []
env['isCUDA'] = 'false'
env['storage_device'] = 'throw std::runtime_error("CPU storage has no device");'
env['Generator'] = 'CPUGenerator'
env['AS_REAL'] = env['ScalarType']
if scalar_name == "Half":
env['SparseTensor'] = 'Tensor'
if backend == "CUDA":
env['to_th_type'] = 'HalfFix<__half,Half>'
env['to_at_type'] = 'HalfFix<Half,__half>'
env['AS_REAL'] = 'convert<half,double>'
env['THScalarType'] = 'half'
else:
env['to_th_type'] = 'HalfFix<THHalf,Half>'
env['to_at_type'] = 'HalfFix<Half,THHalf>'
elif scalar_name == 'Long':
env['to_th_type'] = 'long'
env['to_at_type'] = 'int64_t'
else:
env['to_th_type'] = ''
env['to_at_type'] = ''
declarations, definitions = function_wrapper.create_derived(
env, declarations)
env['type_derived_method_declarations'] = declarations
env['type_derived_method_definitions'] = definitions
if density != 'Sparse':
# there are no special storage types for Sparse, they are composed
# of Dense tensors
file_manager.write(env['Storage'] + ".cpp", STORAGE_DERIVED_CPP.substitute(env))
file_manager.write(env['Storage'] + ".h", STORAGE_DERIVED_H.substitute(env))
env['TensorDenseOrSparse'] = TENSOR_DENSE_CPP.substitute(env)
env['THTensor_nDimension'] = 'tensor->nDimension'
else:
env['TensorDenseOrSparse'] = TENSOR_SPARSE_CPP.substitute(env)
env['THTensor_nDimension'] = 'tensor->nDimensionI + tensor->nDimensionV'
file_manager.write(env['Type'] + ".cpp", TYPE_DERIVED_CPP.substitute(env))
file_manager.write(env['Type'] + ".h", TYPE_DERIVED_H.substitute(env))
file_manager.write(env['Tensor'] + ".cpp", TENSOR_DERIVED_CPP.substitute(env))
file_manager.write(env['Tensor'] + ".h", TENSOR_DERIVED_H.substitute(env))
type_register = (('context->type_registry[static_cast<int>(Backend::{})]' +
'[static_cast<int>(ScalarType::{})].reset(new {}(context));')
.format(env['Backend'], scalar_name, env['Type']))
top_env['type_registrations'].append(type_register)
top_env['type_headers'].append(
'#include "ATen/{}.h"'.format(env['Type']))
return env
def iterate_types():
for backend in backends:
for density in densities:
for scalar_type in scalar_types:
if density == 'Sparse' and scalar_type[0] == 'Half':
# THS does not do half type yet.
continue
yield (backend, density, scalar_type)
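# Hedged illustration: the generator above yields tuples such as
# ('CPU', 'Dense', ('Byte', 'uint8_t', 'Long', 'uint8_t', False)) and, when
# CUDA is enabled, ('CUDA', 'Sparse', ('Float', 'float', 'Double', 'float', True));
# the only combination skipped is Sparse with the Half scalar type.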
###################
# declare what files will be output _before_ we do any work
# so that the script runs quickly when we are just querying the
# outputs
def declare_outputs():
files = ['Declarations.yaml', 'Type.h', 'Type.cpp', 'Tensor.h',
'TensorMethods.h', 'Functions.h',
'Copy.cpp', 'NativeFunctions.h']
for f in files:
file_manager.will_write(f)
for fname in sorted(generators.keys()):
if generators[fname]['name'] in backends:
file_manager.will_write(fname)
for backend, density, scalar_types in iterate_types():
scalar_name = scalar_types[0]
full_backend = "Sparse" + backend if density == "Sparse" else backend
for kind in ["Storage", "Type", "Tensor"]:
if kind == 'Storage' and density == "Sparse":
continue
file_manager.will_write("{}{}{}.h".format(full_backend, scalar_name, kind))
file_manager.will_write("{}{}{}.cpp".format(full_backend, scalar_name, kind))
def filter_by_extension(files, *extensions):
filtered_files = []
for file in files:
for extension in extensions:
if file.endswith(extension):
filtered_files.append(file)
return filtered_files
def generate_outputs():
cwrap_files = filter_by_extension(options.files, '.cwrap')
nn_files = filter_by_extension(options.files, 'nn.yaml', '.h')
native_files = filter_by_extension(options.files, 'native_functions.yaml')
declarations = [d
for file in cwrap_files
for d in cwrap_parser.parse(file)]
declarations += nn_parse.run(nn_files)
declarations += native_parse.run(native_files)
declarations = preprocess_declarations.run(declarations)
for fname, env in generators.items():
if env['name'] in backends:
file_manager.write(fname, GENERATOR_DERIVED.substitute(env))
    # note: this will fill in top_env['type/tensor_method_declarations/definitions']
    # and modify the declarations to include any information that will be used by
    # function_wrapper.create_derived for all backends
output_declarations = function_wrapper.create_generic(top_env, declarations)
output_declarations = postprocess_output_declarations(output_declarations)
file_manager.write("Declarations.yaml", format_yaml(output_declarations))
# populated by generate_storage_type_and_tensor
all_types = []
for backend, density, scalar_type in iterate_types():
all_types.append(generate_storage_type_and_tensor(
backend, density, scalar_type, declarations))
file_manager.write('Type.h', TYPE_H.substitute(top_env))
file_manager.write('Type.cpp', TYPE_CPP.substitute(top_env))
file_manager.write('Tensor.h', TENSOR_H.substitute(top_env))
file_manager.write('TensorMethods.h', TENSOR_METHODS_H.substitute(top_env))
file_manager.write('Functions.h', FUNCTIONS_H.substitute(top_env))
file_manager.write('Copy.cpp', copy_wrapper.create(all_types))
file_manager.write('NativeFunctions.h', NATIVE_FUNCTIONS_H.substitute(top_env))
file_manager.check_all_files_written()
declare_outputs()
if options.output_dependencies is not None:
file_manager.write_outputs(options.output_dependencies)
else:
generate_outputs()
|
mit
|
Yuudachimoe/HikariChun-RedBot
|
lib/youtube_dl/extractor/screencast.py
|
44
|
4330
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)
class ScreencastIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?screencast\.com/t/(?P<id>[a-zA-Z0-9]+)'
_TESTS = [{
'url': 'http://www.screencast.com/t/3ZEjQXlT',
'md5': '917df1c13798a3e96211dd1561fded83',
'info_dict': {
'id': '3ZEjQXlT',
'ext': 'm4v',
'title': 'Color Measurement with Ocean Optics Spectrometers',
'description': 'md5:240369cde69d8bed61349a199c5fb153',
'thumbnail': r're:^https?://.*\.(?:gif|jpg)$',
}
}, {
'url': 'http://www.screencast.com/t/V2uXehPJa1ZI',
'md5': 'e8e4b375a7660a9e7e35c33973410d34',
'info_dict': {
'id': 'V2uXehPJa1ZI',
'ext': 'mov',
'title': 'The Amadeus Spectrometer',
'description': 're:^In this video, our friends at.*To learn more about Amadeus, visit',
'thumbnail': r're:^https?://.*\.(?:gif|jpg)$',
}
}, {
'url': 'http://www.screencast.com/t/aAB3iowa',
'md5': 'dedb2734ed00c9755761ccaee88527cd',
'info_dict': {
'id': 'aAB3iowa',
'ext': 'mp4',
'title': 'Google Earth Export',
'description': 'Provides a demo of a CommunityViz export to Google Earth, one of the 3D viewing options.',
'thumbnail': r're:^https?://.*\.(?:gif|jpg)$',
}
}, {
'url': 'http://www.screencast.com/t/X3ddTrYh',
'md5': '669ee55ff9c51988b4ebc0877cc8b159',
'info_dict': {
'id': 'X3ddTrYh',
'ext': 'wmv',
'title': 'Toolkit 6 User Group Webinar (2014-03-04) - Default Judgment and First Impression',
'description': 'md5:7b9f393bc92af02326a5c5889639eab0',
'thumbnail': r're:^https?://.*\.(?:gif|jpg)$',
}
}, {
'url': 'http://screencast.com/t/aAB3iowa',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._html_search_regex(
r'<embed name="Video".*?src="([^"]+)"', webpage,
'QuickTime embed', default=None)
if video_url is None:
flash_vars_s = self._html_search_regex(
r'<param name="flashVars" value="([^"]+)"', webpage, 'flash vars',
default=None)
if not flash_vars_s:
flash_vars_s = self._html_search_regex(
r'<param name="initParams" value="([^"]+)"', webpage, 'flash vars',
default=None)
if flash_vars_s:
flash_vars_s = flash_vars_s.replace(',', '&')
if flash_vars_s:
flash_vars = compat_parse_qs(flash_vars_s)
video_url_raw = compat_urllib_request.quote(
flash_vars['content'][0])
video_url = video_url_raw.replace('http%3A', 'http:')
if video_url is None:
video_meta = self._html_search_meta(
'og:video', webpage, default=None)
if video_meta:
video_url = self._search_regex(
r'src=(.*?)(?:$|&)', video_meta,
'meta tag video URL', default=None)
if video_url is None:
raise ExtractorError('Cannot find video')
title = self._og_search_title(webpage, default=None)
if title is None:
title = self._html_search_regex(
[r'<b>Title:</b> ([^<]+)</div>',
r'class="tabSeperator">></span><span class="tabText">(.+?)<',
r'<title>([^<]+)</title>'],
webpage, 'title')
thumbnail = self._og_search_thumbnail(webpage)
description = self._og_search_description(webpage, default=None)
if description is None:
description = self._html_search_meta('description', webpage)
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
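# Hedged sketch (not part of the original extractor) of the flashVars fallback
# used in _real_extract() above: compat_parse_qs maps each key to a list of
# values, quote() re-escapes unsafe characters (spaces become %20 and ':'
# becomes %3A along the way), and the final replace() restores the scheme
# separator. The URL below is invented for illustration.
def _example_flashvars_url(flash_vars_s='content=http://media.example.com/my clip.mp4'):
    flash_vars = compat_parse_qs(flash_vars_s)
    video_url_raw = compat_urllib_request.quote(flash_vars['content'][0])
    # -> 'http://media.example.com/my%20clip.mp4'
    return video_url_raw.replace('http%3A', 'http:')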
|
gpl-3.0
|
EdisonResearch/fast-rcnn
|
tools/train_svms.py
|
42
|
13247
|
#!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
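    # Hedged worked example of the scaling above (numbers are invented): if the
    # average L2 norm of the pooled fc7 features over the sampled boxes comes
    # out at 80.0, the returned scale is 20.0 / 80.0 = 0.25, so features
    # multiplied by this scale have an average norm of roughly TARGET_NORM,
    # matching the traditional R-CNN setup.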
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print 'get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time)
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
        # Initialize SVMs in a smart way. Not doing this because it's such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
return ((w * self.feature_scale, b * self.feature_scale),
pos_scores, neg_scores)
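    # Hedged reading of the loss bookkeeping in train() above, with s denoting
    # the decision value w.x + b on the scaled features:
    #   obj = 0.5*||w||^2 + 0.5*b^2
    #         + C * pos_weight * sum over positives of max(0, 1 - s)
    #         + C              * sum over negatives of max(0, 1 + s)
    # i.e. an L2-regularized, class-weighted hinge loss, recorded per
    # retraining round in self.loss_history.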
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_roidb()
print 'done'
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print 'Wrote svm model to: {:s}'.format(filename)
|
mit
|
pacificclimate/pydap.responses.aaigrid
|
src/pydap/responses/aaigrid/__init__.py
|
1
|
13427
|
import os
from os.path import basename, sep
import logging
from tempfile import gettempdir, SpooledTemporaryFile
from itertools import imap, izip, chain, izip_longest, repeat
from zipfile import ZipFile, ZIP_DEFLATED
import re
import gdal
import osr
import numpy
from numpy import ma
from webob.exc import HTTPBadRequest
from pydap.responses.lib import BaseResponse
from pydap.model import *
from pydap.lib import walk, get_var
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('pydap.responses.aaigrid')
# FIXME: this code should be factored out... it's used in two places!
def ziperator(responders):
'''This method creates and returns an iterator which yields bytes for a :py:class:`ZipFile` that contains a set of files from OPeNDAP requests. The method will spool the first one gigabyte in memory using a :py:class:`SpooledTemporaryFile`, after which it will use disk.
:param responders: A list of (``name``, ``generator``) pairs where ``name`` is the filename to use in the zip archive and ``generator`` should yield all bytes for a single file.
:rtype: iterator
'''
with SpooledTemporaryFile(1024*1024*1024) as f:
        yield 'PK' # Response headers aren't sent until the first chunk of data is sent. Let's get this response moving!
z = ZipFile(f, 'w', ZIP_DEFLATED, True)
for name, responder in responders:
pos = 2 if f.tell() == 0 else f.tell()
z.writestr(name, ''.join([x for x in responder]))
f.seek(pos)
yield f.read()
pos = f.tell()
z.close()
f.seek(pos)
yield f.read()
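# Hedged usage sketch for ziperator() (file names and contents are invented):
#
#   def _content(text):
#       yield text
#   responders = [('grid_0.asc', _content('ncols 2\n...')),
#                 ('grid_0.prj', _content('GEOGCS["WGS 84",...]'))]
#   zip_bytes = ''.join(ziperator(responders))  # a complete, streamable .zip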
numpy_to_gdal = {'float32': gdal.GDT_Float32,
'float64': gdal.GDT_Float64,
'int64': gdal.GDT_Int32,
'int16': gdal.GDT_Int16,
'int8': gdal.GDT_Byte}
class AAIGridResponse(BaseResponse):
'''A Pydap responder which uses GDAL to convert grids into Arc/Info ASCII Grid files
'''
def __init__(self, dataset):
if not dataset:
raise HTTPBadRequest("The ArcASCII Grid (aaigrid) response did not receive required dataset parameter")
# We will (should?) always get a _DatasetType_ and should use pydap.lib.walk to walk through all of the Grids
self.grids = walk(dataset, GridType)
if not self.grids:
raise HTTPBadRequest("The ArcASCII Grid (aaigrid) response only supports GridTypes, yet none are included in the requested dataset: {}".format(dataset))
for grid in self.grids:
l = len(grid.maps)
if l not in (2, 3):
raise HTTPBadRequest("The ArcASCII Grid (aaigrid) response only supports Grids with 2 or 3 dimensions, but one of the requested grids contains {} dimension{}".format(l, 's' if l > 1 else ''))
try:
detect_dataset_transform(grid)
except Exception, e:
raise HTTPBadRequest("The ArcASCII Grid (aaigrid) response could not detect the grid transform for grid {}: {}".format(grid.name, e.message))
# FIXME: Verify this
self.srs = osr.SpatialReference()
self.srs.SetWellKnownGeogCS('WGS84')
BaseResponse.__init__(self, dataset)
# We're about to (re-)set the Content-disposition header... make sure that it is not included before-hand
new_headers = [ (name, value) for name, value in self.headers if name != 'Content-disposition' ]
new_headers.extend([
('Content-type','application/zip'),
('Content-disposition', 'attachment; filename="arc_ascii_grid.zip"')
])
self.headers = new_headers
def __iter__(self):
grids = walk(self.dataset, GridType)
def generate_aaigrid_files(grid):
'''Generator that yields multiple file names for each layer of the grid parameter
This function delegates the actual creation of the '.asc' files to _grid_array_to_gdal_files()
Files get writted to temp space on disk (by the delegatee)
and then filenames are yielded from this generator
'''
logger.debug("In generate_aaigrid_files for grid {}".format(grid))
missval = find_missval(grid)
srs = self.srs
geo_transform = detect_dataset_transform(grid)
output_fmt = grid.name + '_{i}.asc'
for file_ in _grid_array_to_gdal_files(grid.array, srs, geo_transform, filename_fmt=output_fmt, missval=missval):
yield file_
# Send each of the grids through _grid_array_to_gdal_files
# which will generate multiple files per grid
logger.debug("__iter__: creating the file generator for grids {}".format(grids))
file_generator = chain.from_iterable(imap(generate_aaigrid_files, grids))
return ziperator(file_generator)
def named_file_generator(filename):
'''Generator that yields pairs of (filename, file_content_generator)
to be consumed by the ziperator
'''
def content():
with open(filename, 'r') as my_file:
for chunk in my_file:
yield chunk
logger.debug("deleting {}".format(filename))
os.unlink(filename)
return basename(filename), content()
def _grid_array_to_gdal_files(dap_grid_array, srs, geo_transform, filename_fmt='{i}.asc', missval=None):
'''Generator which creates an Arc/Info ASCII Grid file for each "layer" (i.e. one step of X by Y) in a given grid
    :param dap_grid_array: Multidimensional array of rank 2 or 3
:type dap_grid_array: numpy.ndarray
:param srs: Spatial reference system
:type srs: osr.SpatialReference
:param geo_transform: GDAL affine transform which applies to this grid
:type geo_transform: list
:param filename_fmt: Proposed filename template for output files. "{i}" can be included and will be filled in with the layer number.
:type filename_fmt: str
:param missval: Value for which data should be identified as missing
:type missval: numpy.array
    :returns: A generator which yields pairs of (filename, file_content_generator) of the created files. Note that there will likely be more than one file per layer (e.g. an .asc file and a .prj file).
'''
logger.debug("_grid_array_to_gdal_files: translating this grid {} of this srs {} transform {} to this file {}".format(dap_grid_array, srs, geo_transform, filename_fmt))
logger.debug("Investigating the shape of this grid: {}".format(dap_grid_array))
shp = dap_grid_array.shape
if len(shp) == 2:
ylen, xlen = shp
data = [ dap_grid_array ]
elif len(shp) == 3:
_, ylen, xlen = shp
data = iter(dap_grid_array.data)
else:
raise ValueError("_grid_array_to_gdal_files received a grid of rank {} rather than the required 2 or 3".format(len(shp)))
target_type = numpy_to_gdal[dap_grid_array.dtype.name]
meta_ds = create_gdal_mem_dataset(xlen, ylen, geo_transform, srs, target_type, missval)
for i, layer in enumerate(data):
if missval:
layer = ma.masked_equal(layer, missval)
logger.debug("Data: {}".format(layer))
meta_ds.GetRasterBand(1).WriteArray( numpy.flipud(layer) )
driver = gdal.GetDriverByName('AAIGrid')
outfile = gettempdir() + sep + filename_fmt.format(i=i)
dst_ds = driver.CreateCopy(outfile, meta_ds, 0)
file_list = dst_ds.GetFileList()
# Once we're done, close properly the dataset
dst_ds = None
for filename in file_list:
yield named_file_generator(filename)
meta_ds = None
def create_gdal_mem_dataset(xlen, ylen, geo_transform, srs, target_type, missval=None):
'''Create and return a single layer GDAL dataset in RAM.
This dataset can have values repeatedly set to it with dst.GetRasterBand(1).WriteArray()
and then can be written out to any GDAL format by creating a driver and then using
`driver.CreateCopy([filename], dst)`
:param xlen: Number of grid cells in the X (longitude) dimension
:type xlen: int
:param ylen: Number of grid cells in the Y (latitude) dimension
:type ylen: int
:param srs: Spatial reference system
:type srs: osr.SpatialReference
:param geo_transform: GDAL affine transform which applies to this grid
:type geo_transform: list
:param target_type: A known `gdal data type <http://gdal.org/gdal_8h.html#a22e22ce0a55036a96f652765793fb7a4>`_
:type target_type: gdal.GDALDataType
:param missval: Value for which data should be identified as missing
:type missval: numpy.array
:returns: A single layer gdal.Dataset driven by the MEM driver
'''
logger.debug("Creating a GDAL driver ({}, {}) of type {}".format(xlen, ylen, target_type))
# Because we're using the MEM driver, we can use an empty filename and it will never go to disk
# GDAL's AAIGrid driver only works in CreateCopy mode,
# so we have to create the dataset with something else first
driver = gdal.GetDriverByName('MEM')
metadata = driver.GetMetadata()
assert metadata.has_key(gdal.DCAP_CREATE)
assert metadata[gdal.DCAP_CREATE] == 'YES'
dst = driver.Create('', xlen, ylen, 1, target_type)
dst.SetGeoTransform( geo_transform )
dst.SetProjection( srs.ExportToWkt() )
if missval:
dst.GetRasterBand(1).SetNoDataValue(missval.astype('float'))
else:
# To clear the nodata value, set with an "out of range" value per GDAL docs
dst.GetRasterBand(1).SetNoDataValue(-9999)
return dst
def find_missval(grid):
'''Search grid attributes for indications of a missing value
:param grid: An instance of the Pydap GridType
:type grid: GridType
:returns: the missing value if available (None, otherwise)
'''
missval = None
for key in ('missing_value', '_FillValue'):
if key in grid.attributes:
missval = grid.attributes[key][0]
return missval
def get_map(dst, axis):
'''Search grid attributes for the 'axis' attribute for a particular axis and return a mapping
:param dst: An instance of a Pydap Dataset (typically a GridType)
:type dst: GridType
:param axis: The character abbreviation for the axis for which to search. E.g. 'X', 'Y', 'Z' or 'T'.
:type axis: str
:returns: The Pydap BaseType which corresponds to the mapping for the given axis
:rtype: BaseType
'''
for map_name, map_ in dst.maps.iteritems():
if map_.attributes.has_key('axis'):
if map_.attributes['axis'] == axis:
return map_
return None
def get_time_map(dst):
    '''Search a grid for the time axis using a variety of heuristics.
According to http://cf-pcmdi.llnl.gov/documents/cf-conventions/1.6/cf-conventions.html#time-coordinate
the time coordinate is identifiable by its units alone,
though optionally it can be indicated by using the standard_name and/or axis='T'
This function searches for those in reverse order and returns the first match.
:param dst: An instance of the Pydap Dataset (typically a GridType)
:type dst: GridType
:returns: The Pydap Basetype which corresponds to the time axis
:rtype: BaseType
'''
for map_name, map_ in dst.maps.iteritems():
attrs = map_.attributes
if attrs.has_key('axis') and attrs['axis'] == 'T':
return map_
if attrs.has_key('standard_name') and attrs['standard_name'] == 'time':
return map_
if attrs.has_key('units') and re.match('(day|d|hour|h|hr|minute|min|second|sec|s)s? since .+', attrs['units']):
return map_
return None
def detect_dataset_transform(dst):
'''Detects and calculates the affine transform for a given GridType dataset. See http://www.gdal.org/gdal_datamodel.html for more on the transform parameters.
    :param dst: An instance of a Pydap GridType for which to calculate an affine transform
:type dst: GridType
:returns: The GDAL affine transform in the form of [ upper_left_x, pixel_width, 0, upper_left_y, 0, pixel_height ]
:rtype: list
'''
# dst must be a Grid
if type(dst) != GridType:
raise Exception("Dataset must be of type Grid, not {}".format(type(dst)))
# Iterate through maps, searching for axis attributes
xmap, ymap = get_map(dst, 'X'), get_map(dst, 'Y')
if xmap is None or ymap is None:
raise Exception("Dataset does not have a map for both the X and Y axes")
if type(xmap.data) == numpy.ndarray:
xarray = xmap.data
else:
        xarray = iter(xmap.data).next() # Might need to iterate over proxy objects to actually get the data
xd = numpy.diff(xarray)
pix_width = xd[0]
assert numpy.isclose(pix_width, xd).all(), "No support for irregular grids"
if type(ymap.data) == numpy.ndarray:
yarray = ymap.data
else:
yarray = iter(ymap.data).next()
yd = numpy.diff(yarray)
pix_height = yd[0]
assert numpy.isclose(pix_height, yd).all(), "No support for irregular grids"
ulx = numpy.min(xarray) - pix_width
uly = numpy.max(yarray) + pix_height # north up
return [ ulx, pix_width, 0, uly, 0, pix_height ]
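# Hedged worked example for detect_dataset_transform() (coordinates invented):
# with X map values [0.5, 1.5, 2.5] and Y map values [49.5, 50.5], both spacings
# are 1.0, so the function returns
#   [0.5 - 1.0, 1.0, 0, 50.5 + 1.0, 0, 1.0] == [-0.5, 1.0, 0, 51.5, 0, 1.0]
# in the [upper_left_x, pixel_width, 0, upper_left_y, 0, pixel_height] layout
# documented above.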
|
gpl-3.0
|
openstack/glance
|
glance/db/sqlalchemy/migrate_repo/versions/022_image_member_index.py
|
3
|
2182
|
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from migrate.changeset import UniqueConstraint
from oslo_db import exception as db_exception
from sqlalchemy import MetaData, Table
from sqlalchemy.exc import OperationalError, ProgrammingError
NEW_KEYNAME = 'image_members_image_id_member_deleted_at_key'
ORIGINAL_KEYNAME_RE = re.compile('image_members_image_id.*_key')
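# Hedged illustration of the names the pattern above is meant to catch (these
# are examples, not taken from any particular deployment):
#   ORIGINAL_KEYNAME_RE.match('image_members_image_id_key')         # matches
#   ORIGINAL_KEYNAME_RE.match('image_members_image_id_member_key')  # matches
#   ORIGINAL_KEYNAME_RE.match('image_members_pkey')                 # None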
def upgrade(migrate_engine):
image_members = _get_image_members_table(migrate_engine)
if migrate_engine.name in ('mysql', 'postgresql'):
try:
UniqueConstraint('image_id',
name=_get_original_keyname(migrate_engine.name),
table=image_members).drop()
except (OperationalError, ProgrammingError, db_exception.DBError):
UniqueConstraint('image_id',
name=_infer_original_keyname(image_members),
table=image_members).drop()
UniqueConstraint('image_id',
'member',
'deleted_at',
name=NEW_KEYNAME,
table=image_members).create()
def _get_image_members_table(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
return Table('image_members', meta, autoload=True)
def _get_original_keyname(db):
return {'mysql': 'image_id',
'postgresql': 'image_members_image_id_member_key'}[db]
def _infer_original_keyname(table):
for i in table.indexes:
if ORIGINAL_KEYNAME_RE.match(i.name):
return i.name
|
apache-2.0
|
phillxnet/rockstor-core
|
src/rockstor/storageadmin/views/share_acl.py
|
2
|
2538
|
"""
Copyright (c) 2012-2020 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.response import Response
from django.db import transaction
from django.conf import settings
from storageadmin.models import Share
from storageadmin.serializers import ShareSerializer
from fs.btrfs import mount_share, umount_root
from storageadmin.views import ShareListView
from system.acl import chown, chmod
class ShareACLView(ShareListView):
@transaction.atomic
def post(self, request, sid):
with self._handle_exception(request):
share = Share.objects.get(id=sid)
options = {
"owner": "root",
"group": "root",
"perms": "755",
"orecursive": True,
"precursive": True,
}
options["owner"] = request.data.get("owner", options["owner"])
options["group"] = request.data.get("group", options["group"])
options["perms"] = request.data.get("perms", options["perms"])
options["orecursive"] = request.data.get(
"orecursive", options["orecursive"]
)
options["precursive"] = request.data.get(
"precursive", options["precursive"]
)
share.owner = options["owner"]
share.group = options["group"]
share.perms = options["perms"]
share.save()
mnt_pt = "%s%s" % (settings.MNT_PT, share.name)
force_mount = False
if not share.is_mounted:
mount_share(share, mnt_pt)
force_mount = True
chown(mnt_pt, options["owner"], options["group"], options["orecursive"])
chmod(mnt_pt, options["perms"], options["precursive"])
if force_mount is True:
umount_root(mnt_pt)
return Response(ShareSerializer(share).data)
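# Hedged example of a request payload the post() handler above accepts (the
# values are invented; any field left out falls back to the defaults in
# `options`):
#   {"owner": "alice", "group": "users", "perms": "750",
#    "orecursive": true, "precursive": false}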
|
gpl-3.0
|
nclsHart/glances
|
glances/core/glances_snmp.py
|
1
|
4872
|
# -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
# Import Glances libs
from glances.core.glances_logging import logger
# Import mandatory PySNMP lib
try:
from pysnmp.entity.rfc3413.oneliner import cmdgen
except ImportError:
logger.critical("PySNMP library not found. To install it: pip install pysnmp")
sys.exit(2)
class GlancesSNMPClient(object):
"""SNMP client class (based on pysnmp library)."""
def __init__(self, host='localhost', port=161, version='2c',
community='public', user='private', auth=''):
super(GlancesSNMPClient, self).__init__()
self.cmdGen = cmdgen.CommandGenerator()
self.version = version
self.host = host
self.port = port
self.community = community
self.user = user
self.auth = auth
def __buid_result(self, varBinds):
"""Build the results"""
ret = {}
for name, val in varBinds:
if str(val) == '':
ret[name.prettyPrint()] = ''
else:
ret[name.prettyPrint()] = val.prettyPrint()
                # In Python 3, prettyPrint() returns 'b'linux'' instead of 'linux'
if ret[name.prettyPrint()].startswith('b\''):
ret[name.prettyPrint()] = ret[name.prettyPrint()][2:-1]
return ret
def __get_result__(self, errorIndication, errorStatus, errorIndex, varBinds):
"""Put results in table."""
ret = {}
if not errorIndication or not errorStatus:
ret = self.__buid_result(varBinds)
return ret
def get_by_oid(self, *oid):
"""SNMP simple request (list of OID).
One request per OID list.
* oid: oid list
> Return a dict
"""
if self.version == '3':
errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
cmdgen.UsmUserData(self.user, self.auth),
cmdgen.UdpTransportTarget((self.host, self.port)),
*oid
)
else:
errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
cmdgen.CommunityData(self.community),
cmdgen.UdpTransportTarget((self.host, self.port)),
*oid
)
return self.__get_result__(errorIndication, errorStatus, errorIndex, varBinds)
def __bulk_result__(self, errorIndication, errorStatus, errorIndex, varBindTable):
ret = []
if not errorIndication or not errorStatus:
for varBindTableRow in varBindTable:
ret.append(self.__buid_result(varBindTableRow))
return ret
def getbulk_by_oid(self, non_repeaters, max_repetitions, *oid):
"""SNMP getbulk request.
In contrast to snmpwalk, this information will typically be gathered in
a single transaction with the agent, rather than one transaction per
variable found.
* non_repeaters: This specifies the number of supplied variables that
should not be iterated over.
* max_repetitions: This specifies the maximum number of iterations over
the repeating variables.
* oid: oid list
> Return a list of dicts
"""
        if self.version.startswith('3'):
            errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
                cmdgen.UsmUserData(self.user, self.auth),
                cmdgen.UdpTransportTarget((self.host, self.port)),
                non_repeaters,
                max_repetitions,
                *oid
            )
        elif self.version.startswith('2'):
errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
cmdgen.CommunityData(self.community),
cmdgen.UdpTransportTarget((self.host, self.port)),
non_repeaters,
max_repetitions,
*oid
)
else:
# Bulk request are not available with SNMP version 1
return []
return self.__bulk_result__(errorIndication, errorStatus, errorIndex, varBindTable)
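# Hedged usage sketch (host, community string and OIDs below are examples, not
# defaults of this module):
#   client = GlancesSNMPClient(host='192.168.0.254', version='2c',
#                              community='public')
#   client.get_by_oid('1.3.6.1.2.1.1.1.0')               # sysDescr.0 -> dict
#   client.getbulk_by_oid(0, 10, '1.3.6.1.2.1.2.2.1.2')  # ifDescr column -> list of dicts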
|
lgpl-3.0
|
wshallum/ansible
|
lib/ansible/modules/windows/win_chocolatey.py
|
23
|
3537
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Trond Hindenes <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: win_chocolatey
version_added: "1.9"
short_description: Installs packages using chocolatey
description:
- Installs packages using Chocolatey (http://chocolatey.org/). If Chocolatey is missing from the system, the module will install it. List of packages can be found at http://chocolatey.org/packages
options:
name:
description:
- Name of the package to be installed
required: true
state:
description:
- State of the package on the system
choices:
- present
- absent
default: present
force:
description:
- Forces install of the package (even if it already exists). Using Force will cause ansible to always report that a change was made
choices:
- yes
- no
default: no
upgrade:
description:
      - If the package is already installed, try to upgrade it to the latest version or to the specified version
choices:
- yes
- no
default: no
version:
description:
- Specific version of the package to be installed
- Ignored when state == 'absent'
source:
description:
- Specify source rather than using default chocolatey repository
install_args:
description:
- Arguments to pass to the native installer
version_added: '2.1'
params:
description:
- Parameters to pass to the package
version_added: '2.1'
allow_empty_checksums:
description:
- Allow empty Checksums to be used
    required: false
default: false
version_added: '2.2'
ignore_checksums:
description:
- Ignore Checksums
    required: false
default: false
version_added: '2.2'
ignore_dependencies:
description:
- Ignore dependencies, only install/upgrade the package itself
default: false
version_added: '2.1'
author: "Trond Hindenes (@trondhindenes), Peter Mounce (@petemounce), Pepe Barbe (@elventear), Adam Keech (@smadam813)"
'''
# TODO:
# * Better parsing when a package has dependencies - currently fails
# * Time each item that is run
# * Support 'changed' with gems - would require shelling out to `gem list` first and parsing, kinda defeating the point of using chocolatey.
EXAMPLES = '''
# Install git
win_chocolatey:
name: git
# Install notepadplusplus version 6.6
win_chocolatey:
name: notepadplusplus.install
version: '6.6'
# Uninstall git
win_chocolatey:
name: git
state: absent
# Install git from specified repository
win_chocolatey:
name: git
source: https://someserver/api/v2/
'''
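# Hedged additional example (not part of the module's shipped EXAMPLES string):
# the documented `version` and `upgrade` options can be combined, e.g.
#
#   win_chocolatey:
#     name: notepadplusplus.install
#     version: '6.6'
#     upgrade: yes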
|
gpl-3.0
|
siripuramrk/namebench
|
nb_third_party/dns/reversename.py
|
248
|
2931
|
# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Reverse Map Names.
@var ipv4_reverse_domain: The DNS IPv4 reverse-map domain, in-addr.arpa.
@type ipv4_reverse_domain: dns.name.Name object
@var ipv6_reverse_domain: The DNS IPv6 reverse-map domain, ip6.arpa.
@type ipv6_reverse_domain: dns.name.Name object
"""
import dns.exception
import dns.name
import dns.ipv6
import dns.ipv4
ipv4_reverse_domain = dns.name.from_text('in-addr.arpa.')
ipv6_reverse_domain = dns.name.from_text('ip6.arpa.')
def from_address(text):
"""Convert an IPv4 or IPv6 address in textual form into a Name object whose
value is the reverse-map domain name of the address.
@param text: an IPv4 or IPv6 address in textual form (e.g. '127.0.0.1',
'::1')
@type text: str
@rtype: dns.name.Name object
"""
try:
parts = list(dns.ipv6.inet_aton(text).encode('hex_codec'))
origin = ipv6_reverse_domain
except:
parts = ['%d' % ord(byte) for byte in dns.ipv4.inet_aton(text)]
origin = ipv4_reverse_domain
parts.reverse()
return dns.name.from_text('.'.join(parts), origin=origin)
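# Hedged illustration of the mapping performed by from_address():
#   from_address('127.0.0.1') -> the name 1.0.0.127.in-addr.arpa.
#   from_address('::1')       -> a 32-label nibble name under ip6.arpa.
#                                (a leading '1' followed by 31 zero labels)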
def to_address(name):
"""Convert a reverse map domain name into textual address form.
@param name: an IPv4 or IPv6 address in reverse-map form.
@type name: dns.name.Name object
@rtype: str
"""
if name.is_subdomain(ipv4_reverse_domain):
name = name.relativize(ipv4_reverse_domain)
labels = list(name.labels)
labels.reverse()
text = '.'.join(labels)
# run through inet_aton() to check syntax and make pretty.
return dns.ipv4.inet_ntoa(dns.ipv4.inet_aton(text))
elif name.is_subdomain(ipv6_reverse_domain):
name = name.relativize(ipv6_reverse_domain)
labels = list(name.labels)
labels.reverse()
parts = []
i = 0
l = len(labels)
while i < l:
parts.append(''.join(labels[i:i+4]))
i += 4
text = ':'.join(parts)
# run through inet_aton() to check syntax and make pretty.
return dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(text))
else:
raise dns.exception.SyntaxError('unknown reverse-map address family')
|
apache-2.0
|
ramitalat/odoo
|
addons/hr_holidays/report/holidays_summary_report.py
|
333
|
10372
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
import openerp
from openerp.osv import fields, osv
from openerp.report.interface import report_rml
from openerp.report.interface import toxml
from openerp.report import report_sxw
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp.tools import to_xml
def lengthmonth(year, month):
if month == 2 and ((year % 4 == 0) and ((year % 100 != 0) or (year % 400 == 0))):
return 29
return [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]
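# Hedged worked examples for lengthmonth() above:
#   lengthmonth(2008, 2) == 29   (divisible by 4 but not by 100)
#   lengthmonth(1900, 2) == 28   (divisible by 100 but not by 400)
#   lengthmonth(2000, 2) == 29   (divisible by 400)
#   lengthmonth(2009, 4) == 30   (non-February lengths come from the lookup table)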
def strToDate(dt):
if dt:
dt_date=datetime.date(int(dt[0:4]),int(dt[5:7]),int(dt[8:10]))
return dt_date
else:
return
def emp_create_xml(self, cr, uid, dept, holiday_type, row_id, empid, name, som, eom):
display={}
if dept==0:
count=0
registry = openerp.registry(cr.dbname)
holidays_ids = registry['hr.holidays'].search(cr, uid, [('employee_id','in',[empid,False]), ('type', '=', 'remove')])
ids_date = registry['hr.holidays'].read(cr, uid, holidays_ids, ['date_from','date_to','holiday_status_id','state'])
for index in range(1,61):
diff=index-1
current=som+datetime.timedelta(diff)
for item in ids_date:
if current >= strToDate(item['date_from']) and current <= strToDate(item['date_to']):
if item['state'] in holiday_type:
display[index]=item['holiday_status_id'][0]
count=count +1
else:
display[index]=' '
break
else:
display[index]=' '
else:
for index in range(1,61):
display[index]=' '
count=''
data_xml=['<info id="%d" number="%d" val="%s" />' % (row_id,x,display[x]) for x in range(1,len(display)+1) ]
# Computing the xml
xml = '''
%s
<employee row="%d" id="%d" name="%s" sum="%s">
</employee>
''' % (data_xml,row_id,dept, ustr(toxml(name)),count)
return xml
class report_custom(report_rml):
def create_xml(self, cr, uid, ids, data, context):
registry = openerp.registry(cr.dbname)
obj_dept = registry['hr.department']
obj_emp = registry['hr.employee']
depts=[]
emp_id={}
rpt_obj = registry['hr.holidays']
rml_obj=report_sxw.rml_parse(cr, uid, rpt_obj._name,context)
cr.execute("SELECT name FROM res_company")
res=cr.fetchone()[0]
date_xml=[]
date_today=time.strftime('%Y-%m-%d %H:%M:%S')
date_xml +=['<res name="%s" today="%s" />' % (to_xml(res),date_today)]
cr.execute("SELECT id, name, color_name FROM hr_holidays_status ORDER BY id")
legend=cr.fetchall()
today=datetime.datetime.today()
first_date=data['form']['date_from']
som = strToDate(first_date)
eom = som+datetime.timedelta(59)
day_diff=eom-som
name = ''
if len(data['form'].get('emp', ())) == 1:
name = obj_emp.read(cr, uid, data['form']['emp'][0], ['name'])['name']
if data['form']['holiday_type']!='both':
type=data['form']['holiday_type']
if data['form']['holiday_type']=='Confirmed':
holiday_type=('confirm')
else:
holiday_type=('validate')
else:
type="Confirmed and Approved"
holiday_type=('confirm','validate')
date_xml.append('<from>%s</from>\n'% (str(rml_obj.formatLang(som.strftime("%Y-%m-%d"),date=True))))
date_xml.append('<to>%s</to>\n' %(str(rml_obj.formatLang(eom.strftime("%Y-%m-%d"),date=True))))
date_xml.append('<type>%s</type>'%(type))
date_xml.append('<name>%s</name>'%(name))
# date_xml=[]
for l in range(0,len(legend)):
date_xml += ['<legend row="%d" id="%d" name="%s" color="%s" />' % (l+1,legend[l][0],_(legend[l][1]),legend[l][2])]
date_xml += ['<date month="%s" year="%d" />' % (ustr(som.strftime('%B')), som.year),'<days>']
cell=1
if day_diff.days>=30:
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, lengthmonth(som.year, som.month)+1)]
else:
if day_diff.days>=(lengthmonth(som.year, som.month)-som.day):
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, lengthmonth(som.year, som.month)+1)]
else:
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, eom.day+1)]
cell=x-som.day+1
day_diff1=day_diff.days-cell+1
width_dict={}
month_dict={}
i=1
j=1
year=som.year
month=som.month
month_dict[j]=som.strftime('%B')
width_dict[j]=cell
while day_diff1>0:
if month+i<=12:
if day_diff1 > lengthmonth(year,i+month): # Not on 30 else you have problems when entering 01-01-2009 for example
som1=datetime.date(year,month+i,1)
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, lengthmonth(year,i+month)+1)]
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
cell=cell+x
width_dict[j]=x
else:
som1=datetime.date(year,month+i,1)
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, eom.day+1)]
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
cell=cell+x
width_dict[j]=x
day_diff1=day_diff1-x
else:
years=year+1
year=years
month=0
i=1
if day_diff1>=30:
som1=datetime.date(years,i,1)
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, lengthmonth(years,i)+1)]
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
cell=cell+x
width_dict[j]=x
else:
som1=datetime.date(years,i,1)
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, eom.day+1)]
cell=cell+x
width_dict[j]=x
day_diff1=day_diff1-x
date_xml.append('</days>')
date_xml.append('<cols>3.5cm%s,0.4cm</cols>\n' % (',0.4cm' * (60)))
date_xml = ''.join(date_xml)
st='<cols_months>3.5cm'
for m in range(1,len(width_dict)+1):
st+=',' + str(0.4 *width_dict[m])+'cm'
st+=',0.4cm</cols_months>\n'
months_xml =['<months number="%d" name="%s"/>' % (x, _(month_dict[x])) for x in range(1,len(month_dict)+1) ]
months_xml.append(st)
emp_xml=''
row_id=1
if data['model'] == 'hr.employee':
for items in obj_emp.read(cr, uid, data['form']['emp'], ['id', 'name']):
emp_xml += emp_create_xml(self, cr, uid, 0, holiday_type, row_id, items['id'], items['name'], som, eom)
row_id = row_id +1
elif data['model']=='ir.ui.menu':
for dept in obj_dept.browse(cr, uid, data['form']['depts'], context=context):
emp_ids = obj_emp.search(cr, uid, [('department_id', '=', dept.id)], context=context)
if emp_ids==[]:
continue
dept_done=0
for item in obj_emp.read(cr, uid, emp_ids, ['id', 'name']):
if dept_done==0:
emp_xml += emp_create_xml(self, cr, uid, 1, holiday_type, row_id, dept.id, dept.name, som, eom)
row_id = row_id +1
dept_done=1
emp_xml += emp_create_xml(self, cr, uid, 0, holiday_type, row_id, item['id'], item['name'], som, eom)
row_id = row_id +1
header_xml = '''
<header>
<date>%s</date>
<company>%s</company>
</header>
''' % (str(rml_obj.formatLang(time.strftime("%Y-%m-%d"),date=True))+' ' + str(time.strftime("%H:%M")),to_xml(registry['res.users'].browse(cr,uid,uid).company_id.name))
# Computing the xml
xml='''<?xml version="1.0" encoding="UTF-8" ?>
<report>
%s
%s
%s
%s
</report>
''' % (header_xml,months_xml,date_xml, ustr(emp_xml))
return xml
report_custom('report.holidays.summary', 'hr.holidays', '', 'addons/hr_holidays/report/holidays_summary.xsl')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Mitali-Sodhi/CodeLingo
|
Dataset/python/test_cert_verification (14).py
|
126
|
1447
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.redshift
class RedshiftCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
redshift = True
regions = boto.redshift.regions()
def sample_service_call(self, conn):
conn.describe_cluster_versions()
|
mit
|
jbedorf/tensorflow
|
tensorflow/python/kernel_tests/distributions/bijector_test.py
|
13
|
11452
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class BaseBijectorTest(test.TestCase):
"""Tests properties of the Bijector base-class."""
def testIsAbstract(self):
with self.assertRaisesRegexp(TypeError,
("Can't instantiate abstract class Bijector "
"with abstract methods __init__")):
bijector.Bijector() # pylint: disable=abstract-class-instantiated
def testDefaults(self):
class _BareBonesBijector(bijector.Bijector):
"""Minimal specification of a `Bijector`."""
def __init__(self):
super(_BareBonesBijector, self).__init__(forward_min_event_ndims=0)
bij = _BareBonesBijector()
self.assertEqual([], bij.graph_parents)
self.assertEqual(False, bij.is_constant_jacobian)
self.assertEqual(False, bij.validate_args)
self.assertEqual(None, bij.dtype)
self.assertEqual("bare_bones_bijector", bij.name)
for shape in [[], [1, 2], [1, 2, 3]]:
forward_event_shape_ = self.evaluate(
bij.inverse_event_shape_tensor(shape))
inverse_event_shape_ = self.evaluate(
bij.forward_event_shape_tensor(shape))
self.assertAllEqual(shape, forward_event_shape_)
self.assertAllEqual(shape, bij.forward_event_shape(shape))
self.assertAllEqual(shape, inverse_event_shape_)
self.assertAllEqual(shape, bij.inverse_event_shape(shape))
with self.assertRaisesRegexp(
NotImplementedError, "inverse not implemented"):
bij.inverse(0)
with self.assertRaisesRegexp(
NotImplementedError, "forward not implemented"):
bij.forward(0)
with self.assertRaisesRegexp(
NotImplementedError, "inverse_log_det_jacobian not implemented"):
bij.inverse_log_det_jacobian(0, event_ndims=0)
with self.assertRaisesRegexp(
NotImplementedError, "forward_log_det_jacobian not implemented"):
bij.forward_log_det_jacobian(0, event_ndims=0)
class IntentionallyMissingError(Exception):
pass
class BrokenBijector(bijector.Bijector):
"""Forward and inverse are not inverses of each other."""
def __init__(
self, forward_missing=False, inverse_missing=False, validate_args=False):
super(BrokenBijector, self).__init__(
validate_args=validate_args, forward_min_event_ndims=0, name="broken")
self._forward_missing = forward_missing
self._inverse_missing = inverse_missing
def _forward(self, x):
if self._forward_missing:
raise IntentionallyMissingError
return 2 * x
def _inverse(self, y):
if self._inverse_missing:
raise IntentionallyMissingError
return y / 2.
def _inverse_log_det_jacobian(self, y): # pylint:disable=unused-argument
if self._inverse_missing:
raise IntentionallyMissingError
return -math_ops.log(2.)
def _forward_log_det_jacobian(self, x): # pylint:disable=unused-argument
if self._forward_missing:
raise IntentionallyMissingError
return math_ops.log(2.)
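# Usage sketch (added for illustration; follows directly from the class above):
# the *_missing flags simulate a Bijector whose forward or inverse pass is
# unavailable, which the caching tests below rely on.
#   bij = BrokenBijector(inverse_missing=True)
#   bij.forward(1.)  # fine: returns 2 * x
#   bij.inverse(2.)  # raises IntentionallyMissingError unless the value was cached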
class BijectorTestEventNdims(test.TestCase):
def testBijectorNonIntegerEventNdims(self):
bij = BrokenBijector()
with self.assertRaisesRegexp(ValueError, "Expected integer"):
bij.forward_log_det_jacobian(1., event_ndims=1.5)
with self.assertRaisesRegexp(ValueError, "Expected integer"):
bij.inverse_log_det_jacobian(1., event_ndims=1.5)
def testBijectorArrayEventNdims(self):
bij = BrokenBijector()
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
bij.forward_log_det_jacobian(1., event_ndims=(1, 2))
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
bij.inverse_log_det_jacobian(1., event_ndims=(1, 2))
@test_util.run_deprecated_v1
def testBijectorDynamicEventNdims(self):
bij = BrokenBijector(validate_args=True)
event_ndims = array_ops.placeholder(dtype=np.int32, shape=None)
with self.cached_session():
with self.assertRaisesOpError("Expected scalar"):
bij.forward_log_det_jacobian(1., event_ndims=event_ndims).eval({
event_ndims: (1, 2)})
with self.assertRaisesOpError("Expected scalar"):
bij.inverse_log_det_jacobian(1., event_ndims=event_ndims).eval({
event_ndims: (1, 2)})
@six.add_metaclass(abc.ABCMeta)
class BijectorCachingTestBase(object):
@abc.abstractproperty
def broken_bijector_cls(self):
# Return a BrokenBijector-type Bijector, since this is what the caching tests exercise.
raise IntentionallyMissingError("Not implemented")
def testCachingOfForwardResults(self):
broken_bijector = self.broken_bijector_cls(inverse_missing=True)
x = constant_op.constant(1.1)
# Call forward and forward_log_det_jacobian one-by-one (not together).
y = broken_bijector.forward(x)
_ = broken_bijector.forward_log_det_jacobian(x, event_ndims=0)
# Now, everything should be cached if the argument is y.
broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)
try:
broken_bijector.inverse(y)
broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)
except IntentionallyMissingError:
raise AssertionError("Tests failed! Cached values not used.")
# Different event_ndims should not be cached.
with self.assertRaises(IntentionallyMissingError):
broken_bijector.inverse_log_det_jacobian(y, event_ndims=1)
def testCachingOfInverseResults(self):
broken_bijector = self.broken_bijector_cls(forward_missing=True)
y = constant_op.constant(1.1)
# Call inverse and inverse_log_det_jacobian one-by-one (not together).
x = broken_bijector.inverse(y)
_ = broken_bijector.inverse_log_det_jacobian(y, event_ndims=0)
# Now, everything should be cached if the argument is x.
try:
broken_bijector.forward(x)
broken_bijector.forward_log_det_jacobian(x, event_ndims=0)
except IntentionallyMissingError:
raise AssertionError("Tests failed! Cached values not used.")
# Different event_ndims should not be cached.
with self.assertRaises(IntentionallyMissingError):
broken_bijector.forward_log_det_jacobian(x, event_ndims=1)
class BijectorCachingTest(BijectorCachingTestBase, test.TestCase):
"""Test caching with BrokenBijector."""
@property
def broken_bijector_cls(self):
return BrokenBijector
class ExpOnlyJacobian(bijector.Bijector):
"""Only used for jacobian calculations."""
def __init__(self, forward_min_event_ndims=0):
super(ExpOnlyJacobian, self).__init__(
validate_args=False,
is_constant_jacobian=False,
forward_min_event_ndims=forward_min_event_ndims,
name="exp")
def _inverse_log_det_jacobian(self, y):
return -math_ops.log(y)
def _forward_log_det_jacobian(self, x):
return math_ops.log(x)
class ConstantJacobian(bijector.Bijector):
"""Only used for jacobian calculations."""
def __init__(self, forward_min_event_ndims=0):
super(ConstantJacobian, self).__init__(
validate_args=False,
is_constant_jacobian=True,
forward_min_event_ndims=forward_min_event_ndims,
name="c")
def _inverse_log_det_jacobian(self, y):
return constant_op.constant(2., y.dtype)
def _forward_log_det_jacobian(self, x):
return constant_op.constant(-2., x.dtype)
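# Added note on the arithmetic asserted below: reducing a log-det Jacobian over
# `event_ndims` sums the per-element value across the trailing event dimensions.
# For x of shape [1, 2, 2] and ConstantJacobian's forward log-det of -2 per
# element, event_ndims=1 sums over the last axis (2 elements) giving -4, and
# event_ndims=2 sums over the last two axes (4 elements) giving -8; the inverse
# values mirror this with +2.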
class BijectorReduceEventDimsTest(test.TestCase):
"""Test caching with BrokenBijector."""
def testReduceEventNdimsForward(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian()
self.assertAllClose(
np.log(x),
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
np.sum(np.log(x), axis=-1),
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
np.sum(np.log(x), axis=(-1, -2)),
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=2)))
def testReduceEventNdimsForwardRaiseError(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian(forward_min_event_ndims=1)
with self.assertRaisesRegexp(ValueError, "must be larger than"):
bij.forward_log_det_jacobian(x, event_ndims=0)
def testReduceEventNdimsInverse(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian()
self.assertAllClose(
-np.log(x),
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
np.sum(-np.log(x), axis=-1),
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
np.sum(-np.log(x), axis=(-1, -2)),
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=2)))
def testReduceEventNdimsInverseRaiseError(self):
x = [[[1., 2.], [3., 4.]]]
bij = ExpOnlyJacobian(forward_min_event_ndims=1)
with self.assertRaisesRegexp(ValueError, "must be larger than"):
bij.inverse_log_det_jacobian(x, event_ndims=0)
def testReduceEventNdimsForwardConstJacobian(self):
x = [[[1., 2.], [3., 4.]]]
bij = ConstantJacobian()
self.assertAllClose(
-2.,
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
-4.,
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
-8.,
self.evaluate(bij.forward_log_det_jacobian(x, event_ndims=2)))
def testReduceEventNdimsInverseConstJacobian(self):
x = [[[1., 2.], [3., 4.]]]
bij = ConstantJacobian()
self.assertAllClose(
2.,
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
4.,
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
8.,
self.evaluate(bij.inverse_log_det_jacobian(x, event_ndims=2)))
@test_util.run_deprecated_v1
def testHandlesNonStaticEventNdims(self):
x_ = [[[1., 2.], [3., 4.]]]
x = array_ops.placeholder_with_default(x_, shape=None)
event_ndims = array_ops.placeholder(dtype=np.int32, shape=[])
bij = ExpOnlyJacobian(forward_min_event_ndims=1)
bij.inverse_log_det_jacobian(x, event_ndims=event_ndims)
with self.cached_session() as sess:
ildj = sess.run(bij.inverse_log_det_jacobian(x, event_ndims=event_ndims),
feed_dict={event_ndims: 1})
self.assertAllClose(-np.log(x_), ildj)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
Chilledheart/gyp
|
pylib/gyp/generator/make.py
|
3
|
91125
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
'PRODUCT_DIR': '$(builddir)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(abspath $<)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Make generator.
import gyp.generator.xcode as xcode_generator
global generator_additional_non_configuration_keys
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
global generator_additional_path_sections
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
android_ndk_version = generator_flags.get('android_ndk_version', None)
# Android NDK requires a strict link order.
if android_ndk_version:
global generator_wants_sorted_dependencies
generator_wants_sorted_dependencies = True
output_dir = params['options'].generator_output or \
params['options'].toplevel_dir
builddir_name = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
output_dir, builddir_name, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': params['options'].toplevel_dir,
'qualified_out_dir': qualified_out_dir,
}
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
#
# Note: flock is used to serialize linking. Linking is a memory-intensive
# process so running parallel links can often lead to thrashing. To disable
# the serialization, override LINK via an environment variable as follows:
#
# export LINK=g++
#
# This will allow make to invoke N linker processes as specified in -jN.
LINK ?= %(flock)s $(builddir)/linker.lock $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?=
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?=
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = ln -f "$<" "$@" 2>/dev/null || (rm -rf "$@" && cp -af "$<" "$@")
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, e.g. where one string merely reorders the other's arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flags by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write(
'$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
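# Added examples (illustrative; behaviour follows directly from the helpers above):
#   Compilable('foo/bar.cc') -> True, Compilable('foo/bar.h') -> False
#   Linkable('foo/bar.o') -> True
#   Target('foo/bar.cc') -> 'foo/bar.o'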
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally."""
return s.replace('$', '$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped even when embedded in a string, else Make
# will treat it as the start of a comment.
return s.replace('#', r'\#')
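# Added example (illustrative): the three escapes compose so that spaces, '$'
# and '#' survive the shell, make, and the compiler.
#   EscapeCppDefine('NAME=a b#c') returns the text 'NAME=a b\#c' (including the
#   surrounding single quotes; a '$', had one been present, would be doubled).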
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above
functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
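# Added example (illustrative): every character outside [a-zA-Z0-9_] becomes an
# underscore, so a qualified gyp target maps to a legal make variable name.
#   StringToMakefileVariable('out/foo.gyp:bar') -> 'out_foo_gyp_bar'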
srcdir_prefix = ''
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return srcdir_prefix + path
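# Added example (illustrative; assumes srcdir_prefix has been set elsewhere to
# '$(srcdir)/' during generation):
#   Sourceify('foo/bar.cc') -> '$(srcdir)/foo/bar.cc'
#   Sourceify('$(obj)/gen/a.cc') and Sourceify('/abs/b.cc') are returned unchanged.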
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
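# Added example (illustrative): QuoteSpaces('Chromium Framework') returns
# Chromium\ Framework, i.e. a single backslash-escaped space for use in make rules.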
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
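# Added example (illustrative): a static_library that lists both 'a/util.cc'
# and 'b/util.cc' trips the check above, since both would be archived as util.o
# and libtool on OS X warns about the duplicate basename.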
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and it is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput(spec)
self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = (
spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
if self.flavor == 'mac':
# libtool on OS X generates warnings for duplicate basenames in the same
# target.
_ValidateSourcesForOSX(spec, all_sources)
self.WriteSources(
configs, deps, all_sources, extra_outputs,
extra_link_deps, part_of_all,
gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
self.Pchify))
sources = filter(Compilable, all_sources)
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
# Currently any version has the same effect, but in the future the behavior
# could be different.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = map(self.Absolutify, outputs)
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = map(self.Absolutify, outputs)
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs + ['FORCE_DO_CMD'], actions)
# Spaces in rule filenames are not supported, but rule variables have
# spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
# The spaces within the variables are valid, so remove the variables
# before checking.
variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
for output in outputs:
output = re.sub(variables_with_spaces, '', output)
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
_, ext = os.path.splitext(output)
if ext != '.xcassets':
# Make does not support '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
# plists can contain envvars, so substitute them into the file.
self.WriteSortedXcodeEnv(
out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
pchdeps = precompiled_header.GetObjDependencies(compilable, objs )
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
      # the bundle is packaged, not just after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on binary and resources which are in
      # nested subfolders, the framework directory will usually be older than
      # its dependencies. To prevent this rule from executing
      # on every build (expensive, especially with postbuilds), explicitly
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
    This makes the outputs dependent on the command line that was run,
    as well as supporting the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
    phony: if true, the rule does not actually generate the named output; the
       output is just a name used to run the rule
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
# TODO(evanm): just make order_only a list of deps instead of these hacks.
if order_only:
order_insert = '| '
pick_output = ' '.join(outputs)
else:
order_insert = ''
pick_output = outputs[0]
if force:
force_append = ' FORCE_DO_CMD'
else:
force_append = ''
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
self.WriteLn('%s: %s%s%s' % (pick_output, order_insert, ' '.join(inputs),
force_append))
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
if not order_only and len(outputs) > 1:
# If we have more than one output, a rule like
# foo bar: baz
      # means that for *each* output we must run the action, potentially
# in parallel. That is not what we're trying to write -- what
# we want is that we run the action once and it generates all
# the files.
# http://www.gnu.org/software/hello/manual/automake/Multiple-Outputs.html
# discusses this problem and has this solution:
# 1) Write the naive rule that would produce parallel runs of
# the action.
      #    2) Make the outputs serialized on each other, so we won't start
# a parallel run until the first run finishes, at which point
# we'll have generated all the outputs and we're done.
self.WriteLn('%s: %s' % (' '.join(outputs[1:]), outputs[0]))
# Add a dummy command to the "extra outputs" rule, otherwise make seems to
# think these outputs haven't (couldn't have?) changed, and thus doesn't
# flag them as changed (i.e. include in '$?') when evaluating dependent
# rules, which in turn causes do_cmd() to skip running dependent commands.
self.WriteLn('%s: ;' % (' '.join(outputs[1:])))
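      # Net effect for outputs ['a', 'b', 'c'] (illustrative names): the rule
      # body is attached to 'a' only, and the other outputs serialize on it:
      #   a: <inputs>
      #   	<actions>
      #   b c: a
      #   b c: ;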
self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
                 # LOCAL_CFLAGS is applied to both C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
# Filter out those which do not match prefix and suffix and produce
# the resulting list without prefix and suffix.
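    # Illustrative example (assumed values): DepsToModules(['out/libfoo.so'],
    # 'lib', '.so') returns ['foo'].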
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
filename = os.path.basename(filepath)
if filename.startswith(prefix) and filename.endswith(suffix):
modules.append(filename[len(prefix):-len(suffix)])
return modules
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['SHARED_LIB_PREFIX'],
default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['STATIC_LIB_PREFIX'],
generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, "$(abs_builddir)",
os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(
additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
      # it does not -- the backslash is written to the env as a literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
if not '$(obj)' in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
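    # Illustrative example (assuming self.path == 'foo/bar'):
    # Absolutify('baz.c') returns 'foo/bar/baz.c', while
    # Absolutify('$(obj)/gen/') is passed through as '$(obj)/gen'.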
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
def ExpandInputRoot(self, template, expansion, dirname):
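    # Illustrative example (assumed values):
    # ExpandInputRoot('%(INPUT_ROOT)s.h', 'foo', 'gen') returns 'foo.h';
    # templates without either placeholder are returned unchanged.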
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
if (self.type == 'shared_library' and
(self.flavor != 'mac' or self.toolset != 'target')):
# Install all shared libs into a common directory (per toolset) for
# convenient access with LD_LIBRARY_PATH.
return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write(
"quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
"cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
"%(makefile_name)s: %(deps)s\n"
"\t$(call do_cmd,regen_makefile)\n\n" % {
'makefile_name': makefile_name,
'deps': ' '.join(map(Sourceify, build_files)),
'cmd': gyp.common.EncodePOSIXShellList(
[gyp_binary, '-fmake'] +
gyp.RegenerateFlags(options) +
build_files_args)})
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
arguments = ['make']
if options.toplevel_dir and options.toplevel_dir != '.':
arguments += '-C', options.toplevel_dir
arguments.append('BUILDTYPE=' + config)
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
android_ndk_version = generator_flags.get('android_ndk_version', None)
default_target = generator_flags.get('default_target', 'all')
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
if options.generator_output:
output_file = os.path.join(
options.depth, options.generator_output, base_path, base_name)
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
if options.generator_output:
global srcdir_prefix
makefile_path = os.path.join(
options.toplevel_dir, options.generator_output, makefile_name)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
srcdir_prefix = '$(srcdir)/'
  flock_command = 'flock'
header_params = {
'default_target': default_target,
'builddir': builddir_name,
'default_configuration': default_configuration,
'flock': flock_command,
'flock_index': 1,
'link_commands': LINK_COMMANDS_LINUX,
'extra_commands': '',
'srcdir': srcdir,
}
if flavor == 'mac':
flock_command = './gyp-mac-tool flock'
header_params.update({
'flock': flock_command,
'flock_index': 2,
'link_commands': LINK_COMMANDS_MAC,
'extra_commands': SHARED_HEADER_MAC_COMMANDS,
})
elif flavor == 'android':
header_params.update({
'link_commands': LINK_COMMANDS_ANDROID,
})
elif flavor == 'solaris':
header_params.update({
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
})
elif flavor == 'aix':
header_params.update({
'link_commands': LINK_COMMANDS_AIX,
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
'CC.host': GetEnvironFallback(('CC_host',), 'gcc'),
'AR.host': GetEnvironFallback(('AR_host',), 'ar'),
'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'),
'LINK.host': GetEnvironFallback(('LINK_host',), '$(CXX.host)'),
})
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_array = data[build_file].get('make_global_settings', [])
wrappers = {}
wrappers['LINK'] = '%s $(builddir)/linker.lock' % flock_command
for key, value in make_global_settings_array:
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
make_global_settings = ''
for key, value in make_global_settings_array:
if re.match('.*_wrapper', key):
continue
if value[0] != '$':
value = '$(abspath %s)' % value
wrapper = wrappers.get(key)
if wrapper:
value = '%s %s' % (wrapper, value)
del wrappers[key]
if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
make_global_settings += (
'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
# Let gyp-time envvars win over global settings.
env_key = key.replace('.', '_') # CC.host -> CC_host
if env_key in os.environ:
value = os.environ[env_key]
make_global_settings += ' %s = %s\n' % (key, value)
make_global_settings += 'endif\n'
else:
make_global_settings += '%s ?= %s\n' % (key, value)
# TODO(ukai): define cmd when only wrapper is specified in
# make_global_settings.
header_params['make_global_settings'] = make_global_settings
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(SHARED_HEADER % header_params)
  # Currently all versions have the same effect, but in the future the
  # behavior could differ.
if android_ndk_version:
root_makefile.write(
'# Define LOCAL_PATH for build of Android applications.\n'
'LOCAL_PATH := $(call my-dir)\n'
'\n')
for toolset in toolsets:
root_makefile.write('TOOLSET := %s\n' % toolset)
WriteRootHeaderSuffixRules(root_makefile)
# Put build-time support tools next to the root Makefile.
dest_path = os.path.dirname(makefile_path)
gyp.common.CopyTool(flavor, dest_path)
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings_array == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
writer = MakefileWriter(generator_flags, flavor)
writer.Write(qualified_target, base_path, output_file, spec, configs,
part_of_all=qualified_target in needed_targets)
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Write out per-gyp (sub-project) Makefiles.
depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
for build_file in build_files:
# The paths in build_files were relativized above, so undo that before
# testing against the non-relativized items in target_list and before
# calculating the Makefile path.
build_file = os.path.join(depth_rel_path, build_file)
gyp_targets = [target_dicts[target]['target_name'] for target in target_list
if target.startswith(build_file) and
target in needed_targets]
# Only generate Makefiles for gyp files with targets.
if not gyp_targets:
continue
base_path, output_file = CalculateMakefilePath(build_file,
os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
os.path.dirname(output_file))
writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
builddir_name)
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
# We wrap each .mk include in an if statement so users can tell make to
# not load a file by setting NO_LOAD. The below make code says, only
# load the .mk file if the .mk filename doesn't start with a token in
# NO_LOAD.
root_makefile.write(
"ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
" $(findstring $(join ^,$(prefix)),\\\n"
" $(join ^," + include_file + ")))),)\n")
root_makefile.write(" include " + include_file + "\n")
root_makefile.write("endif\n")
root_makefile.write('\n')
if (not generator_flags.get('standalone')
and generator_flags.get('auto_regeneration', True)):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
|
bsd-3-clause
|
puhoy/storedDict
|
tests/test_storedDict.py
|
1
|
1786
|
import unittest
import json
import os
from storedDict import StoredDict
class TestStoredDict(unittest.TestCase):
def test_save_and_load(self):
val = 1
sd = StoredDict('testing.json')
sd['one'] = val
sd.commit()
sd = StoredDict('testing.json')
self.assertEqual(val, sd['one'])
val2 = 2
sd['two'] = 2
sd.commit()
self.assertEqual(val, sd['one'])
self.assertEqual(val2, sd['two'])
def test_autovivification(self):
sd = StoredDict('testing.json')
sd['one']['two']['three'] = 3
val = sd['one']['two']['three']
sd.commit()
with open('testing.json', 'r') as f:
loaded_sd = json.load(f)
self.assertEqual(val, loaded_sd['one']['two']['three'])
def test_autocommit(self):
sd = StoredDict('testing.json', autocommit=True)
sd['one'] = 3
val = sd['one']
# no manual commit here
with open('testing.json', 'r') as f:
loaded_sd = json.load(f)
self.assertEqual(sd['one'], loaded_sd['one'])
def test_autoviv_autocommit(self):
sd = StoredDict('testing.json', autocommit=True)
sd['one']['two']['three'] = 3
val = sd['one']['two']['three']
# no manual commit here
with open('testing.json', 'r') as f:
loaded_sd = json.load(f)
self.assertEqual(val, loaded_sd['one']['two']['three'])
def test_load_saved_dict(self):
sd2 = StoredDict('tests/testing2.json')
sd2.commit()
self.assertEqual(3, sd2['one']['two']['three'])
    def tearDown(self):
        try:
            os.remove('testing.json')
        except OSError:
            pass
if __name__ == '__main__':
unittest.main()
|
mit
|
nelmiux/CarnotKE
|
jyhton/ast/astview.py
|
7
|
2075
|
#!/usr/bin/env python
"""lispify_ast - returns a tuple representation of the AST
Uses 2.5's _ast, not other AST implementations in CPython, since these
are not used by the compilation phase. And that's what we're
interested in.
Since this is a tuple, we can directly compare, and this is going to
be handy when comparing Jython's implementation vs CPython.
"""
import _ast
import sys
if sys.platform.startswith('java'):
def get_class_name(t):
result = t.__class__.__name__
if result == "AugLoad":
result = "Load"
elif result == "AugStore":
result = "Store"
return result
else:
get_class_name = lambda node: node.__class__.__name__
get_lines_and_cols = True
def lispify_ast(node):
return tuple(lispify_ast2(node))
def lispify_ast2(node):
result = get_class_name(node)
if get_lines_and_cols and hasattr(node, 'lineno') and hasattr(node, 'col_offset'):
result = "%s (%s,%s)" % (result, node.lineno, node.col_offset)
yield result
try:
for field in node._fields:
yield tuple(lispify_field(field, getattr(node, field)))
except:
pass
def lispify_field(field, child):
yield field
if isinstance(child, list):
children = child
else:
children = [child]
for node in children:
if isinstance(node, _ast.AST):
yield lispify_ast(node)
else:
if isinstance(node, float):
#XXX: stringify floats so they match Java's float representation better
#This may mask problems for very small numbers.
if .0001 < node < 10000:
yield "%5.5f" % node
else:
yield "%.5e" % node
else:
yield node
def tree(pyfile):
try:
node = compile(open(pyfile).read(), pyfile, "exec", _ast.PyCF_ONLY_AST)
except SyntaxError:
return "SyntaxError",
return lispify_ast(node)
if __name__ == '__main__':
import pprint
pprint.pprint(tree(sys.argv[1]))
|
apache-2.0
|
JoeHsiao/bioformats
|
components/xsd-fu/python/generateDS/SWIG/testsuper.py
|
33
|
41337
|
#!/usr/bin/env python
import sys
import getopt
from xml.dom import minidom
from xml.dom import Node
#import yaml
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://www-hep.colorado.edu/~fperez/ipython.
#
#from IPython.Shell import IPythonShellEmbed
#IPShell = IPythonShellEmbed('-nosep',
# banner = 'Entering interpreter. Ctrl-D to exit.',
# exit_msg = 'Leaving Interpreter.')
# Use the following line where and when you want to drop into the
# IPython shell:
# IPShell(vars(), '<a msg>')
#
# Support/utility functions.
#
def showIndent(outfile, level):
for idx in range(level):
outfile.write(' ')
#
# Data representation classes.
#
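# Note (editorial): every class below follows the same generated pattern --
# a factory() hook for optional subclassing, simple getter/setter accessors,
# export() to write the element back out as indented XML, and build() to
# populate the object from a minidom element node.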
class top:
subclass = None
def __init__(self, attributelist=None, include=None, id='', addr=''):
self.attributelist = attributelist
if include is None:
self.include = []
else:
self.include = include
self.id = id
self.addr = addr
def factory(*args):
if top.subclass:
return apply(top.subclass, args)
else:
return apply(top, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getInclude(self): return self.include
def addInclude(self, include): self.include.append(include)
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<top id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
for include in self.include:
include.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</top>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'include':
obj = include.factory()
obj.build(child)
self.include.append(obj)
# end class top
class attributelist:
subclass = None
def __init__(self, attribute=None, kwargs=None, parmlist=None, id='', addr=''):
if attribute is None:
self.attribute = []
else:
self.attribute = attribute
if kwargs is None:
self.kwargs = []
else:
self.kwargs = kwargs
self.parmlist = parmlist
self.id = id
self.addr = addr
def factory(*args):
if attributelist.subclass:
return apply(attributelist.subclass, args)
else:
return apply(attributelist, args)
factory = staticmethod(factory)
def getAttribute(self): return self.attribute
def addAttribute(self, attribute): self.attribute.append(attribute)
def getKwargs(self): return self.kwargs
def addKwargs(self, kwargs): self.kwargs.append(kwargs)
def getParmlist(self): return self.parmlist
def setParmlist(self, parmlist): self.parmlist = parmlist
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<attributelist id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
for attribute in self.attribute:
attribute.export(outfile, level)
for kwargs in self.kwargs:
kwargs.export(outfile, level)
if self.parmlist:
self.parmlist.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</attributelist>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attribute':
obj = attribute.factory()
obj.build(child)
self.attribute.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'kwargs':
obj = kwargs.factory()
obj.build(child)
self.kwargs.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'parmlist':
obj = parmlist.factory()
obj.build(child)
self.setParmlist(obj)
# end class attributelist
class attribute:
subclass = None
def __init__(self, name='', value='', id='', addr=''):
self.name = name
self.value = value
self.id = id
self.addr = addr
def factory(*args):
if attribute.subclass:
return apply(attribute.subclass, args)
else:
return apply(attribute, args)
factory = staticmethod(factory)
def getName(self): return self.name
def setName(self, name): self.name = name
def getValue(self): return self.value
def setValue(self, value): self.value = value
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<attribute name="%s" value="%s" id="%s" addr="%s">\n' % (self.name, self.value, self.id, self.addr, ))
level += 1
level -= 1
showIndent(outfile, level)
outfile.write('</attribute>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('name'):
self.name = attrs.get('name').value
if attrs.get('value'):
self.value = attrs.get('value').value
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
pass
# end class attribute
class include:
subclass = None
def __init__(self, attributelist=None, include=None, typemap=None, insert=None, module=None, cdecl=None, klass=None, emport=None, enum=None, id='', addr=''):
self.attributelist = attributelist
if include is None:
self.include = []
else:
self.include = include
if typemap is None:
self.typemap = []
else:
self.typemap = typemap
if insert is None:
self.insert = []
else:
self.insert = insert
if module is None:
self.module = []
else:
self.module = module
if cdecl is None:
self.cdecl = []
else:
self.cdecl = cdecl
if klass is None:
self.klass = []
else:
self.klass = klass
if emport is None:
self.emport = []
else:
self.emport = emport
if enum is None:
self.enum = []
else:
self.enum = enum
self.id = id
self.addr = addr
def factory(*args):
if include.subclass:
return apply(include.subclass, args)
else:
return apply(include, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getInclude(self): return self.include
def addInclude(self, include): self.include.append(include)
def getTypemap(self): return self.typemap
def addTypemap(self, typemap): self.typemap.append(typemap)
def getInsert(self): return self.insert
def addInsert(self, insert): self.insert.append(insert)
def getModule(self): return self.module
def addModule(self, module): self.module.append(module)
def getCdecl(self): return self.cdecl
def addCdecl(self, cdecl): self.cdecl.append(cdecl)
def getClass(self): return self.klass
def addClass(self, klass): self.klass.append(klass)
def getImport(self): return self.emport
def addImport(self, emport): self.emport.append(emport)
def getEnum(self): return self.enum
def addEnum(self, enum): self.enum.append(enum)
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<include id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
for include in self.include:
include.export(outfile, level)
for typemap in self.typemap:
typemap.export(outfile, level)
for insert in self.insert:
insert.export(outfile, level)
for module in self.module:
module.export(outfile, level)
for cdecl in self.cdecl:
cdecl.export(outfile, level)
for klass in self.klass:
klass.export(outfile, level)
for emport in self.emport:
emport.export(outfile, level)
for enum in self.enum:
enum.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</include>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'include':
obj = include.factory()
obj.build(child)
self.include.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'typemap':
obj = typemap.factory()
obj.build(child)
self.typemap.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'insert':
obj = insert.factory()
obj.build(child)
self.insert.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'module':
obj = module.factory()
obj.build(child)
self.module.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'cdecl':
obj = cdecl.factory()
obj.build(child)
self.cdecl.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'class':
obj = klass.factory()
obj.build(child)
self.klass.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'import':
obj = emport.factory()
obj.build(child)
self.emport.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'enum':
obj = enum.factory()
obj.build(child)
self.enum.append(obj)
# end class include
class emport:
subclass = None
def __init__(self, attributelist=None, include=None, typemap=None, insert=None, module=None, cdecl=None, klass=None, emport=None, enum=None, id='', addr=''):
self.attributelist = attributelist
if include is None:
self.include = []
else:
self.include = include
if typemap is None:
self.typemap = []
else:
self.typemap = typemap
if insert is None:
self.insert = []
else:
self.insert = insert
if module is None:
self.module = []
else:
self.module = module
if cdecl is None:
self.cdecl = []
else:
self.cdecl = cdecl
if klass is None:
self.klass = []
else:
self.klass = klass
if emport is None:
self.emport = []
else:
self.emport = emport
if enum is None:
self.enum = []
else:
self.enum = enum
self.id = id
self.addr = addr
def factory(*args):
if emport.subclass:
return apply(emport.subclass, args)
else:
return apply(emport, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getInclude(self): return self.include
def addInclude(self, include): self.include.append(include)
def getTypemap(self): return self.typemap
def addTypemap(self, typemap): self.typemap.append(typemap)
def getInsert(self): return self.insert
def addInsert(self, insert): self.insert.append(insert)
def getModule(self): return self.module
def addModule(self, module): self.module.append(module)
def getCdecl(self): return self.cdecl
def addCdecl(self, cdecl): self.cdecl.append(cdecl)
def getClass(self): return self.klass
def addClass(self, klass): self.klass.append(klass)
def getImport(self): return self.emport
def addImport(self, emport): self.emport.append(emport)
def getEnum(self): return self.enum
def addEnum(self, enum): self.enum.append(enum)
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<import id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
for include in self.include:
include.export(outfile, level)
for typemap in self.typemap:
typemap.export(outfile, level)
for insert in self.insert:
insert.export(outfile, level)
for module in self.module:
module.export(outfile, level)
for cdecl in self.cdecl:
cdecl.export(outfile, level)
for klass in self.klass:
klass.export(outfile, level)
for emport in self.emport:
emport.export(outfile, level)
for enum in self.enum:
enum.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</import>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'include':
obj = include.factory()
obj.build(child)
self.include.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'typemap':
obj = typemap.factory()
obj.build(child)
self.typemap.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'insert':
obj = insert.factory()
obj.build(child)
self.insert.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'module':
obj = module.factory()
obj.build(child)
self.module.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'cdecl':
obj = cdecl.factory()
obj.build(child)
self.cdecl.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'class':
obj = klass.factory()
obj.build(child)
self.klass.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'import':
obj = emport.factory()
obj.build(child)
self.emport.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'enum':
obj = enum.factory()
obj.build(child)
self.enum.append(obj)
# end class emport
class enum:
subclass = None
def __init__(self, attributelist=None, enumitem=None, id='', addr=''):
self.attributelist = attributelist
if enumitem is None:
self.enumitem = []
else:
self.enumitem = enumitem
self.id = id
self.addr = addr
def factory(*args):
if enum.subclass:
return apply(enum.subclass, args)
else:
return apply(enum, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getEnumitem(self): return self.enumitem
def addEnumitem(self, enumitem): self.enumitem.append(enumitem)
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<enum id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
for enumitem in self.enumitem:
enumitem.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</enum>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'enumitem':
obj = enumitem.factory()
obj.build(child)
self.enumitem.append(obj)
# end class enum
class enumitem:
subclass = None
def __init__(self, attributelist=None, id='', addr=''):
self.attributelist = attributelist
self.id = id
self.addr = addr
def factory(*args):
if enumitem.subclass:
return apply(enumitem.subclass, args)
else:
return apply(enumitem, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<enumitem id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</enumitem>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
# end class enumitem
class kwargs:
subclass = None
def __init__(self, attributelist=None, id='', addr=''):
self.attributelist = attributelist
self.id = id
self.addr = addr
def factory(*args):
if kwargs.subclass:
return apply(kwargs.subclass, args)
else:
return apply(kwargs, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<kwargs id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</kwargs>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
# end class kwargs
class typemap:
subclass = None
def __init__(self, attributelist=None, typemapitem=None, id='', addr=''):
self.attributelist = attributelist
if typemapitem is None:
self.typemapitem = []
else:
self.typemapitem = typemapitem
self.id = id
self.addr = addr
def factory(*args):
if typemap.subclass:
return apply(typemap.subclass, args)
else:
return apply(typemap, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getTypemapitem(self): return self.typemapitem
def addTypemapitem(self, typemapitem): self.typemapitem.append(typemapitem)
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<typemap id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
for typemapitem in self.typemapitem:
typemapitem.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</typemap>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'typemapitem':
obj = typemapitem.factory()
obj.build(child)
self.typemapitem.append(obj)
# end class typemap
class typemapitem:
subclass = None
def __init__(self, attributelist=None, id='', addr=''):
self.attributelist = attributelist
self.id = id
self.addr = addr
def factory(*args):
if typemapitem.subclass:
return apply(typemapitem.subclass, args)
else:
return apply(typemapitem, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<typemapitem id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</typemapitem>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
# end class typemapitem
class klass:
subclass = None
def __init__(self, attributelist=None, cdecl=None, access=None, constructor=None, destructor=None, id='', addr=''):
self.attributelist = attributelist
if cdecl is None:
self.cdecl = []
else:
self.cdecl = cdecl
self.access = access
if constructor is None:
self.constructor = []
else:
self.constructor = constructor
self.destructor = destructor
self.id = id
self.addr = addr
def factory(*args):
if klass.subclass:
return apply(klass.subclass, args)
else:
return apply(klass, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getCdecl(self): return self.cdecl
def addCdecl(self, cdecl): self.cdecl.append(cdecl)
def getAccess(self): return self.access
def setAccess(self, access): self.access = access
def getConstructor(self): return self.constructor
def addConstructor(self, constructor): self.constructor.append(constructor)
def getDestructor(self): return self.destructor
def setDestructor(self, destructor): self.destructor = destructor
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<class id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
for cdecl in self.cdecl:
cdecl.export(outfile, level)
if self.access:
self.access.export(outfile, level)
for constructor in self.constructor:
constructor.export(outfile, level)
if self.destructor:
self.destructor.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</class>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'cdecl':
obj = cdecl.factory()
obj.build(child)
self.cdecl.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'access':
obj = access.factory()
obj.build(child)
self.setAccess(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'constructor':
obj = constructor.factory()
obj.build(child)
self.constructor.append(obj)
elif child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'destructor':
obj = destructor.factory()
obj.build(child)
self.setDestructor(obj)
# end class klass
class cdecl:
subclass = None
def __init__(self, attributelist=None, id='', addr=''):
self.attributelist = attributelist
self.id = id
self.addr = addr
def factory(*args):
if cdecl.subclass:
return apply(cdecl.subclass, args)
else:
return apply(cdecl, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<cdecl id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</cdecl>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
# end class cdecl
class access:
subclass = None
def __init__(self, attributelist=None, id='', addr=''):
self.attributelist = attributelist
self.id = id
self.addr = addr
def factory(*args):
if access.subclass:
return apply(access.subclass, args)
else:
return apply(access, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<access id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</access>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
# end class access
class parmlist:
subclass = None
def __init__(self, parm=None, id='', addr=''):
if parm is None:
self.parm = []
else:
self.parm = parm
self.id = id
self.addr = addr
def factory(*args):
if parmlist.subclass:
return apply(parmlist.subclass, args)
else:
return apply(parmlist, args)
factory = staticmethod(factory)
def getParm(self): return self.parm
def addParm(self, parm): self.parm.append(parm)
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<parmlist id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
for parm in self.parm:
parm.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</parmlist>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'parm':
obj = parm.factory()
obj.build(child)
self.parm.append(obj)
# end class parmlist
class parm:
subclass = None
def __init__(self, attributelist=None, id='', addr=''):
self.attributelist = attributelist
self.id = id
self.addr = addr
def factory(*args):
if parm.subclass:
return apply(parm.subclass, args)
else:
return apply(parm, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<parm id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</parm>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
# end class parm
class constructor:
subclass = None
def __init__(self, attributelist=None, id='', addr=''):
self.attributelist = attributelist
self.id = id
self.addr = addr
def factory(*args):
if constructor.subclass:
return apply(constructor.subclass, args)
else:
return apply(constructor, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<constructor id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</constructor>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
# end class constructor
class destructor:
subclass = None
def __init__(self, attributelist=None, id='', addr=''):
self.attributelist = attributelist
self.id = id
self.addr = addr
def factory(*args):
if destructor.subclass:
return apply(destructor.subclass, args)
else:
return apply(destructor, args)
factory = staticmethod(factory)
def getAttributelist(self): return self.attributelist
def setAttributelist(self, attributelist): self.attributelist = attributelist
def getId(self): return self.id
def setId(self, id): self.id = id
def getAddr(self): return self.addr
def setAddr(self, addr): self.addr = addr
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<destructor id="%s" addr="%s">\n' % (self.id, self.addr, ))
level += 1
if self.attributelist:
self.attributelist.export(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('</destructor>\n')
def build(self, node_):
attrs = node_.attributes
if attrs.get('id'):
self.id = attrs.get('id').value
if attrs.get('addr'):
self.addr = attrs.get('addr').value
for child in node_.childNodes:
if child.nodeType == Node.ELEMENT_NODE and \
child.nodeName == 'attributelist':
obj = attributelist.factory()
obj.build(child)
self.setAttributelist(obj)
# end class destructor
class module:
subclass = None
def __init__(self):
pass
def factory(*args):
if module.subclass:
return apply(module.subclass, args)
else:
return apply(module, args)
factory = staticmethod(factory)
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<module>\n')
level += 1
level -= 1
showIndent(outfile, level)
outfile.write('</module>\n')
def build(self, node_):
attrs = node_.attributes
for child in node_.childNodes:
pass
# end class module
class insert:
subclass = None
def __init__(self):
pass
def factory(*args):
if insert.subclass:
return apply(insert.subclass, args)
else:
return apply(insert, args)
factory = staticmethod(factory)
def export(self, outfile, level):
showIndent(outfile, level)
outfile.write('<insert>\n')
level += 1
level -= 1
showIndent(outfile, level)
outfile.write('</insert>\n')
def build(self, node_):
attrs = node_.attributes
for child in node_.childNodes:
pass
# end class insert
USAGE_TEXT = """
Usage: python <Parser>.py <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(-1)
def parse(inFileName):
doc = minidom.parse(inFileName)
rootNode = doc.childNodes[0]
rootObj = top.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0)
#yamlObj = rootObj.exportYaml()
#yaml.dumpToFile(sys.stdout, yamlObj)
return rootObj
def main():
args = sys.argv[1:]
if len(args) != 1:
usage()
parse(args[0])
if __name__ == '__main__':
main()
#import pdb
#pdb.run('main()')
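# Hedged customization sketch (not part of the generated parser): every
# generated class exposes a `subclass` hook and a `factory()` staticmethod, so
# behaviour can be extended without editing this file. The subclass name below
# is hypothetical.
#
# class MyTypemapitem(typemapitem):
#     def export(self, outfile, level):
#         outfile.write('<!-- custom typemapitem -->\n')
#         typemapitem.export(self, outfile, level)
#
# typemapitem.subclass = MyTypemapitem   # factory() now builds MyTypemapitem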
|
gpl-2.0
|
kylelwm/ponus
|
venv/build/django/django/contrib/auth/urls.py
|
104
|
1443
|
# The views used below are normally mapped in django.contrib.admin.urls.py
# This URLs file is used to provide a reliable view deployment for test purposes.
# It is also provided as a convenience to those who want to deploy these URLs
# elsewhere.
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout'),
url(r'^password_change/$', 'django.contrib.auth.views.password_change', name='password_change'),
url(r'^password_change/done/$', 'django.contrib.auth.views.password_change_done', name='password_change_done'),
url(r'^password_reset/$', 'django.contrib.auth.views.password_reset', name='password_reset'),
url(r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done', name='password_reset_done'),
# Support old style base36 password reset links; remove in Django 1.7
url(r'^reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
'django.contrib.auth.views.password_reset_confirm_uidb36'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
'django.contrib.auth.views.password_reset_confirm',
name='password_reset_confirm'),
url(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete', name='password_reset_complete'),
)
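# Hedged usage sketch (not part of this module): a project URLconf would
# normally pull these patterns in with include(), for example:
#
# from django.conf.urls import include, patterns, url
#
# urlpatterns = patterns('',
#     url(r'^accounts/', include('django.contrib.auth.urls')),
# )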
|
mit
|
Stanford-Online/edx-platform
|
openedx/core/djangoapps/certificates/tests/test_api.py
|
14
|
4403
|
from contextlib import contextmanager
from datetime import datetime, timedelta
import itertools
import ddt
import pytz
import waffle
from django.test import TestCase
from course_modes.models import CourseMode
from openedx.core.djangoapps.certificates import api
from openedx.core.djangoapps.certificates.config import waffle as certs_waffle
from openedx.core.djangoapps.content.course_overviews.tests.factories import CourseOverviewFactory
from student.tests.factories import CourseEnrollmentFactory, UserFactory
# TODO: Copied from lms.djangoapps.certificates.models,
# to be resolved per https://openedx.atlassian.net/browse/EDUCATOR-1318
class CertificateStatuses(object):
"""
Enum for certificate statuses
"""
deleted = 'deleted'
deleting = 'deleting'
downloadable = 'downloadable'
error = 'error'
generating = 'generating'
notpassing = 'notpassing'
restricted = 'restricted'
unavailable = 'unavailable'
auditing = 'auditing'
audit_passing = 'audit_passing'
audit_notpassing = 'audit_notpassing'
unverified = 'unverified'
invalidated = 'invalidated'
requesting = 'requesting'
ALL_STATUSES = (
deleted, deleting, downloadable, error, generating, notpassing, restricted, unavailable, auditing,
audit_passing, audit_notpassing, unverified, invalidated, requesting
)
class MockGeneratedCertificate(object):
"""
We can't import GeneratedCertificate from LMS here, so we roll
our own minimal Certificate model for testing.
"""
def __init__(self, user=None, course_id=None, mode=None, status=None):
self.user = user
self.course_id = course_id
self.mode = mode
self.status = status
def is_valid(self):
"""
Return True if certificate is valid else return False.
"""
return self.status == CertificateStatuses.downloadable
@contextmanager
def configure_waffle_namespace(feature_enabled):
namespace = certs_waffle.waffle()
with namespace.override(certs_waffle.AUTO_CERTIFICATE_GENERATION, active=feature_enabled):
yield
@ddt.ddt
class CertificatesApiTestCase(TestCase):
def setUp(self):
super(CertificatesApiTestCase, self).setUp()
self.course = CourseOverviewFactory.create(
start=datetime(2017, 1, 1, tzinfo=pytz.UTC),
end=datetime(2017, 1, 31, tzinfo=pytz.UTC),
certificate_available_date=None
)
self.user = UserFactory.create()
self.enrollment = CourseEnrollmentFactory(
user=self.user,
course_id=self.course.id,
is_active=True,
mode='audit',
)
self.certificate = MockGeneratedCertificate(
user=self.user,
course_id=self.course.id
)
@ddt.data(True, False)
def test_auto_certificate_generation_enabled(self, feature_enabled):
with configure_waffle_namespace(feature_enabled):
self.assertEqual(feature_enabled, api.auto_certificate_generation_enabled())
@ddt.data(
(True, True, False), # feature enabled and self-paced should return False
(True, False, True), # feature enabled and instructor-paced should return True
(False, True, False), # feature not enabled and self-paced should return False
(False, False, False), # feature not enabled and instructor-paced should return False
)
@ddt.unpack
def test_can_show_certificate_available_date_field(
self, feature_enabled, is_self_paced, expected_value
):
self.course.self_paced = is_self_paced
with configure_waffle_namespace(feature_enabled):
self.assertEqual(expected_value, api.can_show_certificate_available_date_field(self.course))
@ddt.data(
(CourseMode.VERIFIED, CertificateStatuses.downloadable, True),
(CourseMode.VERIFIED, CertificateStatuses.notpassing, False),
(CourseMode.AUDIT, CertificateStatuses.downloadable, False)
)
@ddt.unpack
def test_is_certificate_valid(self, enrollment_mode, certificate_status, expected_value):
self.enrollment.mode = enrollment_mode
self.enrollment.save()
self.certificate.mode = CourseMode.VERIFIED
self.certificate.status = certificate_status
self.assertEqual(expected_value, api.is_certificate_valid(self.certificate))
|
agpl-3.0
|
klahnakoski/JsonSchemaToMarkdown
|
vendor/tuid/sql.py
|
3
|
2226
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from mo_logs import Log
from pyLibrary.sql.sqlite import quote_value, Sqlite
DEBUG = False
TRACE = True
class Sql:
def __init__(self, config):
self.db = Sqlite(config)
def execute(self, sql, params=None):
Log.error("Use a transaction")
def commit(self):
Log.error("Use a transaction")
def rollback(self):
Log.error("Use a transaction")
def get(self, sql, params=None):
if params:
for p in params:
sql = sql.replace('?', quote_value(p), 1)
return self.db.query(sql).data
def get_one(self, sql, params=None):
return self.get(sql, params)[0]
def transaction(self):
return Transaction(self.db.transaction())
@property
def pending_transactions(self):
"""
:return: NUMBER OF TRANSACTIONS IN THE QUEUE
"""
return len(self.db.available_transactions)
class Transaction():
def __init__(self, transaction):
self.transaction = transaction
def __enter__(self):
self.transaction.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.transaction.__exit__(exc_type, exc_val, exc_tb)
def execute(self, sql, params=None):
if params:
for p in params:
sql = sql.replace('?', quote_value(p), 1)
return self.transaction.execute(sql)
def get(self, sql, params=None):
if params:
for p in params:
sql = sql.replace('?', quote_value(p), 1)
return self.transaction.query(sql).data
def get_one(self, sql, params=None):
return self.get(sql, params)[0]
def query(self, query):
return self.transaction.query(query)
def commit(self):
Log.error("do not know how to handle")
def rollback(self):
Log.error("do not know how to handle")
|
mpl-2.0
|
AudreyFrancisco/AliPhysics
|
PWGJE/EMCALJetTasks/Tracks/analysis/old/CompareRawSpectraTriggers.py
|
42
|
6008
|
#! /usr/bin/env python
import sys
from Helper import ReadHistList, MakeRatio, HistNotFoundException
from Graphics import Frame, Style
from SpectrumContainer import DataContainer
from ROOT import TCanvas, TLegend, TPaveText
from ROOT import kBlack, kRed, kBlue, kGreen, kOrange
gPlot = None
class Plot:
"""
Class representation of the resulting plot
The plot has n rows and 2 columns, one row per cut and within a column for the spectrum and a column for the ratio
The first (upper) row will get a legend.
"""
def __init__(self):
self.__data = {}
self.__canvas = None
self.__legend = None
self.__labels = []
self.__axes = { "Spectrum" : Frame("tmplSpectrum", 0., 100., 1e-10, 100.), \
"Ratio" : Frame("tmplRatio", 0., 100., 0., 1000.)}
self.__axes["Spectrum"].SetXtitle("p_{t} (GeV/c)")
self.__axes["Ratio"].SetXtitle("p_{t} (GeV/c)")
self.__axes["Spectrum"].SetYtitle("1/N_{event} 1/(#Delta p_{t}) dN/dp_{t} ((GeV/c)^{-2})")
self.__axes["Ratio"].SetYtitle("Ratio to Min. Bias")
self.__cutnames = {0 : "No cuts" , 1 : "Standard track cuts", 2 : "Hybrid track cuts"}
def SetData(self, data, cutid):
"""
Add data for a given cut id to the plot
"""
self.__data[cutid] = data
def GetCanvas(self):
"""
Return resulting canvas
"""
return self.__canvas
def SaveAs(self, filenamebase):
"""
Save plot as image file
"""
types = ["eps", "pdf", "jpeg", "gif", "png"]
for t in types:
self.__canvas.SaveAs("%s.%s" %(filenamebase, t))
def Create(self):
"""
Create the final canvas
"""
self.__canvas = TCanvas("plot", "Raw spectra comparison", 600, len(self.__data) * 300)
self.__canvas.Divide(2, len(self.__data))
row = 0
for cut in self.__data.keys():
self.__DrawCut(cut, row)
row = row + 1
return self.__canvas
def __DrawCut(self, cutID, row):
"""
Draw row with spectra comparison and ratios for a given cut combination
"""
spectrumpad = self.__canvas.cd(row * 2 + 1)
spectrumpad.SetGrid(False, False)
spectrumpad.SetLogx()
spectrumpad.SetLogy()
self.__axes["Spectrum"].Draw()
drawlegend = False
if not self.__legend:
self.__legend = TLegend(0.65, 0.55, 0.89, 0.89)
self.__legend.SetBorderSize(0)
self.__legend.SetFillStyle(0)
self.__legend.SetTextFont(42)
drawlegend = True
for trg in self.__data[cutID]["Spectra"].keys():
self.__data[cutID]["Spectra"][trg].Draw("epsame")
if drawlegend:
self.__legend.AddEntry(self.__data[cutID]["Spectra"][trg], trg, "lep")
if drawlegend:
self.__legend.Draw()
cutlab = TPaveText(0.15, 0.15, 0.55, 0.22, "NDC")
cutlab.SetBorderSize(0)
cutlab.SetFillStyle(0)
cutlab.SetTextFont(42)
cutlab.AddText(self.__cutnames[cutID])
cutlab.Draw()
self.__labels.append(cutlab)
ratiopad = self.__canvas.cd(row * 2 + 2)
ratiopad.SetGrid(False, False)
ratiopad.SetLogx()
self.__axes["Ratio"].Draw()
for trg in self.__data[cutID]["Ratios"].keys():
self.__data[cutID]["Ratios"][trg].Draw("epsame")
self.__canvas.cd()
def ReadSpectra(filename, triggers):
"""
Read the spectra for different trigger classes from the root file
Returns a dictionary of triggers - spectrum container
"""
hlist = ReadHistList(filename, "PtEMCalTriggerTask")
result = {}
for trg in triggers:
result[trg] = DataContainer(eventHist = hlist.FindObject("hEventHist%s" %(trg)), trackHist = hlist.FindObject("hTrackHist%s" %(trg)))
return result
def MakeSpectraCut(inputdata, cutid):
"""
Create for all trigger classes rawspectra for a given cut id and for events within +-10 cm in z-Vertex with
pileup rejection on and the ratios to min. bias events
"""
styles = {"MinBias" : Style(kBlack, 20), "EMCJHigh" : Style(kRed, 24), "EMCJLow" : Style(kOrange, 26), "EMCGHigh" : Style(kBlue, 25), "EMCGLow" : Style(kGreen, 27)}
rawspectra = {}
for trg in inputdata.keys():
inputdata[trg].SetVertexRange(-10., 10.)
inputdata[trg].SetPileupRejection(True)
inputdata[trg].SelectTrackCuts(cutid)
rawspectra[trg] = inputdata[trg].MakeProjection(0, "ptSpectrum%s", "p_{t} (GeV/c)", "1/N_{event} 1/(#Delta p_{t}) dN/dp_{t} ((GeV/c)^{-2})")
rawspectra[trg].SetMarkerColor(styles[trg].GetColor())
rawspectra[trg].SetLineColor(styles[trg].GetColor())
rawspectra[trg].SetMarkerStyle(styles[trg].GetMarker())
inputdata[trg].Reset()
ratios = {}
for trg in rawspectra.keys():
if trg == "MinBias":
continue
ratios[trg] = MakeRatio(rawspectra[trg], rawspectra["MinBias"])
ratios[trg].SetMarkerColor(styles[trg].GetColor())
ratios[trg].SetLineColor(styles[trg].GetColor())
ratios[trg].SetMarkerStyle(styles[trg].GetMarker())
result = {"Spectra" : rawspectra, "Ratios" : ratios}
return result
def MakeRawSpectraComparisonPlot(filename, doSave = False):
"""
Create the final comparison plot
"""
triggers = ["MinBias", "EMCJHigh", "EMCJLow", "EMCGHigh", "EMCGLow"]
data = ReadSpectra(filename, triggers)
plot = Plot()
for cut in range(0, 3):
plot.SetData(MakeSpectraCut(data, cut), cut)
plot.Create()
    global gPlot
    gPlot = plot
if doSave:
plot.SaveAs("TriggercomparisonCuts")
return plot
def main():
"""
Main function: Delegate drawing
"""
inputfile = sys.argv[1]
    MakeRawSpectraComparisonPlot(inputfile, True)
if __name__ == "__main__":
main()
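# Hedged usage note (not part of the script): it expects the path to a ROOT
# analysis output file on the command line, e.g.
#   python CompareRawSpectraTriggers.py AnalysisResults.root
# where the file name is hypothetical.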
|
bsd-3-clause
|
carrerasrodrigo/django-post_office
|
post_office/south_migrations/0010_auto__add_field_log_exception_type.py
|
10
|
4547
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Log.exception_type'
db.add_column(u'post_office_log', 'exception_type',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Log.exception_type'
db.delete_column(u'post_office_log', 'exception_type')
models = {
u'post_office.attachment': {
'Meta': {'object_name': 'Attachment'},
'emails': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'attachments'", 'symmetrical': 'False', 'to': u"orm['post_office.Email']"}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'post_office.email': {
'Meta': {'object_name': 'Email'},
'bcc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'cc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'context': ('jsonfield.fields.JSONField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'from_email': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'headers': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'html_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'priority': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'scheduled_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['post_office.EmailTemplate']", 'null': 'True', 'blank': 'True'}),
'to': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'post_office.emailtemplate': {
'Meta': {'object_name': 'EmailTemplate'},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'html_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'post_office.log': {
'Meta': {'object_name': 'Log'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['post_office.Email']"}),
'exception_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
}
}
complete_apps = ['post_office']
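# Hedged usage note (not part of the migration): with South installed, this
# schema change would normally be applied with
#   python manage.py migrate post_office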
|
mit
|
PulsePod/old-www-do-not-use
|
lib/python2.7/site-packages/flask/testsuite/ext.py
|
563
|
5156
|
# -*- coding: utf-8 -*-
"""
flask.testsuite.ext
~~~~~~~~~~~~~~~~~~~
Tests the extension import thing.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import unittest
try:
from imp import reload as reload_module
except ImportError:
reload_module = reload
from flask.testsuite import FlaskTestCase
from flask._compat import PY2
class ExtImportHookTestCase(FlaskTestCase):
def setup(self):
# we clear this out for various reasons. The most important one is
# that a real flaskext could be in there which would disable our
# fake package. Secondly we want to make sure that the flaskext
# import hook does not break on reloading.
for entry, value in list(sys.modules.items()):
if (entry.startswith('flask.ext.') or
entry.startswith('flask_') or
entry.startswith('flaskext.') or
entry == 'flaskext') and value is not None:
sys.modules.pop(entry, None)
from flask import ext
reload_module(ext)
# reloading must not add more hooks
import_hooks = 0
for item in sys.meta_path:
cls = type(item)
if cls.__module__ == 'flask.exthook' and \
cls.__name__ == 'ExtensionImporter':
import_hooks += 1
self.assert_equal(import_hooks, 1)
def teardown(self):
from flask import ext
for key in ext.__dict__:
self.assert_not_in('.', key)
def test_flaskext_new_simple_import_normal(self):
from flask.ext.newext_simple import ext_id
self.assert_equal(ext_id, 'newext_simple')
def test_flaskext_new_simple_import_module(self):
from flask.ext import newext_simple
self.assert_equal(newext_simple.ext_id, 'newext_simple')
self.assert_equal(newext_simple.__name__, 'flask_newext_simple')
def test_flaskext_new_package_import_normal(self):
from flask.ext.newext_package import ext_id
self.assert_equal(ext_id, 'newext_package')
def test_flaskext_new_package_import_module(self):
from flask.ext import newext_package
self.assert_equal(newext_package.ext_id, 'newext_package')
self.assert_equal(newext_package.__name__, 'flask_newext_package')
def test_flaskext_new_package_import_submodule_function(self):
from flask.ext.newext_package.submodule import test_function
self.assert_equal(test_function(), 42)
def test_flaskext_new_package_import_submodule(self):
from flask.ext.newext_package import submodule
self.assert_equal(submodule.__name__, 'flask_newext_package.submodule')
self.assert_equal(submodule.test_function(), 42)
def test_flaskext_old_simple_import_normal(self):
from flask.ext.oldext_simple import ext_id
self.assert_equal(ext_id, 'oldext_simple')
def test_flaskext_old_simple_import_module(self):
from flask.ext import oldext_simple
self.assert_equal(oldext_simple.ext_id, 'oldext_simple')
self.assert_equal(oldext_simple.__name__, 'flaskext.oldext_simple')
def test_flaskext_old_package_import_normal(self):
from flask.ext.oldext_package import ext_id
self.assert_equal(ext_id, 'oldext_package')
def test_flaskext_old_package_import_module(self):
from flask.ext import oldext_package
self.assert_equal(oldext_package.ext_id, 'oldext_package')
self.assert_equal(oldext_package.__name__, 'flaskext.oldext_package')
def test_flaskext_old_package_import_submodule(self):
from flask.ext.oldext_package import submodule
self.assert_equal(submodule.__name__, 'flaskext.oldext_package.submodule')
self.assert_equal(submodule.test_function(), 42)
def test_flaskext_old_package_import_submodule_function(self):
from flask.ext.oldext_package.submodule import test_function
self.assert_equal(test_function(), 42)
def test_flaskext_broken_package_no_module_caching(self):
for x in range(2):
with self.assert_raises(ImportError):
import flask.ext.broken
def test_no_error_swallowing(self):
try:
import flask.ext.broken
except ImportError:
exc_type, exc_value, tb = sys.exc_info()
self.assert_true(exc_type is ImportError)
if PY2:
message = 'No module named missing_module'
else:
message = 'No module named \'missing_module\''
self.assert_equal(str(exc_value), message)
self.assert_true(tb.tb_frame.f_globals is globals())
# reraise() adds a second frame so we need to skip that one too.
# On PY3 we even have another one :(
next = tb.tb_next.tb_next
if not PY2:
next = next.tb_next
self.assert_in('flask_broken/__init__.py', next.tb_frame.f_code.co_filename)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ExtImportHookTestCase))
return suite
|
mit
|
micropython-IMU/micropython-fusion
|
remote/capture.py
|
1
|
2477
|
# capture.py Data capture for remote operation. Uses LCD display and uasyncio.
# Author: Peter Hinch
# Released under the MIT License (MIT)
# Copyright (c) 2018 Peter Hinch
# Requires:
# uasyncio (official or modified version)
# MPU9150 on X position
# Normally open pushbutton connected between pin Y7 and ground
# LCD driver alcd.py from https://github.com/peterhinch/micropython-async.git
# Hitachi HD44780 2 row LCD display wired using 4 bit data bus as follows:
# Name LCD connector Board
# Rs 4 1 red Y1
# E 6 2 Y2
# D7 14 3 Y3
# D6 13 4 Y4
# D5 12 5 Y5
# D4 11 6 Y6
from machine import Pin
import uasyncio as asyncio
import ujson
import utime as time
import gc
from mpu9150 import MPU9150
from fusion_async import Fusion # Using async version
from alcd import LCD, PINLIST # Library supporting Hitachi LCD module
switch = Pin('Y7', Pin.IN, pull=Pin.PULL_UP) # Switch to ground on Y7
imu = MPU9150('X') # Attached to 'X' bus, 1 device, disable interrupts
lcd = LCD(PINLIST, cols = 24) # Should work with 16 column LCD
f = open('/sd/mpudata', 'w')
async def read_coro():
imu.mag_trigger()
await asyncio.sleep_ms(20) # Plenty of time for mag to be ready
f.write(ujson.dumps([imu.accel.xyz, imu.gyro.xyz, imu.mag_nonblocking.xyz, time.ticks_us()]))
f.write('\n')
return imu.accel.xyz, imu.gyro.xyz, imu.mag_nonblocking.xyz
fuse = Fusion(read_coro)
async def mem_manage(): # Necessary for long term stability
while True:
await asyncio.sleep_ms(100)
gc.collect()
gc.threshold(gc.mem_free() // 4 + gc.mem_alloc())
async def display():
lcd[0] = "{:5s}{:5s} {:5s}".format("Yaw","Pitch","Roll")
while not switch.value():
lcd[1] = "{:4.0f} {:4.0f} {:4.0f}".format(fuse.heading, fuse.pitch, fuse.roll)
await asyncio.sleep_ms(500)
f.close()
return
async def lcd_task():
print('Running test...')
if switch.value() == 1:
lcd[0] = "Calibrate. Push switch"
lcd[1] = "when done"
await asyncio.sleep_ms(100) # Let LCD coro run
await fuse.calibrate(lambda : not switch.value())
f.write('cal_end\n')
print(fuse.magbias)
print('Turn switch off to close the file and terminate.')
await fuse.start() # Start the update task
await display()
loop = asyncio.get_event_loop()
loop.create_task(mem_manage())
loop.run_until_complete(lcd_task())
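# Hedged post-processing sketch (runs on a host PC, not the pyboard; not part
# of this script): each line of /sd/mpudata is a JSON list of
# [accel_xyz, gyro_xyz, mag_xyz, ticks_us], with a literal 'cal_end' marker
# written after calibration, so a captured log can be replayed like this.
#
# import json
# with open('mpudata') as f:
#     for line in f:
#         if line.strip() == 'cal_end':
#             continue
#         accel, gyro, mag, t_us = json.loads(line)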
|
mit
|
petrjasek/superdesk-core
|
superdesk/data_updates/00006_20171124-195408_content_types.py
|
2
|
2154
|
# -*- coding: utf-8; -*-
# This file is part of Superdesk.
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
#
# Author : mugur
# Creation: 2017-11-24 19:54
from copy import deepcopy
from superdesk.commands.data_updates import BaseDataUpdate
from eve.utils import config
class DataUpdate(BaseDataUpdate):
replace_values_forward = {
"picture": "media",
"unorderedlist": "unordered list",
"orderedlist": "ordered list",
"anchor": "link",
"removeFormat": None,
}
replace_values_backward = {
"media": "picture",
"unordered list": "unorderedlist",
"ordered list": "orderedlist",
"link": "anchor",
}
resource = "content_types"
def forwards(self, mongodb_collection, mongodb_database):
self._process_content_type(mongodb_collection, self.replace_values_forward)
def backwards(self, mongodb_collection, mongodb_database):
        self._process_content_type(mongodb_collection, self.replace_values_backward)
def _process_content_type(self, mongodb_collection, replace_values):
for content_type in mongodb_collection.find({}):
if "editor" not in content_type:
continue
original_editor = deepcopy(content_type["editor"])
for field, description in content_type["editor"].items():
if description and description.get("formatOptions"):
for original, new in replace_values.items():
if original in description["formatOptions"]:
description["formatOptions"].remove(original)
if new:
description["formatOptions"].append(new)
if original_editor != content_type["editor"]:
print("update editor in content type", content_type["label"])
mongodb_collection.update(
{"_id": content_type.get(config.ID_FIELD)}, {"$set": {"editor": content_type["editor"]}}
)
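# Hedged illustration (not part of the migration): for a hypothetical editor
# config, the forward pass rewrites formatOptions in place and drops entries
# that map to None.
#
# before: {"body_html": {"formatOptions": ["picture", "anchor", "removeFormat"]}}
# after:  {"body_html": {"formatOptions": ["media", "link"]}}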
|
agpl-3.0
|
heli522/scikit-learn
|
benchmarks/bench_random_projections.py
|
397
|
8900
|
"""
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transfomer):
gc.collect()
clf = clone(transfomer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
|
bsd-3-clause
|
otbrown/OpenBLAS
|
benchmark/scripts/NUMPY/cgemm.py
|
15
|
1024
|
#!/usr/bin/python
import os
import sys
import time
import numpy
from numpy.random import randn
def run_cgemm(N,l):
A = randn(N,N).astype('float32') + randn(N,N).astype('float32') * 1j;
B = randn(N,N).astype('float32') + randn(N,N).astype('float32') * 1j;
start = time.time();
for i in range(0,l):
ref = numpy.dot(A,B)
end = time.time()
timediff = (end -start)
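    # a complex multiply-add costs 8 real flops (4 mul + 4 add), so C = A*B is ~8*N^3 flops per loop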
mflops = ( 8*N*N*N) *l / timediff
mflops *= 1e-6
size = "%dx%d" % (N,N)
print("%14s :\t%20f MFlops\t%20f sec" % (size,mflops,timediff))
if __name__ == "__main__":
N=128
NMAX=2048
NINC=128
LOOPS=1
z=0
for arg in sys.argv:
if z == 1:
N = int(arg)
elif z == 2:
NMAX = int(arg)
elif z == 3:
NINC = int(arg)
elif z == 4:
LOOPS = int(arg)
z = z + 1
if 'OPENBLAS_LOOPS' in os.environ:
p = os.environ['OPENBLAS_LOOPS']
if p:
LOOPS = int(p);
print("From: %d To: %d Step=%d Loops=%d" % (N, NMAX, NINC, LOOPS))
print("\tSIZE\t\t\tFlops\t\t\t\t\tTime")
for i in range (N,NMAX+NINC,NINC):
run_cgemm(i,LOOPS)
|
bsd-3-clause
|
russellhaering/hyde
|
hydeengine/site_post_processors.py
|
2
|
8494
|
from __future__ import with_statement
import os
import string
from django.conf import settings
from django.template.loader import render_to_string
from file_system import File
from datetime import datetime
from hydeengine.templatetags.hydetags import xmldatetime
import commands
import codecs
class FolderFlattener:
@staticmethod
def process(folder, params):
class Flattener:
def __init__(self, folder, params):
self.folder = folder
self.remove_processed_folders = \
params["remove_processed_folders"]
self.previous_folder = None
def visit_file(self, file):
if not self.folder.is_parent_of(file):
file.copy_to(self.folder)
def visit_folder(self, this_folder):
if self.previous_folder and self.remove_processed_folders:
self.previous_folder.delete()
if not self.folder.same_as(this_folder):
self.previous_folder = this_folder
def visit_complete(self):
if self.previous_folder and self.remove_processed_folders:
self.previous_folder.delete()
folder.walk(Flattener(folder, params), params["pattern"])
SITEMAP_CONFIG = \
"""<?xml version="1.0" encoding="UTF-8"?>
<site
base_url="%(base_url)s"
store_into="%(sitemap_path)s"
suppress_search_engine_notify="1"
verbose="1"
>
<urllist path="%(url_list_file)s"/>
</site>"""
class GoogleSitemapGenerator:
@staticmethod
def process(folder, params):
site = settings.CONTEXT['site']
sitemap_path = params["sitemap_file"]
url_list_file = File(sitemap_path).parent.child("urllist.txt")
config_file = File(sitemap_path).parent.child("sitemap_config.xml")
urllist = open(url_list_file, 'w')
for page in site.walk_pages():
if not page.display_in_list and not page.listing:
continue
created = xmldatetime(page.created)
updated = xmldatetime(page.updated)
url = page.full_url
priority = 0.5
if page.listing:
priority = 1.0
changefreq = "weekly"
urllist.write(
"%(url)s lastmod=%(updated)s changefreq=%(changefreq)s \
priority=%(priority).1f\n"
% locals())
urllist.close()
base_url = settings.SITE_WWW_URL
config = open(config_file, 'w')
config.write(SITEMAP_CONFIG % locals())
config.close()
generator = params["generator"]
command = u"python %s --config=%s" % (generator, config_file)
status, output = commands.getstatusoutput(command)
if status > 0:
print output
File(config_file).delete()
File(url_list_file).delete()
class RssGenerator:
"""
Can create a rss feed for a blog and its categories whenever
specified in params
"""
@staticmethod
def process(folder, params):
#defaults initialisation
site = settings.CONTEXT['site']
node = params['node']
by_categories = False
categories = None
output_folder = 'feed'
generator = Rss2FeedGenerator()
if params.has_key('output_folder'):
output_folder = params['output_folder']
if params.has_key('generate_by_categories'):
by_categories = params['generate_by_categories']
if hasattr(node, 'categories'):
categories = node.categories
if categories != None:
#feed generation for each category
for category in categories.keys():
#create a ContentNode adapter for categories to walk through the collection (walk_pages function)
#the same way than through the site's ContentNode
category_adapter = ContentNodeAdapter(categories[category])
feed = generator.generate(category_adapter)
feed_filename = "%s.xml" % (category.lower().replace(' ','_'))
feed_url = "%s/%s/%s/%s" % (settings.SITE_WWW_URL, site.url, output_folder, feed_filename)
node.categories[category].feed_url = feed_url
RssGenerator._write_feed(feed, output_folder, feed_filename)
feed = generator.generate(node)
node.feed_url = "%s/%s/%s/%s" % (settings.SITE_WWW_URL, site.url, output_folder, "feed.xml")
RssGenerator._write_feed(feed, output_folder, "feed.xml")
@staticmethod
def _write_feed(feed, folder, file_name):
output = os.path.join(settings.CONTENT_DIR, folder)
if not os.path.isdir(output):
os.makedirs(output)
filename = os.path.join(output, file_name)
with codecs.open(filename, 'w', 'utf-8') as f:
f.write(feed)
class ContentNodeAdapter:
"""
Adapter for a collection of posts to fulfill the ContentNode
walk_pages contract
"""
def __init__(self, category):
self.category = category
def walk_pages(self):
for post in self.category.posts:
yield post
class FeedGenerator:
"""
Base abstract class for the generation of syndication feeds
"""
def __init__(self):
pass
def generate(self, node):
"""
Template method calling child implementations
"""
#generate items
items = self.generate_items(node)
#generate feed with items inside
feed = self.generate_feed(items)
return feed
def generate_items(self, node):
raise TypeError('abstract function')
def generate_feed(self, items):
raise TypeError('abstract function')
RSS2_FEED = \
"""
<?xml version="1.0"?>
<rss version="2.0">
<channel>
<title>%(title)s</title>
<link>%(url)s/</link>
<description>%(description)s</description>
<language>%(language)s</language>
<pubDate>%(publication_date)s</pubDate>
<lastBuildDate>%(last_build_date)s</lastBuildDate>
<docs>http://blogs.law.harvard.edu/tech/rss</docs>
<generator>Hyde</generator>
<webMaster>%(webmaster)s</webMaster>
%(items)s
</channel>
</rss>
"""
RSS2_ITEMS = \
"""
<item>
<title>%(item_title)s</title>
<link>%(item_link)s</link>
<description>%(description)s</description>
<pubDate>%(publication_date)s</pubDate>
<author>%(author)s</author>
</item>"""
class Rss2FeedGenerator(FeedGenerator):
"""
Implementation of a rss version 2 generator
"""
def __init__(self):
FeedGenerator.__init__(self)
def generate_items(self, node):
items = ""
        author = settings.SITE_AUTHOR_EMAIL or ''
for post in node.walk_pages():
if hasattr(post, 'listing') and post.listing:
continue
item_title = post.title
item_link = post.full_url
description = ''
publication_date = post.created
#TODO let customisation of RSS2_ITEMS
cur_item = RSS2_ITEMS % locals()
items = "%s%s" % (items, cur_item)
return items
def generate_feed(self, items):
title = settings.SITE_NAME
url = settings.SITE_WWW_URL
description = ''
language = settings.LANGUAGE_CODE or 'en-us'
publication_date = ""
last_build_date = ""
webmaster = settings.SITE_AUTHOR_EMAIL
return RSS2_FEED % locals()
class CategoriesArchiveGenerator:
@staticmethod
def process(folder, params):
node = params['node']
if hasattr(node, 'categories'):
categories = node.categories
else:
raise ValueError("No categories member on node %s" % (node))
output_folder = 'archives'
        if params.get('output_folder'):
            output_folder = params['output_folder']
        output_folder = os.path.join(settings.CONTENT_DIR, output_folder)
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
for category, posts in categories.iteritems():
#let customization provide a template in config accessing
#possible variables (post info, category info)
pass
|
mit
|
undoware/neutron-drive
|
google_appengine/lib/grizzled/grizzled/net/__init__.py
|
19
|
3602
|
# $Id: f8ce5bf718c826df5fb3cd06701dc2bf6e144acb $
"""
Network-related methods and classes.
"""
from __future__ import absolute_import
__docformat__ = 'restructuredtext en'
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import urlparse
import shutil
import tempfile
import urllib2
import logging
import os
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['download']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
log = logging.getLogger('grizzled.net')
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def download(url, directory=None, bufsize=8192):
"""
Download the specified URL to a directory. This module properly handles
HTTP authentication for URLs like this one::
https://user:password@localhost:8080/foo/bar/baz.tgz
Note, however, that user/password authentication is only supported for
"http" and "https" URLs.
:Parameters:
url : str
the URL to download
directory : str
The directory to receive the downloaded file. If this parameter is
omitted, ``download()`` will create a temporary directory to
contain the file.
bufsize : int
buffer size to use when reading URL
:rtype: tuple
    :return: A (*downloaded_file*, *download_directory*) tuple
"""
pieces = urlparse.urlparse(url)
path = pieces.path
if not directory:
directory = tempfile.mkdtemp(prefix='download')
outputPath = os.path.join(directory, os.path.basename(path))
# Handle user/password explicitly.
if pieces.scheme.startswith('http') and pieces.username:
# Initialize basic HTTP authentication for this URL.
# See http://aspn.activestate.com/ASPN/docs/ActivePython/2.5/howto/urllib2/index.html
#
# NOTE: This is necessary because urllib doesn't handle URLs like
# http://user:password@host:port/...
# Get the user name and password from the URL.
user, password = pieces.username, pieces.password
netloc = pieces.hostname
if pieces.port:
            netloc += ':%d' % pieces.port
newPieces = (pieces.scheme, netloc, pieces.path, pieces.query,
pieces.params, pieces.fragment)
url = urlparse.urlunparse(newPieces)
log.debug('Installing authorization handler for URL %s' % url)
passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
passwordMgr.add_password(realm=None,
uri=url,
user=user,
passwd=password)
authHandler = urllib2.HTTPBasicAuthHandler(passwordMgr)
opener = urllib2.build_opener(authHandler)
opener.open(url)
urllib2.install_opener(opener)
log.debug('Downloading "%s" to "%s"' % (url, outputPath))
shutil.copyfileobj(urllib2.urlopen(url), open(outputPath, 'wb'), bufsize)
return (outputPath, directory)
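# Hedged usage sketch (not part of this module): the URL and target directory
# below are hypothetical.
#
# downloaded_file, download_dir = download('http://example.com/foo/baz.tgz',
#                                          directory='/tmp/downloads')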
|
bsd-3-clause
|
jolevq/odoopub
|
addons/account/wizard/account_move_bank_reconcile.py
|
385
|
2684
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_move_bank_reconcile(osv.osv_memory):
"""
Bank Reconciliation
"""
_name = "account.move.bank.reconcile"
_description = "Move bank reconcile"
_columns = {
'journal_id': fields.many2one('account.journal', 'Journal', required=True),
}
def action_open_window(self, cr, uid, ids, context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: account move bank reconcile’s ID or list of IDs
@return: dictionary of Open account move line on given journal_id.
"""
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
cr.execute('select default_credit_account_id \
from account_journal where id=%s', (data['journal_id'],))
account_id = cr.fetchone()[0]
if not account_id:
raise osv.except_osv(_('Error!'), _('You have to define \
the bank account\nin the journal definition for reconciliation.'))
return {
'domain': "[('journal_id','=',%d), ('account_id','=',%d), ('state','<>','draft')]" % (data['journal_id'], account_id),
'name': _('Standard Encoding'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move.line',
'view_id': False,
'context': "{'journal_id': %d}" % (data['journal_id'],),
'type': 'ir.actions.act_window'
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
sharoonthomas/trytond-pos
|
sale.py
|
1
|
22737
|
# -*- coding: utf-8 -*-
"""
sale.py
"""
from datetime import datetime, timedelta
from sql import Literal
from trytond.model import fields
from trytond.pool import Pool, PoolMeta
from trytond.transaction import Transaction
from trytond.rpc import RPC
from trytond.model import ModelView
from trytond.pyson import Eval, Bool, And
from trytond import backend
from math import floor
from decimal import Decimal
__metaclass__ = PoolMeta
__all__ = ["Sale", "SaleChannel", "SaleLine"]
class SaleConfiguration:
'Sale Configuration'
__name__ = 'sale.configuration'
round_down_account = fields.Property(
fields.Many2One('account.account', 'Round Down Account', required=True)
)
class SaleChannel:
__name__ = 'sale.channel'
anonymous_customer = fields.Many2One(
'party.party', "Anonymous Customer", states={
'required': Eval('source') == 'pos',
'invisible': Eval('source') != 'pos',
}
)
# The warehouse from which backorders will be shipped.
#
# TODO: Default to channel's warehouse.
backorder_warehouse = fields.Many2One(
'stock.location', "Warehouse (Backorder)",
domain=[('type', '=', 'warehouse')], states={
'required': Eval('source') == 'pos',
'invisible': Eval('source') != 'pos',
}
)
delivery_mode = fields.Selection([
('pick_up', 'Pick Up'),
('ship', 'Ship'),
], 'Delivery Mode', required=True)
@classmethod
def get_source(cls):
"""
Override the get_source method to add 'pos' as a source in channel
"""
sources = super(SaleChannel, cls).get_source()
sources.append(('pos', 'POS'))
return sources
@staticmethod
def default_delivery_mode():
return 'ship'
@classmethod
def __register__(cls, module_name):
TableHandler = backend.get('TableHandler')
cursor = Transaction().cursor
table = TableHandler(cursor, cls, module_name)
# Remove not null constraint from anonymous_customer
table.not_null_action('anonymous_customer', action='remove')
# Remove not null constraint from ship_to_warehouse
table.not_null_action('ship_to_warehouse', action='remove')
# Rename ship_to_warehouse to backorder_warehouse
table.column_rename('ship_to_warehouse', 'backorder_warehouse')
super(SaleChannel, cls).__register__(module_name)
class Sale:
__name__ = "sale.sale"
@staticmethod
def default_party():
User = Pool().get('res.user')
user = User(Transaction().user)
if (
'use_anonymous_customer' not in Transaction().context
): # pragma: no cover
return
if user.current_channel and user.current_channel.anonymous_customer:
return user.current_channel.anonymous_customer.id
@classmethod
def __setup__(cls):
super(Sale, cls).__setup__()
cls.__rpc__.update({
'pos_add_product': RPC(instantiate=0, readonly=False),
'pos_serialize': RPC(instantiate=0, readonly=True),
'get_recent_sales': RPC(readonly=True),
})
cls.lines.context = {
'current_channel': Eval('channel'),
}
cls._buttons.update({
'round_down_total': {
'invisible': ~Eval('state').in_(['draft', 'quotation']),
},
})
@classmethod
@ModelView.button
def round_down_total(cls, records):
'''
Round down total order price and add remaining amount as new sale line
'''
SaleLine = Pool().get('sale.line')
sale_lines = []
for record in records:
# Check if there's already a roundoff line, remove and create new
# if there is.
round_off_line = SaleLine.search([
('sale', '=', record.id),
('is_round_off', '=', True),
])
if round_off_line:
SaleLine.delete(round_off_line)
floored_total = floor(record.total_amount)
amount_diff = record.total_amount - Decimal(floored_total)
sale_lines.append({
'sale': record,
'is_round_off': True,
'type': 'line',
'quantity': -1,
'unit_price': amount_diff,
'description': 'Round Off'
})
SaleLine.create(
[line for line in sale_lines if line['unit_price']]
)
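    # Illustrative sketch (hypothetical numbers, not part of the original
    # module): for a sale whose total_amount is Decimal('105.40'),
    # floor() gives 105 and the difference is Decimal('0.40'), so the
    # method creates a line with quantity -1 and unit_price 0.40:
    #
    #   -1 * Decimal('0.40') == Decimal('-0.40')  ->  new total 105.00
    #
    # If the total is already a whole number the difference is 0 and no
    # round-off line is created (the unit_price filter above skips it).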
@classmethod
def get_recent_sales(cls):
"""
        Return sales of the current channel that were made within the last
        5 days and are still in draft, quotation, confirmed or processing
        state. Sort by write_date or create_date of the Sale and its sale
        lines.
"""
SaleLine = Pool().get('sale.line')
context = Transaction().context
date = (
datetime.now() - timedelta(days=5)
).strftime('%Y-%m-%d %H:%M:%S')
current_channel = context['current_channel']
SaleTable = cls.__table__()
SaleLineTable = SaleLine.__table__()
cursor = Transaction().cursor
query = SaleTable.join(
SaleLineTable,
condition=(SaleTable.id == SaleLineTable.sale)
).select(
SaleTable.id,
where=(
(SaleTable.channel == Literal(current_channel)) &
(SaleTable.state.in_([
'draft', 'quotation', 'confirmed', 'processing'
])) &
(
(SaleTable.write_date >= Literal(date)) |
(SaleTable.create_date >= Literal(date))
)
),
order_by=(
SaleLineTable.write_date.desc,
SaleLineTable.create_date.desc,
SaleTable.write_date.desc,
SaleTable.create_date.desc
)
)
cursor.execute(*query)
ids = [x[0] for x in cursor.fetchall()]
return [cls(id).serialize('recent_sales') for id in ids]
def pos_find_sale_line_domain(self):
"""
Return domain to find existing sale line for given product.
"""
domain = [
('sale', '=', self.id),
]
context = Transaction().context
if 'product' in context:
domain.append(('product', '=', context['product']))
if 'delivery_mode' in context:
domain.append(('delivery_mode', '=', context['delivery_mode']))
return domain
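    # A minimal sketch of the resulting domain, assuming a sale with id 42
    # and a context carrying product 7 and delivery_mode 'pick_up' (the
    # ids are hypothetical):
    #
    #   [('sale', '=', 42),
    #    ('product', '=', 7),
    #    ('delivery_mode', '=', 'pick_up')]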
def pos_add_product(self, product_ids, quantity, unit_price=None):
"""
        Add products to the sale from the POS.
        This method is meant for the POS, to add multiple products to the
        cart in a single call.
"""
AccountTax = Pool().get('account.tax')
SaleLine = Pool().get('sale.line')
updated_lines = []
for product_id in product_ids:
Transaction().set_context(product=product_id)
try:
if 'sale_line' in Transaction().context:
sale_line = SaleLine(Transaction().context.get('sale_line'))
else:
sale_line, = SaleLine.search(
self.pos_find_sale_line_domain()
)
except ValueError:
sale_line = None
delivery_mode = Transaction().context.get(
'delivery_mode', 'pick_up'
)
if sale_line:
values = {
'product': sale_line.product.id,
'_parent_sale.currency': self.currency.id,
'_parent_sale.party': self.party.id,
'_parent_sale.price_list': (
self.price_list.id if self.price_list else None
),
'_parent_sale.sale_date': self.sale_date,
'_parent_sale.channel': self.channel,
'_parent_sale.shipment_address': self.shipment_address,
'warehouse': self.warehouse,
'_parent_sale.warehouse': self.warehouse,
'unit': sale_line.unit.id,
'quantity': quantity,
'type': 'line',
'delivery_mode': delivery_mode,
}
# Update the values by triggering an onchange which should
# fill missing vals
values.update(SaleLine(**values).on_change_quantity())
values.update(SaleLine(**values).on_change_delivery_mode())
values['unit_price'] = Decimal(unit_price) if unit_price else\
sale_line.unit_price
new_values = {}
for key, value in values.iteritems():
if '.' in key:
continue
if key == 'taxes':
# Difficult to reach here unless taxes change when
# quantities change.
continue # pragma: no cover
new_values[key] = value
SaleLine.write([sale_line], new_values)
else:
values = {
'product': product_id,
'_parent_sale.currency': self.currency.id,
'_parent_sale.party': self.party.id,
'_parent_sale.price_list': (
self.price_list.id if self.price_list else None
),
'_parent_sale.sale_date': self.sale_date,
'_parent_sale.channel': self.channel,
'_parent_sale.shipment_address': self.shipment_address,
'warehouse': self.warehouse,
'_parent_sale.warehouse': self.warehouse,
'sale': self.id,
'type': 'line',
'quantity': quantity,
'unit': None,
'description': None,
'delivery_mode': delivery_mode,
}
values.update(SaleLine(**values).on_change_product())
values.update(SaleLine(**values).on_change_quantity())
values.update(SaleLine(**values).on_change_delivery_mode())
new_values = {}
for key, value in values.iteritems():
if '.' in key:
continue
if key == 'taxes':
continue
new_values[key] = value
sale_line = SaleLine.create([new_values])[0]
updated_lines.append(sale_line.id)
if 'taxes' in values:
sale_line.taxes = AccountTax.browse(values['taxes'])
sale_line.save()
# Now that the sale line is built, return a serializable response
# which ensures that the client does not have to call again.
res = {
'sale': self.serialize('pos'),
'updated_lines': updated_lines,
}
return res
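    # A hedged usage sketch (hypothetical ids and quantity): the POS client
    # would call something like
    #
    #   result = sale.pos_add_product([12, 15], 2)
    #
    # and get back a dict with the serialized sale under 'sale' and the
    # ids of the created/updated lines under 'updated_lines'.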
def pos_serialize(self):
"""
Serialize sale for pos
"""
return self.serialize('pos')
def serialize(self, purpose=None):
"""
Serialize with information needed for POS
"""
if purpose == 'pos':
invoice_address = self.invoice_address or \
self.party.address_get('invoice')
shipment_address = self.shipment_address or \
self.party.address_get('delivery')
return {
'party': self.party.id,
'total_amount': self.total_amount,
'untaxed_amount': self.untaxed_amount,
'tax_amount': self.tax_amount,
'comment': self.comment,
'state': self.state,
'invoice_address': invoice_address and
invoice_address.serialize(purpose),
'shipment_address': shipment_address and
shipment_address.serialize(purpose),
'lines': [line.serialize(purpose) for line in self.lines],
'reference': self.reference,
}
elif purpose == 'recent_sales':
return {
'id': self.id,
'party': {
'id': self.party.id,
'name': self.party.name,
},
'total_amount': self.total_amount,
'create_date': self.create_date,
'state': self.state,
'reference': self.reference,
}
elif hasattr(super(Sale, self), 'serialize'):
            return super(Sale, self).serialize(purpose)  # pragma: no cover
def _group_shipment_key(self, moves, move):
"""
This method returns a key based on which Tryton creates shipments
for a given sale order. By default Tryton uses the planned_date for the
delivery and warehouse to separate shipments.
We use the same functionality to split the shipments for items being
picked up and delivered. This is later used to auto proceed and finish
the shipping of the picked up products.
:param moves: A list of all moves
:param move: move is a tuple of line id and a move
"""
SaleLine = Pool().get('sale.line')
line = SaleLine(move[0])
rv = super(Sale, self)._group_shipment_key(moves, move)
return rv + (('delivery_mode', line.delivery_mode),)
_group_return_key = _group_shipment_key
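    # A minimal sketch, assuming the base grouping key from Tryton is
    # something like (planned_date, warehouse) as described in the
    # docstring above: for a line with delivery_mode 'pick_up' the key
    # returned here becomes
    #
    #   (planned_date, warehouse, ('delivery_mode', 'pick_up'))
    #
    # so picked-up and shipped lines end up on separate shipments.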
def create_shipment(self, shipment_type):
"""
This method creates the shipments for the given sale order.
This implementation inspects the order lines to look for lines which
are expected to be picked up instantly and the shipment created for
pick_up is automatically processed all the way through.
"""
pool = Pool()
shipments = super(Sale, self).create_shipment(shipment_type)
if self.shipment_method == 'manual':
# shipments will be None but for future return the value
# returned by the super function
return shipments
if not shipments:
return shipments
picked_up_shipments = filter(
lambda s: s.delivery_mode == 'pick_up', shipments
)
if shipment_type == 'out':
Shipment = pool.get('stock.shipment.out')
with Transaction().set_user(0, set_context=True):
# If we are going to "process" a shipment, it is
# equivalent to sale being processed.
#
# Doing this here helps in an edge case where the
# sale total is 0. When a shipment is "Done" it
                # tries to reprocess the order state, but
                # usually this happens after the sale is in the
# processing state. Since we push things through in the
# next few lines, that call happens when the sale is in
# confirmed state and there is no transition from
# Confirmed to Done.
self.state = 'processing'
self.save()
# Assign and complete the shipments
if not Shipment.assign_try(picked_up_shipments):
draft_moves = filter(
lambda m: m.state == 'draft',
[m for s in picked_up_shipments for m in s.inventory_moves] # noqa
)
products_out_of_stock = [
m.product.rec_name for m in draft_moves
]
self.raise_user_error(
"Order cannot be processed as the following items are "
"out of stock:\n" + "\n".join(products_out_of_stock)
)
Shipment.pack(picked_up_shipments)
Shipment.done(picked_up_shipments)
elif shipment_type == 'return':
Shipment = pool.get('stock.shipment.out.return')
with Transaction().set_user(0, set_context=True):
Shipment.receive(picked_up_shipments)
Shipment.done(picked_up_shipments)
# Finally return the value the super function returned, but after
# reloading the active records.
return Shipment.browse(map(int, shipments))
def create_invoice(self, invoice_type):
"""
Sale creates draft invoices. But if the invoices are created from
shipments, then they should be automatically opened
"""
Invoice = Pool().get('account.invoice')
invoice = super(Sale, self).create_invoice(invoice_type)
if not invoice:
return invoice
if self.invoice_method == 'shipment':
# Invoices created from shipment can be automatically opened
# for payment.
Invoice.post([invoice])
return invoice
class SaleLine:
__name__ = 'sale.line'
is_round_off = fields.Boolean('Round Off', readonly=True)
delivery_mode = fields.Selection([
(None, ''),
('pick_up', 'Pick Up'),
('ship', 'Ship'),
], 'Delivery Mode', states={
'invisible': Eval('type') != 'line',
'required': And(
Eval('type') == 'line',
Bool(Eval('product_type_is_goods'))
)
}, depends=['type', 'product_type_is_goods'])
product_type_is_goods = fields.Function(
fields.Boolean('Product Type is Goods?'), 'get_product_type_is_goods'
)
@classmethod
def __register__(cls, module_name):
super(SaleLine, cls).__register__(module_name)
TableHandler = backend.get('TableHandler')
cursor = Transaction().cursor
table = TableHandler(cursor, cls, module_name)
table.not_null_action('delivery_mode', action='remove')
@classmethod
def __setup__(cls):
super(SaleLine, cls).__setup__()
# Hide product and unit fields.
cls.product.states['invisible'] |= Bool(Eval('is_round_off'))
cls.unit.states['invisible'] |= Bool(Eval('is_round_off'))
cls.delivery_mode.states['invisible'] |= Bool(Eval('is_round_off'))
cls.product.depends.insert(0, 'is_round_off')
cls.unit.depends.insert(0, 'is_round_off')
@fields.depends(
'product', 'unit', 'quantity', '_parent_sale.party',
'_parent_sale.currency', '_parent_sale.sale_date', 'delivery_mode',
'_parent_sale.channel', '_parent_sale.shipment_address',
'warehouse', '_parent_sale.warehouse'
)
def on_change_delivery_mode(self):
"""
        This method can be overridden by downstream modules to make changes
        according to the delivery mode, for example to change taxes based
        on the delivery mode.
"""
res = {}
if self.delivery_mode == 'ship' and \
self.sale.channel.backorder_warehouse:
res['warehouse'] = self.sale.channel.backorder_warehouse.id
return res
@staticmethod
def default_is_round_off():
return False
def get_invoice_line(self, invoice_type):
SaleConfiguration = Pool().get('sale.configuration')
InvoiceLine = Pool().get('account.invoice.line')
if not self.is_round_off:
return super(SaleLine, self).get_invoice_line(invoice_type)
        # The line is a round-off line, so apply the round-off logic here.
# Check if the invoice line already exists for the sale line
# If yes, then no line needs to be created
# XXX: What if the invoice was cancelled ?
if self.invoice_lines:
return []
round_down_account = SaleConfiguration(1).round_down_account
if not round_down_account:
self.raise_user_error(
'''Set round down account from Sale Configuration to
add round off line'''
)
invoice_line = InvoiceLine()
invoice_line.origin = self
invoice_line.account = round_down_account
invoice_line.unit_price = self.unit_price
invoice_line.description = self.description
        # For positive sales transactions (where the order total is
        # effectively positive), the round-down line is applied to the
        # out_invoice; if the overall order total is negative, the
        # round-down line is tied to the credit note instead.
if self.sale.total_amount >= Decimal('0'):
if invoice_type == 'out_credit_note':
# positive order looking for credit note
return []
invoice_line.quantity = self.quantity
else:
if invoice_type == 'out_invoice':
# negative order looking for invoice
return []
invoice_line.quantity = -self.quantity
return [invoice_line]
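        # A worked sketch with hypothetical numbers, mirroring the sign
        # handling above: for a positive order total, a round-off line with
        # quantity -1 and unit_price 0.40 is invoiced as-is on the
        # out_invoice (-1 * 0.40 = -0.40); for a negative order total the
        # same line only goes on the credit note, with its quantity negated
        # (+1) so the round-off keeps the right sign there.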
@staticmethod
def default_delivery_mode():
Channel = Pool().get('sale.channel')
User = Pool().get('res.user')
user = User(Transaction().user)
sale_channel = user.current_channel
if Transaction().context.get('current_channel'):
sale_channel = Channel(
Transaction().context.get('current_channel')
)
return sale_channel and sale_channel.delivery_mode
def serialize(self, purpose=None):
"""
Serialize for the purpose of POS
"""
if purpose == 'pos':
return {
'id': self.id,
'description': self.description,
'product': self.product and {
'id': self.product.id,
'code': self.product.code,
'rec_name': self.product.rec_name,
'default_image': self.product.default_image and
self.product.default_image.id,
},
'unit': self.unit and {
'id': self.unit.id,
'rec_name': self.unit.rec_name,
},
'unit_price': self.unit_price,
'quantity': self.quantity,
'amount': self.amount,
'delivery_mode': self.delivery_mode
}
elif hasattr(super(SaleLine, self), 'serialize'):
return super(SaleLine, self).serialize(purpose) # pragma: no cover
def get_product_type_is_goods(self, name):
"""
Return True if product is of type goods
"""
if self.product and self.product.type == 'goods':
return True
return False
|
bsd-3-clause
|
alaski/nova
|
nova/api/openstack/compute/agents.py
|
6
|
7174
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack.compute.schemas import agents as schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova import objects
from nova.policies import agents as agents_policies
from nova import utils
ALIAS = "os-agents"
class AgentController(wsgi.Controller):
"""The agent is talking about guest agent.The host can use this for
things like accessing files on the disk, configuring networking,
or running other applications/scripts in the guest while it is
running. Typically this uses some hypervisor-specific transport
to avoid being dependent on a working network configuration.
Xen, VMware, and VirtualBox have guest agents,although the Xen
driver is the only one with an implementation for managing them
in openstack. KVM doesn't really have a concept of a guest agent
(although one could be written).
You can find the design of agent update in this link:
http://wiki.openstack.org/AgentUpdate
and find the code in nova.virt.xenapi.vmops.VMOps._boot_new_instance.
In this design We need update agent in guest from host, so we need
some interfaces to update the agent info in host.
You can find more information about the design of the GuestAgent in
the following link:
http://wiki.openstack.org/GuestAgent
http://wiki.openstack.org/GuestAgentXenStoreCommunication
"""
@extensions.expected_errors(())
def index(self, req):
"""Return a list of all agent builds. Filter by hypervisor."""
context = req.environ['nova.context']
context.can(agents_policies.BASE_POLICY_NAME)
hypervisor = None
agents = []
if 'hypervisor' in req.GET:
hypervisor = req.GET['hypervisor']
builds = objects.AgentList.get_all(context, hypervisor=hypervisor)
for agent_build in builds:
agents.append({'hypervisor': agent_build.hypervisor,
'os': agent_build.os,
'architecture': agent_build.architecture,
'version': agent_build.version,
'md5hash': agent_build.md5hash,
'agent_id': agent_build.id,
'url': agent_build.url})
return {'agents': agents}
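    # A hedged example of the shape returned by index(), with hypothetical
    # values (the keys mirror the dict built above):
    #
    #   {"agents": [{"hypervisor": "xen", "os": "linux",
    #                "architecture": "x86", "version": "1.0",
    #                "md5hash": "...", "agent_id": 1,
    #                "url": "http://example.com/agent.tgz"}]}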
@extensions.expected_errors((400, 404))
@validation.schema(schema.update)
def update(self, req, id, body):
"""Update an existing agent build."""
context = req.environ['nova.context']
context.can(agents_policies.BASE_POLICY_NAME)
# TODO(oomichi): This parameter name "para" is different from the ones
# of the other APIs. Most other names are resource names like "server"
# etc. This name should be changed to "agent" for consistent naming
# with v2.1+microversions.
para = body['para']
url = para['url']
md5hash = para['md5hash']
version = para['version']
try:
utils.validate_integer(id, 'id')
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
agent = objects.Agent(context=context, id=id)
agent.obj_reset_changes()
agent.version = version
agent.url = url
agent.md5hash = md5hash
try:
agent.save()
except exception.AgentBuildNotFound as ex:
raise webob.exc.HTTPNotFound(explanation=ex.format_message())
# TODO(alex_xu): The agent_id should be integer that consistent with
# create/index actions. But parameter 'id' is string type that parsed
# from url. This is a bug, but because back-compatibility, it can't be
# fixed for v2 API. This will be fixed in v2.1 API by Microversions in
# the future. lp bug #1333494
return {"agent": {'agent_id': id, 'version': version,
'url': url, 'md5hash': md5hash}}
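    # A hedged sketch of the request body expected by update(), with
    # hypothetical values; the top-level key really is "para", as the
    # TODO above notes:
    #
    #   {"para": {"url": "http://example.com/agent-2.0.tgz",
    #             "md5hash": "add6bb58e139be103324d04d82d8f545",
    #             "version": "2.0"}}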
# TODO(oomichi): Here should be 204(No Content) instead of 200 by v2.1
# +microversions because the resource agent has been deleted completely
# when returning a response.
@extensions.expected_errors((400, 404))
@wsgi.response(200)
def delete(self, req, id):
"""Deletes an existing agent build."""
context = req.environ['nova.context']
context.can(agents_policies.BASE_POLICY_NAME)
try:
utils.validate_integer(id, 'id')
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
try:
agent = objects.Agent(context=context, id=id)
agent.destroy()
except exception.AgentBuildNotFound as ex:
raise webob.exc.HTTPNotFound(explanation=ex.format_message())
# TODO(oomichi): Here should be 201(Created) instead of 200 by v2.1
# +microversions because the creation of a resource agent finishes
# when returning a response.
@extensions.expected_errors((400, 409))
@wsgi.response(200)
@validation.schema(schema.create)
def create(self, req, body):
"""Creates a new agent build."""
context = req.environ['nova.context']
context.can(agents_policies.BASE_POLICY_NAME)
agent = body['agent']
hypervisor = agent['hypervisor']
os = agent['os']
architecture = agent['architecture']
version = agent['version']
url = agent['url']
md5hash = agent['md5hash']
agent_obj = objects.Agent(context=context)
agent_obj.hypervisor = hypervisor
agent_obj.os = os
agent_obj.architecture = architecture
agent_obj.version = version
agent_obj.url = url
agent_obj.md5hash = md5hash
try:
agent_obj.create()
agent['agent_id'] = agent_obj.id
except exception.AgentBuildExists as ex:
raise webob.exc.HTTPConflict(explanation=ex.format_message())
return {'agent': agent}
class Agents(extensions.V21APIExtensionBase):
"""Agents support."""
name = "Agents"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
AgentController())]
return resource
def get_controller_extensions(self):
"""It's an abstract function V21APIExtensionBase and the extension
will not be loaded without it.
"""
return []
|
apache-2.0
|
marcoantoniooliveira/labweb
|
oscar/lib/python2.7/site-packages/django/db/models/sql/constants.py
|
126
|
1360
|
"""
Constants specific to the SQL storage portion of the ORM.
"""
from collections import namedtuple
import re
# Valid query types (a set is used for speedy lookups). These are (currently)
# considered SQL-specific; other storage systems may choose to use different
# lookup types.
QUERY_TERMS = set([
'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
'month', 'day', 'week_day', 'hour', 'minute', 'second', 'isnull', 'search',
'regex', 'iregex',
])
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
# Namedtuples for sql.* internal use.
# Join lists (indexes into the tuples that are values in the alias_map
# dictionary in the Query class).
JoinInfo = namedtuple('JoinInfo',
'table_name rhs_alias join_type lhs_alias '
'join_cols nullable join_field')
# Pairs of column clauses to select, and (possibly None) field for the clause.
SelectInfo = namedtuple('SelectInfo', 'col field')
# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
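# A few illustrative examples of what ORDER_PATTERN accepts (checked only
# against the regex above, not against any other Django validation):
#
#   ORDER_PATTERN.match('?')                   # random ordering marker -> match
#   ORDER_PATTERN.match('-created')            # descending field      -> match
#   ORDER_PATTERN.match('author.name')         # dotted path           -> match
#   ORDER_PATTERN.match('name; DROP TABLE x')  # rejected              -> None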
ORDER_DIR = {
'ASC': ('ASC', 'DESC'),
'DESC': ('DESC', 'ASC'),
}
|
bsd-3-clause
|
larsbutler/swift
|
swift/obj/replicator.py
|
1
|
37876
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import errno
from os.path import isdir, isfile, join, dirname
import random
import shutil
import time
import itertools
from six import viewkeys
import six.moves.cPickle as pickle
from swift import gettext_ as _
import eventlet
from eventlet import GreenPool, tpool, Timeout, sleep, hubs
from eventlet.green import subprocess
from eventlet.support.greenlets import GreenletExit
from swift.common.ring.utils import is_local_device
from swift.common.utils import whataremyips, unlink_older_than, \
compute_eta, get_logger, dump_recon_cache, ismount, \
rsync_module_interpolation, mkdirs, config_true_value, list_from_csv, \
get_hub, tpool_reraise, config_auto_int_value, storage_directory
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
from swift.obj import ssync_sender
from swift.obj.diskfile import DiskFileManager, get_data_dir, get_tmp_dir
from swift.common.storage_policy import POLICIES, REPL_POLICY
DEFAULT_RSYNC_TIMEOUT = 900
hubs.use_hub(get_hub())
class ObjectReplicator(Daemon):
"""
Replicate objects.
Encapsulates most logic and data needed by the object replication process.
Each call to .replicate() performs one replication pass. It's up to the
caller to do this in a loop.
"""
def __init__(self, conf, logger=None):
"""
:param conf: configuration object obtained from ConfigParser
:param logger: logging object
"""
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-replicator')
self.devices_dir = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.bind_ip = conf.get('bind_ip', '0.0.0.0')
self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
self.port = None if self.servers_per_port else \
int(conf.get('bind_port', 6200))
self.concurrency = int(conf.get('concurrency', 1))
self.stats_interval = int(conf.get('stats_interval', '300'))
self.ring_check_interval = int(conf.get('ring_check_interval', 15))
self.next_check = time.time() + self.ring_check_interval
self.reclaim_age = int(conf.get('reclaim_age', 86400 * 7))
self.partition_times = []
self.interval = int(conf.get('interval') or
conf.get('run_pause') or 30)
self.rsync_timeout = int(conf.get('rsync_timeout',
DEFAULT_RSYNC_TIMEOUT))
self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
self.rsync_bwlimit = conf.get('rsync_bwlimit', '0')
self.rsync_compress = config_true_value(
conf.get('rsync_compress', 'no'))
self.rsync_module = conf.get('rsync_module', '').rstrip('/')
if not self.rsync_module:
self.rsync_module = '{replication_ip}::object'
if config_true_value(conf.get('vm_test_mode', 'no')):
self.logger.warning('Option object-replicator/vm_test_mode '
'is deprecated and will be removed in a '
'future version. Update your '
'configuration to use option '
'object-replicator/rsync_module.')
self.rsync_module += '{replication_port}'
self.http_timeout = int(conf.get('http_timeout', 60))
self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, "object.recon")
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.node_timeout = float(conf.get('node_timeout', 10))
self.sync_method = getattr(self, conf.get('sync_method') or 'rsync')
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.default_headers = {
'Content-Length': '0',
'user-agent': 'object-replicator %s' % os.getpid()}
self.rsync_error_log_line_length = \
int(conf.get('rsync_error_log_line_length', 0))
self.handoffs_first = config_true_value(conf.get('handoffs_first',
False))
self.handoff_delete = config_auto_int_value(
conf.get('handoff_delete', 'auto'), 0)
if any((self.handoff_delete, self.handoffs_first)):
self.logger.warning('Handoff only mode is not intended for normal '
'operation, please disable handoffs_first and '
'handoff_delete before the next '
'normal rebalance')
self._diskfile_mgr = DiskFileManager(conf, self.logger)
def _zero_stats(self):
"""Zero out the stats."""
self.stats = {'attempted': 0, 'success': 0, 'failure': 0,
'hashmatch': 0, 'rsync': 0, 'remove': 0,
'start': time.time(), 'failure_nodes': {}}
def _add_failure_stats(self, failure_devs_info):
for node, dev in failure_devs_info:
self.stats['failure'] += 1
failure_devs = self.stats['failure_nodes'].setdefault(node, {})
failure_devs.setdefault(dev, 0)
failure_devs[dev] += 1
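        # A hedged illustration with hypothetical values: after two recorded
        # failures against device 'sda' on node '10.0.0.1', self.stats holds
        #
        #   stats['failure'] == 2
        #   stats['failure_nodes'] == {'10.0.0.1': {'sda': 2}}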
def _get_my_replication_ips(self):
my_replication_ips = set()
ips = whataremyips()
for policy in POLICIES:
self.load_object_ring(policy)
for local_dev in [dev for dev in policy.object_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] == self.port]:
my_replication_ips.add(local_dev['replication_ip'])
return list(my_replication_ips)
# Just exists for doc anchor point
def sync(self, node, job, suffixes, *args, **kwargs):
"""
Synchronize local suffix directories from a partition with a remote
node.
:param node: the "dev" entry for the remote node to sync with
:param job: information about the partition being synced
:param suffixes: a list of suffixes which need to be pushed
:returns: boolean and dictionary, boolean indicating success or failure
"""
return self.sync_method(node, job, suffixes, *args, **kwargs)
def load_object_ring(self, policy):
"""
Make sure the policy's rings are loaded.
:param policy: the StoragePolicy instance
:returns: appropriate ring object
"""
policy.load_ring(self.swift_dir)
return policy.object_ring
def _rsync(self, args):
"""
Execute the rsync binary to replicate a partition.
:returns: return code of rsync process. 0 is successful
"""
start_time = time.time()
ret_val = None
try:
with Timeout(self.rsync_timeout):
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
results = proc.stdout.read()
ret_val = proc.wait()
except Timeout:
self.logger.error(_("Killing long-running rsync: %s"), str(args))
proc.kill()
return 1 # failure response code
total_time = time.time() - start_time
for result in results.split('\n'):
if result == '':
continue
if result.startswith('cd+'):
continue
if not ret_val:
self.logger.info(result)
else:
self.logger.error(result)
if ret_val:
error_line = _('Bad rsync return code: %(ret)d <- %(args)s') % \
{'args': str(args), 'ret': ret_val}
if self.rsync_error_log_line_length:
error_line = error_line[:self.rsync_error_log_line_length]
self.logger.error(error_line)
elif results:
self.logger.info(
_("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
{'src': args[-2], 'dst': args[-1], 'time': total_time})
else:
self.logger.debug(
_("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
{'src': args[-2], 'dst': args[-1], 'time': total_time})
return ret_val
def rsync(self, node, job, suffixes):
"""
Uses rsync to implement the sync method. This was the first
sync method in Swift.
"""
if not os.path.exists(job['path']):
return False, {}
args = [
'rsync',
'--recursive',
'--whole-file',
'--human-readable',
'--xattrs',
'--itemize-changes',
'--ignore-existing',
'--timeout=%s' % self.rsync_io_timeout,
'--contimeout=%s' % self.rsync_io_timeout,
'--bwlimit=%s' % self.rsync_bwlimit,
'--exclude=.*.%s' % ''.join('[0-9a-zA-Z]' for i in range(6))
]
if self.rsync_compress and \
job['region'] != node['region']:
# Allow for compression, but only if the remote node is in
# a different region than the local one.
args.append('--compress')
rsync_module = rsync_module_interpolation(self.rsync_module, node)
had_any = False
for suffix in suffixes:
spath = join(job['path'], suffix)
if os.path.exists(spath):
args.append(spath)
had_any = True
if not had_any:
return False, {}
data_dir = get_data_dir(job['policy'])
args.append(join(rsync_module, node['device'],
data_dir, job['partition']))
return self._rsync(args) == 0, {}
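    # A hedged sketch of the command that ends up being executed, assuming
    # default settings, the default rsync_module template and hypothetical
    # paths (the --exclude pattern is abbreviated here):
    #
    #   rsync --recursive --whole-file --human-readable --xattrs \
    #         --itemize-changes --ignore-existing --timeout=30 \
    #         --contimeout=30 --bwlimit=0 '--exclude=.*.[0-9a-zA-Z]...' \
    #         /srv/node/sda/objects/1234/abc \
    #         10.0.0.2::object/sdb/objects/1234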
def ssync(self, node, job, suffixes, remote_check_objs=None):
return ssync_sender.Sender(
self, node, job, suffixes, remote_check_objs)()
def check_ring(self, object_ring):
"""
Check to see if the ring has been updated
:param object_ring: the ring to check
        :returns: False if the ring has changed since the last check,
                  True otherwise
"""
if time.time() > self.next_check:
self.next_check = time.time() + self.ring_check_interval
if object_ring.has_changed():
return False
return True
def update_deleted(self, job):
"""
High-level method that replicates a single partition that doesn't
belong on this node.
:param job: a dict containing info about the partition to be replicated
"""
def tpool_get_suffixes(path):
return [suff for suff in os.listdir(path)
if len(suff) == 3 and isdir(join(path, suff))]
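        # For example (hypothetical listing): if job['path'] contains the
        # entries 'abc', 'f3e', 'hashes.pkl' and a stray file 'junk', only
        # the 3-character suffix directories 'abc' and 'f3e' are returned.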
self.replication_count += 1
self.logger.increment('partition.delete.count.%s' % (job['device'],))
headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
failure_devs_info = set()
begin = time.time()
handoff_partition_deleted = False
try:
responses = []
suffixes = tpool.execute(tpool_get_suffixes, job['path'])
synced_remote_regions = {}
delete_objs = None
if suffixes:
for node in job['nodes']:
self.stats['rsync'] += 1
kwargs = {}
if node['region'] in synced_remote_regions and \
self.conf.get('sync_method', 'rsync') == 'ssync':
kwargs['remote_check_objs'] = \
synced_remote_regions[node['region']]
# candidates is a dict(hash=>timestamp) of objects
# for deletion
success, candidates = self.sync(
node, job, suffixes, **kwargs)
if success:
with Timeout(self.http_timeout):
conn = http_connect(
node['replication_ip'],
node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(suffixes), headers=headers)
conn.getresponse().read()
if node['region'] != job['region']:
synced_remote_regions[node['region']] = viewkeys(
candidates)
else:
failure_devs_info.add((node['replication_ip'],
node['device']))
responses.append(success)
for region, cand_objs in synced_remote_regions.items():
if delete_objs is None:
delete_objs = cand_objs
else:
delete_objs = delete_objs & cand_objs
if self.handoff_delete:
# delete handoff if we have had handoff_delete successes
delete_handoff = len([resp for resp in responses if resp]) >= \
self.handoff_delete
else:
# delete handoff if all syncs were successful
delete_handoff = len(responses) == len(job['nodes']) and \
all(responses)
if delete_handoff:
self.stats['remove'] += 1
if (self.conf.get('sync_method', 'rsync') == 'ssync' and
delete_objs is not None):
self.logger.info(_("Removing %s objects"),
len(delete_objs))
_junk, error_paths = self.delete_handoff_objs(
job, delete_objs)
                    # If replication from this hand-off device succeeded
                    # but the cleanup of the synced objects failed, mark
                    # the remote devices that were the targets of the
                    # replication, because a failed cleanup means the
                    # replicator needs to replicate to them again with
                    # the same info.
if error_paths:
failure_devs_info.update(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in job['nodes']])
else:
self.delete_partition(job['path'])
handoff_partition_deleted = True
elif not suffixes:
self.delete_partition(job['path'])
handoff_partition_deleted = True
except (Exception, Timeout):
self.logger.exception(_("Error syncing handoff partition"))
finally:
target_devs_info = set([(target_dev['replication_ip'],
target_dev['device'])
for target_dev in job['nodes']])
self.stats['success'] += len(target_devs_info - failure_devs_info)
self._add_failure_stats(failure_devs_info)
if not handoff_partition_deleted:
self.handoffs_remaining += 1
self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.delete.timing', begin)
def delete_partition(self, path):
self.logger.info(_("Removing partition: %s"), path)
tpool.execute(shutil.rmtree, path)
def delete_handoff_objs(self, job, delete_objs):
success_paths = []
error_paths = []
for object_hash in delete_objs:
object_path = storage_directory(job['obj_path'], job['partition'],
object_hash)
tpool.execute(shutil.rmtree, object_path, ignore_errors=True)
suffix_dir = dirname(object_path)
try:
os.rmdir(suffix_dir)
success_paths.append(object_path)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
error_paths.append(object_path)
self.logger.exception(
"Unexpected error trying to cleanup suffix dir:%r",
suffix_dir)
return success_paths, error_paths
def update(self, job):
"""
High-level method that replicates a single partition.
:param job: a dict containing info about the partition to be replicated
"""
self.replication_count += 1
self.logger.increment('partition.update.count.%s' % (job['device'],))
headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
target_devs_info = set()
failure_devs_info = set()
begin = time.time()
try:
hashed, local_hash = tpool_reraise(
self._diskfile_mgr._get_hashes, job['path'],
do_listdir=(self.replication_count % 10) == 0,
reclaim_age=self.reclaim_age)
self.suffix_hash += hashed
self.logger.update_stats('suffix.hashes', hashed)
attempts_left = len(job['nodes'])
synced_remote_regions = set()
random.shuffle(job['nodes'])
nodes = itertools.chain(
job['nodes'],
job['policy'].object_ring.get_more_nodes(
int(job['partition'])))
while attempts_left > 0:
# If this throws StopIteration it will be caught way below
node = next(nodes)
target_devs_info.add((node['replication_ip'], node['device']))
attempts_left -= 1
# if we have already synced to this remote region,
# don't sync again on this replication pass
if node['region'] in synced_remote_regions:
continue
try:
with Timeout(self.http_timeout):
resp = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'', headers=headers).getresponse()
if resp.status == HTTP_INSUFFICIENT_STORAGE:
self.logger.error(
_('%(replication_ip)s/%(device)s '
'responded as unmounted'), node)
attempts_left += 1
failure_devs_info.add((node['replication_ip'],
node['device']))
continue
if resp.status != HTTP_OK:
self.logger.error(_("Invalid response %(resp)s "
"from %(ip)s"),
{'resp': resp.status,
'ip': node['replication_ip']})
failure_devs_info.add((node['replication_ip'],
node['device']))
continue
remote_hash = pickle.loads(resp.read())
del resp
suffixes = [suffix for suffix in local_hash if
local_hash[suffix] !=
remote_hash.get(suffix, -1)]
if not suffixes:
self.stats['hashmatch'] += 1
continue
hashed, recalc_hash = tpool_reraise(
self._diskfile_mgr._get_hashes,
job['path'], recalculate=suffixes,
reclaim_age=self.reclaim_age)
self.logger.update_stats('suffix.hashes', hashed)
local_hash = recalc_hash
suffixes = [suffix for suffix in local_hash if
local_hash[suffix] !=
remote_hash.get(suffix, -1)]
self.stats['rsync'] += 1
success, _junk = self.sync(node, job, suffixes)
with Timeout(self.http_timeout):
conn = http_connect(
node['replication_ip'], node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(suffixes),
headers=headers)
conn.getresponse().read()
if not success:
failure_devs_info.add((node['replication_ip'],
node['device']))
# add only remote region when replicate succeeded
if success and node['region'] != job['region']:
synced_remote_regions.add(node['region'])
self.suffix_sync += len(suffixes)
self.logger.update_stats('suffix.syncs', len(suffixes))
except (Exception, Timeout):
failure_devs_info.add((node['replication_ip'],
node['device']))
self.logger.exception(_("Error syncing with node: %s") %
node)
self.suffix_count += len(local_hash)
except (Exception, Timeout):
failure_devs_info.update(target_devs_info)
self.logger.exception(_("Error syncing partition"))
finally:
self.stats['success'] += len(target_devs_info - failure_devs_info)
self._add_failure_stats(failure_devs_info)
self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.update.timing', begin)
def stats_line(self):
"""
Logs various stats for the currently running replication pass.
"""
if self.replication_count:
elapsed = (time.time() - self.start) or 0.000001
rate = self.replication_count / elapsed
self.logger.info(
_("%(replicated)d/%(total)d (%(percentage).2f%%)"
" partitions replicated in %(time).2fs (%(rate).2f/sec, "
"%(remaining)s remaining)"),
{'replicated': self.replication_count, 'total': self.job_count,
'percentage': self.replication_count * 100.0 / self.job_count,
'time': time.time() - self.start, 'rate': rate,
'remaining': '%d%s' % compute_eta(self.start,
self.replication_count,
self.job_count)})
self.logger.info(_('%(success)s successes, %(failure)s failures')
% self.stats)
if self.suffix_count:
self.logger.info(
_("%(checked)d suffixes checked - "
"%(hashed).2f%% hashed, %(synced).2f%% synced"),
{'checked': self.suffix_count,
'hashed': (self.suffix_hash * 100.0) / self.suffix_count,
'synced': (self.suffix_sync * 100.0) / self.suffix_count})
self.partition_times.sort()
self.logger.info(
_("Partition times: max %(max).4fs, "
"min %(min).4fs, med %(med).4fs"),
{'max': self.partition_times[-1],
'min': self.partition_times[0],
'med': self.partition_times[
len(self.partition_times) // 2]})
else:
self.logger.info(
_("Nothing replicated for %s seconds."),
(time.time() - self.start))
def kill_coros(self):
"""Utility function that kills all coroutines currently running."""
for coro in list(self.run_pool.coroutines_running):
try:
coro.kill(GreenletExit)
except GreenletExit:
pass
def heartbeat(self):
"""
Loop that runs in the background during replication. It periodically
logs progress.
"""
while True:
eventlet.sleep(self.stats_interval)
self.stats_line()
def detect_lockups(self):
"""
In testing, the pool.waitall() call very occasionally failed to return.
        This is an attempt to make sure the replicator eventually finishes
        its replication pass.
"""
while True:
eventlet.sleep(self.lockup_timeout)
if self.replication_count == self.last_replication_count:
self.logger.error(_("Lockup detected.. killing live coros."))
self.kill_coros()
self.last_replication_count = self.replication_count
def build_replication_jobs(self, policy, ips, override_devices=None,
override_partitions=None):
"""
Helper function for collect_jobs to build jobs for replication
using replication style storage policy
"""
jobs = []
self.all_devs_info.update(
[(dev['replication_ip'], dev['device'])
for dev in policy.object_ring.devs if dev])
data_dir = get_data_dir(policy)
found_local = False
for local_dev in [dev for dev in policy.object_ring.devs
if (dev
and is_local_device(ips,
self.port,
dev['replication_ip'],
dev['replication_port'])
and (override_devices is None
or dev['device'] in override_devices))]:
found_local = True
dev_path = join(self.devices_dir, local_dev['device'])
obj_path = join(dev_path, data_dir)
tmp_path = join(dev_path, get_tmp_dir(policy))
if self.mount_check and not ismount(dev_path):
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
self.logger.warning(
_('%s is not mounted'), local_dev['device'])
continue
unlink_older_than(tmp_path, time.time() - self.reclaim_age)
if not os.path.exists(obj_path):
try:
mkdirs(obj_path)
except Exception:
self.logger.exception('ERROR creating %s' % obj_path)
continue
for partition in os.listdir(obj_path):
if (override_partitions is not None
and partition not in override_partitions):
continue
part_nodes = None
try:
job_path = join(obj_path, partition)
part_nodes = policy.object_ring.get_part_nodes(
int(partition))
nodes = [node for node in part_nodes
if node['id'] != local_dev['id']]
jobs.append(
dict(path=job_path,
device=local_dev['device'],
obj_path=obj_path,
nodes=nodes,
delete=len(nodes) > len(part_nodes) - 1,
policy=policy,
partition=partition,
region=local_dev['region']))
except ValueError:
if part_nodes:
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in nodes])
else:
self._add_failure_stats(
[(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in policy.object_ring.devs
if failure_dev])
continue
if not found_local:
self.logger.error("Can't find itself in policy with index %d with"
" ips %s and with port %s in ring file, not"
" replicating",
int(policy), ", ".join(ips), self.port)
return jobs
def collect_jobs(self, override_devices=None, override_partitions=None,
override_policies=None):
"""
Returns a sorted list of jobs (dictionaries) that specify the
partitions, nodes, etc to be rsynced.
:param override_devices: if set, only jobs on these devices
will be returned
:param override_partitions: if set, only jobs on these partitions
will be returned
:param override_policies: if set, only jobs in these storage
policies will be returned
"""
jobs = []
ips = whataremyips(self.bind_ip)
for policy in POLICIES:
if policy.policy_type == REPL_POLICY:
if (override_policies is not None and
str(policy.idx) not in override_policies):
continue
# ensure rings are loaded for policy
self.load_object_ring(policy)
jobs += self.build_replication_jobs(
policy, ips, override_devices=override_devices,
override_partitions=override_partitions)
random.shuffle(jobs)
if self.handoffs_first:
# Move the handoff parts to the front of the list
jobs.sort(key=lambda job: not job['delete'])
self.job_count = len(jobs)
return jobs
def replicate(self, override_devices=None, override_partitions=None,
override_policies=None):
"""Run a replication pass"""
self.start = time.time()
self.suffix_count = 0
self.suffix_sync = 0
self.suffix_hash = 0
self.replication_count = 0
self.last_replication_count = -1
self.partition_times = []
self.my_replication_ips = self._get_my_replication_ips()
self.all_devs_info = set()
self.handoffs_remaining = 0
stats = eventlet.spawn(self.heartbeat)
lockup_detector = eventlet.spawn(self.detect_lockups)
eventlet.sleep() # Give spawns a cycle
current_nodes = None
try:
self.run_pool = GreenPool(size=self.concurrency)
jobs = self.collect_jobs(override_devices=override_devices,
override_partitions=override_partitions,
override_policies=override_policies)
for job in jobs:
current_nodes = job['nodes']
if override_devices and job['device'] not in override_devices:
continue
if override_partitions and \
job['partition'] not in override_partitions:
continue
dev_path = join(self.devices_dir, job['device'])
if self.mount_check and not ismount(dev_path):
self._add_failure_stats([(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in job['nodes']])
self.logger.warning(_('%s is not mounted'), job['device'])
continue
if self.handoffs_first and not job['delete']:
# in handoffs first mode, we won't process primary
# partitions until rebalance was successful!
if self.handoffs_remaining:
self.logger.warning(_(
"Handoffs first mode still has handoffs "
"remaining. Aborting current "
"replication pass."))
break
if not self.check_ring(job['policy'].object_ring):
self.logger.info(_("Ring change detected. Aborting "
"current replication pass."))
return
try:
if isfile(job['path']):
# Clean up any (probably zero-byte) files where a
# partition should be.
self.logger.warning(
'Removing partition directory '
'which was a file: %s', job['path'])
os.remove(job['path'])
continue
except OSError:
continue
if job['delete']:
self.run_pool.spawn(self.update_deleted, job)
else:
self.run_pool.spawn(self.update, job)
current_nodes = None
with Timeout(self.lockup_timeout):
self.run_pool.waitall()
except (Exception, Timeout):
if current_nodes:
self._add_failure_stats([(failure_dev['replication_ip'],
failure_dev['device'])
for failure_dev in current_nodes])
else:
self._add_failure_stats(self.all_devs_info)
self.logger.exception(_("Exception in top-level replication loop"))
self.kill_coros()
finally:
stats.kill()
lockup_detector.kill()
self.stats_line()
self.stats['attempted'] = self.replication_count
def run_once(self, *args, **kwargs):
self._zero_stats()
self.logger.info(_("Running object replicator in script mode."))
override_devices = list_from_csv(kwargs.get('devices'))
override_partitions = list_from_csv(kwargs.get('partitions'))
override_policies = list_from_csv(kwargs.get('policies'))
if not override_devices:
override_devices = None
if not override_partitions:
override_partitions = None
if not override_policies:
override_policies = None
self.replicate(
override_devices=override_devices,
override_partitions=override_partitions,
override_policies=override_policies)
total = (time.time() - self.stats['start']) / 60
self.logger.info(
_("Object replication complete (once). (%.02f minutes)"), total)
if not (override_partitions or override_devices):
replication_last = time.time()
dump_recon_cache({'replication_stats': self.stats,
'replication_time': total,
'replication_last': replication_last,
'object_replication_time': total,
'object_replication_last': replication_last},
self.rcache, self.logger)
def run_forever(self, *args, **kwargs):
self.logger.info(_("Starting object replicator in daemon mode."))
# Run the replicator continually
while True:
self._zero_stats()
self.logger.info(_("Starting object replication pass."))
# Run the replicator
self.replicate()
total = (time.time() - self.stats['start']) / 60
self.logger.info(
_("Object replication complete. (%.02f minutes)"), total)
replication_last = time.time()
dump_recon_cache({'replication_stats': self.stats,
'replication_time': total,
'replication_last': replication_last,
'object_replication_time': total,
'object_replication_last': replication_last},
self.rcache, self.logger)
self.logger.debug('Replication sleeping for %s seconds.',
self.interval)
sleep(self.interval)
|
apache-2.0
|
shhui/nova
|
nova/api/openstack/compute/plugins/v3/server_password.py
|
13
|
2588
|
# Copyright (c) 2012 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The server password extension."""
from nova.api.metadata import password
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import db
ALIAS = 'os-server-password'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class ServerPasswordController(object):
"""The Server Password API controller for the OpenStack API."""
def __init__(self):
self.compute_api = compute.API()
@extensions.expected_errors(404)
def index(self, req, server_id):
context = req.environ['nova.context']
authorize(context)
instance = common.get_instance(self.compute_api, context, server_id)
passw = password.extract_password(instance)
return {'password': passw or ''}
@extensions.expected_errors(404)
@wsgi.response(204)
def clear(self, req, server_id):
"""Removes the encrypted server password from the metadata server
Note that this does not actually change the instance server
password.
"""
context = req.environ['nova.context']
authorize(context)
instance = common.get_instance(self.compute_api, context, server_id)
meta = password.convert_password(context, None)
db.instance_system_metadata_update(context, instance['uuid'],
meta, False)
class ServerPassword(extensions.V3APIExtensionBase):
"""Server password support."""
name = "ServerPassword"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension(
ALIAS, ServerPasswordController(),
collection_actions={'clear': 'DELETE'},
parent=dict(member_name='server', collection_name='servers'))]
return resources
def get_controller_extensions(self):
return []
|
apache-2.0
|
vasyarv/edx-platform
|
lms/djangoapps/certificates/tests/test_api.py
|
42
|
13565
|
"""Tests for the certificates Python API. """
from contextlib import contextmanager
import ddt
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.conf import settings
from mock import patch
from nose.plugins.attrib import attr
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from course_modes.tests.factories import CourseModeFactory
from config_models.models import cache
from util.testing import EventTestMixin
from certificates import api as certs_api
from certificates.models import (
CertificateStatuses,
CertificateGenerationConfiguration,
ExampleCertificate,
GeneratedCertificate
)
from certificates.queue import XQueueCertInterface, XQueueAddToQueueError
from certificates.tests.factories import GeneratedCertificateFactory
FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True
@attr('shard_1')
class CertificateDownloadableStatusTests(ModuleStoreTestCase):
"""Tests for the `certificate_downloadable_status` helper function. """
def setUp(self):
super(CertificateDownloadableStatusTests, self).setUp()
self.student = UserFactory()
self.student_no_cert = UserFactory()
self.course = CourseFactory.create(
org='edx',
number='verified',
display_name='Verified Course'
)
self.request_factory = RequestFactory()
def test_user_cert_status_with_generating(self):
GeneratedCertificateFactory.create(
user=self.student,
course_id=self.course.id,
status=CertificateStatuses.generating,
mode='verified'
)
self.assertEqual(
certs_api.certificate_downloadable_status(self.student, self.course.id),
{
'is_downloadable': False,
'is_generating': True,
'download_url': None
}
)
def test_user_cert_status_with_error(self):
GeneratedCertificateFactory.create(
user=self.student,
course_id=self.course.id,
status=CertificateStatuses.error,
mode='verified'
)
self.assertEqual(
certs_api.certificate_downloadable_status(self.student, self.course.id),
{
'is_downloadable': False,
'is_generating': True,
'download_url': None
}
)
def test_user_with_out_cert(self):
self.assertEqual(
certs_api.certificate_downloadable_status(self.student_no_cert, self.course.id),
{
'is_downloadable': False,
'is_generating': False,
'download_url': None
}
)
def test_user_with_downloadable_cert(self):
GeneratedCertificateFactory.create(
user=self.student,
course_id=self.course.id,
status=CertificateStatuses.downloadable,
mode='verified',
download_url='www.google.com'
)
self.assertEqual(
certs_api.certificate_downloadable_status(self.student, self.course.id),
{
'is_downloadable': True,
'is_generating': False,
'download_url': 'www.google.com'
}
)
@attr('shard_1')
@override_settings(CERT_QUEUE='certificates')
class GenerateUserCertificatesTest(EventTestMixin, ModuleStoreTestCase):
"""Tests for generating certificates for students. """
ERROR_REASON = "Kaboom!"
def setUp(self):
super(GenerateUserCertificatesTest, self).setUp('certificates.api.tracker')
self.student = UserFactory.create(
email='[email protected]',
username='joeuser',
password='foo'
)
self.student_no_cert = UserFactory()
self.course = CourseFactory.create(
org='edx',
number='verified',
display_name='Verified Course',
grade_cutoffs={'cutoff': 0.75, 'Pass': 0.5}
)
self.enrollment = CourseEnrollment.enroll(self.student, self.course.id, mode='honor')
self.request_factory = RequestFactory()
def test_new_cert_requests_into_xqueue_returns_generating(self):
with self._mock_passing_grade():
with self._mock_queue():
certs_api.generate_user_certificates(self.student, self.course.id)
# Verify that the certificate has status 'generating'
cert = GeneratedCertificate.objects.get(user=self.student, course_id=self.course.id)
self.assertEqual(cert.status, CertificateStatuses.generating)
self.assert_event_emitted(
'edx.certificate.created',
user_id=self.student.id,
course_id=unicode(self.course.id),
certificate_url=certs_api.get_certificate_url(self.student.id, self.course.id),
certificate_id=cert.verify_uuid,
enrollment_mode=cert.mode,
generation_mode='batch'
)
def test_xqueue_submit_task_error(self):
with self._mock_passing_grade():
with self._mock_queue(is_successful=False):
certs_api.generate_user_certificates(self.student, self.course.id)
# Verify that the certificate has been marked with status error
cert = GeneratedCertificate.objects.get(user=self.student, course_id=self.course.id)
self.assertEqual(cert.status, 'error')
self.assertIn(self.ERROR_REASON, cert.error_reason)
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': True})
def test_new_cert_requests_returns_generating_for_html_certificate(self):
"""
Test no message sent to Xqueue if HTML certificate view is enabled
"""
self._setup_course_certificate()
with self._mock_passing_grade():
certs_api.generate_user_certificates(self.student, self.course.id)
# Verify that the certificate has status 'downloadable'
cert = GeneratedCertificate.objects.get(user=self.student, course_id=self.course.id)
self.assertEqual(cert.status, CertificateStatuses.downloadable)
@patch.dict(settings.FEATURES, {'CERTIFICATES_HTML_VIEW': False})
def test_cert_url_empty_with_invalid_certificate(self):
"""
Test certificate url is empty if html view is not enabled and certificate is not yet generated
"""
url = certs_api.get_certificate_url(self.student.id, self.course.id)
self.assertEqual(url, "")
@contextmanager
def _mock_passing_grade(self):
"""Mock the grading function to always return a passing grade. """
symbol = 'courseware.grades.grade'
with patch(symbol) as mock_grade:
mock_grade.return_value = {'grade': 'Pass', 'percent': 0.75}
yield
@contextmanager
def _mock_queue(self, is_successful=True):
"""Mock the "send to XQueue" method to return either success or an error. """
symbol = 'capa.xqueue_interface.XQueueInterface.send_to_queue'
with patch(symbol) as mock_send_to_queue:
if is_successful:
mock_send_to_queue.return_value = (0, "Successfully queued")
else:
mock_send_to_queue.side_effect = XQueueAddToQueueError(1, self.ERROR_REASON)
yield mock_send_to_queue
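    # Illustrative only: the two context managers above are meant to be nested,
    # as the tests in this class do, e.g.
    #   with self._mock_passing_grade(), self._mock_queue(is_successful=False):
    #       certs_api.generate_user_certificates(self.student, self.course.id)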
def _setup_course_certificate(self):
"""
Creates certificate configuration for course
"""
certificates = [
{
'id': 1,
'name': 'Test Certificate Name',
'description': 'Test Certificate Description',
'course_title': 'tes_course_title',
'signatories': [],
'version': 1,
'is_active': True
}
]
self.course.certificates = {'certificates': certificates}
self.course.cert_html_view_enabled = True
self.course.save()
self.store.update_item(self.course, self.user.id)
@attr('shard_1')
@ddt.ddt
class CertificateGenerationEnabledTest(EventTestMixin, TestCase):
"""Test enabling/disabling self-generated certificates for a course. """
COURSE_KEY = CourseLocator(org='test', course='test', run='test')
def setUp(self):
super(CertificateGenerationEnabledTest, self).setUp('certificates.api.tracker')
# Since model-based configuration is cached, we need
# to clear the cache before each test.
cache.clear()
@ddt.data(
(None, None, False),
(False, None, False),
(False, True, False),
(True, None, False),
(True, False, False),
(True, True, True)
)
@ddt.unpack
def test_cert_generation_enabled(self, is_feature_enabled, is_course_enabled, expect_enabled):
if is_feature_enabled is not None:
CertificateGenerationConfiguration.objects.create(enabled=is_feature_enabled)
if is_course_enabled is not None:
certs_api.set_cert_generation_enabled(self.COURSE_KEY, is_course_enabled)
cert_event_type = 'enabled' if is_course_enabled else 'disabled'
event_name = '.'.join(['edx', 'certificate', 'generation', cert_event_type])
self.assert_event_emitted(
event_name,
course_id=unicode(self.COURSE_KEY),
)
self._assert_enabled_for_course(self.COURSE_KEY, expect_enabled)
def test_latest_setting_used(self):
# Enable the feature
CertificateGenerationConfiguration.objects.create(enabled=True)
# Enable for the course
certs_api.set_cert_generation_enabled(self.COURSE_KEY, True)
self._assert_enabled_for_course(self.COURSE_KEY, True)
# Disable for the course
certs_api.set_cert_generation_enabled(self.COURSE_KEY, False)
self._assert_enabled_for_course(self.COURSE_KEY, False)
def test_setting_is_course_specific(self):
# Enable the feature
CertificateGenerationConfiguration.objects.create(enabled=True)
# Enable for one course
certs_api.set_cert_generation_enabled(self.COURSE_KEY, True)
self._assert_enabled_for_course(self.COURSE_KEY, True)
# Should be disabled for another course
other_course = CourseLocator(org='other', course='other', run='other')
self._assert_enabled_for_course(other_course, False)
def _assert_enabled_for_course(self, course_key, expect_enabled):
"""Check that self-generated certificates are enabled or disabled for the course. """
actual_enabled = certs_api.cert_generation_enabled(course_key)
self.assertEqual(expect_enabled, actual_enabled)
@attr('shard_1')
class GenerateExampleCertificatesTest(TestCase):
"""Test generation of example certificates. """
COURSE_KEY = CourseLocator(org='test', course='test', run='test')
def setUp(self):
super(GenerateExampleCertificatesTest, self).setUp()
def test_generate_example_certs(self):
# Generate certificates for the course
with self._mock_xqueue() as mock_queue:
certs_api.generate_example_certificates(self.COURSE_KEY)
# Verify that the appropriate certs were added to the queue
self._assert_certs_in_queue(mock_queue, 1)
# Verify that the certificate status is "started"
self._assert_cert_status({
'description': 'honor',
'status': 'started'
})
def test_generate_example_certs_with_verified_mode(self):
# Create verified and honor modes for the course
CourseModeFactory(course_id=self.COURSE_KEY, mode_slug='honor')
CourseModeFactory(course_id=self.COURSE_KEY, mode_slug='verified')
# Generate certificates for the course
with self._mock_xqueue() as mock_queue:
certs_api.generate_example_certificates(self.COURSE_KEY)
# Verify that the appropriate certs were added to the queue
self._assert_certs_in_queue(mock_queue, 2)
# Verify that the certificate status is "started"
self._assert_cert_status(
{
'description': 'verified',
'status': 'started'
},
{
'description': 'honor',
'status': 'started'
}
)
@contextmanager
def _mock_xqueue(self):
"""Mock the XQueue method for adding a task to the queue. """
with patch.object(XQueueCertInterface, 'add_example_cert') as mock_queue:
yield mock_queue
def _assert_certs_in_queue(self, mock_queue, expected_num):
"""Check that the certificate generation task was added to the queue. """
certs_in_queue = [call_args[0] for (call_args, __) in mock_queue.call_args_list]
self.assertEqual(len(certs_in_queue), expected_num)
for cert in certs_in_queue:
self.assertTrue(isinstance(cert, ExampleCertificate))
def _assert_cert_status(self, *expected_statuses):
"""Check the example certificate status. """
actual_status = certs_api.example_certificates_status(self.COURSE_KEY)
self.assertEqual(list(expected_statuses), actual_status)
|
agpl-3.0
|
zhiwliu/openshift-ansible
|
roles/lib_openshift/library/oc_scale.py
|
6
|
64391
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/scale -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_scale
short_description: Manage openshift services through the scale parameters
description:
- Manage openshift services through scaling them.
options:
state:
description:
- State represents whether to scale or list the current replicas
required: true
default: present
choices: ["present", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: default
aliases: []
kind:
description:
- The kind of object to scale.
required: false
default: None
choices:
- rc
- dc
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: scale down a rc to 0
oc_scale:
name: my-replication-controller
kind: rc
namespace: openshift-infra
replicas: 0
- name: scale up a deploymentconfig to 2
oc_scale:
name: php
kind: dc
namespace: my-php-app
replicas: 2
'''
# -*- -*- -*- End included fragment: doc/scale -*- -*- -*-
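# A hypothetical task (not part of the EXAMPLES block above) showing the 'list'
# state, which only reports the current replica count; the resource name and
# namespace below are illustrative:
#
#   - name: query current replicas of a deploymentconfig
#     oc_scale:
#       name: router
#       kind: dc
#       namespace: default
#       state: list
#     register: router_replicas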
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
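    # Illustrative result of the findall() above, assuming the default '.'
    # separator: parse_key('a.b[0].c') returns
    # [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')] -- the first element of each
    # tuple is a list index, the second a dict key.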
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add or set an item in a dictionary using key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            item = 'new'
            results in d == {'a': {'b': 'new'}}
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
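    # Illustrative conversions performed by parse_value (not exhaustive):
    #   Yedit.parse_value('True')        -> True   (yaml-loaded into a bool)
    #   Yedit.parse_value('123')         -> 123    (yaml-loaded into an int)
    #   Yedit.parse_value('123', 'str')  -> '123'  (left as a string)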
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                               'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
            # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
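    # Illustrative example (the version string is made up): given
    #   {'oc': 'v3.6.173.0.5-1'}
    # this yields
    #   {'oc_numeric': '3.6.173.0.5', 'oc_short': '3.6'}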
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
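    # Illustrative stringify() output for a hypothetical options dict:
    #   {'replicas': {'value': 2, 'include': True}}  ->  ['--replicas=2']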
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
''' Class to model an openshift DeploymentConfig'''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
name: default_dc
namespace: default
spec:
replicas: 0
selector:
default_dc: default_dc
strategy:
resources: {}
rollingParams:
intervalSeconds: 1
maxSurge: 0
maxUnavailable: 25%
timeoutSeconds: 600
updatePercent: -25
updatePeriodSeconds: 1
type: Rolling
template:
metadata:
spec:
containers:
- env:
- name: default
value: default
image: default
imagePullPolicy: IfNotPresent
name: default_dc
ports:
- containerPort: 8000
hostPort: 8000
protocol: TCP
name: default_port
resources: {}
terminationMessagePath: /dev/termination-log
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
type: compute
restartPolicy: Always
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
triggers:
- type: ConfigChange
'''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
container_path = "spec.template.spec.containers"
volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
def __init__(self, content=None):
''' Constructor for deploymentconfig '''
if not content:
content = DeploymentConfig.default_deployment_config
super(DeploymentConfig, self).__init__(content=content)
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
env = self.get_env_vars()
if env:
env.append({'name': key, 'value': value})
rval = True
else:
result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
rval = result[0]
return rval
def exists_env_value(self, key, value):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key and result['value'] == value:
return True
return False
def exists_env_key(self, key):
        ''' return whether a key exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key:
return True
return False
def get_env_var(self, key):
        '''return the environment variable matching key, if present '''
results = self.get(DeploymentConfig.env_path) or []
if not results:
return None
for env_var in results:
if env_var['name'] == key:
return env_var
return None
def get_env_vars(self):
        '''return the list of environment variables '''
return self.get(DeploymentConfig.env_path) or []
def delete_env_var(self, keys):
'''delete a list of keys '''
if not isinstance(keys, list):
keys = [keys]
env_vars_array = self.get_env_vars()
modified = False
        for key in keys:
            idx = None
            for env_idx, env_var in enumerate(env_vars_array):
                if env_var['name'] == key:
                    idx = env_idx
                    break
            # compare against None so a match at index 0 is also deleted
            if idx is not None:
                modified = True
                del env_vars_array[idx]
if modified:
return True
return False
def update_env_var(self, key, value):
'''place an env in the env var list'''
env_vars_array = self.get_env_vars()
idx = None
for env_idx, env_var in enumerate(env_vars_array):
if env_var['name'] == key:
idx = env_idx
break
        # compare against None so a variable at index 0 is updated in place
        if idx is not None:
env_vars_array[idx]['value'] = value
else:
self.add_env_value(key, value)
return True
def exists_volume_mount(self, volume_mount):
''' return whether a volume mount exists '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts:
return False
volume_mount_found = False
for exist_volume_mount in exist_volume_mounts:
if exist_volume_mount['name'] == volume_mount['name']:
volume_mount_found = True
break
return volume_mount_found
def exists_volume(self, volume):
''' return whether a volume exists '''
exist_volumes = self.get_volumes()
volume_found = False
for exist_volume in exist_volumes:
if exist_volume['name'] == volume['name']:
volume_found = True
break
return volume_found
def find_volume_by_name(self, volume, mounts=False):
''' return the index of a volume '''
volumes = []
if mounts:
volumes = self.get_volume_mounts()
else:
volumes = self.get_volumes()
for exist_volume in volumes:
if exist_volume['name'] == volume['name']:
return exist_volume
return None
def get_replicas(self):
''' return replicas setting '''
return self.get(DeploymentConfig.replicas_path)
def get_volume_mounts(self):
'''return volume mount information '''
return self.get_volumes(mounts=True)
def get_volumes(self, mounts=False):
'''return volume mount information '''
if mounts:
return self.get(DeploymentConfig.volume_mounts_path) or []
return self.get(DeploymentConfig.volumes_path) or []
def delete_volume_by_name(self, volume):
'''delete a volume '''
modified = False
exist_volume_mounts = self.get_volume_mounts()
exist_volumes = self.get_volumes()
del_idx = None
for idx, exist_volume in enumerate(exist_volumes):
if 'name' in exist_volume and exist_volume['name'] == volume['name']:
del_idx = idx
break
if del_idx != None:
del exist_volumes[del_idx]
modified = True
del_idx = None
for idx, exist_volume_mount in enumerate(exist_volume_mounts):
if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
del_idx = idx
break
if del_idx != None:
            del exist_volume_mounts[del_idx]
modified = True
return modified
def add_volume_mount(self, volume_mount):
''' add a volume or volume mount to the proper location '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts and volume_mount:
self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
else:
exist_volume_mounts.append(volume_mount)
def add_volume(self, volume):
''' add a volume or volume mount to the proper location '''
exist_volumes = self.get_volumes()
if not volume:
return
if not exist_volumes:
self.put(DeploymentConfig.volumes_path, [volume])
else:
exist_volumes.append(volume)
def update_replicas(self, replicas):
''' update replicas value '''
self.put(DeploymentConfig.replicas_path, replicas)
def update_volume(self, volume):
        '''update a volume, adding it if it does not already exist'''
exist_volumes = self.get_volumes()
if not volume:
return False
# update the volume
update_idx = None
for idx, exist_vol in enumerate(exist_volumes):
if exist_vol['name'] == volume['name']:
update_idx = idx
break
if update_idx != None:
exist_volumes[update_idx] = volume
else:
self.add_volume(volume)
return True
def update_volume_mount(self, volume_mount):
        '''update a volume mount, adding it if it does not already exist'''
modified = False
exist_volume_mounts = self.get_volume_mounts()
if not volume_mount:
return False
# update the volume mount
for exist_vol_mount in exist_volume_mounts:
if exist_vol_mount['name'] == volume_mount['name']:
if 'mountPath' in exist_vol_mount and \
str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
exist_vol_mount['mountPath'] = volume_mount['mountPath']
modified = True
break
if not modified:
self.add_volume_mount(volume_mount)
modified = True
return modified
def needs_update_volume(self, volume, volume_mount):
''' verify a volume update is needed '''
exist_volume = self.find_volume_by_name(volume)
exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
results = []
results.append(exist_volume['name'] == volume['name'])
if 'secret' in volume:
results.append('secret' in exist_volume)
results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
results.append(exist_volume_mount['name'] == volume_mount['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'emptyDir' in volume:
results.append(exist_volume_mount['name'] == volume['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'persistentVolumeClaim' in volume:
pvc = 'persistentVolumeClaim'
results.append(pvc in exist_volume)
if results[-1]:
results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
if 'claimSize' in volume[pvc]:
results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
elif 'hostpath' in volume:
results.append('hostPath' in exist_volume)
results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
return not all(results)
def needs_update_replicas(self, replicas):
''' verify whether a replica update is needed '''
current_reps = self.get(DeploymentConfig.replicas_path)
        return current_reps != replicas
# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/replicationcontroller.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class ReplicationController(DeploymentConfig):
''' Class to model a replicationcontroller openshift object.
Currently we are modeled after a deployment config since they
are very similar. In the future, when the need arises we
will add functionality to this class.
'''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
container_path = "spec.template.spec.containers"
volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
def __init__(self, content):
''' Constructor for ReplicationController '''
super(ReplicationController, self).__init__(content=content)
# -*- -*- -*- End included fragment: lib/replicationcontroller.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_scale.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCScale(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
resource_name,
namespace,
replicas,
kind,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCScale '''
super(OCScale, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.replicas = replicas
self.name = resource_name
self._resource = None
@property
def resource(self):
''' property function for resource var '''
if not self._resource:
self.get()
return self._resource
@resource.setter
def resource(self, data):
''' setter function for resource var '''
self._resource = data
def get(self):
'''return replicas information '''
vol = self._get(self.kind, self.name)
if vol['returncode'] == 0:
if self.kind == 'dc':
# The resource returned from a query could be an rc or dc.
# pylint: disable=redefined-variable-type
self.resource = DeploymentConfig(content=vol['results'][0])
vol['results'] = [self.resource.get_replicas()]
if self.kind == 'rc':
# The resource returned from a query could be an rc or dc.
# pylint: disable=redefined-variable-type
self.resource = ReplicationController(content=vol['results'][0])
vol['results'] = [self.resource.get_replicas()]
return vol
def put(self):
'''update replicas into dc '''
self.resource.update_replicas(self.replicas)
return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
def needs_update(self):
''' verify whether an update is needed '''
return self.resource.needs_update_replicas(self.replicas)
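    # Hypothetical direct (non-Ansible) use of this class, only to illustrate
    # the get/needs_update/put flow; resource name and namespace are made up:
    #   scaler = OCScale('router', 'default', 2, 'dc')
    #   if scaler.get()['returncode'] == 0 and scaler.needs_update():
    #       scaler.put()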
# pylint: disable=too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
'''perform the idempotent ansible logic'''
oc_scale = OCScale(params['name'],
params['namespace'],
params['replicas'],
params['kind'],
params['kubeconfig'],
verbose=params['debug'])
state = params['state']
api_rval = oc_scale.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
#####
# Get
#####
if state == 'list':
return {'changed': False, 'result': api_rval['results'], 'state': 'list'} # noqa: E501
elif state == 'present':
########
# Update
########
if oc_scale.needs_update():
if check_mode:
return {'changed': True, 'result': 'CHECK_MODE: Would have updated.'} # noqa: E501
api_rval = oc_scale.put()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_scale.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'result': api_rval['results'], 'state': 'present'} # noqa: E501
return {'changed': False, 'result': api_rval['results'], 'state': 'present'} # noqa: E501
return {'failed': True, 'msg': 'Unknown state passed. [{}]'.format(state)}
# -*- -*- -*- End included fragment: class/oc_scale.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_scale.py -*- -*- -*-
def main():
'''
ansible oc module for scaling
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str', choices=['present', 'list']),
debug=dict(default=False, type='bool'),
kind=dict(default='dc', choices=['dc', 'rc'], type='str'),
namespace=dict(default='default', type='str'),
replicas=dict(default=None, type='int'),
name=dict(default=None, type='str'),
),
supports_check_mode=True,
)
rval = OCScale.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_scale.py -*- -*- -*-
|
apache-2.0
|
SteveDiamond/cvxpy
|
cvxpy/tests/test_interfaces.py
|
2
|
11553
|
"""
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy.interface as intf
import numpy as np
import scipy.sparse as sp
from cvxpy.tests.base_test import BaseTest
class TestInterfaces(BaseTest):
""" Unit tests for matrix interfaces. """
def setUp(self):
pass
def sign_for_intf(self, interface):
"""Test sign for a given interface.
"""
mat = interface.const_to_matrix([[1, 2, 3, 4], [3, 4, 5, 6]])
self.assertEqual(intf.sign(mat), (True, False)) # Positive.
self.assertEqual(intf.sign(-mat), (False, True)) # Negative.
self.assertEqual(intf.sign(0*mat), (True, True)) # Zero.
mat = interface.const_to_matrix([[-1, 2, 3, 4], [3, 4, 5, 6]])
self.assertEqual(intf.sign(mat), (False, False)) # Unknown.
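    # As the assertions above show, intf.sign returns a two-flag tuple: an
    # all-zero matrix sets both flags and a mixed-sign matrix sets neither.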
# # Test cvxopt dense interface.
# def test_cvxopt_dense(self):
# interface = intf.get_matrix_interface(cvxopt.matrix)
# # const_to_matrix
# mat = interface.const_to_matrix([1, 2, 3])
# self.assertEqual(interface.shape(mat), (3, 1))
# sp_mat = sp.coo_matrix(([1, 2], ([3, 4], [2, 1])), (5, 5))
# mat = interface.const_to_matrix(sp_mat)
# self.assertEqual(interface.shape(mat), (5, 5))
# # identity
# mat = interface.identity(4)
# cmp_mat = interface.const_to_matrix(np.eye(4))
# self.assertEqual(type(mat), type(cmp_mat))
# self.assertEqual(interface.shape(mat), interface.shape(cmp_mat))
# assert not mat - cmp_mat
# # scalar_matrix
# mat = interface.scalar_matrix(2, 4, 3)
# self.assertEqual(interface.shape(mat), (4, 3))
# self.assertEqual(interface.index(mat, (1, 2)), 2)
# # reshape
# mat = interface.const_to_matrix([[1, 2, 3], [3, 4, 5]])
# mat = interface.reshape(mat, (6, 1))
# self.assertEqual(interface.index(mat, (4, 0)), 4)
# mat = interface.const_to_matrix(1, convert_scalars=True)
# self.assertEqual(type(interface.reshape(mat, (1, 1))), type(mat))
# # index
# mat = interface.const_to_matrix([[1, 2, 3, 4], [3, 4, 5, 6]])
# self.assertEqual(interface.index(mat, (0, 1)), 3)
# mat = interface.index(mat, (slice(1, 4, 2), slice(0, 2, None)))
# self.assertEqual(list(mat), [2, 4, 4, 6])
# # Sign
# self.sign_for_intf(interface)
# # Test cvxopt sparse interface.
# def test_cvxopt_sparse(self):
# interface = intf.get_matrix_interface(cvxopt.spmatrix)
# # const_to_matrix
# mat = interface.const_to_matrix([1, 2, 3])
# self.assertEqual(interface.shape(mat), (3, 1))
# # identity
# mat = interface.identity(4)
# cmp_mat = interface.const_to_matrix(np.eye(4))
# self.assertEqual(interface.shape(mat), interface.shape(cmp_mat))
# assert not mat - cmp_mat
# assert intf.is_sparse(mat)
# # scalar_matrix
# mat = interface.scalar_matrix(2, 4, 3)
# self.assertEqual(interface.shape(mat), (4, 3))
# self.assertEqual(interface.index(mat, (1, 2)), 2)
# # reshape
# mat = interface.const_to_matrix([[1, 2, 3], [3, 4, 5]])
# mat = interface.reshape(mat, (6, 1))
# self.assertEqual(interface.index(mat, (4, 0)), 4)
# mat = interface.const_to_matrix(1, convert_scalars=True)
# self.assertEqual(type(interface.reshape(mat, (1, 1))), type(mat))
# # Test scalars.
# scalar = interface.scalar_matrix(1, 1, 1)
# self.assertEqual(type(scalar), cvxopt.spmatrix)
# scalar = interface.scalar_matrix(1, 1, 3)
# self.assertEqual(scalar.shape, (1, 3))
# # index
# mat = interface.const_to_matrix([[1, 2, 3, 4], [3, 4, 5, 6]])
# self.assertEqual(interface.index(mat, (0, 1)), 3)
# mat = interface.index(mat, (slice(1, 4, 2), slice(0, 2, None)))
# self.assertEqual(list(mat), [2, 4, 4, 6])
# # Sign
# self.sign_for_intf(interface)
# Test numpy ndarray interface.
def test_ndarray(self):
interface = intf.get_matrix_interface(np.ndarray)
# const_to_matrix
mat = interface.const_to_matrix([1, 2, 3])
self.assertEqual(interface.shape(mat), (3,))
mat = interface.const_to_matrix([1, 2])
self.assertEqual(interface.shape(mat), (2,))
# # CVXOPT sparse conversion
# tmp = intf.get_matrix_interface(cvxopt.spmatrix).const_to_matrix([1, 2, 3])
# mat = interface.const_to_matrix(tmp)
# assert (mat == interface.const_to_matrix([1, 2, 3])).all()
# # identity
# mat = interface.identity(4)
# cvxopt_dense = intf.get_matrix_interface(cvxopt.matrix)
# cmp_mat = interface.const_to_matrix(cvxopt_dense.identity(4))
# self.assertEqual(interface.shape(mat), interface.shape(cmp_mat))
# assert (mat == cmp_mat).all()
# scalar_matrix
mat = interface.scalar_matrix(2, (4, 3))
self.assertEqual(interface.shape(mat), (4, 3))
self.assertEqual(interface.index(mat, (1, 2)), 2)
# reshape
mat = interface.const_to_matrix([[1, 2, 3], [3, 4, 5]])
mat = interface.reshape(mat, (6, 1))
self.assertEqual(interface.index(mat, (4, 0)), 4)
mat = interface.const_to_matrix(1, convert_scalars=True)
self.assertEqual(type(interface.reshape(mat, (1, 1))), type(mat))
# index
mat = interface.const_to_matrix([[1, 2, 3, 4], [3, 4, 5, 6]])
self.assertEqual(interface.index(mat, (0, 1)), 3)
mat = interface.index(mat, (slice(1, 4, 2), slice(0, 2, None)))
self.assertEqual(list(mat.flatten('C')), [2, 4, 4, 6])
# Scalars and matrices.
scalar = interface.const_to_matrix(2)
mat = interface.const_to_matrix([1, 2, 3])
assert (scalar*mat == interface.const_to_matrix([2, 4, 6])).all()
assert (scalar - mat == interface.const_to_matrix([1, 0, -1])).all()
# Sign
self.sign_for_intf(interface)
# shape.
assert interface.shape(np.array([1, 2, 3])) == (3,)
# Test numpy matrix interface.
def test_numpy_matrix(self):
interface = intf.get_matrix_interface(np.matrix)
# const_to_matrix
mat = interface.const_to_matrix([1, 2, 3])
self.assertEqual(interface.shape(mat), (3, 1))
mat = interface.const_to_matrix([[1], [2], [3]])
self.assertEqual(mat[0, 0], 1)
# identity
# mat = interface.identity(4)
# cvxopt_dense = intf.get_matrix_interface(cvxopt.matrix)
# cmp_mat = interface.const_to_matrix(cvxopt_dense.identity(4))
# self.assertEqual(interface.shape(mat), interface.shape(cmp_mat))
# assert not (mat - cmp_mat).any()
# scalar_matrix
mat = interface.scalar_matrix(2, (4, 3))
self.assertEqual(interface.shape(mat), (4, 3))
self.assertEqual(interface.index(mat, (1, 2)), 2)
# reshape
mat = interface.const_to_matrix([[1, 2, 3], [3, 4, 5]])
mat = interface.reshape(mat, (6, 1))
self.assertEqual(interface.index(mat, (4, 0)), 4)
mat = interface.const_to_matrix(1, convert_scalars=True)
self.assertEqual(type(interface.reshape(mat, (1, 1))), type(mat))
# index
mat = interface.const_to_matrix([[1, 2, 3, 4], [3, 4, 5, 6]])
self.assertEqual(interface.index(mat, (0, 1)), 3)
mat = interface.index(mat, (slice(1, 4, 2), slice(0, 2, None)))
assert not (mat - np.array([[2, 4], [4, 6]])).any()
# Sign
self.sign_for_intf(interface)
def test_scipy_sparse(self):
"""Test cvxopt sparse interface.
"""
interface = intf.get_matrix_interface(sp.csc_matrix)
# const_to_matrix
mat = interface.const_to_matrix([1, 2, 3])
self.assertEqual(interface.shape(mat), (3, 1))
# C = cvxopt.spmatrix([1, 1, 1, 1, 1], [0, 1, 2, 0, 0, ], [0, 0, 0, 1, 2])
# mat = interface.const_to_matrix(C)
# self.assertEqual(interface.shape(mat), (3, 3))
# identity
mat = interface.identity(4)
cmp_mat = interface.const_to_matrix(np.eye(4))
self.assertEqual(interface.shape(mat), interface.shape(cmp_mat))
assert (mat - cmp_mat).nnz == 0
# scalar_matrix
mat = interface.scalar_matrix(2, (4, 3))
self.assertEqual(interface.shape(mat), (4, 3))
self.assertEqual(interface.index(mat, (1, 2)), 2)
# reshape
mat = interface.const_to_matrix([[1, 2, 3], [3, 4, 5]])
mat = interface.reshape(mat, (6, 1))
self.assertEqual(interface.index(mat, (4, 0)), 4)
# Test scalars.
scalar = interface.scalar_matrix(1, (1, 1))
self.assertEqual(type(scalar), np.ndarray)
scalar = interface.scalar_matrix(1, (1, 3))
self.assertEqual(scalar.shape, (1, 3))
# index
mat = interface.const_to_matrix([[1, 2, 3, 4], [3, 4, 5, 6]])
self.assertEqual(interface.index(mat, (0, 1)), 3)
mat = interface.index(mat, (slice(1, 4, 2), slice(0, 2, None)))
assert not (mat - np.array([[2, 4], [4, 6]])).any()
# scalar value
mat = sp.eye(1)
self.assertEqual(intf.scalar_value(mat), 1.0)
# Sign
self.sign_for_intf(interface)
# Complex
# define sparse matrix [[0, 1j],[-1j,0]]
row = np.array([0, 1])
col = np.array([1, 0])
data = np.array([1j, -1j])
A = sp.csr_matrix((data, (row, col)), shape=(2, 2))
mat = interface.const_to_matrix(A)
        self.assertEqual(mat[0, 1], 1j)
        self.assertEqual(mat[1, 0], -1j)
def test_conversion_between_intf(self):
"""Test conversion between every pair of interfaces.
"""
interfaces = [intf.get_matrix_interface(np.ndarray),
intf.get_matrix_interface(np.matrix),
intf.get_matrix_interface(sp.csc_matrix)]
cmp_mat = [[1, 2, 3, 4], [3, 4, 5, 6], [-1, 0, 2, 4]]
for i in range(len(interfaces)):
for j in range(i+1, len(interfaces)):
intf1 = interfaces[i]
mat1 = intf1.const_to_matrix(cmp_mat)
intf2 = interfaces[j]
mat2 = intf2.const_to_matrix(cmp_mat)
for col in range(len(cmp_mat)):
for row in range(len(cmp_mat[0])):
key = (slice(row, row+1, None),
slice(col, col+1, None))
self.assertEqual(intf1.index(mat1, key),
intf2.index(mat2, key))
# Convert between the interfaces.
self.assertEqual(cmp_mat[col][row],
intf1.index(intf1.const_to_matrix(mat2), key))
self.assertEqual(intf2.index(intf2.const_to_matrix(mat1), key),
cmp_mat[col][row])
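# Minimal sketch of the interface API exercised by the tests above (no API
# beyond what the tests already use is assumed):
#   interface = intf.get_matrix_interface(np.ndarray)
#   vec = interface.const_to_matrix([1, 2, 3])        # shape (3,)
#   mat = interface.scalar_matrix(2, (4, 3))          # constant 4x3 matrix of 2s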
|
gpl-3.0
|
wuzhy/autotest
|
mirror/source.py
|
2
|
7106
|
# Copyright 2009 Google Inc. Released under the GPL v2
import os, re, time, urllib2, urlparse, HTMLParser
from autotest_lib.mirror import database
from autotest_lib.client.common_lib import utils
class source(object):
"""
Abstract Base Class for the source classes.
"""
def __init__(self, database):
self.database = database
def _get_new_files(self, files):
"""
Return a copy of "files" after filtering out known old files
from "files".
"""
old_files = self.database.get_dictionary()
return dict(filter(lambda x: x[0] not in old_files, files.iteritems()))
def get_new_files(self):
        raise NotImplementedError('get_new_files not implemented')
def store_files(self, files):
self.database.merge_dictionary(files)
class rsync_source(source):
_cmd_template = '/usr/bin/rsync -rltz --no-motd %s %s/%s'
def __init__(self, database, prefix, excludes = []):
super(rsync_source, self).__init__(database)
self.prefix = prefix
self.exclude = ' '.join(['--exclude "' + x + '"' for x in excludes])
self.sources = []
def _parse_output(self, output, prefix):
"""
Parse rsync's "ls -l" style output and return a dictionary of
database.item indexed by the "name" field.
"""
regex = re.compile(
'-[rwx-]{9} +(\d+) (\d{4}/\d\d/\d\d \d\d:\d\d:\d\d) (.*)')
res = {}
for line in output.splitlines():
match = regex.match(line)
if match:
groups = match.groups()
timestamp = time.mktime(time.strptime(groups[1],
'%Y/%m/%d %H:%M:%S'))
if prefix:
fname = '%s/%s' % (prefix, groups[2])
else:
fname = groups[2]
item = database.item(fname, int(groups[0]), int(timestamp))
res[item.name] = item
return res
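    # For example (hypothetical listing line), an rsync entry such as
    #   -rw-r--r--     4096 2009/06/10 18:00:00 v2.6/patch-2.6.30.bz2
    # is parsed into database.item('v2.6/patch-2.6.30.bz2', 4096, <mtime>),
    # with "prefix" prepended to the name when one is given to add_path().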
def add_path(self, src, prefix=''):
"""
Add paths to synchronize from the source.
"""
self.sources.append((src, prefix))
def get_new_files(self):
"""
Implement source.get_new_files by using rsync listing feature.
"""
files = {}
for src, prefix in self.sources:
output = utils.system_output(self._cmd_template %
(self.exclude, self.prefix, src))
files.update(self._parse_output(output, prefix))
return self._get_new_files(files)
class _ahref_parser(HTMLParser.HTMLParser):
def reset(self, url=None, pattern=None):
HTMLParser.HTMLParser.reset(self)
self.url = url
self.pattern = pattern
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'a':
for name, value in attrs:
if name == 'href':
# compose absolute URL if relative "href" found
url = urlparse.urljoin(self.url, value)
if self.pattern.match(url):
self.links.append(url)
def get_ahref_list(self, url, pattern):
self.reset(url, pattern)
self.feed(urllib2.urlopen(url).read())
self.close()
return self.links
class url_source(source):
"""
A simple URL based source that parses HTML to find references to
kernel files.
"""
_extension_pattern = re.compile(r'.*\.[^/.]+$')
def __init__(self, database, prefix):
super(url_source, self).__init__(database)
self.prefix = prefix
self.urls = []
def add_url(self, url, pattern):
"""
Add a URL path to a HTML document with links to kernel files.
@param url: URL path to a HTML file with links to kernel files
(can be either an absolute URL or one relative to self.prefix)
@param pattern: regex pattern to filter kernel files links out of
                all other links found in the HTML document
"""
# if it does not have an extension then it's a directory and it needs
# a trailing '/'. NOTE: there are some false positives such as
        # directories named "v2.6" where ".6" will be assumed to be an extension.
# In order for these to work the caller must provide a trailing /
if url[-1:] != '/' and not self._extension_pattern.match(url):
url = url + '/'
self.urls.append((url, re.compile(pattern)))
@staticmethod
def _get_item(url):
"""
Get a database.item object by fetching relevant HTTP information
from the document pointed to by the given url.
"""
try:
info = urllib2.urlopen(url).info()
except IOError, err:
# file is referenced but does not exist
print 'WARNING: %s' % err
return None
size = info.get('content-length')
if size:
size = int(size)
else:
size = -1
timestamp = int(time.mktime(info.getdate('date')))
if not timestamp:
timestamp = 0
return database.item(url, size, timestamp)
def get_new_files(self):
parser = _ahref_parser()
files = {}
for url, pattern in self.urls:
links = parser.get_ahref_list(urlparse.urljoin(self.prefix, url),
pattern)
for link in links:
item = self._get_item(link)
if item:
files[item.name] = item
return self._get_new_files(files)
class directory_source(source):
"""
Source that finds kernel files by listing the contents of a directory.
"""
def __init__(self, database, path):
"""
Initialize a directory_source instance.
@param database: Persistent database with known kernels information.
@param path: Path to the directory with the kernel files found by
this source.
"""
super(directory_source, self).__init__(database)
self._path = path
def get_new_files(self, _stat_func=os.stat):
"""
Main function, see source.get_new_files().
@param _stat_func: Used for unit testing, if we stub os.stat in the
unit test then unit test failures get reported confusingly
because the unit test framework tries to stat() the unit test
file.
"""
all_files = {}
for filename in os.listdir(self._path):
full_filename = os.path.join(self._path, filename)
try:
stat_data = _stat_func(full_filename)
except OSError:
# File might have been removed/renamed since we listed the
# directory so skip it.
continue
item = database.item(full_filename, stat_data.st_size,
int(stat_data.st_mtime))
all_files[filename] = item
return self._get_new_files(all_files)
|
gpl-2.0
|
ldtri0209/robotframework
|
src/robot/libdocpkg/javabuilder.py
|
6
|
4693
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from robot.errors import DataError
from robot import utils
from .model import LibraryDoc, KeywordDoc
class JavaDocBuilder(object):
def build(self, path):
doc = ClassDoc(path)
libdoc = LibraryDoc(name=doc.qualifiedName(),
doc=self._get_doc(doc),
version=self._get_version(doc),
scope=self._get_scope(doc),
named_args=False,
doc_format=self._get_doc_format(doc))
libdoc.inits = self._initializers(doc)
libdoc.keywords = self._keywords(doc)
return libdoc
def _get_doc(self, doc):
text = doc.getRawCommentText()
return '\n'.join(line.strip() for line in text.splitlines())
def _get_version(self, doc):
return self._get_attr(doc, 'VERSION')
def _get_scope(self, doc):
return self._get_attr(doc, 'SCOPE', default='TESTCASE', upper=True)
def _get_doc_format(self, doc):
return self._get_attr(doc, 'DOC_FORMAT', upper=True)
def _get_attr(self, doc, name, default='', upper=False):
name = 'ROBOT_LIBRARY_' + name
for field in doc.fields():
if field.name() == name and field.isPublic():
value = field.constantValue()
if upper:
value = utils.normalize(value, ignore='_').upper()
return value
return default
def _initializers(self, doc):
inits = [self._keyword_doc(init) for init in doc.constructors()]
if len(inits) == 1 and not inits[0].args:
return []
return inits
def _keywords(self, doc):
return [self._keyword_doc(m) for m in doc.methods()]
def _keyword_doc(self, method):
return KeywordDoc(
name=utils.printable_name(method.name(), code_style=True),
args=self._get_keyword_arguments(method),
doc=self._get_doc(method)
)
def _get_keyword_arguments(self, method):
params = method.parameters()
if not params:
return []
names = [p.name() for p in params]
if self._is_varargs(params[-1]):
names[-1] = '*' + names[-1]
elif self._is_kwargs(params[-1]):
names[-1] = '**' + names[-1]
if len(params) > 1 and self._is_varargs(params[-2]):
names[-2] = '*' + names[-2]
return names
def _is_varargs(self, param):
return (param.typeName().startswith('java.util.List')
or param.type().dimension() == '[]')
def _is_kwargs(self, param):
return param.typeName().startswith('java.util.Map')
def ClassDoc(path):
"""Process the given Java source file and return ClassDoc instance.
Processing is done using com.sun.tools.javadoc APIs. Returned object
implements com.sun.javadoc.ClassDoc interface:
http://docs.oracle.com/javase/7/docs/jdk/api/javadoc/doclet/
"""
try:
from com.sun.tools.javadoc import JavadocTool, Messager, ModifierFilter
from com.sun.tools.javac.util import List, Context
from com.sun.tools.javac.code.Flags import PUBLIC
except ImportError:
raise DataError("Creating documentation from Java source files "
"requires 'tools.jar' to be in CLASSPATH.")
context = Context()
Messager.preRegister(context, 'libdoc')
jdoctool = JavadocTool.make0(context)
filter = ModifierFilter(PUBLIC)
java_names = List.of(path)
if sys.platform[4:7] < '1.8': # API changed in Java 8
root = jdoctool.getRootDocImpl('en', 'utf-8', filter, java_names,
List.nil(), False, List.nil(),
List.nil(), False, False, True)
else:
root = jdoctool.getRootDocImpl('en', 'utf-8', filter, java_names,
List.nil(), List.nil(), False, List.nil(),
List.nil(), False, False, True)
return root.classes()[0]
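# Illustrative usage sketch (requires Jython with tools.jar on the CLASSPATH;
# the source file name is a hypothetical placeholder):
#   libdoc = JavaDocBuilder().build('MyLibrary.java')
#   keyword_names = [kw.name for kw in libdoc.keywords]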
|
apache-2.0
|
Kilhog/odoo
|
addons/hr_payroll/res_config.py
|
441
|
1294
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class human_resources_configuration(osv.osv_memory):
_inherit = 'hr.config.settings'
_columns = {
'module_hr_payroll_account': fields.boolean('Link your payroll to accounting system',
help ="""Create journal entries from payslips"""),
}
|
agpl-3.0
|
bjolivot/ansible
|
test/units/modules/packaging/os/test_apt.py
|
80
|
1500
|
import collections
import os
import sys
from ansible.compat.tests import mock
from ansible.compat.tests import unittest
try:
from ansible.modules.packaging.os.apt import (
expand_pkgspec_from_fnmatches,
)
except:
# Need some more module_utils work (porting urls.py) before we can test
# modules. So don't error out in this case.
if sys.version_info[0] >= 3:
pass
class AptExpandPkgspecTestCase(unittest.TestCase):
def setUp(self):
FakePackage = collections.namedtuple("Package", ("name",))
self.fake_cache = [ FakePackage("apt"),
FakePackage("apt-utils"),
FakePackage("not-selected"),
]
def test_trivial(self):
foo = ["apt"]
self.assertEqual(
expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)
def test_version_wildcard(self):
foo = ["apt=1.0*"]
self.assertEqual(
expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)
def test_pkgname_wildcard_version_wildcard(self):
foo = ["apt*=1.0*"]
m_mock = mock.Mock()
self.assertEqual(
expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
['apt', 'apt-utils'])
def test_pkgname_expands(self):
foo = ["apt*"]
m_mock = mock.Mock()
self.assertEqual(
expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
["apt", "apt-utils"])
|
gpl-3.0
|
icdishb/scikit-learn
|
sklearn/utils/tests/test_linear_assignment.py
|
421
|
1349
|
# Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
|
bsd-3-clause
|
YoQuieroSaber/votainteligente-portal-electoral
|
votainteligente/urls.py
|
1
|
1101
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from django.conf import settings
from django.conf.urls.i18n import i18n_patterns
admin.autodiscover()
admin.site.site_header = getattr(settings, 'ADMIN_HEADER', 'Vota Inteligente')
urlpatterns = i18n_patterns('',
# Examples:
# url(r'^$', 'votainteligente.views.home', name='home'),
# url(r'^votainteligente/', include('votainteligente.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('elections.urls')),
('^pages/', include('django.contrib.flatpages.urls')),
(r'^tinymce/', include('tinymce.urls')),
(r'^newsletter/', include('newsletter.urls')),
)
from django.conf import settings
if settings.THEME:
urlpatterns += i18n_patterns('',
('^theme/', include('%s.urls' % (settings.THEME)))
)
|
gpl-3.0
|
jmcarp/django
|
django/views/decorators/csrf.py
|
586
|
2202
|
from functools import wraps
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.utils.decorators import available_attrs, decorator_from_middleware
csrf_protect = decorator_from_middleware(CsrfViewMiddleware)
csrf_protect.__name__ = "csrf_protect"
csrf_protect.__doc__ = """
This decorator adds CSRF protection in exactly the same way as
CsrfViewMiddleware, but it can be used on a per view basis. Using both, or
using the decorator multiple times, is harmless and efficient.
"""
class _EnsureCsrfToken(CsrfViewMiddleware):
# We need this to behave just like the CsrfViewMiddleware, but not reject
# requests or log warnings.
def _reject(self, request, reason):
return None
requires_csrf_token = decorator_from_middleware(_EnsureCsrfToken)
requires_csrf_token.__name__ = 'requires_csrf_token'
requires_csrf_token.__doc__ = """
Use this decorator on views that need a correct csrf_token available to
RequestContext, but without the CSRF protection that csrf_protect
enforces.
"""
class _EnsureCsrfCookie(CsrfViewMiddleware):
def _reject(self, request, reason):
return None
def process_view(self, request, callback, callback_args, callback_kwargs):
retval = super(_EnsureCsrfCookie, self).process_view(request, callback, callback_args, callback_kwargs)
# Forces process_response to send the cookie
get_token(request)
return retval
ensure_csrf_cookie = decorator_from_middleware(_EnsureCsrfCookie)
ensure_csrf_cookie.__name__ = 'ensure_csrf_cookie'
ensure_csrf_cookie.__doc__ = """
Use this decorator to ensure that a view sets a CSRF cookie, whether or not it
uses the csrf_token template tag, or the CsrfViewMiddleware is used.
"""
def csrf_exempt(view_func):
"""
Marks a view function as being exempt from the CSRF view protection.
"""
# We could just do view_func.csrf_exempt = True, but decorators
# are nicer if they don't have side-effects, so we return a new
# function.
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.csrf_exempt = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
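# Typical usage of the decorators defined above (the view names are purely
# illustrative):
#
#   @csrf_exempt
#   def webhook(request):
#       ...   # this view skips the CSRF check entirely
#
#   @ensure_csrf_cookie
#   def spa_entry_point(request):
#       ...   # response is guaranteed to carry the CSRF cookie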
|
bsd-3-clause
|
putcn/Paddle
|
python/paddle/utils/predefined_net.py
|
18
|
14269
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
from paddle.trainer.config_parser import *
from paddle.utils.preprocess_img import \
ImageClassificationDatasetCreater
from paddle.trainer_config_helpers import *
def image_data(data_dir,
processed_image_size,
overwrite=False,
color=True,
train_list="batches/train.list",
test_list="batches/test.list",
meta_file="batches/batches.meta",
use_jpeg=1):
"""
Predefined image data provider for image classification.
train_list: a text file containing a list of training batches.
test_list: a text file containing a list of test batches.
processed_image_size: all the input images will be resized into this size.
    If the image is not square, then the shorter edge will be resized into
this size, and the aspect ratio is kept the same.
color: whether the images are color or gray.
meta_path: the path of the meta file that stores the mean image file and
other dataset information, such as the size of images,
the size of the mean image, the number of classes.
    async_load_data: whether to load image data asynchronously.
"""
data_creator = ImageClassificationDatasetCreater(
data_dir, processed_image_size, color)
batch_data_dir = data_dir
train_list = os.path.join(batch_data_dir, train_list)
test_list = os.path.join(batch_data_dir, test_list)
meta_path = os.path.join(batch_data_dir, meta_file)
image_size = processed_image_size
conf = np.load(meta_path)
mean_image_size = conf["mean_image_size"]
is_color = conf["color"]
num_classes = conf["num_classes"]
color_string = "color" if is_color else "gray"
args = {
'meta': meta_path,
'mean_img_size': mean_image_size,
'img_size': image_size,
'num_classes': num_classes,
'use_jpeg': use_jpeg != 0,
'color': color_string
}
define_py_data_sources2(
train_list,
test_list,
module='image_provider',
obj='processData',
args=args)
return {
"image_size": image_size,
"num_classes": num_classes,
"is_color": is_color
}
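# Illustrative call (the data directory is a hypothetical placeholder and must
# already contain the batches/*.list files and batches.meta produced by the
# dataset creator above):
#   data_conf = image_data('data/cifar-out', processed_image_size=32, color=True)
#   # -> {'image_size': 32, 'num_classes': ..., 'is_color': ...}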
def get_extra_layer_attr(drop_rate):
if drop_rate == 0:
return None
else:
return ExtraLayerAttribute(drop_rate=drop_rate)
def image_data_layers(image_size, num_classes, is_color=False,
is_predict=False):
"""
Data layers for image classification.
image_size: image size.
num_classes: num of classes.
is_color: whether the input images are color.
is_predict: whether the network is used for prediction.
"""
num_image_channels = 3 if is_color else 1
data_input = data_layer("input",
image_size * image_size * num_image_channels)
if is_predict:
return data_input, None, num_image_channels
else:
label_input = data_layer("label", 1)
return data_input, label_input, num_image_channels
def simple_conv_net(data_conf, is_color=False):
"""
A Wrapper for a simple network for MNIST digit recognition.
    It contains two convolutional layers, one fully connected layer, and
one softmax layer.
data_conf is a dictionary with the following keys:
image_size: image size.
num_classes: num of classes.
is_color: whether the input images are color.
"""
for k, v in data_conf.iteritems():
globals()[k] = v
data_input, label_input, num_image_channels = \
image_data_layers(image_size, num_classes, is_color, is_predict)
filter_sizes = [5, 5]
num_channels = [32, 64]
strides = [1, 1]
fc_dims = [500]
conv_bn_pool1 = img_conv_bn_pool(
name="g1",
input=data_input,
filter_size=filter_sizes[0],
num_channel=num_image_channels,
num_filters=num_channels[0],
conv_stride=1,
conv_padding=0,
pool_size=3,
pool_stride=2,
act=ReluActivation())
conv_bn_pool2 = img_conv_bn_pool(
name="g2",
input=conv_bn_pool1,
filter_size=filter_sizes[1],
num_channel=num_channels[0],
num_filters=num_channels[1],
conv_stride=1,
conv_padding=0,
pool_size=3,
pool_stride=2,
act=ReluActivation())
fc3 = fc_layer(
name="fc3", input=conv_bn_pool2, dim=fc_dims[0], act=ReluActivation())
fc3_dropped = dropout_layer(name="fc3_dropped", input=fc3, dropout_rate=0.5)
output = fc_layer(
name="output",
input=fc3_dropped,
dim=fc_dims[0],
act=SoftmaxActivation())
if is_predict:
end_of_network(output)
else:
cost = classify(name="cost", input=output, label=label_input)
end_of_network(cost)
def conv_layer_group(prefix_num,
num_layers,
input,
input_channels,
output_channels,
drop_rates=[],
strides=[],
with_bn=[]):
"""
A set of convolution layers, and batch normalization layers,
followed by one pooling layer.
    It is utilized in the VGG network for image classification.
prefix_num: the prefix number of the layer names.
    For example, if prefix_num = 1, the first convolutional layer's
name will be conv_1_1.
num_layers: number of the convolutional layers.
input: the name of the input layer.
input_channels: the number of channels of the input feature map.
output_channels: the number of channels of the output feature map.
drop_rates: the drop rates of the BN layers. It will be all zero by default.
strides: the stride of the convolution for the layers.
It will be all 1 by default.
with_bn: whether to use Batch Normalization for Conv layers.
By default, it is all false.
"""
if len(drop_rates) == 0: drop_rates = [0] * num_layers
if len(strides) == 0: strides = [1] * num_layers
if len(with_bn) == 0: with_bn = [False] * num_layers
assert (len(drop_rates) == num_layers)
assert (len(strides) == num_layers)
for i in range(1, num_layers + 1):
if i == 1:
i_conv_in = input
else:
i_conv_in = group_output
i_channels_conv = input_channels if i == 1 else output_channels
conv_act = LinearActivation() if with_bn[i - 1] else ReluActivation()
conv_output = img_conv_layer(
name="conv%d_%d" % (prefix_num, i),
input=i_conv_in,
filter_size=3,
num_channels=i_channels_conv,
num_filters=output_channels,
stride=strides[i - 1],
padding=1,
act=conv_act)
if with_bn[i - 1]:
bn = batch_norm_layer(
name="conv%d_%d_bn" % (prefix_num, i),
input=conv_output,
num_channels=output_channels,
act=ReluActivation(),
layer_attr=get_extra_layer_attr(drop_rate=drop_rates[i - 1]))
group_output = bn
else:
group_output = conv_output
pool = img_pool_layer(
name="pool%d" % prefix_num,
input=group_output,
pool_size=2,
num_channels=output_channels,
stride=2)
return pool
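# Example invocation (mirrors how vgg_conv_net below wires the groups together;
# data_input comes from image_data_layers above):
#   pool1 = conv_layer_group(prefix_num=1, num_layers=2, input=data_input,
#                            input_channels=3, output_channels=64,
#                            with_bn=[True, True])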
def vgg_conv_net(image_size,
num_classes,
num_layers,
channels,
strides,
with_bn,
fc_dims,
drop_rates,
drop_rates_fc=[],
is_color=True,
is_predict=False):
"""
A Wrapper for a VGG network for image classification.
It is a set of convolutional groups followed by several fully
    connected layers, and a cross-entropy classification loss.
The detailed architecture of the paper can be found here:
Very Deep Convolutional Networks for Large-Scale Visual Recognition
http://www.robots.ox.ac.uk/~vgg/research/very_deep/
image_size: image size.
num_classes: num of classes.
num_layers: the number of layers for all the convolution groups.
channels: the number of output filters for all the convolution groups.
with_bn: whether each layer of a convolution group is followed by a
batch normalization.
drop_rates: the dropout rates for all the convolutional layers.
fc_dims: the dimension for all the fully connected layers.
is_color: whether the input images are color.
"""
data_input, label_input, num_image_channels = \
image_data_layers(image_size, num_classes, is_color, is_predict)
assert (len(num_layers) == len(channels))
assert (len(num_layers) == len(strides))
assert (len(num_layers) == len(with_bn))
num_fc_layers = len(fc_dims)
assert (num_fc_layers + 1 == len(drop_rates_fc))
for i in range(len(num_layers)):
input_layer = data_input if i == 0 else group_output
input_channels = 3 if i == 0 else channels[i - 1]
group_output = conv_layer_group(
prefix_num=i + 1,
num_layers=num_layers[i],
input=input_layer,
input_channels=input_channels,
output_channels=channels[i],
drop_rates=drop_rates[i],
strides=strides[i],
with_bn=with_bn[i])
conv_output_name = group_output
if drop_rates_fc[0] != 0.0:
dropped_pool_name = "pool_dropped"
conv_output_name = dropout_layer(
name=dropped_pool_name,
input=conv_output_name,
dropout_rate=drop_rates_fc[0])
for i in range(len(fc_dims)):
input_layer_name = conv_output_name if i == 0 else fc_output
active_type = LinearActivation() if i == len(
fc_dims) - 1 else ReluActivation()
drop_rate = 0.0 if i == len(fc_dims) - 1 else drop_rates_fc[i + 1]
fc_output = fc_layer(
name="fc%d" % (i + 1),
input=input_layer_name,
size=fc_dims[i],
act=active_type,
layer_attr=get_extra_layer_attr(drop_rate))
bn = batch_norm_layer(
name="fc_bn",
input=fc_output,
num_channels=fc_dims[len(fc_dims) - 1],
act=ReluActivation(),
layer_attr=get_extra_layer_attr(drop_rate=drop_rates_fc[-1]))
output = fc_layer(
name="output", input=bn, size=num_classes, act=SoftmaxActivation())
if is_predict:
outputs(output)
else:
cost = classification_cost(name="cost", input=output, label=label_input)
outputs(cost)
def vgg16_conv_net(image_size, num_classes, is_color=True, is_predict=False):
"""
A Wrapper for a 16 layers VGG network for image classification.
The detailed architecture of the paper can be found here:
Very Deep Convolutional Networks for Large-Scale Visual Recognition
http://www.robots.ox.ac.uk/~vgg/research/very_deep/
image_size: image size.
num_classes: num of classes.
is_color: whether the input images are color.
"""
vgg_conv_net(image_size, num_classes,
num_layers=[2, 2, 3, 3, 3],
channels=[64, 128, 256, 512, 512],
strides=[[], [], [], [], []],
with_bn=[[False, True], [False, True], [False, False, True], \
[False, False, True], [False, False, True]],
drop_rates=[[]] * 5,
drop_rates_fc=[0.0, 0.5, 0.5],
fc_dims=[4096, 4096],
is_predict=is_predict)
def small_vgg(data_conf, is_predict=False):
"""
A Wrapper for a small VGG network for CIFAR-10 image classification.
The detailed architecture of the paper can be found here:
92.45% on CIFAR-10 in Torch
http://torch.ch/blog/2015/07/30/cifar.html
Due to the constraints of CuDNN, it only has four convolutional groups
rather than five.
Thus, it only achieves 91.2% test accuracy and 98.1% training accuracy.
data_conf is a dictionary with the following keys:
image_size: image size.
num_classes: num of classes.
is_color: whether the input images are color.
"""
for k, v in data_conf.iteritems():
globals()[k] = v
vgg_conv_net(image_size, num_classes,
num_layers=[2, 2, 3, 3],
channels=[64, 128, 256, 512],
strides=[[], [], [], []],
with_bn=[[True, True], [True, True], [True, True, True], \
[True, True, True]],
drop_rates=[[0.3, 0.0], [0.4, 0.0],
[0.4, 0.4, 0.0], [0.4, 0.4, 0.0]],
drop_rates_fc=[0.5, 0.5],
fc_dims=[512],
is_predict=is_predict)
def training_settings(learning_rate=0.1,
batch_size=128,
algorithm="sgd",
momentum=0.9,
decay_rate=0.001):
"""
Training settings.
learning_rate: learning rate of the training.
batch_size: the size of each training batch.
algorithm: training algorithm, can be
- sgd
- adagrad
- adadelta
- rmsprop
momentum: momentum of the training algorithm.
decay_rate: weight decay rate.
"""
Settings(
algorithm=algorithm,
batch_size=batch_size,
learning_rate=learning_rate / float(batch_size))
default_momentum(momentum)
default_decay_rate(decay_rate * batch_size)
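# Illustrative settings line for a config built from the helpers above (values
# are simply the documented defaults):
#   training_settings(learning_rate=0.1, batch_size=128, algorithm="sgd",
#                     momentum=0.9, decay_rate=0.001)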
|
apache-2.0
|
great-expectations/great_expectations
|
great_expectations/expectations/core/expect_table_row_count_to_equal.py
|
1
|
6245
|
from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import ExecutionEngine, PandasExecutionEngine
from great_expectations.expectations.util import render_evaluation_parameter_string
from ...data_asset.util import parse_result_format
from ...render.renderer.renderer import renderer
from ...render.types import RenderedStringTemplateContent
from ...render.util import (
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from ..expectation import InvalidExpectationConfigurationError, TableExpectation
class ExpectTableRowCountToEqual(TableExpectation):
"""Expect the number of rows to equal a value.
expect_table_row_count_to_equal is a :func:`expectation \
<great_expectations.validator.validator.Validator.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
value (int): \
The expected number of rows.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_row_count_to_be_between
"""
library_metadata = {
"maturity": "production",
"package": "great_expectations",
"tags": ["core expectation", "table expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
}
metric_dependencies = ("table.row_count",)
success_keys = ("value",)
default_kwarg_values = {
"value": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
"meta": None,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
# Setting up a configuration
super().validate_configuration(configuration)
value = configuration.kwargs.get("value")
try:
assert value is not None, "An expected row count must be provided"
if not isinstance(value, (int, dict)):
raise ValueError("Provided row count must be an integer")
if isinstance(value, dict):
assert (
"$PARAMETER" in value
), 'Evaluation Parameter dict for value kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["value", "row_condition", "condition_parser"],
)
template_str = "Must have exactly $value rows."
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = (
conditional_template_str
+ ", then "
+ template_str[0].lower()
+ template_str[1:]
)
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
expected_table_row_count = self.get_success_kwargs().get("value")
actual_table_row_count = metrics.get("table.row_count")
return {
"success": actual_table_row_count == expected_table_row_count,
"result": {"observed_value": actual_table_row_count},
}
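# Illustrative usage sketch (assumes a Great Expectations validator object named
# `validator`; the expected count of 100 is a placeholder):
#   validator.expect_table_row_count_to_equal(value=100)
#   # success iff the "table.row_count" metric equals 100, per _validate above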
|
apache-2.0
|
wkritzinger/asuswrt-merlin
|
release/src/router/samba36/lib/testtools/testtools/tests/test_helpers.py
|
20
|
3580
|
# Copyright (c) 2010 Jonathan M. Lange. See LICENSE for details.
from testtools import TestCase
from testtools.helpers import (
try_import,
try_imports,
)
from testtools.matchers import (
Equals,
Is,
)
class TestTryImport(TestCase):
def test_doesnt_exist(self):
# try_import('thing', foo) returns foo if 'thing' doesn't exist.
marker = object()
result = try_import('doesntexist', marker)
self.assertThat(result, Is(marker))
def test_None_is_default_alternative(self):
# try_import('thing') returns None if 'thing' doesn't exist.
result = try_import('doesntexist')
self.assertThat(result, Is(None))
def test_existing_module(self):
# try_import('thing', foo) imports 'thing' and returns it if it's a
# module that exists.
result = try_import('os', object())
import os
self.assertThat(result, Is(os))
def test_existing_submodule(self):
# try_import('thing.another', foo) imports 'thing' and returns it if
# it's a module that exists.
result = try_import('os.path', object())
import os
self.assertThat(result, Is(os.path))
def test_nonexistent_submodule(self):
# try_import('thing.another', foo) imports 'thing' and returns foo if
# 'another' doesn't exist.
marker = object()
result = try_import('os.doesntexist', marker)
self.assertThat(result, Is(marker))
def test_object_from_module(self):
# try_import('thing.object') imports 'thing' and returns
# 'thing.object' if 'thing' is a module and 'object' is not.
result = try_import('os.path.join')
import os
self.assertThat(result, Is(os.path.join))
class TestTryImports(TestCase):
def test_doesnt_exist(self):
# try_imports('thing', foo) returns foo if 'thing' doesn't exist.
marker = object()
result = try_imports(['doesntexist'], marker)
self.assertThat(result, Is(marker))
def test_fallback(self):
result = try_imports(['doesntexist', 'os'])
import os
self.assertThat(result, Is(os))
def test_None_is_default_alternative(self):
# try_imports('thing') returns None if 'thing' doesn't exist.
e = self.assertRaises(
ImportError, try_imports, ['doesntexist', 'noreally'])
self.assertThat(
str(e),
Equals("Could not import any of: doesntexist, noreally"))
def test_existing_module(self):
# try_imports('thing', foo) imports 'thing' and returns it if it's a
# module that exists.
result = try_imports(['os'], object())
import os
self.assertThat(result, Is(os))
def test_existing_submodule(self):
# try_imports('thing.another', foo) imports 'thing' and returns it if
# it's a module that exists.
result = try_imports(['os.path'], object())
import os
self.assertThat(result, Is(os.path))
def test_nonexistent_submodule(self):
# try_imports('thing.another', foo) imports 'thing' and returns foo if
# 'another' doesn't exist.
marker = object()
result = try_imports(['os.doesntexist'], marker)
self.assertThat(result, Is(marker))
def test_fallback_submodule(self):
result = try_imports(['os.doesntexist', 'os.path'])
import os
self.assertThat(result, Is(os.path))
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
|
gpl-2.0
|
pbstark/permute
|
permute/data/__init__.py
|
1
|
3720
|
"""Standard test data.
For more information, see
- http://www.wiley.com/legacy/wileychi/pesarin/material.html
"""
import os as _os
import numpy as np
from .. import data_dir
__all__ = ['load',
'kenya', ]
def load(f):
"""Load a data file located in the data directory.
Parameters
----------
f : string
File name.
Returns
-------
x : ndarray (or Pandas' frame?)
Data loaded from permute.data_dir.
"""
return np.recfromcsv(_os.path.join(data_dir, f), delimiter=",")
def nsgk():
"""NSGK test data for irr.
"""
nz = np.loadtxt(_os.path.join(data_dir, "nsgk.csv"),
delimiter=',', skiprows=1, dtype=np.int)
shape = tuple(nz.max(axis=0))
x = np.zeros(shape, dtype=np.int)
nz -= 1
for r in nz:
x[tuple(r)] = 1
# given order: time_stamp,domain,video,rater
# desired order: domain,video,rater,time_stamp
x = x.transpose(1, 2, 3, 0)
# hardcoding the number of timestamps per video
time_stamps = [36, 32, 35, 37, 31, 35, 40, 32]
p1 = [[m[:, :time_stamps[i]] for i, m in enumerate(n)]for n in x]
## Alternatively, I could return a 2D object array with
## rater x time_stamp(video) matrices as entries
## Not sure which is better, so I will wait to see how I use it.
# p1 = np.zeros(x.shape[:2], dtype=object)
# for i, n in enumerate(x):
# for j, m in enumerate(n):
# p1[i, j] = m
return p1
def macnell2014():
"""Data from MacNell et alrom MacNell et alrom MacNell et al. 2014
.. Lillian MacNell, Adam Driscoll, and Andrea N Hunt, "What's
in a Name: Exposing Gender Bias in Student Ratings of Teaching,"
Innovative Higher Education, pp. 1-13, 2014.
"""
return load("MacNell2014.csv")
# def another_poss():
# nz = np.loadtxt(_os.path.join(data_dir, "nsgk.csv"),
# delimiter=',', skiprows=1, dtype=np.int)
# _, nd, nv, nr = tuple(nz.max(axis=0))
# dv = np.zeros((nd, nv), dtype=object)
# time_stamps = [36, 32, 35, 37, 31, 35, 40, 32]
# for n in range(nd):
# for v in range(nv):
# dv[n, v] = np.zeros((nr, time_stamps[v]), dtype=np.int)
# nz -= 1
# for _ts, _d, _v, _r in nz:
# dv[_d, _v][_r, _ts] = 1
#
def botulinum():
"""The
"""
return load(_os.path.join("npc", "botulinum.csv"))
def chrom17m():
"""The
"""
return load(_os.path.join("npc", "chrom17m.csv"))
def confocal():
"""The
"""
return load(_os.path.join("npc", "confocal.csv"))
def germina():
"""The
"""
return load(_os.path.join("npc", "germina.csv"))
def kenya():
"""The Kenya dataset contains 16 observations and two variables in total.
It concerns an anthropological study on the "Ol Molo" and "Kamba"
populations.
"""
return load(_os.path.join("npc", "kenya.csv"))
def massaro_blair():
"""The
"""
return load(_os.path.join("npc", "massaro_blair.csv"))
def monachus():
"""The
"""
return load(_os.path.join("npc", "monachus.csv"))
def mult():
"""The
"""
return load(_os.path.join("npc", "mult.csv"))
def perch():
"""The
"""
return load(_os.path.join("npc", "perch.csv"))
def rats():
"""The
"""
return load(_os.path.join("npc", "rats.csv"))
def setig():
"""The
"""
return load(_os.path.join("npc", "setig.csv"))
def urology():
"""The
"""
return load(_os.path.join("npc", "urology.csv"))
def washing_test():
"""The
"""
return load(_os.path.join("npc", "washing_test.csv"))
def waterfalls():
"""The
"""
return load(_os.path.join("npc", "waterfalls.csv"))
|
bsd-2-clause
|
HyperBaton/ansible
|
lib/ansible/modules/network/junos/junos_command.py
|
21
|
14489
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_command
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Run arbitrary commands on a Juniper JUNOS device
description:
  - Sends an arbitrary set of commands to a JUNOS node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
extends_documentation_fragment: junos
options:
commands:
description:
- The commands to send to the remote junos device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of I(retries) has been exceeded.
rpcs:
description:
- The C(rpcs) argument accepts a list of RPCs to be executed
over a netconf session and the results from the RPC execution
        are returned to the playbook via the module's results dictionary.
version_added: "2.3"
wait_for:
description:
- Specifies what to evaluate from the output of the command
and what conditionals to apply. This argument will cause
the task to wait for a particular conditional to be true
before moving forward. If the conditional is not true
by the configured retries, the task fails. See examples.
aliases: ['waitfor']
version_added: "2.2"
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the I(wait_for) must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
default: all
choices: ['any', 'all']
version_added: "2.2"
retries:
description:
- Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the I(wait_for)
conditionals.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
        conditional, the interval indicates how long to wait before
trying the command again.
default: 1
display:
description:
- Encoding scheme to use when serializing output from the device.
This handles how to properly understand the output and apply the
conditionals path to the result set. For I(rpcs) argument default
display is C(xml) and for I(commands) argument default display
is C(text). Value C(set) is applicable only for fetching configuration
from device.
default: depends on input argument I(rpcs) or I(commands)
aliases: ['format', 'output']
choices: ['text', 'json', 'xml', 'set']
version_added: "2.3"
requirements:
- jxmlease
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(network_cli) connections and with C(local) connections for legacy playbooks.
"""
EXAMPLES = """
- name: run show version on remote devices
junos_command:
commands: show version
- name: run show version and check to see if output contains Juniper
junos_command:
commands: show version
wait_for: result[0] contains Juniper
- name: run multiple commands on remote nodes
junos_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
junos_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains Juniper
- result[1] contains Loopback0
- name: run commands and specify the output format
junos_command:
commands: show version
display: json
- name: run rpc on the remote device
junos_command:
commands: show configuration
display: set
- name: run rpc on the remote device
junos_command:
rpcs: get-software-information
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
output:
description: The set of transformed xml to json format from the commands responses
returned: If the I(display) is in C(xml) format.
type: list
sample: ['...', '...']
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import re
import shlex
import time
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.network.common.netconf import exec_rpc
from ansible.module_utils.network.junos.junos import junos_argument_spec, get_configuration, get_connection, get_capabilities, tostring
from ansible.module_utils.network.common.parsing import Conditional, FailedConditionalError
from ansible.module_utils.network.common.utils import to_lines
from ansible.module_utils.six import iteritems
try:
from lxml.etree import Element, SubElement
except ImportError:
from xml.etree.ElementTree import Element, SubElement
try:
import jxmlease
HAS_JXMLEASE = True
except ImportError:
HAS_JXMLEASE = False
USE_PERSISTENT_CONNECTION = True
def rpc(module, items):
responses = list()
for item in items:
name = item['name']
xattrs = item['xattrs']
fetch_config = False
args = item.get('args')
text = item.get('text')
name = str(name).replace('_', '-')
if all((module.check_mode, not name.startswith('get'))):
module.fail_json(msg='invalid rpc for running in check_mode')
if name == 'command' and text.startswith('show configuration') or name == 'get-configuration':
fetch_config = True
element = Element(name, xattrs)
if text:
element.text = text
elif args:
for key, value in iteritems(args):
key = str(key).replace('_', '-')
if isinstance(value, list):
for item in value:
child = SubElement(element, key)
if item is not True:
child.text = item
else:
child = SubElement(element, key)
if value is not True:
child.text = value
if fetch_config:
reply = get_configuration(module, format=xattrs['format'])
else:
reply = exec_rpc(module, tostring(element), ignore_warning=False)
if xattrs['format'] == 'text':
if fetch_config:
data = reply.find('.//configuration-text')
else:
data = reply.find('.//output')
if data is None:
module.fail_json(msg=tostring(reply))
responses.append(data.text.strip())
elif xattrs['format'] == 'json':
responses.append(module.from_json(reply.text.strip()))
elif xattrs['format'] == 'set':
data = reply.find('.//configuration-set')
if data is None:
module.fail_json(msg="Display format 'set' is not supported by remote device.")
responses.append(data.text.strip())
else:
responses.append(tostring(reply))
return responses
def split(value):
lex = shlex.shlex(value)
lex.quotes = '"'
lex.whitespace_split = True
lex.commenters = ''
return list(lex)
def parse_rpcs(module):
items = list()
for rpc in (module.params['rpcs'] or list()):
parts = shlex.split(rpc)
name = parts.pop(0)
args = dict()
for item in parts:
key, value = item.split('=')
if str(value).upper() in ['TRUE', 'FALSE']:
args[key] = bool(value)
elif re.match(r'^[0-9]+$', value):
args[key] = int(value)
else:
args[key] = str(value)
display = module.params['display'] or 'xml'
if display == 'set' and rpc != 'get-configuration':
module.fail_json(msg="Invalid display option '%s' given for rpc '%s'" % ('set', name))
xattrs = {'format': display}
items.append({'name': name, 'args': args, 'xattrs': xattrs})
return items
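# For example (hypothetical task input), parse_rpcs() turns
#   rpcs: ['get-interface-information interface_name=ge-0/0/0 terse=True']
# into
#   [{'name': 'get-interface-information',
#     'args': {'interface_name': 'ge-0/0/0', 'terse': True},
#     'xattrs': {'format': 'xml'}}]
# with 'xml' being the default display for rpcs, as documented above.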
def parse_commands(module, warnings):
items = list()
for command in (module.params['commands'] or list()):
if module.check_mode and not command.startswith('show'):
warnings.append(
'Only show commands are supported when using check_mode, not '
'executing %s' % command
)
continue
parts = command.split('|')
text = parts[0]
display = module.params['display'] or 'text'
if '| display json' in command:
display = 'json'
elif '| display xml' in command:
display = 'xml'
if display == 'set' or '| display set' in command:
if command.startswith('show configuration'):
display = 'set'
else:
module.fail_json(msg="Invalid display option '%s' given for command '%s'" % ('set', command))
xattrs = {'format': display}
items.append({'name': 'command', 'xattrs': xattrs, 'text': text})
return items
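# For example, commands: ['show configuration | display set'] is parsed into
#   [{'name': 'command', 'xattrs': {'format': 'set'}, 'text': 'show configuration '}]
# i.e. the display format is keyed off the '| display ...' pipe while only the
# bare command text (split at the first '|') is kept.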
def main():
"""entry point for module execution
"""
argument_spec = dict(
commands=dict(type='list'),
rpcs=dict(type='list'),
display=dict(choices=['text', 'json', 'xml', 'set'], aliases=['format', 'output']),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(junos_argument_spec)
required_one_of = [('commands', 'rpcs')]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
supports_check_mode=True)
warnings = list()
conn = get_connection(module)
capabilities = get_capabilities(module)
if capabilities.get('network_api') == 'cliconf':
if any((module.params['wait_for'], module.params['match'], module.params['rpcs'])):
module.warn('arguments wait_for, match, rpcs are not supported when using transport=cli')
commands = module.params['commands']
output = list()
display = module.params['display']
for cmd in commands:
# if display format is not mentioned in command, add the display format
# from the modules params
if ('display json' not in cmd) and ('display xml' not in cmd):
if display and display != 'text':
cmd += ' | display {0}'.format(display)
try:
output.append(conn.get(command=cmd))
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
lines = [out.split('\n') for out in output]
result = {'changed': False, 'stdout': output, 'stdout_lines': lines}
module.exit_json(**result)
items = list()
items.extend(parse_commands(module, warnings))
items.extend(parse_rpcs(module))
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = rpc(module, items)
transformed = list()
output = list()
for item, resp in zip(items, responses):
if item['xattrs']['format'] == 'xml':
if not HAS_JXMLEASE:
module.fail_json(msg='jxmlease is required but does not appear to be installed. '
'It can be installed using `pip install jxmlease`')
try:
json_resp = jxmlease.parse(resp)
transformed.append(json_resp)
output.append(json_resp)
except Exception:
raise ValueError(resp)
else:
transformed.append(resp)
for item in list(conditionals):
try:
if item(transformed):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
except FailedConditionalError:
pass
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'warnings': warnings,
'stdout': responses,
'stdout_lines': list(to_lines(responses)),
}
if output:
result['output'] = output
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
ascott1/regulations-parser
|
regparser/layer/formatting.py
|
3
|
5046
|
import re
from regparser.layer.layer import Layer
from regparser.tree import struct
from regparser.tree.priority_stack import PriorityStack
from regparser.tree.xml_parser import tree_utils
class HeaderStack(PriorityStack):
"""Used to determine Table Headers -- indeed, they are complicated
enough to warrant their own stack"""
def unwind(self):
children = [pair[1] for pair in self.pop()]
self.peek_last()[1].children = children
class TableHeaderNode(object):
"""Represents a cell in a table's header"""
def __init__(self, text, level):
self.text = text
self.level = level
self.children = []
def height(self):
child_heights = [0] + [c.height() for c in self.children]
return 1 + max(child_heights)
def width(self):
if not self.children:
return 1
return sum(c.width() for c in self.children)
def build_header(xml_nodes):
"""Builds a TableHeaderNode tree, with an empty root. Each node in the tree
includes its colspan/rowspan"""
stack = HeaderStack()
stack.add(0, TableHeaderNode(None, 0)) # Root
for xml_node in xml_nodes:
level = int(xml_node.attrib['H'])
text = tree_utils.get_node_text(xml_node, add_spaces=True).strip()
stack.add(level, TableHeaderNode(text, level))
while stack.size() > 1:
stack.unwind()
root = stack.m_stack[0][0][1]
max_height = root.height()
def set_rowspan(n):
n.rowspan = max_height - n.height() - n.level + 1
struct.walk(root, set_rowspan)
def set_colspan(n):
n.colspan = n.width()
struct.walk(root, set_colspan)
return root
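# Illustrative sketch, not part of the parser: a hand-built header tree showing
# how height(), width() and the rowspan formula above interact. Labels are
# hypothetical.
def _example_header_spans():
    root = TableHeaderNode(None, 0)
    parent = TableHeaderNode('Totals', 1)
    parent.children = [TableHeaderNode('2013', 2), TableHeaderNode('2014', 2)]
    root.children = [parent]
    # root.height() == 3, so 'Totals' gets rowspan 3 - 2 - 1 + 1 == 1 and
    # colspan parent.width() == 2; each leaf gets rowspan 3 - 1 - 2 + 1 == 1.
    return root.height(), parent.width()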
def table_xml_to_plaintext(xml_node):
"""Markdown representation of a table. Note that this doesn't account
for all the options needed to display the table properly, but works fine
for simple tables. This gets included in the reg plain text"""
header = [tree_utils.get_node_text(hd, add_spaces=True).strip()
for hd in xml_node.xpath('./BOXHD/CHED')]
divider = ['---']*len(header)
rows = []
for tr in xml_node.xpath('./ROW'):
rows.append([tree_utils.get_node_text(td, add_spaces=True).strip()
for td in tr.xpath('./ENT')])
table = []
for row in [header] + [divider] + rows:
table.append('|' + '|'.join(row) + '|')
return '\n'.join(table)
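# Illustrative only: the markdown shape produced above for a hypothetical
# two-column, one-row table.
def _example_markdown_table():
    return '\n'.join(['|Name|Value|',
                      '|---|---|',
                      '|foo|42|'])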
def table_xml_to_data(xml_node):
"""Construct a data structure of the table data. We provide a different
structure than the native XML as the XML encodes too much logic. This
structure can be used to generate semi-complex tables which could not be
generated from the markdown above"""
header_root = build_header(xml_node.xpath('./BOXHD/CHED'))
header = [[] for _ in range(header_root.height())]
def per_node(node):
header[node.level].append({'text': node.text,
'colspan': node.colspan,
'rowspan': node.rowspan})
struct.walk(header_root, per_node)
header = header[1:] # skip the root
rows = []
for row in xml_node.xpath('./ROW'):
rows.append([tree_utils.get_node_text(td, add_spaces=True).strip()
for td in row.xpath('./ENT')])
return {'header': header, 'rows': rows}
class Formatting(Layer):
fenced_re = re.compile(r"```(?P<type>[a-zA-Z0-9 ]+)\w*\n"
+ r"(?P<lines>([^\n]*\n)+)"
+ r"```")
subscript_re = re.compile(r"([a-zA-Z0-9]+)_\{(\w+)\}")
def process(self, node):
layer_el = []
if node.source_xml is not None:
if node.source_xml.tag == 'GPOTABLE':
tables = [node.source_xml]
else:
tables = []
tables.extend(node.source_xml.xpath('.//GPOTABLE'))
for table in tables:
layer_el.append({'text': table_xml_to_plaintext(table),
'locations': [0],
'table_data': table_xml_to_data(table)})
for match in Formatting.fenced_re.finditer(node.text):
layer_el.append({
'text': node.text[match.start():match.end()],
'locations': [0],
'fence_data': {
'type': match.group('type'),
'lines': filter(bool, match.group('lines').split("\n"))}})
subscripts = {}
for match in Formatting.subscript_re.finditer(node.text):
key = (match.group(1), match.group(2))
subscripts[key] = subscripts.get(key, 0) + 1
for key, count in subscripts.iteritems():
variable, subscript = key
layer_el.append({
'text': variable + '_{' + subscript + '}',
'locations': list(range(count)),
'subscript_data': {'variable': variable,
'subscript': subscript}})
if layer_el:
return layer_el
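# Minimal sketch, not part of the layer: what the class-level regexes capture
# for a hypothetical input string.
def _example_subscript_match():
    match = Formatting.subscript_re.search('the term a_{n} appears here')
    # group(1) is the variable ('a'), group(2) the subscript ('n'); process()
    # records one location per occurrence of the same (variable, subscript) pair.
    return match.group(1), match.group(2)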
|
cc0-1.0
|
Intel-tensorflow/tensorflow
|
tensorflow/python/training/warm_starting_util.py
|
14
|
23762
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to warm-start TF.Learn Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_ops
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.VocabInfo"])
class VocabInfo(
collections.namedtuple("VocabInfo", [
"new_vocab",
"new_vocab_size",
"num_oov_buckets",
"old_vocab",
"old_vocab_size",
"backup_initializer",
"axis",
])):
"""Vocabulary information for warm-starting.
See `tf.estimator.WarmStartSettings` for examples of using
VocabInfo to warm-start.
Args:
new_vocab: [Required] A path to the new vocabulary file (used with the model
to be trained).
new_vocab_size: [Required] An integer indicating how many entries of the new
      vocabulary will be used in training.
num_oov_buckets: [Required] An integer indicating how many OOV buckets are
associated with the vocabulary.
old_vocab: [Required] A path to the old vocabulary file (used with the
checkpoint to be warm-started from).
old_vocab_size: [Optional] An integer indicating how many entries of the old
vocabulary were used in the creation of the checkpoint. If not provided,
the entire old vocabulary will be used.
backup_initializer: [Optional] A variable initializer used for variables
corresponding to new vocabulary entries and OOV. If not provided, these
entries will be zero-initialized.
axis: [Optional] Denotes what axis the vocabulary corresponds to. The
default, 0, corresponds to the most common use case (embeddings or
linear weights for binary classification / regression). An axis of 1
could be used for warm-starting output layers with class vocabularies.
Returns:
A `VocabInfo` which represents the vocabulary information for warm-starting.
Raises:
    ValueError: `axis` is neither 0 nor 1.
Example Usage:
```python
embeddings_vocab_info = tf.VocabInfo(
new_vocab='embeddings_vocab',
new_vocab_size=100,
num_oov_buckets=1,
old_vocab='pretrained_embeddings_vocab',
old_vocab_size=10000,
backup_initializer=tf.compat.v1.truncated_normal_initializer(
mean=0.0, stddev=(1 / math.sqrt(embedding_dim))),
axis=0)
softmax_output_layer_kernel_vocab_info = tf.VocabInfo(
new_vocab='class_vocab',
new_vocab_size=5,
num_oov_buckets=0, # No OOV for classes.
old_vocab='old_class_vocab',
old_vocab_size=8,
backup_initializer=tf.compat.v1.glorot_uniform_initializer(),
axis=1)
softmax_output_layer_bias_vocab_info = tf.VocabInfo(
new_vocab='class_vocab',
new_vocab_size=5,
num_oov_buckets=0, # No OOV for classes.
old_vocab='old_class_vocab',
old_vocab_size=8,
backup_initializer=tf.compat.v1.zeros_initializer(),
axis=0)
#Currently, only axis=0 and axis=1 are supported.
```
"""
def __new__(cls,
new_vocab,
new_vocab_size,
num_oov_buckets,
old_vocab,
old_vocab_size=-1,
backup_initializer=None,
axis=0):
if axis != 0 and axis != 1:
raise ValueError("The only supported values for the axis argument are 0 "
"and 1. Provided axis: {}".format(axis))
return super(VocabInfo, cls).__new__(
cls,
new_vocab,
new_vocab_size,
num_oov_buckets,
old_vocab,
old_vocab_size,
backup_initializer,
axis,
)
def _infer_var_name(var):
"""Returns name of the `var`.
Args:
var: A list. The list can contain either of the following:
(i) A single `Variable`
(ii) A single `ResourceVariable`
(iii) Multiple `Variable` objects which must be slices of the same larger
variable.
(iv) A single `PartitionedVariable`
Returns:
Name of the `var`
"""
name_to_var_dict = saveable_object_util.op_list_to_dict(var)
if len(name_to_var_dict) > 1:
raise TypeError("`var` = %s passed as arg violates the constraints. "
"name_to_var_dict = %s" % (var, name_to_var_dict))
return list(name_to_var_dict.keys())[0]
def _get_var_info(var, prev_tensor_name=None):
"""Helper method for standarizing Variable and naming.
Args:
var: Current graph's variable that needs to be warm-started (initialized).
Can be either of the following: (i) `Variable` (ii) `ResourceVariable`
(iii) list of `Variable`: The list must contain slices of the same larger
variable. (iv) `PartitionedVariable`
prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If
None, we lookup tensor with same name as given `var`.
Returns:
A tuple of the Tensor name and var.
"""
if checkpoint_utils._is_variable(var): # pylint: disable=protected-access
current_var_name = _infer_var_name([var])
elif (isinstance(var, list) and
all(checkpoint_utils._is_variable(v) for v in var)): # pylint: disable=protected-access
current_var_name = _infer_var_name(var)
elif isinstance(var, variables_lib.PartitionedVariable):
current_var_name = _infer_var_name([var])
var = var._get_variable_list() # pylint: disable=protected-access
else:
raise TypeError(
"var MUST be one of the following: a Variable, list of Variable or "
"PartitionedVariable, but is {}".format(type(var)))
if not prev_tensor_name:
# Assume tensor name remains the same.
prev_tensor_name = current_var_name
return prev_tensor_name, var
# pylint: disable=protected-access
# Accesses protected members of tf.Variable to reset the variable's internal
# state.
def _warm_start_var_with_vocab(var,
current_vocab_path,
current_vocab_size,
prev_ckpt,
prev_vocab_path,
previous_vocab_size=-1,
current_oov_buckets=0,
prev_tensor_name=None,
initializer=None,
axis=0):
"""Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`.
Use this method when the `var` is backed by vocabulary. This method stitches
the given `var` such that values corresponding to individual features in the
vocabulary remain consistent irrespective of changing order of the features
between old and new vocabularies.
Args:
var: Current graph's variable that needs to be warm-started (initialized).
Can be either of the following:
(i) `Variable`
(ii) `ResourceVariable`
(iii) list of `Variable`: The list must contain slices of the same larger
variable.
(iv) `PartitionedVariable`
current_vocab_path: Path to the vocab file used for the given `var`.
current_vocab_size: An `int` specifying the number of entries in the current
vocab.
prev_ckpt: A string specifying the directory with checkpoint file(s) or path
to checkpoint. The given checkpoint must have tensor with name
`prev_tensor_name` (if not None) or tensor with name same as given `var`.
prev_vocab_path: Path to the vocab file used for the tensor in `prev_ckpt`.
previous_vocab_size: If provided, will constrain previous vocab to the first
`previous_vocab_size` entries. -1 means use the entire previous vocab.
current_oov_buckets: An `int` specifying the number of out-of-vocabulary
buckets used for given `var`.
prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If
None, we lookup tensor with same name as given `var`.
initializer: Variable initializer to be used for missing entries. If None,
missing entries will be zero-initialized.
axis: Axis of the variable that the provided vocabulary corresponds to.
Raises:
ValueError: If required args are not provided.
"""
if not (current_vocab_path and current_vocab_size and prev_ckpt and
prev_vocab_path):
raise ValueError("Invalid args: Must provide all of [current_vocab_path, "
"current_vocab_size, prev_ckpt, prev_vocab_path}.")
if checkpoint_utils._is_variable(var):
var = [var]
elif (isinstance(var, list) and
all(checkpoint_utils._is_variable(v) for v in var)):
var = var
elif isinstance(var, variables_lib.PartitionedVariable):
var = var._get_variable_list()
else:
raise TypeError(
"var MUST be one of the following: a Variable, list of Variable or "
"PartitionedVariable, but is {}".format(type(var)))
if not prev_tensor_name:
# Assume tensor name remains the same.
prev_tensor_name = _infer_var_name(var)
total_v_first_axis = sum(v.get_shape().as_list()[0] for v in var)
for v in var:
v_shape = v.get_shape().as_list()
slice_info = v._get_save_slice_info()
partition_info = None
if slice_info:
partition_info = variable_scope._PartitionInfo(
full_shape=slice_info.full_shape, var_offset=slice_info.var_offset)
if axis == 0:
new_row_vocab_size = current_vocab_size
new_col_vocab_size = v_shape[1]
old_row_vocab_size = previous_vocab_size
old_row_vocab_file = prev_vocab_path
new_row_vocab_file = current_vocab_path
old_col_vocab_file = None
new_col_vocab_file = None
num_row_oov_buckets = current_oov_buckets
num_col_oov_buckets = 0
elif axis == 1:
# Note that we must compute this value across all partitions, whereas
# in the axis = 0 case, we can simply use v_shape[1] because we don't
# allow partitioning across axis = 1.
new_row_vocab_size = total_v_first_axis
new_col_vocab_size = current_vocab_size
old_row_vocab_size = -1
old_row_vocab_file = None
new_row_vocab_file = None
old_col_vocab_file = prev_vocab_path
new_col_vocab_file = current_vocab_path
num_row_oov_buckets = 0
num_col_oov_buckets = current_oov_buckets
else:
raise ValueError("The only supported values for the axis argument are 0 "
"and 1. Provided axis: {}".format(axis))
init = checkpoint_ops._load_and_remap_matrix_initializer(
ckpt_path=checkpoint_utils._get_checkpoint_filename(prev_ckpt),
old_tensor_name=prev_tensor_name,
new_row_vocab_size=new_row_vocab_size,
new_col_vocab_size=new_col_vocab_size,
old_row_vocab_size=old_row_vocab_size,
old_row_vocab_file=old_row_vocab_file,
new_row_vocab_file=new_row_vocab_file,
old_col_vocab_file=old_col_vocab_file,
new_col_vocab_file=new_col_vocab_file,
num_row_oov_buckets=num_row_oov_buckets,
num_col_oov_buckets=num_col_oov_buckets,
initializer=initializer)
new_init_val = ops.convert_to_tensor(
init(shape=v_shape, partition_info=partition_info))
v._initializer_op = state_ops.assign(v, new_init_val)
# pylint: enable=protected-access
def _get_grouped_variables(vars_to_warm_start):
"""Collects and groups (possibly partitioned) variables into a dictionary.
The variables can be provided explicitly through vars_to_warm_start, or they
are retrieved from collections (see below).
Args:
vars_to_warm_start: One of the following:
- A regular expression (string) that captures which variables to
warm-start (see tf.compat.v1.get_collection). This expression will
only consider variables in the TRAINABLE_VARIABLES collection.
- A list of strings, each representing a full variable name to warm-start.
These will consider variables in GLOBAL_VARIABLES collection.
- A list of Variables to warm-start.
- `None`, in which case all variables in TRAINABLE_VARIABLES will be used.
Returns:
A dictionary mapping variable names (strings) to lists of Variables.
Raises:
ValueError: If vars_to_warm_start is not a string, `None`, a list of
`Variables`, or a list of strings.
"""
# TODO(b/143899805): Remove unicode checks when deprecating Python2.
if isinstance(vars_to_warm_start,
six.string_types) or vars_to_warm_start is None:
# Both vars_to_warm_start = '.*' and vars_to_warm_start = None will match
# everything (in TRAINABLE_VARIABLES) here.
logging.info("Warm-starting variables only in TRAINABLE_VARIABLES.")
list_of_vars = ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES, scope=vars_to_warm_start)
elif isinstance(vars_to_warm_start, list):
if all(isinstance(v, six.string_types) for v in vars_to_warm_start):
list_of_vars = []
for v in vars_to_warm_start:
list_of_vars += ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES, scope=v)
elif all(checkpoint_utils._is_variable(v) for v in vars_to_warm_start): # pylint: disable=protected-access
list_of_vars = vars_to_warm_start
else:
raise ValueError("If `vars_to_warm_start` is a list, it must be all "
"`Variable` or all `str`. Given types are {}".format(
[type(v) for v in vars_to_warm_start]))
else:
raise ValueError("`vars_to_warm_start must be a `list` or `str`. Given "
"type is {}".format(type(vars_to_warm_start)))
# We have to deal with partitioned variables, since get_collection flattens
# out the list.
grouped_variables = {}
for v in list_of_vars:
t = [v] if not isinstance(v, list) else v
var_name = _infer_var_name(t)
grouped_variables.setdefault(var_name, []).append(v)
return grouped_variables
def _get_object_checkpoint_renames(path, variable_names):
"""Returns a dictionary mapping variable names to checkpoint keys.
The warm-starting utility expects variable names to match with the variable
names in the checkpoint. For object-based checkpoints, the variable names
and names in the checkpoint are different. Thus, for object-based checkpoints,
this function is used to obtain the map from variable names to checkpoint
keys.
Args:
path: path to checkpoint directory or file.
variable_names: list of variable names to load from the checkpoint.
Returns:
If the checkpoint is object-based, this function returns a map from variable
names to their corresponding checkpoint keys.
If the checkpoint is name-based, this returns an empty dict.
Raises:
ValueError: If the object-based checkpoint is missing variables.
"""
fname = checkpoint_utils._get_checkpoint_filename(path) # pylint: disable=protected-access
try:
names_to_keys = saver_lib.object_graph_key_mapping(fname)
except errors.NotFoundError:
# If an error is raised from `object_graph_key_mapping`, then the
# checkpoint is name-based. There are no renames, so return an empty dict.
return {}
missing_names = set(variable_names) - set(names_to_keys.keys())
if missing_names:
raise ValueError(
"Attempting to warm-start from an object-based checkpoint, but found "
"that the checkpoint did not contain values for all variables. The "
"following variables were missing: {}"
.format(missing_names))
return {name: names_to_keys[name] for name in variable_names}
@tf_export(v1=["train.warm_start"])
def warm_start(ckpt_to_initialize_from,
vars_to_warm_start=".*",
var_name_to_vocab_info=None,
var_name_to_prev_var_name=None):
"""Warm-starts a model using the given settings.
If you are using a tf.estimator.Estimator, this will automatically be called
during training.
Args:
ckpt_to_initialize_from: [Required] A string specifying the directory with
checkpoint file(s) or path to checkpoint from which to warm-start the
model parameters.
vars_to_warm_start: [Optional] One of the following:
- A regular expression (string) that captures which variables to
warm-start (see tf.compat.v1.get_collection). This expression will only
consider variables in the TRAINABLE_VARIABLES collection -- if you need
to warm-start non_TRAINABLE vars (such as optimizer accumulators or
batch norm statistics), please use the below option.
- A list of strings, each a regex scope provided to
tf.compat.v1.get_collection with GLOBAL_VARIABLES (please see
tf.compat.v1.get_collection). For backwards compatibility reasons,
this is separate from the single-string argument type.
- A list of Variables to warm-start. If you do not have access to the
`Variable` objects at the call site, please use the above option.
- `None`, in which case only TRAINABLE variables specified in
`var_name_to_vocab_info` will be warm-started.
Defaults to `'.*'`, which warm-starts all variables in the
TRAINABLE_VARIABLES collection. Note that this excludes variables such
as accumulators and moving statistics from batch norm.
var_name_to_vocab_info: [Optional] Dict of variable names (strings) to
`tf.estimator.VocabInfo`. The variable names should be "full" variables,
not the names of the partitions. If not explicitly provided, the variable
is assumed to have no (changes to) vocabulary.
var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to
name of the previously-trained variable in `ckpt_to_initialize_from`. If
not explicitly provided, the name of the variable is assumed to be same
between previous checkpoint and current model. Note that this has no
effect on the set of variables that is warm-started, and only controls
name mapping (use `vars_to_warm_start` for controlling what variables to
warm-start).
Raises:
ValueError: If the WarmStartSettings contains prev_var_name or VocabInfo
configuration for variable names that are not used. This is to ensure
a stronger check for variable configuration than relying on users to
examine the logs.
"""
logging.info("Warm-starting from: {}".format(ckpt_to_initialize_from))
grouped_variables = _get_grouped_variables(vars_to_warm_start)
if var_name_to_vocab_info is None:
var_name_to_vocab_info = {}
if not var_name_to_prev_var_name:
# Detect whether the checkpoint is object-based, in which case the
# var_name_to_prev_var_name dictionary should map variable names to
# checkpoint keys. If the user has specified var_name_to_prev_var_name, we
# do not override it.
var_name_to_prev_var_name = _get_object_checkpoint_renames(
ckpt_to_initialize_from, grouped_variables.keys())
warmstarted_count = 0
# Keep track of which var_names in var_name_to_prev_var_name and
# var_name_to_vocab_info have been used. Err on the safer side by throwing an
# exception if any are unused by the end of the loop. It is easy to misname
# a variable during this configuration, in which case without this check, we
# would fail to warm-start silently.
prev_var_name_used = set()
vocab_info_used = set()
# Group the vocabless vars into one call to init_from_checkpoint.
vocabless_vars = {}
for var_name, variable in six.iteritems(grouped_variables):
prev_var_name = var_name_to_prev_var_name.get(var_name)
if prev_var_name:
prev_var_name_used.add(var_name)
vocab_info = var_name_to_vocab_info.get(var_name)
if vocab_info:
vocab_info_used.add(var_name)
warmstarted_count += 1
logging.debug(
"Warm-starting variable: {}; current_vocab: {} current_vocab_size: {}"
" prev_vocab: {} prev_vocab_size: {} current_oov: {} prev_tensor: {}"
" initializer: {}".format(
var_name, vocab_info.new_vocab, vocab_info.new_vocab_size,
vocab_info.old_vocab, (vocab_info.old_vocab_size if
vocab_info.old_vocab_size > 0 else "All"),
vocab_info.num_oov_buckets, prev_var_name or "Unchanged",
vocab_info.backup_initializer or "zero-initialized"))
_warm_start_var_with_vocab(
variable,
current_vocab_path=vocab_info.new_vocab,
current_vocab_size=vocab_info.new_vocab_size,
prev_ckpt=ckpt_to_initialize_from,
prev_vocab_path=vocab_info.old_vocab,
previous_vocab_size=vocab_info.old_vocab_size,
current_oov_buckets=vocab_info.num_oov_buckets,
prev_tensor_name=prev_var_name,
initializer=vocab_info.backup_initializer,
axis=vocab_info.axis)
else:
# For the special value of vars_to_warm_start = None,
# we only warm-start variables with explicitly specified vocabularies.
if vars_to_warm_start:
warmstarted_count += 1
logging.debug("Warm-starting variable: {}; prev_var_name: {}".format(
var_name, prev_var_name or "Unchanged"))
# Because we use a default empty list in grouped_variables, single
# unpartitioned variables will be lists here, which we rectify in order
# for init_from_checkpoint logic to work correctly.
if len(variable) == 1:
variable = variable[0]
prev_tensor_name, var = _get_var_info(variable, prev_var_name)
vocabless_vars[prev_tensor_name] = var
checkpoint_utils.init_from_checkpoint(ckpt_to_initialize_from, vocabless_vars)
prev_var_name_not_used = set(
var_name_to_prev_var_name.keys()) - prev_var_name_used
vocab_info_not_used = set(var_name_to_vocab_info.keys()) - vocab_info_used
logging.info("Warm-started %d variables.", warmstarted_count)
if prev_var_name_not_used:
raise ValueError(
"You provided the following variables in "
"var_name_to_prev_var_name that were not used: "
"{0}. Perhaps you misspelled them? Here is the list of viable "
"variable names: {1}".format(prev_var_name_not_used,
grouped_variables.keys()))
if vocab_info_not_used:
raise ValueError(
"You provided the following variables in "
"var_name_to_vocab_info that were not used: {0}. "
" Perhaps you misspelled them? Here is the list of viable variable "
"names: {1}".format(vocab_info_not_used, grouped_variables.keys()))
|
apache-2.0
|
Fullens/kodi
|
playtvfr.py
|
297
|
8750
|
import struct
import urllib2,urllib
import re
import json
import math
CRYPT_XXTEA_DELTA= 0x9E3779B9
headers = [('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),( 'Connection','Keep-Alive')]
class Crypt_XXTEA:
_key=None
def setKey(self,key):
if isinstance(key, basestring):
k = self._str2long(key, False);
elif isinstance(key, list):
k = key;
else:
print "The secret key must be a string or long integer array"
if (len(k) > 4):
print "The secret key cannot be more than 16 characters or 4 long values"
elif (len(k) == 0):
print "The secret key cannot be empty"
elif (len(k) < 4):
for i in range(len(k),4):
k.append(0)
#k[i] = 0;
#print k
self._key = k;
def encrypt(self,plaintext):
if (self._key == None):
print "Secret key is undefined"
if isinstance(plaintext, basestring):
return self._encryptString(plaintext)
elif isinstance(plaintext, list):
return self._encryptArray(plaintext)
else:
print "The plain text must be a string or long integer array"
def decrypt(self,ciphertext):
if (self._key == None):
print "Secret key is undefined"
#print 'dec',isinstance(ciphertext, basestring)
if isinstance(ciphertext, basestring):
return self._decryptString(ciphertext)
elif isinstance(ciphertext, list):
return self._decryptArray(ciphertext)
else:
print "The plain text must be a string or long integer array"
def _encryptString(self,str):
if (str == ''):
return ''
v = self._str2long(str, False);
v = self._encryptArray(v);
return self._long2str(v, False);
def _encryptArray(self,v):
n = len(v) - 1;
z = v[n];
y = v[0];
q = math.floor(6 + 52 / (n + 1));
sum = 0;
while (0 < q):
q-=1
sum = self._int32(sum + CRYPT_XXTEA_DELTA);
e = sum >> 2 & 3;
for p in range(0,n):
y = v[p + 1];
mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z));
z = v[p] = self._int32(v[p] + mx);
p+=1#due to range
y = v[0];
mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z));
z = v[n] = self._int32(v[n] + mx);
return v;
def _decryptString(self,str):
if (str == ''):
return '';
v = self._str2long(str, False);
v = self._decryptArray(v);
return self._long2str(v, False);
def _decryptArray(self,v):
n = len(v) - 1;
z = v[n];
y = v[0];
q = math.floor(6 + 52 / (n + 1));
sum = self._int32(q * CRYPT_XXTEA_DELTA);
while (sum != 0):
e = sum >> 2 & 3;
for p in range( n, 0, -1):
z = v[p - 1];
mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z));
y = v[p] = self._int32(v[p] - mx);
p=p-1 #due to range
z = v[n];
mx = self._int32(((z >> 5 & 0x07FFFFFF) ^ y << 2) + ((y >> 3 & 0x1FFFFFFF) ^ z << 4)) ^ self._int32((sum ^ y) + (self._key[p & 3 ^ e] ^ z));
y = v[0] = self._int32(v[0] - mx);
sum = self._int32(sum - CRYPT_XXTEA_DELTA);
return v;
def _long2str(self,v, w):
ln = len(v);
s = '';
for i in range(0,ln):
s += struct.pack('<I', v[i]&0xFFFFFFFF);
if (w):
            return s[0:v[ln - 1]];  # truncate to the original length stored in the last word
else:
return s;
def _str2long(self,s, w):
#return (s + ("\0" *( (4 - len(s) % 4) & 3))).encode("hex")
i=int(math.ceil((len(s)/4)))
if (len(s)%4)>0 :
i+=1
#print struct.unpack('<I',(s + ("\0" *( (4 - len(s) % 4) & 3))))
v = list(struct.unpack(('I'*i),(s + ("\0" *( (4 - len(s) % 4) & 3)))))
if (w):
v[0] = len(s); #prb
return v;
def _int32(self,n):
while (n >= 2147483648):
n -= 4294967296;
while (n <= -2147483649):
n += 4294967296;
return int(n);
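# Illustrative round-trip, not part of the original script. decrypt() returns
# the plaintext padded with NUL bytes to a 4-byte boundary, which is why this
# file (and the example below) strips trailing '\0'.
def _example_xxtea_roundtrip():
    tea = Crypt_XXTEA()
    tea.setKey("object")
    ciphertext = tea.encrypt("hello world")
    return tea.decrypt(ciphertext).rstrip('\0') # == "hello world"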
def getUrl(url, cookieJar=None,post=None, timeout=20, headers=None):
cookie_handler = urllib2.HTTPCookieProcessor(cookieJar)
opener = urllib2.build_opener(cookie_handler, urllib2.HTTPBasicAuthHandler(), urllib2.HTTPHandler())
#opener = urllib2.install_opener(opener)
req = urllib2.Request(url)
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.154 Safari/537.36')
if headers:
for h,hv in headers:
req.add_header(h,hv)
response = opener.open(req,post,timeout=timeout)
link=response.read()
response.close()
return link;
def HexToByte( hexStr ):
"""
    Convert a string of hex byte values into a byte string. The hex byte values may
or may not be space separated.
"""
# The list comprehension implementation is fractionally slower in this case
#
# hexStr = ''.join( hexStr.split(" ") )
# return ''.join( ["%c" % chr( int ( hexStr[i:i+2],16 ) ) \
# for i in range(0, len( hexStr ), 2) ] )
bytes = []
hexStr = ''.join( hexStr.split(" ") )
for i in range(0, len(hexStr), 2):
bytes.append( chr( int (hexStr[i:i+2], 16 ) ) )
return ''.join( bytes )
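# Tiny illustration of HexToByte(), not used by the script.
def _example_hex_to_byte():
    # Whitespace is stripped before pairing hex digits, so both spellings agree.
    return HexToByte("01 ff") == HexToByte("01ff") == '\x01\xff'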
def get_url(player_id):
v=Crypt_XXTEA()
import time
# Retrieve channel id and primary key
timestamp = time.time();
#player_id = '69T7MabZ47';
init = getUrl("http://tvplayer.playtv.fr/js/"+player_id+".js?_="+str(timestamp),headers=headers);
#print init
pat="b:(\{\"a.*\"})}"
init =re.compile(pat).findall(init)[0]
init = json.loads(init);
from binascii import unhexlify
from binascii import hexlify
a = init['a'];
b = init['b'];
b=b.decode("hex")
a=a.decode("hex")
bb=""
v.setKey("object");
#b=v._long2str(b,False)
b_s=v.decrypt(b).rstrip('\0')
params = json.loads(b_s)
pack_k=params['k'].decode("hex")# pack("H*", params['k'])#init['a']
key = v.decrypt(pack_k).rstrip('\0');
v.setKey(key);
a_d=v.decrypt(a).rstrip('\0')
params = json.loads(a_d);
channel_id = params['i'];
api_url = params['u'];
req={"i": channel_id, "t": timestamp,"h":"playtv.fr","a":5}
req = json.dumps(req)
req_en=v.encrypt(req)
req_en=req_en.encode("hex");# struct.unpack("H"*(len(req_en)/4),req_en);
if not req_en.endswith( '/'):
req_en += '/';
    headers2 = headers + [('Referer', 'http://static.playtv.fr/swf/tvplayer.swf?r=22'),
                          ('x-flash-version', '11,6,602,180')]  # list.append() returns None, so build a new list
init = getUrl(api_url+req_en,headers=headers2);
init=init.decode("hex")
params = json.loads(v.decrypt(init).rstrip('\0'));
if params['s'][1] and params['s'][1] <>'' :
streams =params['s'][0] if params['s'][0]['bitrate'] > params['s'][1]['bitrate'] else params['s'][1];
else:
streams = params['s'][0];
scheme = streams['scheme'];
host = streams['host'];
port = streams['port'];
app = streams['application'];
playpath = streams['stream'];
token = streams['token'];
title = streams['title'];
t = params['j']['t'];
k = params['j']['k'];
v.setKey("object");
key=v.decrypt(k.decode("hex"))# pack("H*", k));
v.setKey(key);
auth = v.encrypt(t).encode("hex") #unpack("H*", $xxtea->encrypt($t));
if (scheme == "http"):
final_url = scheme+"://"+host + ( ":" +port if port and len(port)>0 else "") + "/" + playpath
else:
final_url = scheme + "://" + host +( ":" +port if port and len(port)>0 else "") + "/" + app +" app=" + app +" swfUrl=http://static.playtv.fr/swf/tvplayer.swf pageUrl=http://playtv.fr/television Conn=S:" + auth + (" token=" + token if token and len(token)>0 else "") + " playpath=" + playpath +' live=1 timeout=20'
print final_url
return final_url
#print get_url('69T7MabZ47')
|
lgpl-2.1
|
ict-felix/stack
|
modules/resource/orchestrator/src/server/flask/methods.py
|
1
|
3132
|
from delegate.geni.v3.utils.tn import TNUtils
from flask import current_app
from flask import request
from flask import Response
from functools import wraps
CLI_UNAVAILABLE_MSG = "Method only available via REST API"
GUI_UNAVAILABLE_MSG = "Method only available via GUI"
def parse_output_json(output):
resp = Response(str(output), status=200, mimetype="application/json")
return resp
def error_on_unallowed_method(output):
resp = Response(str(output), status=405, mimetype="text/plain")
return resp
def get_user_agent():
from flask import request
user_agent = "curl"
try:
user_agent = request.environ["HTTP_USER_AGENT"]
except:
pass
return user_agent
def warn_must_use_gui():
user_agent = get_user_agent()
if "curl" in user_agent:
return error_on_unallowed_method("Method not supported. \
Details: %s" % GUI_UNAVAILABLE_MSG)
def warn_must_use_cli():
user_agent = get_user_agent()
if "curl" not in user_agent:
return error_on_unallowed_method("Method not supported. \
Details: %s" % CLI_UNAVAILABLE_MSG)
def check_request_by_origin(func):
"""
Enforce check of origin (remote) address and allow
consequently depending on the accepted values.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if not current_app.policies_enabled or \
"*" in current_app.policies_allowed_origins or \
current_app.policies_enabled and \
any([orig in request.environ["REMOTE_ADDR"] for
orig in current_app.policies_allowed_origins]):
return func(*args, **kwargs)
else:
return error_on_unallowed_method("Method not supported.")
return wrapper
def check_cli_user_agent(func):
"""
Enforce check of CLI-based agent to access specific methods.
"""
@wraps(func)
def wrapper(*args, **kwargs):
check_ua = warn_must_use_cli()
if check_ua is not None:
return check_ua
else:
return func(*args, **kwargs)
return wrapper
def check_gui_user_agent(func):
"""
Enforce check of GUI-based agent to access specific methods.
"""
@wraps(func)
def wrapper(*args, **kwargs):
check_ua = warn_must_use_gui()
if check_ua is not None:
return check_ua
else:
return func(*args, **kwargs)
return wrapper
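# Hypothetical sketch of how these decorators compose on a Flask view; the
# route below is illustrative and not part of the orchestrator's actual URL map.
#
# @app.route("/info/peers", methods=["GET"])
# @check_request_by_origin
# @check_cli_user_agent
# def peers_view():
#     return parse_output_json(get_peers())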
def get_peers():
# domain_routing = g.mongo.db[current_app.db].domain.routing.find()
domain_routing = [p for p in current_app.mongo.get_configured_peers()]
# Retrieve domain URN per peer
for route in domain_routing:
try:
peer_urns = []
assoc_peers = current_app.mongo.get_info_peers(
{"_ref_peer": route["_id"]})
for r in range(0, assoc_peers.count()):
peer_urns.append(assoc_peers.next().get("domain_urn"))
route["urn"] = peer_urns
del route["_id"]
except:
pass
return domain_routing
def get_used_tn_vlans():
return TNUtils.find_used_tn_vlans()
|
apache-2.0
|
maninmotion/LittleSportsBiscuit
|
tests/engine.py
|
1
|
5102
|
from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp
# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))
class ExecutionEngine(hitchtest.ExecutionEngine):
"""Engine for orchestating and interacting with the app."""
def set_up(self):
"""Ensure virtualenv present, then run all services."""
python_package = hitchpython.PythonPackage(
python_version=self.preconditions['python_version']
)
python_package.build()
python_package.verify()
call([
python_package.pip, "install", "-r",
path.join(PROJECT_DIRECTORY, "requirements/local.txt")
])
postgres_package = hitchpostgres.PostgresPackage(
version=self.settings["postgres_version"],
)
postgres_package.build()
postgres_package.verify()
redis_package = hitchredis.RedisPackage(version="2.8.4")
redis_package.build()
redis_package.verify()
self.services = hitchserve.ServiceBundle(
project_directory=PROJECT_DIRECTORY,
startup_timeout=float(self.settings["startup_timeout"]),
shutdown_timeout=5.0,
)
postgres_user = hitchpostgres.PostgresUser("LittleSportsBiscuit", "password")
self.services['Postgres'] = hitchpostgres.PostgresService(
postgres_package=postgres_package,
users=[postgres_user, ],
databases=[hitchpostgres.PostgresDatabase("LittleSportsBiscuit", postgres_user), ]
)
self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)
self.services['Django'] = hitchpython.DjangoService(
python=python_package.python,
port=8000,
version=str(self.settings.get("django_version")),
settings="config.settings.local",
needs=[self.services['Postgres'], ],
env_vars=self.settings['environment_variables'],
)
self.services['Redis'] = hitchredis.RedisService(
redis_package=redis_package,
port=16379,
)
self.services['Firefox'] = hitchselenium.SeleniumService(
xvfb=self.settings.get("quiet", False),
no_libfaketime=True,
)
# import hitchcron
# self.services['Cron'] = hitchcron.CronService(
# run=self.services['Django'].manage("trigger").command,
# every=1,
# needs=[ self.services['Django'], ],
# )
self.services.startup(interactive=False)
# Configure selenium driver
self.driver = self.services['Firefox'].driver
        self.driver.set_window_size(self.settings['window_size']['width'], self.settings['window_size']['height'])
self.driver.set_window_position(0, 0)
self.driver.implicitly_wait(2.0)
self.driver.accept_next_alert = True
def pause(self, message=None):
"""Stop. IPython time."""
if hasattr(self, 'services'):
self.services.start_interactive_mode()
self.ipython(message)
if hasattr(self, 'services'):
self.services.stop_interactive_mode()
def load_website(self):
"""Navigate to website in Firefox."""
self.driver.get(self.services['Django'].url())
def click(self, on):
"""Click on HTML id."""
self.driver.find_element_by_id(on).click()
def fill_form(self, **kwargs):
"""Fill in a form with id=value."""
for element, text in kwargs.items():
self.driver.find_element_by_id(element).send_keys(text)
def click_submit(self):
"""Click on a submit button if it exists."""
self.driver.find_element_by_css_selector("button[type=\"submit\"]").click()
def confirm_emails_sent(self, number):
"""Count number of emails sent by app."""
assert len(self.services['HitchSMTP'].logs.json()) == int(number)
def wait_for_email(self, containing=None):
"""Wait for, and return email."""
self.services['HitchSMTP'].logs.out.tail.until_json(
lambda email: containing in email['payload'] or containing in email['subject'],
timeout=25,
lines_back=1,
)
def time_travel(self, days=""):
"""Make all services think that time has skipped forward."""
self.services.time_travel(days=int(days))
def on_failure(self):
"""Stop and IPython."""
if not self.settings['quiet']:
if self.settings.get("pause_on_failure", False):
self.pause(message=self.stacktrace.to_template())
def on_success(self):
"""Pause on success if enabled."""
if self.settings.get("pause_on_success", False):
self.pause(message="SUCCESS")
def tear_down(self):
"""Shut down services required to run your test."""
if hasattr(self, 'services'):
self.services.shutdown()
|
bsd-3-clause
|
dga4654dan/UTM-Demo
|
V_1_0_2_1/UtmDemo_Sfs_2.9.0/UtmDemo_Sfs_2.9.0_Server/lib/Lib/encodings/base64_codec.py
|
9
|
1778
|
""" Python 'base64_codec' Codec - base64 content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs, base64
### Codec APIs
def base64_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = base64.encodestring(input)
return (output, len(input))
def base64_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = base64.decodestring(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return base64_encode(input,errors)
def decode(self, input,errors='strict'):
return base64_decode(input,errors)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (base64_encode,base64_decode,StreamReader,StreamWriter)
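# Illustrative round-trip, not part of the codec module. Both directions return
# plain byte strings, since this codec does not target Unicode.
def _example_base64_roundtrip():
    encoded, consumed = base64_encode("hello") # encoded == "aGVsbG8=\n", consumed == 5
    decoded, _ = base64_decode(encoded)
    return decoded # == "hello"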
|
gpl-2.0
|
Ares42/Guidebox
|
Flask/flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py
|
2360
|
3778
|
"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
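# Illustrative check of the wildcard rules above, not part of the module; the
# hostnames are hypothetical.
def _example_wildcard_match():
    # A single left-most '*' matches exactly one dotless label.
    assert _dnsname_match('*.example.com', 'www.example.com')
    assert not _dnsname_match('*.example.com', 'a.b.example.com')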
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
mit
|
cpang24/data-science-from-scratch
|
code/databases.py
|
60
|
8165
|
from __future__ import division
import math, random, re
from collections import defaultdict
class Table:
def __init__(self, columns):
self.columns = columns
self.rows = []
def __repr__(self):
"""pretty representation of the table: columns then rows"""
return str(self.columns) + "\n" + "\n".join(map(str, self.rows))
def insert(self, row_values):
if len(row_values) != len(self.columns):
raise TypeError("wrong number of elements")
row_dict = dict(zip(self.columns, row_values))
self.rows.append(row_dict)
def update(self, updates, predicate):
for row in self.rows:
if predicate(row):
for column, new_value in updates.iteritems():
row[column] = new_value
def delete(self, predicate=lambda row: True):
"""delete all rows matching predicate
or all rows if no predicate supplied"""
self.rows = [row for row in self.rows if not(predicate(row))]
def select(self, keep_columns=None, additional_columns=None):
if keep_columns is None: # if no columns specified,
keep_columns = self.columns # return all columns
if additional_columns is None:
additional_columns = {}
# new table for results
result_table = Table(keep_columns + additional_columns.keys())
for row in self.rows:
new_row = [row[column] for column in keep_columns]
for column_name, calculation in additional_columns.iteritems():
new_row.append(calculation(row))
result_table.insert(new_row)
return result_table
def where(self, predicate=lambda row: True):
"""return only the rows that satisfy the supplied predicate"""
where_table = Table(self.columns)
where_table.rows = filter(predicate, self.rows)
return where_table
def limit(self, num_rows=None):
"""return only the first num_rows rows"""
limit_table = Table(self.columns)
limit_table.rows = (self.rows[:num_rows]
if num_rows is not None
else self.rows)
return limit_table
def group_by(self, group_by_columns, aggregates, having=None):
grouped_rows = defaultdict(list)
# populate groups
for row in self.rows:
key = tuple(row[column] for column in group_by_columns)
grouped_rows[key].append(row)
result_table = Table(group_by_columns + aggregates.keys())
for key, rows in grouped_rows.iteritems():
if having is None or having(rows):
new_row = list(key)
for aggregate_name, aggregate_fn in aggregates.iteritems():
new_row.append(aggregate_fn(rows))
result_table.insert(new_row)
return result_table
def order_by(self, order):
new_table = self.select() # make a copy
new_table.rows.sort(key=order)
return new_table
def join(self, other_table, left_join=False):
join_on_columns = [c for c in self.columns # columns in
if c in other_table.columns] # both tables
additional_columns = [c for c in other_table.columns # columns only
if c not in join_on_columns] # in right table
# all columns from left table + additional_columns from right table
join_table = Table(self.columns + additional_columns)
for row in self.rows:
def is_join(other_row):
return all(other_row[c] == row[c] for c in join_on_columns)
other_rows = other_table.where(is_join).rows
# each other row that matches this one produces a result row
for other_row in other_rows:
join_table.insert([row[c] for c in self.columns] +
[other_row[c] for c in additional_columns])
# if no rows match and it's a left join, output with Nones
if left_join and not other_rows:
join_table.insert([row[c] for c in self.columns] +
[None for c in additional_columns])
return join_table
if __name__ == "__main__":
users = Table(["user_id", "name", "num_friends"])
users.insert([0, "Hero", 0])
users.insert([1, "Dunn", 2])
users.insert([2, "Sue", 3])
users.insert([3, "Chi", 3])
users.insert([4, "Thor", 3])
users.insert([5, "Clive", 2])
users.insert([6, "Hicks", 3])
users.insert([7, "Devin", 2])
users.insert([8, "Kate", 2])
users.insert([9, "Klein", 3])
users.insert([10, "Jen", 1])
print "users table"
print users
print
# SELECT
print "users.select()"
print users.select()
print
print "users.limit(2)"
print users.limit(2)
print
print "users.select(keep_columns=[\"user_id\"])"
print users.select(keep_columns=["user_id"])
print
print 'where(lambda row: row["name"] == "Dunn")'
print users.where(lambda row: row["name"] == "Dunn") \
.select(keep_columns=["user_id"])
print
def name_len(row): return len(row["name"])
print 'with name_length:'
print users.select(keep_columns=[],
additional_columns = { "name_length" : name_len })
print
# GROUP BY
def min_user_id(rows): return min(row["user_id"] for row in rows)
stats_by_length = users \
.select(additional_columns={"name_len" : name_len}) \
.group_by(group_by_columns=["name_len"],
aggregates={ "min_user_id" : min_user_id,
"num_users" : len })
print "stats by length"
print stats_by_length
print
def first_letter_of_name(row):
return row["name"][0] if row["name"] else ""
def average_num_friends(rows):
return sum(row["num_friends"] for row in rows) / len(rows)
def enough_friends(rows):
return average_num_friends(rows) > 1
avg_friends_by_letter = users \
.select(additional_columns={'first_letter' : first_letter_of_name}) \
.group_by(group_by_columns=['first_letter'],
aggregates={ "avg_num_friends" : average_num_friends },
having=enough_friends)
print "avg friends by letter"
print avg_friends_by_letter
print
def sum_user_ids(rows): return sum(row["user_id"] for row in rows)
user_id_sum = users \
.where(lambda row: row["user_id"] > 1) \
.group_by(group_by_columns=[],
aggregates={ "user_id_sum" : sum_user_ids })
print "user id sum"
print user_id_sum
print
# ORDER BY
friendliest_letters = avg_friends_by_letter \
.order_by(lambda row: -row["avg_num_friends"]) \
.limit(4)
print "friendliest letters"
print friendliest_letters
print
# JOINs
user_interests = Table(["user_id", "interest"])
user_interests.insert([0, "SQL"])
user_interests.insert([0, "NoSQL"])
user_interests.insert([2, "SQL"])
user_interests.insert([2, "MySQL"])
sql_users = users \
.join(user_interests) \
.where(lambda row: row["interest"] == "SQL") \
.select(keep_columns=["name"])
print "sql users"
print sql_users
print
def count_interests(rows):
"""counts how many rows have non-None interests"""
return len([row for row in rows if row["interest"] is not None])
user_interest_counts = users \
.join(user_interests, left_join=True) \
.group_by(group_by_columns=["user_id"],
aggregates={"num_interests" : count_interests })
print "user interest counts"
print user_interest_counts
# SUBQUERIES
likes_sql_user_ids = user_interests \
.where(lambda row: row["interest"] == "SQL") \
.select(keep_columns=['user_id'])
likes_sql_user_ids.group_by(group_by_columns=[],
aggregates={ "min_user_id" : min_user_id })
print "likes sql user ids"
print likes_sql_user_ids
|
unlicense
|
akash1808/nova_test_latest
|
nova/virt/libvirt/driver.py
|
4
|
303163
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, XEN and Parallels.
"""
import collections
import contextlib
import errno
import functools
import glob
import itertools
import mmap
import operator
import os
import shutil
import tempfile
import time
import uuid
import eventlet
from eventlet import greenthread
from eventlet import tpool
from lxml import etree
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.network import model as network_model
from nova import objects
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import instancejobtracker
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt import netutils
from nova.virt import watchdog_actions
from nova import volume
from nova.volume import encryptors
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
help='Rescue ami image. This will not be used if an image id '
'is provided by the user.'),
cfg.StrOpt('rescue_kernel_id',
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
help='Rescue ari image'),
cfg.StrOpt('virt_type',
default='kvm',
choices=('kvm', 'lxc', 'qemu', 'uml', 'xen', 'parallels'),
help='Libvirt domain type'),
cfg.StrOpt('connection_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on virt_type)'),
cfg.BoolOpt('inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('inject_key',
default=False,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('inject_partition',
default=-2,
               help='The partition to inject to: '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
choices=('raw', 'qcow2', 'vmdk', 'vdi'),
help='Snapshot image format. Defaults to same as source image'),
cfg.StrOpt('disk_prefix',
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on virt_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
                    ' if instance does not shut down within this window.'),
cfg.StrOpt('cpu_mode',
choices=('host-model', 'host-passthrough', 'custom', 'none'),
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If virt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"'),
cfg.StrOpt('cpu_model',
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'cpu_mode="custom" and virt_type="kvm|qemu"'),
cfg.StrOpt('snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
                     'e.g. file=directsync,block=none'),
cfg.StrOpt('rng_dev_path',
help='A path to a device that will be used as source of '
'entropy on the host. Permitted options are: '
'/dev/random or /dev/hwrng'),
cfg.ListOpt('hw_machine_type',
help='For qemu or KVM guests, set this option to specify '
'a default machine type per host architecture. '
'You can find a list of supported machine types '
'in your environment by checking the output of '
'the "virsh capabilities"command. The format of the '
'value for this config option is host-arch=machine-type. '
'For example: x86_64=machinetype1,armv7l=machinetype2'),
cfg.StrOpt('sysinfo_serial',
default='auto',
choices=('none', 'os', 'hardware', 'auto'),
               help='The data source used to populate the host "serial" '
                    'UUID exposed to the guest in the virtual BIOS.'),
cfg.IntOpt('mem_stats_period_seconds',
default=10,
               help='Period, in seconds, at which to collect memory usage '
                    'statistics. A zero or negative value disables memory '
                    'usage statistics.'),
cfg.ListOpt('uid_maps',
default=[],
                help='List of uid targets and ranges. '
                     'Syntax is guest-uid:host-uid:count. '
                     'Maximum of 5 allowed.'),
cfg.ListOpt('gid_maps',
default=[],
                help='List of gid targets and ranges. '
                     'Syntax is guest-gid:host-gid:count. '
                     'Maximum of 5 allowed.')
]
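# For illustration only (a sketch, not consumed by the code here): the
# options above are read from the "[libvirt]" section of nova.conf, so a
# minimal hypothetical configuration might look like:
#
#   [libvirt]
#   virt_type = kvm
#   cpu_mode = host-model
#   disk_cachemodes = file=directsync,block=none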
CONF = cfg.CONF
CONF.register_opts(libvirt_opts, 'libvirt')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('enabled', 'nova.compute.api',
group='ephemeral_storage_encryption')
CONF.import_opt('cipher', 'nova.compute.api',
group='ephemeral_storage_encryption')
CONF.import_opt('key_size', 'nova.compute.api',
group='ephemeral_storage_encryption')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc', group='vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
CONF.import_opt('vcpu_pin_set', 'nova.virt.hardware')
CONF.import_opt('vif_plugging_is_fatal', 'nova.virt.driver')
CONF.import_opt('vif_plugging_timeout', 'nova.virt.driver')
CONF.import_opt('enabled', 'nova.console.serial', group='serial_console')
CONF.import_opt('proxyclient_address', 'nova.console.serial',
group='serial_console')
CONF.import_opt('hw_disk_discard', 'nova.virt.libvirt.imagebackend',
group='libvirt')
CONF.import_group('workarounds', 'nova.utils')
CONF.import_opt('iscsi_use_multipath', 'nova.virt.libvirt.volume',
group='libvirt')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
MAX_CONSOLE_BYTES = 100 * units.Ki
# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
# Disable reason for the service which was enabled or disabled without reason
DISABLE_REASON_UNDEFINED = None
# Guest config console string
CONSOLE = "console=tty0 console=ttyS0"
GuestNumaConfig = collections.namedtuple(
'GuestNumaConfig', ['cpuset', 'cputune', 'numaconfig', 'numatune'])
libvirt_volume_drivers = [
'iscsi=nova.virt.libvirt.volume.volume.LibvirtISCSIVolumeDriver',
'iser=nova.virt.libvirt.volume.volume.LibvirtISERVolumeDriver',
'local=nova.virt.libvirt.volume.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.volume.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.volume.LibvirtNFSVolumeDriver',
'smbfs=nova.virt.libvirt.volume.volume.LibvirtSMBFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.aoe.LibvirtAOEVolumeDriver',
'glusterfs='
'nova.virt.libvirt.volume.glusterfs.LibvirtGlusterfsVolumeDriver',
'fibre_channel='
'nova.virt.libvirt.volume.fibrechannel.'
'LibvirtFibreChannelVolumeDriver',
'scality=nova.virt.libvirt.volume.scality.LibvirtScalityVolumeDriver',
'gpfs=nova.virt.libvirt.volume.gpfs.LibvirtGPFSVolumeDriver',
'quobyte=nova.virt.libvirt.volume.quobyte.LibvirtQuobyteVolumeDriver',
]
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
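# Illustrative sketch of what the monkey patch above changes (hypothetical
# object, not executed here): once patched, str()/repr() on a tpool.Proxy
# delegate to the wrapped object instead of describing the proxy itself:
#
#   proxied = tpool.Proxy(some_obj)   # some_obj is a placeholder
#   str(proxied) == str(some_obj)     # True after patch_tpool_proxy()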
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
# It means that the VM is running and the vCPU is idle. So,
# we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
# means the domain is being shut down. So technically the domain
# is still running. SHUTOFF is the real powered off state.
# But we will map both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
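# For example, a domain that libvirt reports as VIR_DOMAIN_BLOCKED (Xen,
# idle vCPU) maps to the same Nova state as VIR_DOMAIN_RUNNING:
#
#   LIBVIRT_POWER_STATE[VIR_DOMAIN_BLOCKED]  # == power_state.RUNNING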
MIN_LIBVIRT_VERSION = (0, 9, 11)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1)
# TODO(mriedem): Change MIN_LIBVIRT_VERSION to this in the 13.0.0 'M' release.
NEXT_MIN_LIBVIRT_VERSION = (0, 10, 2)
# Live snapshot requirements
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
# block size tuning requirements
MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2)
# BlockJobInfo management requirement
MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1)
# Relative block commit & rebase (feature is detected,
# this version is only used for messaging)
MIN_LIBVIRT_BLOCKJOB_RELATIVE_VERSION = (1, 2, 7)
# libvirt discard feature
MIN_LIBVIRT_DISCARD_VERSION = (1, 0, 6)
MIN_QEMU_DISCARD_VERSION = (1, 6, 0)
# While earlier versions could support NUMA reporting and
# NUMA placement, not until 1.2.7 was there the ability
# to pin guest nodes to host nodes, so mandate that. Without
# this the scheduler cannot make guaranteed decisions, as the
# guest placement may not match what was requested
MIN_LIBVIRT_NUMA_VERSION = (1, 2, 7)
# Versions of libvirt with known NUMA topology issues
# See bug #1449028
BAD_LIBVIRT_NUMA_VERSIONS = [(1, 2, 9, 2)]
# While earlier versions could support hugepage backed
# guests, not until 1.2.8 was there the ability to request
# a particular huge page size. Without this the scheduler
# cannot make guaranteed decisions, as the huge page size
# used by the guest may not match what was requested
MIN_LIBVIRT_HUGEPAGE_VERSION = (1, 2, 8)
# Versions of libvirt with broken cpu pinning support. This excludes
# versions of libvirt with broken NUMA support since pinning needs
# NUMA
# See bug #1438226
BAD_LIBVIRT_CPU_POLICY_VERSIONS = [(1, 2, 10)]
# qemu 2.1 introduces support for pinning memory on host
# NUMA nodes, along with the ability to specify hugepage
# sizes per guest NUMA node
MIN_QEMU_NUMA_HUGEPAGE_VERSION = (2, 1, 0)
# fsFreeze/fsThaw requirement
MIN_LIBVIRT_FSFREEZE_VERSION = (1, 2, 5)
# Hyper-V paravirtualized time source
MIN_LIBVIRT_HYPERV_TIMER_VERSION = (1, 2, 2)
MIN_QEMU_HYPERV_TIMER_VERSION = (2, 0, 0)
MIN_LIBVIRT_HYPERV_FEATURE_VERSION = (1, 0, 0)
MIN_LIBVIRT_HYPERV_FEATURE_EXTRA_VERSION = (1, 1, 0)
MIN_QEMU_HYPERV_FEATURE_VERSION = (1, 1, 0)
# parallels driver support
MIN_LIBVIRT_PARALLELS_VERSION = (1, 2, 12)
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
"supports_migrate_to_same_host": False
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._host = host.Host(self._uri(), read_only,
lifecycle_event_handler=self.emit_event,
conn_event_handler=self._handle_conn_event)
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._caps = None
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
host=self._host)
self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver()
self.volume_drivers = driver.driver_dict_from_config(
self._get_volume_drivers(), self)
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
self.valid_cachemodes = ["default",
"none",
"writethrough",
"writeback",
"directsync",
"unsafe",
]
self._conn_supports_start_paused = CONF.libvirt.virt_type in ('kvm',
'qemu')
for mode_str in CONF.libvirt.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warn(_LW('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.'),
{'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
self._volume_api = volume.API()
self._image_api = image.API()
sysinfo_serial_funcs = {
'none': lambda: None,
'hardware': self._get_host_sysinfo_serial_hardware,
'os': self._get_host_sysinfo_serial_os,
'auto': self._get_host_sysinfo_serial_auto,
}
self._sysinfo_serial_func = sysinfo_serial_funcs.get(
CONF.libvirt.sysinfo_serial)
if not self._sysinfo_serial_func:
raise exception.NovaException(
_("Unexpected sysinfo_serial setting '%(actual)s'. "
"Permitted values are %(expect)s'") %
{'actual': CONF.libvirt.sysinfo_serial,
'expect': ', '.join("'%s'" % k for k in
sysinfo_serial_funcs.keys())})
self.job_tracker = instancejobtracker.InstanceJobTracker()
def _get_volume_drivers(self):
return libvirt_volume_drivers
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
# provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
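    # Illustration of the resolution above (a sketch, assuming nova.conf has
    # disk_cachemodes = file=directsync): a guest disk whose source_type is
    # 'file' gets driver_cache overridden to 'directsync', while disks with
    # any other source_type keep their existing driver_cache value.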
def _do_quality_warnings(self):
"""Warn about untested driver configurations.
This will log a warning message about untested driver or host arch
configurations to indicate to administrators that the quality is
unknown. Currently, only qemu or kvm on intel 32- or 64-bit systems
is tested upstream.
"""
caps = self._host.get_capabilities()
hostarch = caps.host.cpu.arch
if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
hostarch not in (arch.I686, arch.X86_64)):
LOG.warn(_LW('The libvirt driver is not tested on '
'%(type)s/%(arch)s by the OpenStack project and '
'thus its quality can not be ensured. For more '
'information, see: https://wiki.openstack.org/wiki/'
'HypervisorSupportMatrix'),
{'type': CONF.libvirt.virt_type, 'arch': hostarch})
def _handle_conn_event(self, enabled, reason):
LOG.info(_LI("Connection event '%(enabled)d' reason '%(reason)s'"),
{'enabled': enabled, 'reason': reason})
self._set_host_enabled(enabled, reason)
def _version_to_string(self, version):
return '.'.join([str(x) for x in version])
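    # For example, _version_to_string((0, 9, 11)) returns '0.9.11', which is
    # how the MIN_LIBVIRT_*/MIN_QEMU_* tuples above are rendered in log and
    # error messages.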
def init_host(self, host):
self._host.initialize()
self._do_quality_warnings()
if (CONF.libvirt.virt_type == 'lxc' and
not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
LOG.warn(_LW("Running libvirt-lxc without user namespaces is "
"dangerous. Containers spawned by Nova will be run "
"as the host's root user. It is highly suggested "
"that user namespaces be used in a public or "
"multi-tenant environment."))
        # Stop libguestfs using KVM unless we're also configured
        # to use this. This solves the problem where people need to
        # stop Nova using KVM because nested-virt is broken
if CONF.libvirt.virt_type != "kvm":
guestfs.force_tcg()
if not self._host.has_min_version(MIN_LIBVIRT_VERSION):
raise exception.NovaException(
_('Nova requires libvirt version %s or greater.') %
self._version_to_string(MIN_LIBVIRT_VERSION))
if (CONF.libvirt.virt_type == 'parallels' and
not self._host.has_min_version(MIN_LIBVIRT_PARALLELS_VERSION)):
raise exception.NovaException(
_('Running Nova with parallels virt_type requires '
'libvirt version %s') %
self._version_to_string(MIN_LIBVIRT_PARALLELS_VERSION))
# TODO(mriedem): We plan to move to a minimum required version of
# libvirt 0.10.2 in the 13.0.0 'M' release so if we're running with
# less than that now, log a warning.
if not self._host.has_min_version(NEXT_MIN_LIBVIRT_VERSION):
LOG.warning(_LW('Running Nova with a libvirt version less than '
'%(version)s is deprecated. The required minimum '
'version of libvirt will be raised to %(version)s '
'in the 13.0.0 release.'),
{'version': self._version_to_string(
NEXT_MIN_LIBVIRT_VERSION)})
# TODO(sahid): This method is targeted for removal when the tests
# have been updated to avoid its use
#
# All libvirt API calls on the libvirt.Connect object should be
# encapsulated by methods on the nova.virt.libvirt.host.Host
# object, rather than directly invoking the libvirt APIs. The goal
# is to avoid a direct dependency on the libvirt API from the
# driver.py file.
def _get_connection(self):
return self._host.get_connection()
_conn = property(_get_connection)
@staticmethod
def _uri():
if CONF.libvirt.virt_type == 'uml':
uri = CONF.libvirt.connection_uri or 'uml:///system'
elif CONF.libvirt.virt_type == 'xen':
uri = CONF.libvirt.connection_uri or 'xen:///'
elif CONF.libvirt.virt_type == 'lxc':
uri = CONF.libvirt.connection_uri or 'lxc:///'
elif CONF.libvirt.virt_type == 'parallels':
uri = CONF.libvirt.connection_uri or 'parallels:///system'
else:
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
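    # For example, with virt_type = 'xen' and no connection_uri override the
    # driver connects to 'xen:///', while the default virt_type of 'kvm'
    # falls through to the final branch and uses 'qemu:///system'.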
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
try:
self._host.get_guest(instance)
return True
except exception.NovaException:
return False
def list_instances(self):
names = []
for dom in self._host.list_instance_domains(only_running=False):
names.append(dom.name())
return names
def list_instance_uuids(self):
uuids = []
for dom in self._host.list_instance_domains(only_running=False):
uuids.append(dom.UUIDString())
return uuids
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for vif in network_info:
self.vif_driver.plug(instance, vif)
def _unplug_vifs(self, instance, network_info, ignore_errors):
"""Unplug VIFs from networks."""
for vif in network_info:
try:
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if not ignore_errors:
raise
def unplug_vifs(self, instance, network_info):
self._unplug_vifs(instance, network_info, False)
def _teardown_container(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
rootfs_dev = instance.system_metadata.get('rootfs_device_name')
disk.teardown_container(container_dir, rootfs_dev)
def _destroy(self, instance, attempt=1):
try:
guest = self._host.get_guest(instance)
except exception.InstanceNotFound:
guest = None
        # If the instance is already terminated, we're still happy.
        # Otherwise, destroy it.
old_domid = -1
if guest is not None:
try:
old_domid = guest.id
guest.poweroff()
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_NO_DOMAIN:
# Domain already gone. This can safely be ignored.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
# TODO(sahid): At this point we should be a Guest object
state = self._get_power_state(guest._domain)
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_INTERNAL_ERROR:
errmsg = e.get_error_message()
if (CONF.libvirt.virt_type == 'lxc' and
errmsg == 'internal error: '
'Some processes refused to die'):
# Some processes in the container didn't die
# fast enough for libvirt. The container will
# eventually die. For now, move on and let
# the wait_for_destroy logic take over.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warn(_LW("Cannot destroy instance, operation time "
"out"),
instance=instance)
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
elif errcode == libvirt.VIR_ERR_SYSTEM_ERROR:
if e.get_int1() == errno.EBUSY:
# NOTE(danpb): When libvirt kills a process it sends it
# SIGTERM first and waits 10 seconds. If it hasn't gone
# it sends SIGKILL and waits another 5 seconds. If it
# still hasn't gone then you get this EBUSY error.
# Usually when a QEMU process fails to go away upon
# SIGKILL it is because it is stuck in an
                        # uninterruptible kernel sleep waiting on I/O from
# some non-responsive server.
# Given the CPU load of the gate tests though, it is
# conceivable that the 15 second timeout is too short,
# particularly if the VM running tempest has a high
# steal time from the cloud host. ie 15 wallclock
# seconds may have passed, but the VM might have only
# have a few seconds of scheduled run time.
LOG.warn(_LW('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s; '
'attempt %(attempt)d of 3'),
{'errcode': errcode, 'e': e,
'attempt': attempt},
instance=instance)
with excutils.save_and_reraise_exception() as ctxt:
# Try up to 3 times before giving up.
if attempt < 3:
ctxt.reraise = False
self._destroy(instance, attempt + 1)
return
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info.state
new_domid = dom_info.id
except exception.InstanceNotFound:
LOG.info(_LI("During wait destroy, instance disappeared."),
instance=instance)
raise loopingcall.LoopingCallDone()
if state == power_state.SHUTDOWN:
LOG.info(_LI("Instance destroyed successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
            # NOTE(wangpan): If the instance was booted again after destroy,
            # this may be an endless loop, so check the id of the domain
            # here; if it changed and the instance is still running, we
            # should destroy it again.
            # see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info(_LI("Instance may be started again."),
instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
kwargs = {'is_running': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info(_LI("Going to destroy instance again."),
instance=instance)
self._destroy(instance)
else:
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
self._destroy(instance)
self.cleanup(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
def _undefine_domain(self, instance):
try:
guest = self._host.get_guest(instance)
try:
guest.delete_configuration()
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
LOG.error(_LE('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e}, instance=instance)
except exception.InstanceNotFound:
pass
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
if destroy_vifs:
self._unplug_vifs(instance, network_info, True)
retry = True
while retry:
try:
self.unfilter_instance(instance, network_info)
except libvirt.libvirtError as e:
try:
state = self.get_info(instance).state
except exception.InstanceNotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warn(_LW("Instance may be still running, destroy "
"it again."), instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
LOG.exception(_LE('Error from libvirt during unfilter. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
retry = False
raise
else:
retry = False
        # FIXME(wangpan): if the instance is booted again here, for example
        #                 by a soft reboot operation, it will become
        #                 "running deleted"; should we check for and destroy
        #                 it at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device']
if disk_dev is not None:
disk_dev = disk_dev.rpartition("/")[2]
if ('data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
try:
self._disconnect_volume(connection_info, disk_dev)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if destroy_disks:
# Don't block on Volume errors if we're trying to
# delete the instance as we may be partially created
# or deleted
ctxt.reraise = False
LOG.warn(_LW("Ignoring Volume Error on vol %(vol_id)s "
"during delete %(exc)s"),
{'vol_id': vol.get('volume_id'), 'exc': exc},
instance=instance)
if destroy_disks:
# NOTE(haomai): destroy volumes if needed
if CONF.libvirt.images_type == 'lvm':
self._cleanup_lvm(instance)
if CONF.libvirt.images_type == 'rbd':
self._cleanup_rbd(instance)
if destroy_disks or (
migrate_data and migrate_data.get('is_shared_block_storage',
False)):
attempts = int(instance.system_metadata.get('clean_attempts',
'0'))
success = self.delete_instance_files(instance)
# NOTE(mriedem): This is used in the _run_pending_deletes periodic
# task in the compute manager. The tight coupling is not great...
instance.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
instance.cleaned = True
instance.save()
if CONF.serial_console.enabled:
try:
serials = self._get_serial_ports_from_instance(instance)
except exception.InstanceNotFound:
# Serial ports already gone. Nothing to release.
serials = ()
for hostname, port in serials:
serial_console.release_port(host=hostname, port=port)
self._undefine_domain(instance)
def _detach_encrypted_volumes(self, instance):
"""Detaches encrypted volumes attached to instance."""
disks = jsonutils.loads(self.get_instance_disk_info(instance))
encrypted_volumes = filter(dmcrypt.is_encrypted,
[disk['path'] for disk in disks])
for path in encrypted_volumes:
dmcrypt.delete_volume(path)
def _get_serial_ports_from_instance(self, instance, mode=None):
"""Returns an iterator over serial port(s) configured on instance.
:param mode: Should be a value in (None, bind, connect)
"""
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
tree = etree.fromstring(xml)
# The 'serial' device is the base for x86 platforms. Other platforms
# (e.g. kvm on system z = arch.S390X) can only use 'console' devices.
xpath_mode = "[@mode='%s']" % mode if mode else ""
serial_tcp = "./devices/serial[@type='tcp']/source" + xpath_mode
console_tcp = "./devices/console[@type='tcp']/source" + xpath_mode
tcp_devices = tree.findall(serial_tcp)
if len(tcp_devices) == 0:
tcp_devices = tree.findall(console_tcp)
for source in tcp_devices:
yield (source.get("host"), int(source.get("service")))
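    # Illustrative domain XML fragment matched by the XPath above (the
    # host/service values are hypothetical):
    #
    #   <serial type='tcp'>
    #     <source mode='bind' host='127.0.0.1' service='10000'/>
    #   </serial>
    #
    # which would be yielded as ('127.0.0.1', 10000).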
@staticmethod
def _get_rbd_driver():
return rbd_utils.RBDDriver(
pool=CONF.libvirt.images_rbd_pool,
ceph_conf=CONF.libvirt.images_rbd_ceph_conf,
rbd_user=CONF.libvirt.rbd_user)
def _cleanup_rbd(self, instance):
LibvirtDriver._get_rbd_driver().cleanup_volumes(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
if instance.get('ephemeral_key_uuid') is not None:
self._detach_encrypted_volumes(instance)
disks = self._lvm_disks(instance)
if disks:
lvm.remove_volumes(disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance.uuid
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = lvm.list_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disks = map(fullpath, disk_names)
return disks
return []
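    # Illustration with hypothetical names: given images_volume_group =
    # 'nova-vg' and an instance uuid of 'abc', logical volumes such as
    # 'abc_disk' and 'abc_disk.local' would be returned as
    # '/dev/nova-vg/abc_disk' and '/dev/nova-vg/abc_disk.local'.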
def get_volume_connector(self, instance):
root_helper = utils._get_root_helper()
return connector.get_connector_properties(
root_helper, CONF.my_block_storage_ip,
CONF.libvirt.iscsi_use_multipath,
enforce_multipath=True,
host=CONF.host)
def _cleanup_resize(self, instance, network_info):
# NOTE(wangpan): we get the pre-grizzly instance path firstly,
# so the backup dir of pre-grizzly instance can
# be deleted correctly with grizzly or later nova.
pre_grizzly_name = libvirt_utils.get_instance_path(instance,
forceold=True)
target = pre_grizzly_name + '_resize'
if not os.path.exists(target):
target = libvirt_utils.get_instance_path(instance) + '_resize'
if os.path.exists(target):
# Deletion can fail over NFS, so retry the deletion as required.
            # Set the maximum number of attempts to 5; in most cases the
            # directory can be removed by the second attempt.
utils.execute('rm', '-rf', target, delay_on_retry=True,
attempts=5)
if instance.host != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.unfilter_instance(instance, network_info)
def _get_volume_driver(self, connection_info):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
return self.volume_drivers[driver_type]
def _connect_volume(self, connection_info, disk_info):
driver = self._get_volume_driver(connection_info)
driver.connect_volume(connection_info, disk_info)
def _disconnect_volume(self, connection_info, disk_dev):
driver = self._get_volume_driver(connection_info)
driver.disconnect_volume(connection_info, disk_dev)
def _get_volume_config(self, connection_info, disk_info):
driver = self._get_volume_driver(connection_info)
return driver.get_config(connection_info, disk_info)
def _get_volume_encryptor(self, connection_info, encryption):
encryptor = encryptors.get_volume_encryptor(connection_info,
**encryption)
return encryptor
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
guest = self._host.get_guest(instance)
disk_dev = mountpoint.rpartition("/")[2]
bdm = {
'device_name': disk_dev,
'disk_bus': disk_bus,
'device_type': device_type}
# Note(cfb): If the volume has a custom block size, check that
        # we are using QEMU/KVM and libvirt >= 0.10.2. The
# presence of a block size is considered mandatory by
# cinder so we fail if we can't honor the request.
data = {}
if ('data' in connection_info):
data = connection_info['data']
if ('logical_block_size' in data or 'physical_block_size' in data):
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Volume sets block size, but the current "
"libvirt hypervisor '%s' does not support custom "
"block size") % CONF.libvirt.virt_type
raise exception.InvalidHypervisorType(msg)
if not self._host.has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):
ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION])
msg = _("Volume sets block size, but libvirt '%s' or later is "
"required.") % ver
raise exception.Invalid(msg)
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type,
image_meta, bdm)
self._connect_volume(connection_info, disk_info)
conf = self._get_volume_config(connection_info, disk_info)
self._set_cache_mode(conf)
try:
state = self._get_power_state(guest._domain)
live = state in (power_state.RUNNING, power_state.PAUSED)
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
guest.attach_device(conf, persistent=True, live=live)
except Exception as ex:
LOG.exception(_LE('Failed to attach volume at mountpoint: %s'),
mountpoint, instance=instance)
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self._disconnect_volume(connection_info, disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self._disconnect_volume(connection_info, disk_dev)
def _swap_volume(self, guest, disk_path, new_path, resize_to):
"""Swap existing disk with a new block device."""
dev = guest.get_block_device(disk_path)
# Save a copy of the domain's persistent XML file
xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
dev.abort_job()
except Exception:
pass
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if guest.has_persistent_configuration():
guest.delete_configuration()
# Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
# allow writing to existing external volume file
dev.rebase(new_path, copy=True, reuse_ext=True)
while dev.wait_for_job():
time.sleep(0.5)
dev.abort_job(pivot=True)
if resize_to:
                # NOTE(alex_xu): domain.blockJobAbort isn't a sync call. This
                # is a bug in libvirt, so we need to wait for the pivot to
                # finish. libvirt bug #1119173
while dev.wait_for_job(wait_for_job_clean=True):
time.sleep(0.5)
dev.resize(resize_to * units.Gi / units.Ki)
finally:
self._host.write_instance_config(xml)
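    # Summary of the swap sequence above: the persistent definition is
    # temporarily dropped, dev.rebase(..., copy=True, reuse_ext=True) mirrors
    # the old disk onto the new path, abort_job(pivot=True) switches the
    # domain over to the copy, the device is optionally resized, and the
    # saved XML is written back in the finally block so the domain is
    # re-defined regardless of the outcome.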
def swap_volume(self, old_connection_info,
new_connection_info, instance, mountpoint, resize_to):
guest = self._host.get_guest(instance)
disk_dev = mountpoint.rpartition("/")[2]
if not guest.get_disk(disk_dev):
raise exception.DiskNotFound(location=disk_dev)
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(
CONF.libvirt.virt_type, disk_dev),
'type': 'disk',
}
self._connect_volume(new_connection_info, disk_info)
conf = self._get_volume_config(new_connection_info, disk_info)
if not conf.source_path:
self._disconnect_volume(new_connection_info, disk_dev)
raise NotImplementedError(_("Swap only supports host devices"))
# Save updates made in connection_info when connect_volume was called
volume_id = new_connection_info.get('serial')
bdm = objects.BlockDeviceMapping.get_by_volume_id(
nova_context.get_admin_context(), volume_id)
driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
driver_bdm['connection_info'] = new_connection_info
driver_bdm.save()
self._swap_volume(guest, disk_dev, conf.source_path, resize_to)
self._disconnect_volume(old_connection_info, disk_dev)
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
except exception.InstanceNotFound:
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
xml = self._get_guest_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
image_meta,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
disk_dev = mountpoint.rpartition("/")[2]
try:
guest = self._host.get_guest(instance)
conf = guest.get_disk(disk_dev)
if not conf:
raise exception.DiskNotFound(location=disk_dev)
state = self._get_power_state(guest._domain)
live = state in (power_state.RUNNING, power_state.PAUSED)
guest.detach_device(conf, persistent=True, live=live)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
except exception.InstanceNotFound:
            # NOTE(zhaoqin): If the instance does not exist, get_guest()
            # will raise an InstanceNotFound exception. We still need to
            # disconnect the volume in that case.
LOG.warn(_LW("During detach_volume, instance disappeared."),
instance=instance)
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("During detach_volume, instance disappeared."),
instance=instance)
else:
raise
self._disconnect_volume(connection_info, disk_dev)
def attach_interface(self, instance, image_meta, vif):
guest = self._host.get_guest(instance)
self.vif_driver.plug(instance, vif)
self.firewall_driver.setup_basic_filtering(instance, [vif])
cfg = self.vif_driver.get_config(instance, vif, image_meta,
instance.flavor,
CONF.libvirt.virt_type)
try:
state = self._get_power_state(guest._domain)
live = state in (power_state.RUNNING, power_state.PAUSED)
guest.attach_device(cfg, persistent=True, live=live)
except libvirt.libvirtError:
LOG.error(_LE('attaching network adapter failed.'),
instance=instance, exc_info=True)
self.vif_driver.unplug(instance, vif)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
def detach_interface(self, instance, vif):
guest = self._host.get_guest(instance)
cfg = self.vif_driver.get_config(instance, vif, None, instance.flavor,
CONF.libvirt.virt_type)
try:
self.vif_driver.unplug(instance, vif)
state = self._get_power_state(guest._domain)
live = state in (power_state.RUNNING, power_state.PAUSED)
guest.detach_device(cfg, persistent=True, live=live)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
LOG.error(_LE('detaching network adapter failed.'),
instance=instance, exc_info=True)
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
def _create_snapshot_metadata(self, image_meta, instance,
img_fmt, snp_name):
metadata = {'is_public': False,
'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance.kernel_id,
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance.project_id,
'ramdisk_id': instance.ramdisk_id,
}
}
if instance.os_type:
metadata['properties']['os_type'] = instance.os_type
# NOTE(vish): glance forces ami disk format to be ami
if image_meta.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
metadata['container_format'] = image_meta.get(
'container_format', 'bare')
return metadata
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove virt_dom at the end.
virt_dom = guest._domain
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
snapshot = self._image_api.get(context, image_id)
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = CONF.libvirt.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm and rbd as raw
if image_format == 'lvm' or image_format == 'rbd':
image_format = 'raw'
metadata = self._create_snapshot_metadata(image_meta,
instance,
image_format,
snapshot['name'])
snapshot_name = uuid.uuid4().hex
state = self._get_power_state(virt_dom)
# NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
# These restrictions can be relaxed as other configurations
# can be validated.
# NOTE(dgenin): Instances with LVM encrypted ephemeral storage require
# cold snapshots. Currently, checking for encryption is
# redundant because LVM supports only cold snapshots.
# It is necessary in case this situation changes in the
# future.
if (self._host.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
host.HV_DRIVER_QEMU)
and source_format not in ('lvm', 'rbd')
and not CONF.ephemeral_storage_encryption.enabled
and not CONF.workarounds.disable_libvirt_livesnapshot):
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
# confirms the running instance, as opposed to the system as a
# whole, has a new enough version of the hypervisor (bug 1193146).
try:
virt_dom.blockJobAbort(disk_path, 0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
live_snapshot = False
else:
pass
else:
live_snapshot = False
# NOTE(rmk): We cannot perform live snapshots when a managedSave
# file is present, so we will use the cold/legacy method
# for instances which are shutdown.
if state == power_state.SHUTDOWN:
live_snapshot = False
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
self._detach_pci_devices(guest,
pci_manager.get_instance_pci_devs(instance))
self._detach_sriov_ports(context, instance, guest)
guest.save_memory_state()
snapshot_backend = self.image_backend.snapshot(instance,
disk_path,
image_type=source_format)
if live_snapshot:
LOG.info(_LI("Beginning live snapshot process"),
instance=instance)
else:
LOG.info(_LI("Beginning cold snapshot process"),
instance=instance)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt.snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE(xqueralt): libvirt needs o+x in the temp directory
os.chmod(tmpdir, 0o701)
self._live_snapshot(context, instance, guest, disk_path,
out_path, image_format, image_meta)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
guest = None
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
guest = self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
guest = self._create_domain(
domain=virt_dom, pause=True)
if guest is not None:
self._attach_pci_devices(guest,
pci_manager.get_instance_pci_devs(instance))
self._attach_sriov_ports(context, instance, guest)
LOG.info(_LI("Snapshot extracted, beginning image upload"),
instance=instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
self._image_api.update(context,
image_id,
metadata,
image_file)
LOG.info(_LI("Snapshot image upload complete"),
instance=instance)
def _can_quiesce(self, image_meta):
if CONF.libvirt.virt_type not in ('kvm', 'qemu'):
return (False, _('Only KVM and QEMU are supported'))
if not self._host.has_min_version(MIN_LIBVIRT_FSFREEZE_VERSION):
ver = ".".join([str(x) for x in MIN_LIBVIRT_FSFREEZE_VERSION])
return (False, _('Quiescing requires libvirt version %(version)s '
'or greater') % {'version': ver})
img_meta_prop = image_meta.get('properties', {}) if image_meta else {}
hw_qga = img_meta_prop.get('hw_qemu_guest_agent', '')
if not strutils.bool_from_string(hw_qga):
return (False, _('QEMU guest agent is not enabled'))
return (True, None)
def _set_quiesced(self, context, instance, image_meta, quiesced):
supported, reason = self._can_quiesce(image_meta)
if not supported:
raise exception.InstanceQuiesceNotSupported(
instance_id=instance.uuid, reason=reason)
try:
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove domain at the end.
domain = guest._domain
if quiesced:
domain.fsFreeze()
else:
domain.fsThaw()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while quiescing %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s')
% {'instance_name': instance.name,
'error_code': error_code, 'ex': ex})
raise exception.NovaException(msg)
def quiesce(self, context, instance, image_meta):
"""Freeze the guest filesystems to prepare for snapshot.
The qemu-guest-agent must be setup to execute fsfreeze.
"""
self._set_quiesced(context, instance, image_meta, True)
def unquiesce(self, context, instance, image_meta):
"""Thaw the guest filesystems after snapshot."""
self._set_quiesced(context, instance, image_meta, False)
def _live_snapshot(self, context, instance, guest, disk_path, out_path,
image_format, image_meta):
"""Snapshot an instance without downtime."""
dev = guest.get_block_device(disk_path)
# Save a copy of the domain's persistent XML file
xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
dev.abort_job()
except Exception:
pass
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
img_meta_prop = image_meta.get('properties', {}) if image_meta else {}
require_quiesce = strutils.bool_from_string(
img_meta_prop.get('os_require_quiesce', ''))
if require_quiesce:
self.quiesce(context, instance, image_meta)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if guest.has_persistent_configuration():
guest.delete_configuration()
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
dev.rebase(disk_delta, copy=True, reuse_ext=True, shallow=True)
while dev.wait_for_job():
time.sleep(0.5)
dev.abort_job()
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._host.write_instance_config(xml)
if require_quiesce:
self.unquiesce(context, instance, image_meta)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
out_path, image_format)
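    # Summary of the live snapshot flow above: a CoW delta backed by the
    # instance's original backing file is created, a shallow blockRebase
    # mirrors the active disk into that delta, the block job is aborted once
    # the copy completes, and the delta is then flattened into the final
    # snapshot image via extract_snapshot().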
def _volume_snapshot_update_status(self, context, snapshot_id, status):
"""Send a snapshot status update to Cinder.
        This method captures and logs exceptions that occur, since callers
        cannot do anything useful with these exceptions.
Operations on the Cinder side waiting for this will time out if
a failure occurs sending the update.
:param context: security context
:param snapshot_id: id of snapshot being updated
:param status: new status value
"""
try:
self._volume_api.update_snapshot_status(context,
snapshot_id,
status)
except Exception:
LOG.exception(_LE('Failed to send updated snapshot status '
'to volume service.'))
def _volume_snapshot_create(self, context, instance, domain,
volume_id, new_file):
"""Perform volume snapshot.
:param domain: VM that volume is attached to
:param volume_id: volume UUID to snapshot
:param new_file: relative path to new qcow2 file present on share
"""
# TODO(sahid): An object Guest should be passed instead of
# a "domain" as virDomain.
guest = libvirt_guest.Guest(domain)
xml = guest.get_xml_desc()
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
disks_to_snap = [] # to be snapshotted by libvirt
network_disks_to_snap = [] # network disks (netfs, gluster, etc.)
disks_to_skip = [] # local disks not snapshotted
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None):
continue
if (guest_disk.serial is None or guest_disk.serial != volume_id):
disks_to_skip.append(guest_disk.target_dev)
continue
# disk is a Cinder volume with the correct volume_id
disk_info = {
'dev': guest_disk.target_dev,
'serial': guest_disk.serial,
'current_file': guest_disk.source_path,
'source_protocol': guest_disk.source_protocol,
'source_name': guest_disk.source_name,
'source_hosts': guest_disk.source_hosts,
'source_ports': guest_disk.source_ports
}
# Determine path for new_file based on current path
if disk_info['current_file'] is not None:
current_file = disk_info['current_file']
new_file_path = os.path.join(os.path.dirname(current_file),
new_file)
disks_to_snap.append((current_file, new_file_path))
elif disk_info['source_protocol'] in ('gluster', 'netfs'):
network_disks_to_snap.append((disk_info, new_file))
if not disks_to_snap and not network_disks_to_snap:
msg = _('Found no disk to snapshot.')
raise exception.NovaException(msg)
snapshot = vconfig.LibvirtConfigGuestSnapshot()
for current_name, new_filename in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = current_name
snap_disk.source_path = new_filename
snap_disk.source_type = 'file'
snap_disk.snapshot = 'external'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for disk_info, new_filename in network_disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = disk_info['dev']
snap_disk.source_type = 'network'
snap_disk.source_protocol = disk_info['source_protocol']
snap_disk.snapshot = 'external'
snap_disk.source_path = new_filename
old_dir = disk_info['source_name'].split('/')[0]
snap_disk.source_name = '%s/%s' % (old_dir, new_filename)
snap_disk.source_hosts = disk_info['source_hosts']
snap_disk.source_ports = disk_info['source_ports']
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
LOG.debug("snap xml: %s", snapshot_xml, instance=instance)
snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
try:
domain.snapshotCreateXML(snapshot_xml,
snap_flags | QUIESCE)
return
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create quiesced VM snapshot, '
'attempting again with quiescing disabled.'),
instance=instance)
try:
domain.snapshotCreateXML(snapshot_xml, snap_flags)
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create VM snapshot, '
'failing volume_snapshot operation.'),
instance=instance)
raise
def _volume_refresh_connection_info(self, context, instance, volume_id):
bdm = objects.BlockDeviceMapping.get_by_volume_id(context,
volume_id)
driver_bdm = driver_block_device.convert_volume(bdm)
if driver_bdm:
driver_bdm.refresh_connection_info(context, instance,
self._volume_api, self)
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
"""Create snapshots of a Cinder volume via libvirt.
:param instance: VM instance object reference
:param volume_id: id of volume being snapshotted
:param create_info: dict of information used to create snapshots
- snapshot_id : ID of snapshot
- type : qcow2 / <other>
- new_file : qcow2 file created by Cinder which
becomes the VM's active image after
the snapshot is complete
"""
LOG.debug("volume_snapshot_create: create_info: %(c_info)s",
{'c_info': create_info}, instance=instance)
try:
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove virt_dom at the end.
virt_dom = guest._domain
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
if create_info['type'] != 'qcow2':
raise exception.NovaException(_('Unknown type: %s') %
create_info['type'])
snapshot_id = create_info.get('snapshot_id', None)
if snapshot_id is None:
raise exception.NovaException(_('snapshot_id required '
'in create_info'))
try:
self._volume_snapshot_create(context, instance, virt_dom,
volume_id, create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during '
'volume_snapshot_create, '
'sending error status to Cinder.'),
instance=instance)
self._volume_snapshot_update_status(
context, snapshot_id, 'error')
self._volume_snapshot_update_status(
context, snapshot_id, 'creating')
def _wait_for_snapshot():
snapshot = self._volume_api.get_snapshot(context, snapshot_id)
if snapshot.get('status') != 'creating':
self._volume_refresh_connection_info(context, instance,
volume_id)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)
timer.start(interval=0.5).wait()
def _volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info=None):
"""Note:
if file being merged into == active image:
do a blockRebase (pull) operation
else:
do a blockCommit operation
Files must be adjacent in snap chain.
:param instance: instance object reference
:param volume_id: volume UUID
:param snapshot_id: snapshot UUID (unused currently)
:param delete_info: {
'type': 'qcow2',
'file_to_merge': 'a.img',
'merge_target_file': 'b.img' or None (if merging file_to_merge into
active image)
}
Libvirt blockjob handling required for this method is broken
in versions of libvirt that do not contain:
http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)
(Patch is pending in 1.0.5-maint branch as well, but we cannot detect
libvirt 1.0.5.5 vs. 1.0.5.6 here.)
"""
if not self._host.has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION):
ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])
msg = _("Libvirt '%s' or later is required for online deletion "
"of volume snapshots.") % ver
raise exception.Invalid(msg)
LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info,
instance=instance)
if delete_info['type'] != 'qcow2':
msg = _('Unknown delete_info type %s') % delete_info['type']
raise exception.NovaException(msg)
try:
guest = self._host.get_guest(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
# Find dev name
my_dev = None
active_disk = None
xml = guest.get_xml_desc()
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
active_disk_object = None
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None or guest_disk.serial is None):
continue
if guest_disk.serial == volume_id:
my_dev = guest_disk.target_dev
active_disk = guest_disk.source_path
active_protocol = guest_disk.source_protocol
active_disk_object = guest_disk
break
if my_dev is None or (active_disk is None and active_protocol is None):
msg = _('Disk with id: %s '
'not found attached to instance.') % volume_id
LOG.debug('Domain XML: %s', xml, instance=instance)
raise exception.NovaException(msg)
LOG.debug("found device at %s", my_dev, instance=instance)
def _get_snap_dev(filename, backing_store):
if filename is None:
msg = _('filename cannot be None')
raise exception.NovaException(msg)
# libgfapi delete
LOG.debug("XML: %s" % xml)
LOG.debug("active disk object: %s", active_disk_object)
# determine reference within backing store for desired image
filename_to_merge = filename
matched_name = None
b = backing_store
index = None
current_filename = active_disk_object.source_name.split('/')[1]
if current_filename == filename_to_merge:
return my_dev + '[0]'
while b is not None:
source_filename = b.source_name.split('/')[1]
if source_filename == filename_to_merge:
LOG.debug('found match: %s', b.source_name)
matched_name = b.source_name
index = b.index
break
b = b.backing_store
if matched_name is None:
msg = _('no match found for %s') % (filename_to_merge)
raise exception.NovaException(msg)
LOG.debug('index of match (%s) is %s', b.source_name, index)
my_snap_dev = '%s[%s]' % (my_dev, index)
return my_snap_dev
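        # Illustration with hypothetical filenames: if the disk is 'vda' and
        # the file to merge sits at index 1 of its backing chain, the value
        # returned above is 'vda[1]', the "device[index]" notation libvirt
        # accepts for referencing an image within a disk's backing chain.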
if delete_info['merge_target_file'] is None:
# pull via blockRebase()
# Merge the most recent snapshot into the active image
rebase_disk = my_dev
rebase_base = delete_info['file_to_merge'] # often None
if active_protocol is not None:
rebase_base = _get_snap_dev(delete_info['file_to_merge'],
active_disk_object.backing_store)
# NOTE(deepakcs): libvirt added support for _RELATIVE in v1.2.7,
# and when available this flag _must_ be used to ensure backing
# paths are maintained relative by qemu.
#
# If the _RELATIVE flag is not found, continue with the old behaviour
# (a relative backing path seems to work for this case).
try:
libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE
relative = rebase_base is not None
except AttributeError:
LOG.warn(_LW("Relative blockrebase support was not detected. "
"Continuing with old behaviour."))
relative = False
LOG.debug(
'disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, relative: %(relative)s',
{'disk': rebase_disk,
'base': rebase_base,
'bw': libvirt_guest.BlockDevice.REBASE_DEFAULT_BANDWIDTH,
'relative': str(relative)}, instance=instance)
dev = guest.get_block_device(rebase_disk)
result = dev.rebase(rebase_base, relative=relative)
if result == 0:
LOG.debug('blockRebase started successfully',
instance=instance)
while dev.wait_for_job(abort_on_error=True):
LOG.debug('waiting for blockRebase job completion',
instance=instance)
time.sleep(0.5)
else:
# commit with blockCommit()
my_snap_base = None
my_snap_top = None
commit_disk = my_dev
# NOTE(deepakcs): libvirt added support for _RELATIVE in v1.2.7,
# and when available this flag _must_ be used to ensure backing
# paths are maintained relative by qemu.
#
# If the _RELATIVE flag is not found, raise an exception, as the
# relative backing path may not be maintained and the Cinder flow
# is broken if allowed to continue.
try:
libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
except AttributeError:
ver = '.'.join(
[str(x) for x in
MIN_LIBVIRT_BLOCKJOB_RELATIVE_VERSION])
msg = _("Relative blockcommit support was not detected. "
"Libvirt '%s' or later is required for online "
"deletion of file/network storage-backed volume "
"snapshots.") % ver
raise exception.Invalid(msg)
if active_protocol is not None:
my_snap_base = _get_snap_dev(delete_info['merge_target_file'],
active_disk_object.backing_store)
my_snap_top = _get_snap_dev(delete_info['file_to_merge'],
active_disk_object.backing_store)
commit_base = my_snap_base or delete_info['merge_target_file']
commit_top = my_snap_top or delete_info['file_to_merge']
LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s '
'commit_base=%(commit_base)s '
'commit_top=%(commit_top)s ',
{'commit_disk': commit_disk,
'commit_base': commit_base,
'commit_top': commit_top}, instance=instance)
dev = guest.get_block_device(commit_disk)
result = dev.commit(commit_base, commit_top, relative=True)
if result == 0:
LOG.debug('blockCommit started successfully',
instance=instance)
while dev.wait_for_job(abort_on_error=True):
LOG.debug('waiting for blockCommit job completion',
instance=instance)
time.sleep(0.5)
def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
delete_info):
try:
self._volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info=delete_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during '
'volume_snapshot_delete, '
'sending error status to Cinder.'),
instance=instance)
self._volume_snapshot_update_status(
context, snapshot_id, 'error_deleting')
self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
self._volume_refresh_connection_info(context, instance, volume_id)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug("Instance soft reboot failed: %s", e,
instance=instance)
soft_reboot_success = False
if soft_reboot_success:
LOG.info(_LI("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_LW("Failed to soft reboot instance. "
"Trying hard reboot."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
succeeds if the guest responds to acpi. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove dom at the end.
dom = guest._domain
state = self._get_power_state(dom)
old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
dom.shutdown()
# NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance, 'all'))
for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove dom at the end.
dom = guest._domain
state = self._get_power_state(dom)
new_domid = dom.ID()
# NOTE(ivoks): By checking domain IDs, we make sure we are
# not recreating domain that's already running.
if old_domid != new_domid:
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_LI("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info(_LI("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
"""
self._destroy(instance)
# Convert the system metadata to image metadata
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
instance_dir = libvirt_utils.get_instance_path(instance)
fileutils.ensure_tree(instance_dir)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
disk_info = self._get_instance_disk_info(instance.name, xml,
block_device_info)
if context.auth_token is not None:
self._create_images_and_backing(context, instance, instance_dir,
disk_info)
# Initialize all the necessary networking, block devices and
# start the instance.
self._create_domain_and_network(context, xml, instance, network_info,
disk_info,
block_device_info=block_device_info,
reboot=True,
vifs_already_plugged=True)
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance, 'all'))
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info(_LI("Instance rebooted successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove dom at the end.
dom = guest._domain
dom.suspend()
def unpause(self, instance):
"""Unpause paused VM instance."""
self._host.get_guest(instance).resume()
def _clean_shutdown(self, instance, timeout, retry_interval):
"""Attempt to shutdown the instance gracefully.
:param instance: The instance to be shutdown
:param timeout: How long to wait in seconds for the instance to
shutdown
:param retry_interval: How often in seconds to signal the instance
to shutdown while waiting
:returns: True if the shutdown succeeded
"""
# List of states that represent a shutdown instance
SHUTDOWN_STATES = [power_state.SHUTDOWN,
power_state.CRASHED]
try:
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove dom at the end.
dom = guest._domain
except exception.InstanceNotFound:
# If the instance has gone then we don't need to
# wait for it to shutdown
return True
state = self._get_power_state(dom)
if state in SHUTDOWN_STATES:
LOG.info(_LI("Instance already shutdown."),
instance=instance)
return True
LOG.debug("Shutting down instance from state %s", state,
instance=instance)
dom.shutdown()
retry_countdown = retry_interval
for sec in six.moves.range(timeout):
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove dom at the end.
dom = guest._domain
state = self._get_power_state(dom)
if state in SHUTDOWN_STATES:
LOG.info(_LI("Instance shutdown successfully after %d "
"seconds."), sec, instance=instance)
return True
# Note(PhilD): We can't assume that the Guest was able to process
# any previous shutdown signal (for example it may
# have still been starting up), so within the overall
# timeout we re-trigger the shutdown every
# retry_interval.
if retry_countdown == 0:
retry_countdown = retry_interval
# Instance could shutdown at any time, in which case we
# will get an exception when we call shutdown
try:
LOG.debug("Instance in state %s after %d seconds - "
"resending shutdown", state, sec,
instance=instance)
dom.shutdown()
except libvirt.libvirtError:
# Assume this is because its now shutdown, so loop
# one more time to clean up.
LOG.debug("Ignoring libvirt exception from shutdown "
"request.", instance=instance)
continue
else:
retry_countdown -= 1
time.sleep(1)
LOG.info(_LI("Instance failed to shutdown in %d seconds."),
timeout, instance=instance)
return False
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
if timeout:
self._clean_shutdown(instance, timeout, retry_interval)
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def inject_nmi(self, instance):
"""Inject an NMI to the specified instance."""
try:
self._host.get_guest(instance).inject_nmi()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while injecting an NMI to '
'%(instance_uuid)s: '
'[Error Code %(error_code)s] %(ex)s')
% {'instance_uuid': instance.uuid,
'error_code': error_code, 'ex': ex})
raise exception.NovaException(msg)
def suspend(self, context, instance):
"""Suspend the specified instance."""
guest = self._host.get_guest(instance)
self._detach_pci_devices(guest,
pci_manager.get_instance_pci_devs(instance))
self._detach_sriov_ports(context, instance, guest)
guest.save_memory_state()
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance, image_meta,
block_device_info=block_device_info)
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
guest = self._create_domain_and_network(context, xml, instance,
network_info, disk_info,
block_device_info=block_device_info,
vifs_already_plugged=True)
self._attach_pci_devices(guest,
pci_manager.get_instance_pci_devs(instance))
self._attach_sriov_ports(context, instance, guest, network_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
try:
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove domain at the end.
domain = guest._domain
state = self._get_power_state(domain)
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
except exception.NovaException:
pass
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
should not edit or override the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
if image_meta is not None:
rescue_image_id = image_meta.get('id')
else:
rescue_image_id = None
rescue_images = {
'image_id': (rescue_image_id or
CONF.libvirt.rescue_image_id or instance.image_ref),
'kernel_id': (CONF.libvirt.rescue_kernel_id or
instance.kernel_id),
'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
instance.ramdisk_id),
}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
rescue=True)
self._create_image(context, instance, disk_info['mapping'],
suffix='.rescue', disk_images=rescue_images,
network_info=network_info,
admin_pass=rescue_password)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove virt_dom at the end.
virt_dom = guest._domain
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
# cleanup rescue volume
lvm.remove_volumes([lvmdisk for lvmdisk in self._lvm_disks(instance)
if lvmdisk.endswith('.rescue')])
def poll_rebooting_instances(self, timeout, instances):
pass
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
xml = self._get_guest_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(context, xml, instance, network_info,
disk_info,
block_device_info=block_device_info)
LOG.debug("Instance is running", instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info(_LI("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_LI('data: %(data)r, fpath: %(fpath)r'),
{'data': data, 'fpath': fpath})
with open(fpath, 'a+') as fp:
fp.write(data)
return fpath
def get_console_output(self, context, instance):
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
tree = etree.fromstring(xml)
console_types = {}
# NOTE(comstud): We want to try 'file' types first, then try 'pty'
# types. We can't use Python 2.7 syntax of:
# tree.find("./devices/console[@type='file']/source")
# because we need to support 2.6.
console_nodes = tree.findall('./devices/console')
for console_node in console_nodes:
console_type = console_node.get('type')
console_types.setdefault(console_type, [])
console_types[console_type].append(console_node)
# If the guest has a console logging to a file prefer to use that
if console_types.get('file'):
for file_console in console_types.get('file'):
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
if not os.path.exists(path):
LOG.info(_LI('Instance is configured with a file console, '
'but the backing file is not (yet?) present'),
instance=instance)
return ""
libvirt_utils.chown(path, os.getuid())
with libvirt_utils.file_open(path, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_LI('Truncated console log returned, '
'%d bytes ignored'), remaining,
instance=instance)
return log_data
# Try 'pty' types
if console_types.get('pty'):
for pty_console in console_types.get('pty'):
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_LI('Truncated console log returned, '
'%d bytes ignored'),
remaining, instance=instance)
return log_data
def get_host_ip_addr(self):
ips = compute_utils.get_machine_ips()
if CONF.my_ip not in ips:
LOG.warn(_LW('my_ip address (%(my_ip)s) was not found on '
'any of the interfaces: %(ifaces)s'),
{'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)})
return CONF.my_ip
def get_vnc_console(self, context, instance):
def get_vnc_port_for_instance(instance_name):
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
xml_dom = etree.fromstring(xml)
graphic = xml_dom.find("./devices/graphics[@type='vnc']")
if graphic is not None:
return graphic.get('port')
# NOTE(rmk): We had VNC consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='vnc')
port = get_vnc_port_for_instance(instance.name)
host = CONF.vnc.vncserver_proxyclient_address
return ctype.ConsoleVNC(host=host, port=port)
def get_spice_console(self, context, instance):
def get_spice_ports_for_instance(instance_name):
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
xml_dom = etree.fromstring(xml)
graphic = xml_dom.find("./devices/graphics[@type='spice']")
if graphic is not None:
return (graphic.get('port'), graphic.get('tlsPort'))
# NOTE(rmk): We had Spice consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='spice')
ports = get_spice_ports_for_instance(instance.name)
host = CONF.spice.server_proxyclient_address
return ctype.ConsoleSpice(host=host, port=ports[0], tlsPort=ports[1])
def get_serial_console(self, context, instance):
for hostname, port in self._get_serial_ports_from_instance(
instance, mode='bind'):
return ctype.ConsoleSerial(host=hostname, port=port)
raise exception.ConsoleTypeUnavailable(console_type='serial')
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
try:
f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
# Check whether a write with 512-byte alignment is allowed
align_size = 512
m = mmap.mmap(-1, align_size)
m.write(r"x" * align_size)
os.write(f, m)
os.close(f)
LOG.debug("Path '%(path)s' supports direct I/O",
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'", {'path': dirpath, 'ex': e})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'"),
{'path': dirpath, 'ex': e})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'"), {'path': dirpath, 'ex': e})
finally:
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size."""
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
def _create_ephemeral(self, target, ephemeral_size,
fs_label, os_type, is_block_dev=False,
max_size=None, context=None, specified_fs=None):
if not is_block_dev:
self._create_local(target, ephemeral_size)
# Run as root only for block devices.
disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
specified_fs=specified_fs)
@staticmethod
def _create_swap(target, swap_mb, max_size=None, context=None):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
@staticmethod
def _get_disk_config_path(instance, suffix=''):
return os.path.join(libvirt_utils.get_instance_path(instance),
'disk.config' + suffix)
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _chown_disk_config_for_instance(self, instance):
disk_config = self._get_disk_config_path(instance)
if os.path.exists(disk_config):
libvirt_utils.chown(disk_config, os.getuid())
@staticmethod
def _is_booted_from_volume(instance, disk_mapping):
"""Determines whether the VM is booting from volume
Determines whether the disk mapping indicates that the VM
is booting from a volume.
"""
return ((not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping)
def _inject_data(self, instance, network_info, admin_pass, files, suffix):
"""Injects data in a disk image
Helper used for injecting data in a disk image file system.
Keyword arguments:
instance -- a dict with the instance specifications
network_info -- a dict with the network specifications
admin_pass -- a string used to set an admin password
files -- a list of files to be injected
suffix -- a string used as an image name suffix
"""
# Handles the partition need to be used.
target_partition = None
if not instance.kernel_id:
target_partition = CONF.libvirt.inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt.virt_type == 'lxc':
target_partition = None
# Handles the key injection.
if CONF.libvirt.inject_key and instance.get('key_data'):
key = str(instance.key_data)
else:
key = None
# Handles the admin password injection.
if not CONF.libvirt.inject_password:
admin_pass = None
# Handles the network injection.
net = netutils.get_injected_network_template(
network_info, libvirt_virt_type=CONF.libvirt.virt_type)
# Handles the metadata injection
metadata = instance.get('metadata')
image_type = CONF.libvirt.images_type
if any((key, net, metadata, admin_pass, files)):
injection_image = self.image_backend.image(
instance,
'disk' + suffix,
image_type)
img_id = instance.image_ref
if not injection_image.check_image_exists():
LOG.warn(_LW('Image %s not found on disk storage. '
'Continue without injecting data'),
injection_image.path, instance=instance)
return
try:
disk.inject_data(injection_image.get_model(self._conn),
key, net, metadata, admin_pass, files,
partition=target_partition,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error injecting data into image '
'%(img_id)s (%(e)s)'),
{'img_id': img_id, 'e': e},
instance=instance)
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None,
admin_pass=None, inject_files=True,
fallback_from_host=None):
booted_from_volume = self._is_booted_from_volume(
instance, disk_mapping)
def image(fname, image_type=CONF.libvirt.images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
LOG.info(_LI('Creating image'), instance=instance)
# NOTE(dprince): for rescue console.log may already exist... chown it.
self._chown_console_log_for_instance(instance)
# NOTE(yaguang): For evacuate disk.config already exist in shared
# storage, chown it.
self._chown_disk_config_for_instance(instance)
# NOTE(vish): No need to add the suffix to console.log
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 7)
if not disk_images:
disk_images = {'image_id': instance.image_ref,
'kernel_id': instance.kernel_id,
'ramdisk_id': instance.ramdisk_id}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance.user_id,
project_id=instance.project_id)
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance.user_id,
project_id=instance.project_id)
inst_type = instance.get_flavor()
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance.root_gb * units.Gi
if size == 0 or suffix == '.rescue':
size = None
backend = image('disk')
if backend.SUPPORTS_CLONE:
def clone_fallback_to_fetch(*args, **kwargs):
try:
backend.clone(context, disk_images['image_id'])
except exception.ImageUnacceptable:
libvirt_utils.fetch_image(*args, **kwargs)
fetch_func = clone_fallback_to_fetch
else:
fetch_func = libvirt_utils.fetch_image
self._try_fetch_image_cache(backend, fetch_func, context,
root_fname, disk_images['image_id'],
instance, size, fallback_from_host)
# Lookup the filesystem type if required
os_type_with_default = disk.get_fs_type_for_os_type(instance.os_type)
# Generate a file extension based on the file system
# type and the mkfs commands configured if any
file_extension = disk.get_file_extension_for_os_type(
os_type_with_default)
ephemeral_gb = instance.ephemeral_gb
if 'disk.local' in disk_mapping:
disk_image = image('disk.local')
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance.os_type,
is_block_dev=disk_image.is_block_dev)
fname = "ephemeral_%s_%s" % (ephemeral_gb, file_extension)
size = ephemeral_gb * units.Gi
disk_image.cache(fetch_func=fn,
context=context,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
disk_image = image(blockinfo.get_eph_disk(idx))
specified_fs = eph.get('guest_format')
if specified_fs and not self.is_supported_fs_format(specified_fs):
msg = _("%s format is not supported") % specified_fs
raise exception.InvalidBDMFormat(details=msg)
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % idx,
os_type=instance.os_type,
is_block_dev=disk_image.is_block_dev)
size = eph['size'] * units.Gi
fname = "ephemeral_%s_%s" % (eph['size'], file_extension)
disk_image.cache(fetch_func=fn,
context=context,
filename=fname,
size=size,
ephemeral_size=eph['size'],
specified_fs=specified_fs)
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * units.Mi
image('disk.swap').cache(fetch_func=self._create_swap,
context=context,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# Config drive
if configdrive.required_by(instance):
LOG.info(_LI('Using config drive'), instance=instance)
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = self._get_disk_config_path(instance, suffix)
LOG.info(_LI('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
try:
cdb.make_drive(configdrive_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed '
'with error: %s'),
e, instance=instance)
# File injection only if needed
elif inject_files and CONF.libvirt.inject_partition != -2:
if booted_from_volume:
LOG.warn(_LW('File injection into a boot from volume '
'instance is not supported'), instance=instance)
self._inject_data(
instance, network_info, admin_pass, files, suffix)
if CONF.libvirt.virt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
def _prepare_pci_devices_for_use(self, pci_devices):
# kvm and qemu support managed mode
# In managed mode, the configured device will be automatically
# detached from the host OS drivers when the guest is started,
# and then re-attached when the guest shuts down.
if CONF.libvirt.virt_type != 'xen':
# we do manual detach only for xen
return
try:
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._host.device_lookup_by_name(libvirt_dev_addr)
# Note(yjiang5) Spelling for 'dettach' is correct, see
# http://libvirt.org/html/libvirt-libvirt.html.
libvirt_dev.dettach()
# Note(yjiang5): A reset of one PCI device may impact other
# devices on the same bus, thus we need two separated loops
# to detach and then reset it.
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._host.device_lookup_by_name(libvirt_dev_addr)
libvirt_dev.reset()
except libvirt.libvirtError as exc:
raise exception.PciDevicePrepareFailed(id=dev['id'],
instance_uuid=
dev['instance_uuid'],
reason=six.text_type(exc))
def _detach_pci_devices(self, guest, pci_devs):
# For libvirt versions < 1.1.1 this is a race condition, so forbid
# detach if we do not have at least that version.
if not self._host.has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
if pci_devs:
reason = (_("Detaching PCI devices with libvirt < %(ver)s"
" is not permitted") %
{'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
raise exception.PciDeviceDetachFailed(reason=reason,
dev=pci_devs)
try:
for dev in pci_devs:
guest.detach_device(self._get_guest_pci_device(dev), live=True)
# after detachDeviceFlags returned, we should check the dom to
# ensure the detaching is finished
xml = guest.get_xml_desc()
xml_doc = etree.fromstring(xml)
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
for hdev in [d for d in guest_config.devices
if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev['address'])
if [int(x, 16) for x in hdbsf] ==\
[int(x, 16) for x in dbsf]:
raise exception.PciDeviceDetachFailed(reason=
"timeout",
dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("Instance disappeared while detaching "
"a PCI device from it."))
else:
raise
def _attach_pci_devices(self, guest, pci_devs):
try:
for dev in pci_devs:
guest.attach_device(self._get_guest_pci_device(dev))
except libvirt.libvirtError:
LOG.error(_LE('Attaching PCI devices %(dev)s to %(dom)s failed.'),
{'dev': pci_devs, 'dom': guest.id})
raise
@staticmethod
def _has_sriov_port(network_info):
for vif in network_info:
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
return True
return False
def _attach_sriov_ports(self, context, instance, guest, network_info=None):
if network_info is None:
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_sriov_port(network_info):
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
for vif in network_info:
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
cfg = self.vif_driver.get_config(instance,
vif,
image_meta,
instance.flavor,
CONF.libvirt.virt_type)
LOG.debug('Attaching SR-IOV port %(port)s to %(dom)s',
{'port': vif, 'dom': guest.id},
instance=instance)
guest.attach_device(cfg)
def _detach_sriov_ports(self, context, instance, guest):
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_sriov_port(network_info):
# For libvirt versions < 1.1.1 this is a race condition, so forbid
# detach on older versions.
if not self._host.has_min_version(
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
reason = (_("Detaching SR-IOV ports with"
" libvirt < %(ver)s is not permitted") %
{'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
raise exception.PciDeviceDetachFailed(reason=reason,
dev=network_info)
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
for vif in network_info:
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
cfg = self.vif_driver.get_config(instance,
vif,
image_meta,
instance.flavor,
CONF.libvirt.virt_type)
guest.detach_device(cfg, live=True)
def _set_host_enabled(self, enabled,
disable_reason=DISABLE_REASON_UNDEFINED):
"""Enables / Disables the compute service on this host.
This doesn't override non-automatic disablement with an automatic
setting, thereby permitting operators to keep otherwise
healthy hosts out of rotation.
"""
status_name = {True: 'disabled',
False: 'enabled'}
disable_service = not enabled
ctx = nova_context.get_admin_context()
try:
service = objects.Service.get_by_compute_host(ctx, CONF.host)
if service.disabled != disable_service:
# Note(jang): this is a quick fix to stop operator-
# disabled compute hosts from re-enabling themselves
# automatically. We prefix any automatic reason code
# with a fixed string. We only re-enable a host
# automatically if we find that string in place.
# This should probably be replaced with a separate flag.
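# Illustrative example: an automatic disable stores a reason such as
# DISABLE_PREFIX + 'Connection to libvirt lost', and only reasons
# carrying that prefix are ever re-enabled automatically.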
if not service.disabled or (
service.disabled_reason and
service.disabled_reason.startswith(DISABLE_PREFIX)):
service.disabled = disable_service
service.disabled_reason = (
DISABLE_PREFIX + disable_reason
if disable_service else DISABLE_REASON_UNDEFINED)
service.save()
LOG.debug('Updating compute service status to %s',
status_name[disable_service])
else:
LOG.debug('Not overriding manual compute service '
'status with: %s',
status_name[disable_service])
except exception.ComputeHostNotFound:
LOG.warn(_LW('Cannot update service status on host "%s" '
'since it is not registered.'), CONF.host)
except Exception:
LOG.warn(_LW('Cannot update service status on host "%s" '
'due to an unexpected exception.'), CONF.host,
exc_info=True)
def _get_guest_cpu_model_config(self):
mode = CONF.libvirt.cpu_mode
model = CONF.libvirt.cpu_model
if (CONF.libvirt.virt_type == "kvm" or
CONF.libvirt.virt_type == "qemu"):
if mode is None:
mode = "host-model"
if mode == "none":
return vconfig.LibvirtConfigGuestCPU()
else:
if mode is None or mode == "none":
return None
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt.virt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen",
{'mode': mode, 'model': (model or "")})
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
return cpu
def _get_guest_cpu_config(self, flavor, image_meta,
guest_cpu_numa_config, instance_numa_topology):
cpu = self._get_guest_cpu_model_config()
if cpu is None:
return None
# TODO(jaypipes): Remove when image_meta is always passed
# as an objects.ImageMeta
if not isinstance(image_meta, objects.ImageMeta):
image_meta = objects.ImageMeta.from_dict(image_meta)
topology = hardware.get_best_cpu_topology(
flavor, image_meta, numa_topology=instance_numa_topology)
cpu.sockets = topology.sockets
cpu.cores = topology.cores
cpu.threads = topology.threads
cpu.numa = guest_cpu_numa_config
return cpu
def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
if CONF.libvirt.hw_disk_discard:
if not self._host.has_min_version(MIN_LIBVIRT_DISCARD_VERSION,
MIN_QEMU_DISCARD_VERSION,
host.HV_DRIVER_QEMU):
msg = (_('Volume sets discard option, but libvirt %(libvirt)s'
' or later is required, qemu %(qemu)s'
' or later is required.') %
{'libvirt': MIN_LIBVIRT_DISCARD_VERSION,
'qemu': MIN_QEMU_DISCARD_VERSION})
raise exception.Invalid(msg)
image = self.image_backend.image(instance,
name,
image_type)
disk_info = disk_mapping[name]
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self._host.get_version())
def _get_guest_fs_config(self, instance, name, image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
return image.libvirt_fs_info("/", "ploop")
def _get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type, os_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
mount_rootfs = CONF.libvirt.virt_type == "lxc"
if mount_rootfs:
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
elif os_type == vm_mode.EXE and CONF.libvirt.virt_type == "parallels":
if 'disk' in disk_mapping:
fs = self._get_guest_fs_config(instance, "disk")
devices.append(fs)
else:
if rescue:
diskrescue = self._get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self._get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self._get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self._get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
instance.default_ephemeral_device = (
block_device.prepend_dev(disklocal.target_dev))
for idx, eph in enumerate(
driver.block_device_info_get_ephemerals(
block_device_info)):
diskeph = self._get_guest_disk_config(
instance,
blockinfo.get_eph_disk(idx),
disk_mapping, inst_type)
devices.append(diskeph)
if 'disk.swap' in disk_mapping:
diskswap = self._get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
instance.default_swap_device = (
block_device.prepend_dev(diskswap.target_dev))
if 'disk.config' in disk_mapping:
diskconfig = self._get_guest_disk_config(instance,
'disk.config',
disk_mapping,
inst_type,
'raw')
devices.append(diskconfig)
for vol in block_device.get_bdms_to_connect(block_device_mapping,
mount_rootfs):
connection_info = vol['connection_info']
vol_dev = block_device.prepend_dev(vol['mount_device'])
info = disk_mapping[vol_dev]
self._connect_volume(connection_info, info)
cfg = self._get_volume_config(connection_info, info)
devices.append(cfg)
vol['connection_info'] = connection_info
vol.save()
for d in devices:
self._set_cache_mode(d)
if (image_meta and
image_meta.get('properties', {}).get('hw_scsi_model')):
hw_scsi_model = image_meta['properties']['hw_scsi_model']
scsi_controller = vconfig.LibvirtConfigGuestController()
scsi_controller.type = 'scsi'
scsi_controller.model = hw_scsi_model
devices.append(scsi_controller)
return devices
def _get_host_sysinfo_serial_hardware(self):
"""Get a UUID from the host hardware
Get a UUID for the host hardware reported by libvirt.
This is typically from the SMBIOS data, unless it has
been overridden in /etc/libvirt/libvirtd.conf
"""
caps = self._host.get_capabilities()
return caps.host.uuid
def _get_host_sysinfo_serial_os(self):
"""Get a UUID from the host operating system
Get a UUID for the host operating system. Modern Linux
distros based on systemd provide a /etc/machine-id
file containing a UUID. This is also provided inside
systemd based containers and can be provided by other
init systems too, since it is just a plain text file.
"""
with open("/etc/machine-id") as f:
# We want to have '-' in the right place
# so we parse & reformat the value
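# e.g. a machine-id of '45cf07cdbf9c4071a12ec525f7bdcc86' becomes
# '45cf07cd-bf9c-4071-a12e-c525f7bdcc86' (illustrative value).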
return str(uuid.UUID(f.read().split()[0]))
def _get_host_sysinfo_serial_auto(self):
if os.path.exists("/etc/machine-id"):
return self._get_host_sysinfo_serial_os()
else:
return self._get_host_sysinfo_serial_hardware()
def _get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self._sysinfo_serial_func()
sysinfo.system_uuid = instance.uuid
sysinfo.system_family = "Virtual Machine"
return sysinfo
def _get_guest_pci_device(self, pci_device):
dbsf = pci_utils.parse_address(pci_device.address)
dev = vconfig.LibvirtConfigGuestHostdevPCI()
dev.domain, dev.bus, dev.slot, dev.function = dbsf
# only kvm and qemu support managed mode
if CONF.libvirt.virt_type in ('xen', 'parallels',):
dev.managed = 'no'
if CONF.libvirt.virt_type in ('kvm', 'qemu'):
dev.managed = 'yes'
return dev
def _get_guest_config_meta(self, context, instance):
"""Get metadata config for guest."""
meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
meta.package = version.version_string_with_package()
meta.name = instance.display_name
meta.creationTime = time.time()
if instance.image_ref not in ("", None):
meta.roottype = "image"
meta.rootid = instance.image_ref
if context is not None:
ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
ometa.userid = context.user_id
ometa.username = context.user_name
ometa.projectid = context.project_id
ometa.projectname = context.project_name
meta.owner = ometa
fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
flavor = instance.flavor
fmeta.name = flavor.name
fmeta.memory = flavor.memory_mb
fmeta.vcpus = flavor.vcpus
fmeta.ephemeral = flavor.ephemeral_gb
fmeta.disk = flavor.root_gb
fmeta.swap = flavor.swap
meta.flavor = fmeta
return meta
def _machine_type_mappings(self):
mappings = {}
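# Each CONF.libvirt.hw_machine_type entry has the form
# 'arch=machine_type', e.g. 'x86_64=pc-i440fx-2.1' (illustrative).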
for mapping in CONF.libvirt.hw_machine_type:
host_arch, _, machine_type = mapping.partition('=')
mappings[host_arch] = machine_type
return mappings
def _get_machine_type(self, image_meta, caps):
# The underlying machine type can be set as an image attribute,
# or otherwise based on some architecture specific defaults
mach_type = None
if (image_meta is not None and image_meta.get('properties') and
image_meta['properties'].get('hw_machine_type')
is not None):
mach_type = image_meta['properties']['hw_machine_type']
else:
# For ARM systems we will default to vexpress-a15 for armv7
# and virt for aarch64
if caps.host.cpu.arch == arch.ARMV7:
mach_type = "vexpress-a15"
if caps.host.cpu.arch == arch.AARCH64:
mach_type = "virt"
if caps.host.cpu.arch in (arch.S390, arch.S390X):
mach_type = 's390-ccw-virtio'
# If set in the config, use that as the default.
if CONF.libvirt.hw_machine_type:
mappings = self._machine_type_mappings()
mach_type = mappings.get(caps.host.cpu.arch)
return mach_type
@staticmethod
def _create_idmaps(klass, map_strings):
idmaps = []
if len(map_strings) > 5:
map_strings = map_strings[0:5]
LOG.warn(_LW("Too many id maps, only included first five."))
for map_string in map_strings:
try:
idmap = klass()
values = [int(i) for i in map_string.split(":")]
idmap.start = values[0]
idmap.target = values[1]
idmap.count = values[2]
idmaps.append(idmap)
except (ValueError, IndexError):
LOG.warn(_LW("Invalid value for id mapping %s"), map_string)
return idmaps
def _get_guest_idmaps(self):
id_maps = []
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.uid_maps:
uid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestUIDMap,
CONF.libvirt.uid_maps)
id_maps.extend(uid_maps)
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.gid_maps:
gid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestGIDMap,
CONF.libvirt.gid_maps)
id_maps.extend(gid_maps)
return id_maps
def _update_guest_cputune(self, guest, flavor, virt_type):
if virt_type in ('lxc', 'kvm', 'qemu'):
if guest.cputune is None:
guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
# Set the default cpu.shares value in proportion to the number
# of vcpus
guest.cputune.shares = 1024 * guest.vcpus
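# e.g. a 4-vCPU guest gets cpu.shares = 4096 by default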
cputuning = ['shares', 'period', 'quota']
for name in cputuning:
key = "quota:cpu_" + name
if key in flavor.extra_specs:
setattr(guest.cputune, name,
int(flavor.extra_specs[key]))
def _get_cpu_numa_config_from_instance(self, instance_numa_topology):
if instance_numa_topology:
guest_cpu_numa = vconfig.LibvirtConfigGuestCPUNUMA()
for instance_cell in instance_numa_topology.cells:
guest_cell = vconfig.LibvirtConfigGuestCPUNUMACell()
guest_cell.id = instance_cell.id
guest_cell.cpus = instance_cell.cpuset
guest_cell.memory = instance_cell.memory * units.Ki
guest_cpu_numa.cells.append(guest_cell)
return guest_cpu_numa
def _has_cpu_policy_support(self):
for ver in BAD_LIBVIRT_CPU_POLICY_VERSIONS:
if self._host.has_version(ver):
ver_ = self._version_to_string(ver)
raise exception.CPUPinningNotSupported(reason=_(
'Invalid libvirt version %(version)s') % {'version': ver_})
return True
def _get_guest_numa_config(self, instance_numa_topology, flavor, pci_devs,
allowed_cpus=None):
"""Returns the config objects for the guest NUMA specs.
Determines the CPUs that the guest can be pinned to if the guest
specifies a cell topology and the host supports it. Constructs the
libvirt XML config object representing the NUMA topology selected
for the guest. Returns a tuple of:
(cpu_set, guest_cpu_tune, guest_cpu_numa, guest_numa_tune)
With the following caveats:
a) If there is no specified guest NUMA topology, then
all tuple elements except cpu_set shall be None. cpu_set
will be populated with the chosen CPUs that the guest
allowed CPUs fit within, which could be the supplied
allowed_cpus value if the host doesn't support NUMA
topologies.
b) If there is a specified guest NUMA topology, then
cpu_set will be None and guest_cpu_numa will be the
LibvirtConfigGuestCPUNUMA object representing the guest's
NUMA topology. If the host supports NUMA, then guest_cpu_tune
will contain a LibvirtConfigGuestCPUTune object representing
the optimized chosen cells that match the host capabilities
with the instance's requested topology. If the host does
not support NUMA, then guest_cpu_tune and guest_numa_tune
will be None.
"""
if (not self._has_numa_support() and
instance_numa_topology is not None):
# We should not get here, since we should have avoided
# reporting NUMA topology from _get_host_numa_topology
# in the first place. Just in case of a scheduler
# mess up though, raise an exception
raise exception.NUMATopologyUnsupported()
topology = self._get_host_numa_topology()
# We have instance NUMA so translate it to the config class
guest_cpu_numa_config = self._get_cpu_numa_config_from_instance(
instance_numa_topology)
if not guest_cpu_numa_config:
# No NUMA topology defined for instance - let the host kernel deal
# with the NUMA effects.
# TODO(ndipanov): Attempt to spread the instance
# across NUMA nodes and expose the topology to the
# instance as an optimisation
return GuestNumaConfig(allowed_cpus, None, None, None)
else:
if topology:
# Now get the CpuTune configuration from the numa_topology
guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune()
allpcpus = []
numa_mem = vconfig.LibvirtConfigGuestNUMATuneMemory()
numa_memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()
for _ in guest_cpu_numa_config.cells]
for host_cell in topology.cells:
for guest_node_id, guest_config_cell in enumerate(
guest_cpu_numa_config.cells):
if guest_config_cell.id == host_cell.id:
node = numa_memnodes[guest_node_id]
node.cellid = guest_config_cell.id
node.nodeset = [host_cell.id]
node.mode = "strict"
numa_mem.nodeset.append(host_cell.id)
object_numa_cell = (
instance_numa_topology.cells[guest_node_id]
)
for cpu in guest_config_cell.cpus:
pin_cpuset = (
vconfig.LibvirtConfigGuestCPUTuneVCPUPin())
pin_cpuset.id = cpu
# If there is pinning information in the cell
# we pin to individual CPUs, otherwise we float
# over the whole host NUMA node
if (object_numa_cell.cpu_pinning and
self._has_cpu_policy_support()):
pcpu = object_numa_cell.cpu_pinning[cpu]
pin_cpuset.cpuset = set([pcpu])
else:
pin_cpuset.cpuset = host_cell.cpuset
allpcpus.extend(pin_cpuset.cpuset)
guest_cpu_tune.vcpupin.append(pin_cpuset)
# TODO(berrange) When the guest has >1 NUMA node, it will
# span multiple host NUMA nodes. By pinning emulator threads
# to the union of all nodes, we guarantee there will be
# cross-node memory access by the emulator threads when
# responding to guest I/O operations. The only way to avoid
# this would be to pin emulator threads to a single node and
# tell the guest OS to only do I/O from one of its virtual
# NUMA nodes. This is not even remotely practical.
#
# The long term solution is to make use of a new QEMU feature
# called "I/O Threads" which will let us configure an explicit
# I/O thread for each guest vCPU or guest NUMA node. It is
# still TBD how to make use of this feature though, especially
# how to associate IO threads with guest devices to eliminate
# cross NUMA node traffic. This is an area of investigation
# for QEMU community devs.
emulatorpin = vconfig.LibvirtConfigGuestCPUTuneEmulatorPin()
emulatorpin.cpuset = set(allpcpus)
guest_cpu_tune.emulatorpin = emulatorpin
# Sort the vcpupin list per vCPU id for human-friendlier XML
guest_cpu_tune.vcpupin.sort(key=operator.attrgetter("id"))
guest_numa_tune.memory = numa_mem
guest_numa_tune.memnodes = numa_memnodes
# normalize cell.id
for i, (cell, memnode) in enumerate(
zip(guest_cpu_numa_config.cells,
guest_numa_tune.memnodes)):
cell.id = i
memnode.cellid = i
return GuestNumaConfig(None, guest_cpu_tune,
guest_cpu_numa_config,
guest_numa_tune)
else:
return GuestNumaConfig(allowed_cpus, None,
guest_cpu_numa_config, None)
def _get_guest_os_type(self, virt_type):
"""Returns the guest OS type based on virt type."""
if virt_type == "lxc":
ret = vm_mode.EXE
elif virt_type == "uml":
ret = vm_mode.UML
elif virt_type == "xen":
ret = vm_mode.XEN
else:
ret = vm_mode.HVM
return ret
def _set_guest_for_rescue(self, rescue, guest, inst_path, virt_type,
root_device_name):
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
def _set_guest_for_inst_kernel(self, instance, guest, inst_path, virt_type,
root_device_name, image_meta):
guest.os_kernel = os.path.join(inst_path, "kernel")
if virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if instance.ramdisk_id:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
# we only support os_command_line with images with an explicit
# kernel set and don't want to break nova if there's an
# os_command_line property without a specified kernel_id param
if image_meta:
img_props = image_meta.get('properties', {})
if img_props.get('os_command_line'):
guest.os_cmdline = img_props.get('os_command_line')
def _set_clock(self, guest, os_type, image_meta, virt_type):
# NOTE(mikal): Microsoft Windows expects the clock to be in
# "localtime". If the clock is set to UTC, then you can use a
# registry key to let windows know, but Microsoft says this is
# buggy in http://support.microsoft.com/kb/2687252
clk = vconfig.LibvirtConfigGuestClock()
if os_type == 'windows':
LOG.info(_LI('Configuring timezone for windows instance to '
'localtime'))
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if virt_type == "kvm":
self._set_kvm_timers(clk, os_type, image_meta)
def _set_kvm_timers(self, clk, os_type, image_meta):
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
guestarch = libvirt_utils.get_arch(image_meta)
if guestarch in (arch.I686, arch.X86_64):
# NOTE(rfolco): HPET is a hardware timer for x86 arch.
# qemu -no-hpet is not supported on non-x86 targets.
tmhpet = vconfig.LibvirtConfigGuestTimer()
tmhpet.name = "hpet"
tmhpet.present = False
clk.add_timer(tmhpet)
# With new enough QEMU we can provide Windows guests
# with the paravirtualized hyperv timer source. This
# is the windows equiv of kvm-clock, allowing Windows
# guests to accurately keep time.
if (os_type == 'windows' and
self._host.has_min_version(MIN_LIBVIRT_HYPERV_TIMER_VERSION,
MIN_QEMU_HYPERV_TIMER_VERSION)):
tmhyperv = vconfig.LibvirtConfigGuestTimer()
tmhyperv.name = "hypervclock"
tmhyperv.present = True
clk.add_timer(tmhyperv)
def _set_features(self, guest, os_type, caps, virt_type):
if virt_type == "xen":
# PAE only makes sense in X86
if caps.host.cpu.arch in (arch.I686, arch.X86_64):
guest.features.append(vconfig.LibvirtConfigGuestFeaturePAE())
if (virt_type not in ("lxc", "uml", "parallels", "xen") or
(virt_type == "xen" and guest.os_type == vm_mode.HVM)):
guest.features.append(vconfig.LibvirtConfigGuestFeatureACPI())
guest.features.append(vconfig.LibvirtConfigGuestFeatureAPIC())
if (virt_type in ("qemu", "kvm") and
os_type == 'windows' and
self._host.has_min_version(MIN_LIBVIRT_HYPERV_FEATURE_VERSION,
MIN_QEMU_HYPERV_FEATURE_VERSION)):
hv = vconfig.LibvirtConfigGuestFeatureHyperV()
hv.relaxed = True
if self._host.has_min_version(
MIN_LIBVIRT_HYPERV_FEATURE_EXTRA_VERSION):
hv.spinlocks = True
# Increase spinlock retries - value recommended by
# KVM maintainers who certify Windows guests
# with Microsoft
hv.spinlock_retries = 8191
hv.vapic = True
guest.features.append(hv)
def _create_serial_console_devices(self, guest, instance, flavor,
image_meta):
guest_arch = libvirt_utils.get_arch(image_meta)
if CONF.serial_console.enabled:
# TODO(jaypipes): Remove when image_meta is always passed
# as an objects.ImageMeta
if not isinstance(image_meta, objects.ImageMeta):
image_meta = objects.ImageMeta.from_dict(image_meta)
num_ports = hardware.get_number_of_serial_ports(
flavor, image_meta)
for port in six.moves.range(num_ports):
if guest_arch in (arch.S390, arch.S390X):
console = vconfig.LibvirtConfigGuestConsole()
else:
console = vconfig.LibvirtConfigGuestSerial()
console.port = port
console.type = "tcp"
console.listen_host = (
CONF.serial_console.proxyclient_address)
console.listen_port = (
serial_console.acquire_port(
console.listen_host))
guest.add_device(console)
else:
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
if guest_arch in (arch.S390, arch.S390X):
consolelog = vconfig.LibvirtConfigGuestConsole()
consolelog.target_type = "sclplm"
else:
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
def _add_video_driver(self, guest, image_meta, img_meta_prop, flavor):
VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga", "xen", "qxl")
video = vconfig.LibvirtConfigGuestVideo()
# NOTE(ldbragst): The following logic sets the video.type
# depending on supported defaults given the architecture,
# virtualization type, and features. The video.type attribute can
# be overridden by the user with image_meta['properties'], which
# is carried out in the next if statement below this one.
guestarch = libvirt_utils.get_arch(image_meta)
if guest.os_type == vm_mode.XEN:
video.type = 'xen'
elif CONF.libvirt.virt_type == 'parallels':
video.type = 'vga'
elif guestarch in (arch.PPC, arch.PPC64):
            # NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by default
# so use 'vga' instead when running on Power hardware.
video.type = 'vga'
elif CONF.spice.enabled:
video.type = 'qxl'
if img_meta_prop.get('hw_video_model'):
video.type = img_meta_prop.get('hw_video_model')
if (video.type not in VALID_VIDEO_DEVICES):
raise exception.InvalidVideoMode(model=video.type)
# Set video memory, only if the flavor's limit is set
video_ram = int(img_meta_prop.get('hw_video_ram', 0))
max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
if video_ram > max_vram:
raise exception.RequestedVRamTooHigh(req_vram=video_ram,
max_vram=max_vram)
if max_vram and video_ram:
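            # The image property and flavor limit are expressed in MiB,
            # while libvirt expects the video RAM size in KiB.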
video.vram = video_ram * units.Mi / units.Ki
guest.add_device(video)
def _add_qga_device(self, guest, instance):
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = "unix"
qga.target_name = "org.qemu.guest_agent.0"
qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
("org.qemu.guest_agent.0", instance.name))
guest.add_device(qga)
def _add_rng_device(self, guest, flavor):
rng_device = vconfig.LibvirtConfigGuestRng()
rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
period = flavor.extra_specs.get('hw_rng:rate_period', 0)
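        # Optionally limit how many bytes of entropy the guest may read
        # from the host during each rate period.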
if rate_bytes:
rng_device.rate_bytes = int(rate_bytes)
rng_device.rate_period = int(period)
rng_path = CONF.libvirt.rng_dev_path
if (rng_path and not os.path.exists(rng_path)):
raise exception.RngDeviceNotExist(path=rng_path)
rng_device.backend = rng_path
guest.add_device(rng_device)
def _set_qemu_guest_agent(self, guest, flavor, instance, img_meta_prop):
qga_enabled = False
        # Enable the QEMU guest agent only if the 'hw_qemu_guest_agent'
        # image property evaluates to true
hw_qga = img_meta_prop.get('hw_qemu_guest_agent', '')
if strutils.bool_from_string(hw_qga):
LOG.debug("Qemu guest agent is enabled through image "
"metadata", instance=instance)
qga_enabled = True
if qga_enabled:
self._add_qga_device(guest, instance)
rng_is_virtio = img_meta_prop.get('hw_rng_model') == 'virtio'
rng_allowed_str = flavor.extra_specs.get('hw_rng:allowed', '')
rng_allowed = strutils.bool_from_string(rng_allowed_str)
if rng_is_virtio and rng_allowed:
self._add_rng_device(guest, flavor)
def _get_guest_memory_backing_config(self, inst_topology, numatune):
wantsmempages = False
if inst_topology:
for cell in inst_topology.cells:
if cell.pagesize:
wantsmempages = True
break
if not wantsmempages:
return
if not self._has_hugepage_support():
# We should not get here, since we should have avoided
# reporting NUMA topology from _get_host_numa_topology
# in the first place. Just in case of a scheduler
# mess up though, raise an exception
raise exception.MemoryPagesUnsupported()
host_topology = self._get_host_numa_topology()
if host_topology is None:
# As above, we should not get here but just in case...
raise exception.MemoryPagesUnsupported()
# Currently libvirt does not support the smallest
# pagesize set as a backend memory.
# https://bugzilla.redhat.com/show_bug.cgi?id=1173507
avail_pagesize = [page.size_kb
for page in host_topology.cells[0].mempages]
avail_pagesize.sort()
smallest = avail_pagesize[0]
pages = []
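        # Build one memory backing page element per guest NUMA cell that
        # asked for a page size larger than the smallest one the host
        # offers.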
for guest_cellid, inst_cell in enumerate(inst_topology.cells):
if inst_cell.pagesize and inst_cell.pagesize > smallest:
for memnode in numatune.memnodes:
if guest_cellid == memnode.cellid:
page = (
vconfig.LibvirtConfigGuestMemoryBackingPage())
page.nodeset = [guest_cellid]
page.size_kb = inst_cell.pagesize
pages.append(page)
break # Quit early...
if pages:
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.hugepages = pages
return membacking
def _get_flavor(self, ctxt, instance, flavor):
if flavor is not None:
return flavor
return instance.flavor
def _configure_guest_by_virt_type(self, guest, virt_type, caps, instance,
image_meta, flavor, root_device_name):
if virt_type == "xen":
if guest.os_type == vm_mode.HVM:
guest.os_loader = CONF.libvirt.xen_hvmloader_path
elif virt_type in ("kvm", "qemu"):
if caps.host.cpu.arch in (arch.I686, arch.X86_64):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
guest.os_mach_type = self._get_machine_type(image_meta, caps)
guest.os_bootmenu = strutils.bool_from_string(
flavor.extra_specs.get(
'hw:boot_menu', image_meta.get('properties', {}).get(
'hw_boot_menu', 'no')))
elif virt_type == "lxc":
guest.os_init_path = "/sbin/init"
guest.os_cmdline = CONSOLE
elif virt_type == "uml":
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
elif virt_type == "parallels":
if guest.os_type == vm_mode.EXE:
guest.os_init_path = "/sbin/init"
def _conf_non_lxc_uml(self, virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info):
if rescue:
self._set_guest_for_rescue(rescue, guest, inst_path, virt_type,
root_device_name)
elif instance.kernel_id:
self._set_guest_for_inst_kernel(instance, guest, inst_path,
virt_type, root_device_name,
image_meta)
else:
guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
def _create_consoles(self, virt_type, guest, instance, flavor, image_meta,
caps):
if virt_type in ("qemu", "kvm"):
# Create the serial console char devices
self._create_serial_console_devices(guest, instance, flavor,
image_meta)
if caps.host.cpu.arch in (arch.S390, arch.S390X):
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.target_type = "sclp"
else:
consolepty = vconfig.LibvirtConfigGuestSerial()
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
return consolepty
def _cpu_config_to_vcpu_model(self, cpu_config, vcpu_model):
"""Update VirtCPUModel object according to libvirt CPU config.
:param:cpu_config: vconfig.LibvirtConfigGuestCPU presenting the
instance's virtual cpu configuration.
:param:vcpu_model: VirtCPUModel object. A new object will be created
if None.
:return: Updated VirtCPUModel object, or None if cpu_config is None
"""
if not cpu_config:
return
if not vcpu_model:
vcpu_model = objects.VirtCPUModel()
vcpu_model.arch = cpu_config.arch
vcpu_model.vendor = cpu_config.vendor
vcpu_model.model = cpu_config.model
vcpu_model.mode = cpu_config.mode
vcpu_model.match = cpu_config.match
if cpu_config.sockets:
vcpu_model.topology = objects.VirtCPUTopology(
sockets=cpu_config.sockets,
cores=cpu_config.cores,
threads=cpu_config.threads)
else:
vcpu_model.topology = None
features = [objects.VirtCPUFeature(
name=f.name,
policy=f.policy) for f in cpu_config.features]
vcpu_model.features = features
return vcpu_model
def _vcpu_model_to_cpu_config(self, vcpu_model):
"""Create libvirt CPU config according to VirtCPUModel object.
:param:vcpu_model: VirtCPUModel object.
:return: vconfig.LibvirtConfigGuestCPU.
"""
cpu_config = vconfig.LibvirtConfigGuestCPU()
cpu_config.arch = vcpu_model.arch
cpu_config.model = vcpu_model.model
cpu_config.mode = vcpu_model.mode
cpu_config.match = vcpu_model.match
cpu_config.vendor = vcpu_model.vendor
if vcpu_model.topology:
cpu_config.sockets = vcpu_model.topology.sockets
cpu_config.cores = vcpu_model.topology.cores
cpu_config.threads = vcpu_model.topology.threads
if vcpu_model.features:
for f in vcpu_model.features:
xf = vconfig.LibvirtConfigGuestCPUFeature()
xf.name = f.name
xf.policy = f.policy
cpu_config.features.add(xf)
return cpu_config
def _get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None,
context=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
flavor = instance.flavor
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
img_meta_prop = image_meta.get('properties', {}) if image_meta else {}
virt_type = CONF.libvirt.virt_type
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = virt_type
guest.name = instance.name
guest.uuid = instance.uuid
# We are using default unit for memory: KiB
guest.memory = flavor.memory_mb * units.Ki
guest.vcpus = flavor.vcpus
allowed_cpus = hardware.get_vcpu_pin_set()
pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
guest_numa_config = self._get_guest_numa_config(
instance.numa_topology, flavor, pci_devs, allowed_cpus)
guest.cpuset = guest_numa_config.cpuset
guest.cputune = guest_numa_config.cputune
guest.numatune = guest_numa_config.numatune
guest.membacking = self._get_guest_memory_backing_config(
instance.numa_topology, guest_numa_config.numatune)
guest.metadata.append(self._get_guest_config_meta(context,
instance))
guest.idmaps = self._get_guest_idmaps()
self._update_guest_cputune(guest, flavor, virt_type)
guest.cpu = self._get_guest_cpu_config(
flavor, image_meta, guest_numa_config.numaconfig,
instance.numa_topology)
# Notes(yjiang5): we always sync the instance's vcpu model with
# the corresponding config file.
instance.vcpu_model = self._cpu_config_to_vcpu_model(
guest.cpu, instance.vcpu_model)
if 'root' in disk_mapping:
root_device_name = block_device.prepend_dev(
disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
instance.root_device_name = root_device_name
guest.os_type = (vm_mode.get_from_instance(instance) or
self._get_guest_os_type(virt_type))
caps = self._host.get_capabilities()
self._configure_guest_by_virt_type(guest, virt_type, caps, instance,
image_meta, flavor,
root_device_name)
if virt_type not in ('lxc', 'uml'):
self._conf_non_lxc_uml(virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info)
self._set_features(guest, instance.os_type, caps, virt_type)
self._set_clock(guest, instance.os_type, image_meta, virt_type)
storage_configs = self._get_guest_storage_config(
instance, image_meta, disk_info, rescue, block_device_info,
flavor, guest.os_type)
for config in storage_configs:
guest.add_device(config)
for vif in network_info:
config = self.vif_driver.get_config(
instance, vif, image_meta,
flavor, virt_type)
guest.add_device(config)
consolepty = self._create_consoles(virt_type, guest, instance, flavor,
image_meta, caps)
if virt_type != 'parallels':
consolepty.type = "pty"
guest.add_device(consolepty)
tablet = self._get_guest_usb_tablet(guest.os_type)
if tablet:
guest.add_device(tablet)
if (CONF.spice.enabled and CONF.spice.agent_enabled and
virt_type not in ('lxc', 'uml', 'xen')):
            channel = vconfig.LibvirtConfigGuestChannel()
            channel.type = 'spicevmc'
            channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# those versions are. We'll just let libvirt report the
# errors appropriately if the user enables both.
add_video_driver = False
if ((CONF.vnc.enabled and
virt_type not in ('lxc', 'uml'))):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = CONF.vnc.keymap
graphics.listen = CONF.vnc.vncserver_listen
guest.add_device(graphics)
add_video_driver = True
if (CONF.spice.enabled and
virt_type not in ('lxc', 'uml', 'xen')):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
add_video_driver = True
if add_video_driver:
self._add_video_driver(guest, image_meta, img_meta_prop, flavor)
        # The QEMU guest agent is only supported by the 'qemu' and 'kvm'
        # hypervisors
if virt_type in ('qemu', 'kvm'):
self._set_qemu_guest_agent(guest, flavor, instance, img_meta_prop)
if virt_type in ('xen', 'qemu', 'kvm'):
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self._get_guest_pci_device(pci_dev))
else:
if len(pci_devs) > 0:
raise exception.PciDeviceUnsupportedHypervisor(
type=virt_type)
if 'hw_watchdog_action' in flavor.extra_specs:
LOG.warn(_LW('Old property name "hw_watchdog_action" is now '
'deprecated and will be removed in the next release. '
'Use updated property name '
'"hw:watchdog_action" instead'), instance=instance)
# TODO(pkholkin): accepting old property name 'hw_watchdog_action'
# should be removed in the next release
watchdog_action = (flavor.extra_specs.get('hw_watchdog_action') or
flavor.extra_specs.get('hw:watchdog_action')
or 'disabled')
if (image_meta is not None and
image_meta.get('properties', {}).get('hw_watchdog_action')):
watchdog_action = image_meta['properties']['hw_watchdog_action']
# NB(sross): currently only actually supported by KVM/QEmu
if watchdog_action != 'disabled':
if watchdog_actions.is_valid_watchdog_action(watchdog_action):
bark = vconfig.LibvirtConfigGuestWatchdog()
bark.action = watchdog_action
guest.add_device(bark)
else:
raise exception.InvalidWatchdogAction(action=watchdog_action)
        # The memory balloon device is only supported by the 'qemu'/'kvm'
        # and 'xen' hypervisors
if (virt_type in ('xen', 'qemu', 'kvm') and
CONF.libvirt.mem_stats_period_seconds > 0):
balloon = vconfig.LibvirtConfigMemoryBalloon()
if virt_type in ('qemu', 'kvm'):
balloon.model = 'virtio'
else:
balloon.model = 'xen'
balloon.period = CONF.libvirt.mem_stats_period_seconds
guest.add_device(balloon)
return guest
def _get_guest_usb_tablet(self, os_type):
# We want a tablet if VNC is enabled, or SPICE is enabled and
# the SPICE agent is disabled. If the SPICE agent is enabled
# it provides a paravirt mouse which drastically reduces
# overhead (by eliminating USB polling).
#
# NB: this implies that if both SPICE + VNC are enabled
# at the same time, we'll get the tablet whether the
# SPICE agent is used or not.
need_usb_tablet = False
if CONF.vnc.enabled:
need_usb_tablet = CONF.libvirt.use_usb_tablet
elif CONF.spice.enabled and not CONF.spice.agent_enabled:
need_usb_tablet = CONF.libvirt.use_usb_tablet
tablet = None
if need_usb_tablet and os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
return tablet
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta, rescue=None,
block_device_info=None, write_to_disk=False):
# NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
# this ahead of time so that we don't acquire it while also
# holding the logging lock.
network_info_str = str(network_info)
msg = ('Start _get_guest_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s' %
{'network_info': network_info_str, 'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info})
# NOTE(mriedem): block_device_info can contain auth_password so we
# need to sanitize the password in the message.
LOG.debug(strutils.mask_password(msg), instance=instance)
conf = self._get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info,
context)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug('End _get_guest_xml xml=%(xml)s',
{'xml': xml}, instance=instance)
return xml
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove virt_dom at the end.
virt_dom = guest._domain
try:
dom_info = self._host.get_domain_info(virt_dom)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance.name)
msg = (_('Error from libvirt while getting domain info for '
'%(instance_name)s: [Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance.name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
return hardware.InstanceInfo(state=LIBVIRT_POWER_STATE[dom_info[0]],
max_mem_kb=dom_info[1],
mem_kb=dom_info[2],
num_cpu=dom_info[3],
cpu_time_ns=dom_info[4],
id=virt_dom.ID())
def _create_domain_setup_lxc(self, instance, image_meta,
block_device_info, disk_info):
inst_path = libvirt_utils.get_instance_path(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
disk_info = disk_info or {}
disk_mapping = disk_info.get('mapping', [])
if self._is_booted_from_volume(instance, disk_mapping):
root_disk = block_device.get_root_bdm(block_device_mapping)
disk_path = root_disk['connection_info']['data']['device_path']
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, image_meta, root_disk)
self._connect_volume(root_disk['connection_info'], disk_info)
# Get the system metadata from the instance
use_cow = instance.system_metadata['image_disk_format'] == 'qcow2'
else:
image = self.image_backend.image(instance, 'disk')
disk_path = image.path
use_cow = CONF.use_cow_images
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
fmt = imgmodel.FORMAT_RAW
if use_cow:
fmt = imgmodel.FORMAT_QCOW2
image = imgmodel.LocalFileImage(disk_path, fmt)
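        # Mount the root disk image under <instance_path>/rootfs so the
        # container can use it as its root filesystem; setup_container
        # returns the host device it attached, if any.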
rootfs_dev = disk.setup_container(image,
container_dir=container_dir)
try:
# Save rootfs device to disconnect it when deleting the instance
if rootfs_dev:
instance.system_metadata['rootfs_device_name'] = rootfs_dev
if CONF.libvirt.uid_maps or CONF.libvirt.gid_maps:
id_maps = self._get_guest_idmaps()
libvirt_utils.chown_for_id_maps(container_dir, id_maps)
except Exception:
with excutils.save_and_reraise_exception():
self._create_domain_cleanup_lxc(instance)
def _create_domain_cleanup_lxc(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
try:
state = self.get_info(instance).state
except exception.InstanceNotFound:
# The domain may not be present if the instance failed to start
state = None
if state == power_state.RUNNING:
# NOTE(uni): Now the container is running with its own private
# mount namespace and so there is no need to keep the container
# rootfs mounted in the host namespace
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)
@contextlib.contextmanager
def _lxc_disk_handler(self, instance, image_meta,
block_device_info, disk_info):
"""Context manager to handle the pre and post instance boot,
LXC specific disk operations.
An image or a volume path will be prepared and setup to be
used by the container, prior to starting it.
The disk will be disconnected and unmounted if a container has
failed to start.
"""
if CONF.libvirt.virt_type != 'lxc':
yield
return
self._create_domain_setup_lxc(instance, image_meta,
block_device_info, disk_info)
try:
yield
finally:
self._create_domain_cleanup_lxc(instance)
# TODO(sahid): Consider renaming this to _create_guest.
def _create_domain(self, xml=None, domain=None,
power_on=True, pause=False):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
:returns guest.Guest: Guest just created
"""
if xml:
guest = libvirt_guest.Guest.create(xml, self._host)
else:
guest = libvirt_guest.Guest(domain)
if power_on or pause:
guest.launch(pause=pause)
if not utils.is_neutron():
guest.enable_hairpin()
return guest
def _neutron_failed_callback(self, event_name, instance):
LOG.error(_LE('Neutron Reported failure on event '
'%(event)s for instance %(uuid)s'),
{'event': event_name, 'uuid': instance.uuid},
instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
def _create_domain_and_network(self, context, xml, instance, network_info,
disk_info, block_device_info=None,
power_on=True, reboot=False,
vifs_already_plugged=False):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
for vol in block_device_mapping:
connection_info = vol['connection_info']
if (not reboot and 'data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
timeout = CONF.vif_plugging_timeout
if (self._conn_supports_start_paused and
utils.is_neutron() and not
vifs_already_plugged and power_on and timeout):
events = self._get_neutron_events(network_info)
else:
events = []
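        # If we expect vif-plugged events from Neutron, launch the guest
        # paused and resume it only after the events have arrived, so the
        # instance does not start running before its networking is ready.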
pause = bool(events)
guest = None
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance,
network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
with self._lxc_disk_handler(instance, image_meta,
block_device_info, disk_info):
guest = self._create_domain(
xml, pause=pause, power_on=power_on)
self.firewall_driver.apply_instance_filter(instance,
network_info)
except exception.VirtualInterfaceCreateException:
# Neutron reported failure and we didn't swallow it, so
# bail here
with excutils.save_and_reraise_exception():
if guest:
guest.poweroff()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
except eventlet.timeout.Timeout:
# We never heard from Neutron
LOG.warn(_LW('Timeout waiting for vif plugging callback for '
'instance %(uuid)s'), {'uuid': instance.uuid},
instance=instance)
if CONF.vif_plugging_is_fatal:
if guest:
guest.poweroff()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
raise exception.VirtualInterfaceCreateException()
# Resume only if domain has been paused
if pause:
guest.resume()
return guest
def _get_all_block_devices(self):
"""Return all block devices in use on this node."""
devices = []
for dom in self._host.list_instance_domains():
try:
# TODO(sahid): list_instance_domain should
# be renamed as list_guest and so returning
# Guest objects.
guest = libvirt_guest.Guest(dom)
doc = etree.fromstring(guest.get_xml_desc())
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the XML from domain:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": guest.id, "ex": e})
continue
except Exception:
continue
sources = doc.findall("./devices/disk[@type='block']/source")
for source in sources:
devices.append(source.get('dev'))
return devices
def _get_interfaces(self, xml):
"""Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
nodes = doc.findall('./devices/interface/target')
for target in nodes:
interfaces.append(target.get('dev'))
return interfaces
def _get_vcpu_total(self):
"""Get available vcpu number of physical computer.
:returns: the number of cpu core instances can be used.
"""
try:
total_pcpus = self._host.get_cpu_count()
except libvirt.libvirtError:
LOG.warn(_LW("Cannot get the number of cpu, because this "
"function is not implemented for this platform. "))
return 0
if CONF.vcpu_pin_set is None:
return total_pcpus
available_ids = hardware.get_vcpu_pin_set()
# We get the list of online CPUs on the host and see if the requested
# set falls under these. If not, we retain the old behavior.
online_pcpus = None
try:
online_pcpus = self._host.get_online_cpus()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warn(_LW("Couldn't retrieve the online CPUs due to a Libvirt "
"error: %(error)s with error code: %(error_code)s"),
{'error': ex, 'error_code': error_code})
if online_pcpus:
if not (available_ids <= online_pcpus):
msg = (_("Invalid vcpu_pin_set config, one or more of the "
"specified cpuset is not online. Online cpuset(s): "
"%(online)s, requested cpuset(s): %(req)s"),
{'online': sorted(online_pcpus),
'req': sorted(available_ids)})
raise exception.Invalid(msg)
elif sorted(available_ids)[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
return len(available_ids)
@staticmethod
def _get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt.images_type == 'lvm':
info = lvm.get_volume_group_info(
CONF.libvirt.images_volume_group)
elif CONF.libvirt.images_type == 'rbd':
info = LibvirtDriver._get_rbd_driver().get_pool_info()
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
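        # The backends report sizes in bytes; convert them to GiB.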
for (k, v) in six.iteritems(info):
info[k] = v / units.Gi
return info
def _get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu(s) that are currently being used.
"""
total = 0
if CONF.libvirt.virt_type == 'lxc':
return total + 1
for dom in self._host.list_instance_domains():
try:
# TODO(sahid): list_instance_domains should
# return Guest objects.
vcpus = libvirt_guest.Guest(dom).get_vcpus_info()
if vcpus is not None:
total += len(list(vcpus))
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the vpu count from domain id:"
" %(uuid)s, exception: %(ex)s"),
{"uuid": dom.UUIDString(), "ex": e})
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return total
def _get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self._host.get_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (
arch.canonicalize(g.arch),
hv_type.canonicalize(dt),
vm_mode.canonicalize(g.ostype))
instance_caps.append(instance_cap)
return instance_caps
def _get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities.
:return: see above description
"""
caps = self._host.get_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = set()
for f in caps.host.cpu.features:
features.add(f.name)
cpu_info['features'] = features
return cpu_info
def _get_pcidev_info(self, devname):
"""Returns a dict of PCI device."""
def _get_device_type(cfgdev):
"""Get a PCI device's device type.
An assignable PCI device can be a normal PCI device,
a SR-IOV Physical Function (PF), or a SR-IOV Virtual
Function (VF). Only normal PCI devices or SR-IOV VFs
            are assignable, while SR-IOV PFs are always owned by the
            hypervisor.
            Note that a PCI device with SR-IOV capability that is not
            enabled is reported as a normal PCI device.
"""
for fun_cap in cfgdev.pci_capability.fun_capability:
if len(fun_cap.device_addrs) != 0:
if fun_cap.type == 'virt_functions':
return {'dev_type': 'type-PF'}
if fun_cap.type == 'phys_function':
phys_address = "%04x:%02x:%02x.%01x" % (
fun_cap.device_addrs[0][0],
fun_cap.device_addrs[0][1],
fun_cap.device_addrs[0][2],
fun_cap.device_addrs[0][3])
return {'dev_type': 'type-VF',
'phys_function': phys_address}
return {'dev_type': 'type-PCI'}
virtdev = self._host.device_lookup_by_name(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
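        # Format the PCI address as domain:bus:slot.function,
        # e.g. "0000:04:10.1".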
address = "%04x:%02x:%02x.%1x" % (
cfgdev.pci_capability.domain,
cfgdev.pci_capability.bus,
cfgdev.pci_capability.slot,
cfgdev.pci_capability.function)
device = {
"dev_id": cfgdev.name,
"address": address,
"product_id": "%04x" % cfgdev.pci_capability.product_id,
"vendor_id": "%04x" % cfgdev.pci_capability.vendor_id,
}
device["numa_node"] = cfgdev.pci_capability.numa_node
# requirement by DataBase Model
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(_get_device_type(cfgdev))
return device
def _get_pci_passthrough_devices(self):
"""Get host PCI devices information.
Obtains pci devices information from libvirt, and returns
as a JSON string.
Each device information is a dictionary, with mandatory keys
of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
'label' and other optional device specific information.
Refer to the objects/pci_device.py for more idea of these keys.
        :returns: a JSON string containing a list of the assignable PCI
devices information
"""
# Bail early if we know we can't support `listDevices` to avoid
# repeated warnings within a periodic task
if not getattr(self, '_list_devices_supported', True):
return jsonutils.dumps([])
try:
dev_names = self._host.list_pci_devices() or []
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
self._list_devices_supported = False
LOG.warn(_LW("URI %(uri)s does not support "
"listDevices: %(error)s"),
{'uri': self._uri(), 'error': ex})
return jsonutils.dumps([])
else:
raise
pci_info = []
for name in dev_names:
pci_info.append(self._get_pcidev_info(name))
return jsonutils.dumps(pci_info)
def _has_numa_support(self):
# This means that the host can support LibvirtConfigGuestNUMATune
# and the nodeset field in LibvirtConfigGuestMemoryBackingPage
supported_archs = [arch.I686, arch.X86_64]
caps = self._host.get_capabilities()
for ver in BAD_LIBVIRT_NUMA_VERSIONS:
if self._host.has_version(ver):
return False
return ((caps.host.cpu.arch in supported_archs) and
self._host.has_min_version(MIN_LIBVIRT_NUMA_VERSION,
MIN_QEMU_NUMA_HUGEPAGE_VERSION,
host.HV_DRIVER_QEMU))
def _has_hugepage_support(self):
# This means that the host can support multiple values for the size
# field in LibvirtConfigGuestMemoryBackingPage
supported_archs = [arch.I686, arch.X86_64]
caps = self._host.get_capabilities()
return ((caps.host.cpu.arch in supported_archs) and
self._host.has_min_version(MIN_LIBVIRT_HUGEPAGE_VERSION,
MIN_QEMU_NUMA_HUGEPAGE_VERSION,
host.HV_DRIVER_QEMU))
def _get_host_numa_topology(self):
if not self._has_numa_support():
return
caps = self._host.get_capabilities()
topology = caps.host.topology
if topology is None or not topology.cells:
return
cells = []
allowed_cpus = hardware.get_vcpu_pin_set()
online_cpus = self._host.get_online_cpus()
if allowed_cpus:
allowed_cpus &= online_cpus
else:
allowed_cpus = online_cpus
for cell in topology.cells:
cpuset = set(cpu.id for cpu in cell.cpus)
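            # Collapse the per-CPU sibling lists into unique sets of
            # thread siblings before restricting them to the CPUs we are
            # allowed to use.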
siblings = sorted(map(set,
set(tuple(cpu.siblings)
if cpu.siblings else ()
for cpu in cell.cpus)
))
cpuset &= allowed_cpus
siblings = [sib & allowed_cpus for sib in siblings]
# Filter out singles and empty sibling sets that may be left
siblings = [sib for sib in siblings if len(sib) > 1]
mempages = []
if self._has_hugepage_support():
mempages = [
objects.NUMAPagesTopology(
size_kb=pages.size,
total=pages.total,
used=0)
for pages in cell.mempages]
cell = objects.NUMACell(id=cell.id, cpuset=cpuset,
memory=cell.memory / units.Ki,
cpu_usage=0, memory_usage=0,
siblings=siblings,
pinned_cpus=set([]),
mempages=mempages)
cells.append(cell)
return objects.NUMATopology(cells=cells)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug("Trying to get stats for the volume %s",
volume_id, instance=instance)
vol_stats = self.block_stats(instance, mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3])
LOG.debug(
"Got volume usage stats for the volume=%(volume)s,"
" rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
"wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d",
stats, instance=instance)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance, disk_id):
"""Note that this function takes an instance name."""
try:
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove domain at the end.
domain = guest._domain
return domain.blockStats(disk_id)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_LI('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
{'instance_name': instance.name, 'disk': disk_id,
'errcode': errcode, 'e': e},
instance=instance)
except exception.InstanceNotFound:
LOG.info(_LI('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device'), instance.name,
instance=instance)
def get_console_pool_info(self, console_type):
# TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: will be put in PCI device
:returns: dictionary containing resource info
"""
disk_info_dict = self._get_local_gb_info()
data = {}
# NOTE(dprince): calling capabilities before getVersion works around
# an initialization issue with some versions of Libvirt (1.0.5.5).
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
# See: https://bugs.launchpad.net/nova/+bug/1215593
# Temporary convert supported_instances into a string, while keeping
# the RPC version as JSON. Can be changed when RPC broadcast is removed
data["supported_instances"] = jsonutils.dumps(
self._get_instance_capabilities())
data["vcpus"] = self._get_vcpu_total()
data["memory_mb"] = self._host.get_memory_mb_total()
data["local_gb"] = disk_info_dict['total']
data["vcpus_used"] = self._get_vcpu_used()
data["memory_mb_used"] = self._host.get_memory_mb_used()
data["local_gb_used"] = disk_info_dict['used']
data["hypervisor_type"] = self._host.get_driver_type()
data["hypervisor_version"] = self._host.get_version()
data["hypervisor_hostname"] = self._host.get_hostname()
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
# so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
data["cpu_info"] = jsonutils.dumps(self._get_cpu_info())
disk_free_gb = disk_info_dict['free']
disk_over_committed = self._get_disk_over_committed_size_total()
available_least = disk_free_gb * units.Gi - disk_over_committed
data['disk_available_least'] = available_least / units.Gi
data['pci_passthrough_devices'] = \
self._get_pci_passthrough_devices()
numa_topology = self._get_host_numa_topology()
if numa_topology:
data['numa_topology'] = numa_topology._to_json()
else:
data['numa_topology'] = None
return data
def check_instance_shared_storage_local(self, context, instance):
"""Check if instance files located on shared storage.
This runs check on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.objects.instance.Instance object
:returns:
- tempfile: A dict containing the tempfile info on the destination
host
- None:
                1. If the instance path does not exist.
                2. If the image backend is a shared block storage type.
"""
if self.image_backend.backend().is_shared_block_storage():
return None
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.",
tmp_file, instance=instance)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing:
:filename: name of the tmpfile under CONF.instances_path
:block_migration: whether this is block migration
:disk_over_commit: disk-over-commit factor on dest host
:disk_available_mb: available disk space on dest host
"""
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
(disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb
# Compare CPU
if not instance.vcpu_model or not instance.vcpu_model.model:
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(None, source_cpu_info)
else:
self._compare_cpu(instance.vcpu_model, None)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"image_type": CONF.libvirt.images_type,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
dest_check_data,
block_device_info=None):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:param block_device_info: result of _get_instance_block_device_info
:returns: a dict containing migration info
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
source = CONF.host
dest_check_data.update({'is_shared_instance_path':
self._check_shared_storage_test_file(
dest_check_data['filename'])})
dest_check_data.update({'is_shared_block_storage':
self._is_shared_block_storage(instance, dest_check_data,
block_device_info)})
if dest_check_data['block_migration']:
if (dest_check_data['is_shared_block_storage'] or
dest_check_data['is_shared_instance_path']):
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'],
block_device_info)
elif not (dest_check_data['is_shared_block_storage'] or
dest_check_data['is_shared_instance_path']):
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
# NOTE(mikal): include the instance directory name here because it
# doesn't yet exist on the destination but we want to force that
# same name to be used
instance_path = libvirt_utils.get_instance_path(instance,
relative=True)
dest_check_data['instance_relative_path'] = instance_path
# NOTE(danms): Emulate this old flag in case we're talking to
# an older client (<= Juno). We can remove this when we bump the
# compute RPC API to 4.0.
dest_check_data['is_shared_storage'] = (
dest_check_data['is_shared_instance_path'])
return dest_check_data
def _is_shared_block_storage(self, instance, dest_check_data,
block_device_info=None):
"""Check if all block storage of an instance can be shared
between source and destination of a live migration.
Returns true if the instance is volume backed and has no local disks,
or if the image backend is the same on source and destination and the
backend shares block storage between compute nodes.
:param instance: nova.objects.instance.Instance object
:param dest_check_data: dict with boolean fields image_type,
is_shared_instance_path, and is_volume_backed
"""
if (CONF.libvirt.images_type == dest_check_data.get('image_type') and
self.image_backend.backend().is_shared_block_storage()):
# NOTE(dgenin): currently true only for RBD image backend
return True
if (dest_check_data.get('is_shared_instance_path') and
self.image_backend.backend().is_file_in_instance_path()):
# NOTE(angdraug): file based image backends (Raw, Qcow2)
# place block device files under the instance path
return True
if (dest_check_data.get('is_volume_backed') and
not bool(jsonutils.loads(
self.get_instance_disk_info(instance,
block_device_info)))):
return True
return False
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit,
block_device_info=None):
"""Checks if destination has enough disk for block migration."""
        # Libvirt supports the qcow2 disk format, which is usually
        # compressed on compute nodes.
        # The real (compressed) disk image may grow up to the
        # "virtual disk size", which is the maximum size of the disk
        # (see "qemu-img info <path-to-disk>").
        # The scheduler considers the destination host to still have
        # enough disk space if:
        #   real disk size < available disk size, when disk_over_commit
        #   is True; otherwise, virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * units.Mi
ret = self.get_instance_disk_info(instance,
block_device_info=block_device_info)
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
'Disk of instance is too large(available'
' on destination host:%(available)s '
'< need:%(necessary)s)') %
{'instance_uuid': instance.uuid,
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
def _compare_cpu(self, guest_cpu, host_cpu_str):
"""Check the host is compatible with the requested CPU
:param guest_cpu: nova.objects.VirtCPUModel or None
:param host_cpu_str: JSON from _get_cpu_info() method
If the 'guest_cpu' parameter is not None, this will be
validated for migration compatibility with the host.
Otherwise the 'host_cpu_str' JSON string will be used for
validation.
:returns:
            None. An exception is raised if the given CPU info is not
            compatible with this host.
"""
# NOTE(berendt): virConnectCompareCPU not working for Xen
if CONF.libvirt.virt_type not in ['qemu', 'kvm']:
return
if guest_cpu is None:
info = jsonutils.loads(host_cpu_str)
LOG.info(_LI('Instance launched has CPU info: %s'), host_cpu_str)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
else:
cpu = self._vcpu_model_to_cpu_config(guest_cpu)
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
        # if an unknown character exists in the xml, then libvirt complains
try:
ret = self._host.compare_cpu(cpu.to_xml())
except libvirt.libvirtError as e:
error_code = e.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.debug("URI %(uri)s does not support cpu comparison. "
"It will be proceeded though. Error: %(error)s",
{'uri': self._uri(), 'error': e})
return
else:
LOG.error(m, {'ret': e, 'u': u})
raise exception.MigrationPreCheckError(
reason=m % {'ret': e, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.", tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
"""Confirms existence of the tmpfile under CONF.instances_path.
        Returns False if the tmpfile cannot be confirmed.
"""
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = list(range(CONF.live_migration_retry_count))
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance.name)
greenthread.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
"""
# 'dest' will be substituted into 'migration_uri' so ensure
        # it doesn't contain any characters that could be used to
        # exploit the URI accepted by libvirt
if not libvirt_utils.is_valid_hostname(dest):
raise exception.InvalidHostname(hostname=dest)
utils.spawn(self._live_migration, context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
def _update_xml(self, xml_str, volume, listen_addrs):
xml_doc = etree.fromstring(xml_str)
if volume:
xml_doc = self._update_volume_xml(xml_doc, volume)
if listen_addrs:
xml_doc = self._update_graphics_xml(xml_doc, listen_addrs)
else:
self._check_graphics_addresses_can_live_migrate(listen_addrs)
return etree.tostring(xml_doc)
def _update_graphics_xml(self, xml_doc, listen_addrs):
# change over listen addresses
for dev in xml_doc.findall('./devices/graphics'):
gr_type = dev.get('type')
listen_tag = dev.find('listen')
if gr_type in ('vnc', 'spice'):
if listen_tag is not None:
listen_tag.set('address', listen_addrs[gr_type])
if dev.get('listen') is not None:
dev.set('listen', listen_addrs[gr_type])
return xml_doc
def _update_volume_xml(self, xml_doc, volume):
"""Update XML using device information of destination host."""
# Update volume xml
parser = etree.XMLParser(remove_blank_text=True)
disk_nodes = xml_doc.findall('./devices/disk')
for pos, disk_dev in enumerate(disk_nodes):
serial_source = disk_dev.findtext('serial')
if serial_source is None or volume.get(serial_source) is None:
continue
if ('connection_info' not in volume[serial_source] or
'disk_info' not in volume[serial_source]):
continue
conf = self._get_volume_config(
volume[serial_source]['connection_info'],
volume[serial_source]['disk_info'])
xml_doc2 = etree.XML(conf.to_xml(), parser)
serial_dest = xml_doc2.findtext('serial')
# Compare source serial and destination serial number.
# If these serial numbers match, continue the process.
if (serial_dest and (serial_source == serial_dest)):
LOG.debug("Find same serial number: pos=%(pos)s, "
"serial=%(num)s",
{'pos': pos, 'num': serial_source})
for cnt, item_src in enumerate(disk_dev):
# If source and destination have same item, update
# the item using destination value.
for item_dst in xml_doc2.findall(item_src.tag):
disk_dev.remove(item_src)
item_dst.tail = None
disk_dev.insert(cnt, item_dst)
                # If destination has additional items, these items should be
                # added here.
for item_dst in list(xml_doc2):
item_dst.tail = None
disk_dev.insert(cnt, item_dst)
return xml_doc
def _check_graphics_addresses_can_live_migrate(self, listen_addrs):
LOCAL_ADDRS = ('0.0.0.0', '127.0.0.1', '::', '::1')
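        # When the listen addresses cannot be rewritten in the migrated
        # XML, live migration is only safe if the graphics servers listen
        # on a catch-all or loopback address.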
local_vnc = CONF.vnc.vncserver_listen in LOCAL_ADDRS
local_spice = CONF.spice.server_listen in LOCAL_ADDRS
if ((CONF.vnc.enabled and not local_vnc) or
(CONF.spice.enabled and not local_spice)):
msg = _('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag or your'
' destination node does not support'
' retrieving listen addresses. In order'
' for live migration to work properly, you'
' must configure the graphics (VNC and/or'
' SPICE) listen addresses to be either'
' the catch-all address (0.0.0.0 or ::) or'
' the local address (127.0.0.1 or ::1).')
raise exception.MigrationError(reason=msg)
if listen_addrs is not None:
dest_local_vnc = listen_addrs['vnc'] in LOCAL_ADDRS
dest_local_spice = listen_addrs['spice'] in LOCAL_ADDRS
if ((CONF.vnc.enabled and not dest_local_vnc) or
(CONF.spice.enabled and not dest_local_spice)):
LOG.warn(_LW('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag, and the '
' graphics (VNC and/or SPICE) listen'
' addresses on the destination node do not'
' match the addresses on the source node.'
' Since the source node has listen'
' addresses set to either the catch-all'
' address (0.0.0.0 or ::) or the local'
' address (127.0.0.1 or ::1), the live'
' migration will succeed, but the VM will'
' continue to listen on the current'
' addresses.'))
def _live_migration_operation(self, context, instance, dest,
block_migration, migrate_data, dom):
"""Invoke the live migration operation
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
:param dom: the libvirt domain object
This method is intended to be run in a background thread and will
block that thread until the migration is finished or failed.
"""
# TODO(sahid): Should pass a guest to this method.
guest = libvirt_guest.Guest(dom)
try:
if block_migration:
flaglist = CONF.libvirt.block_migration_flag.split(',')
else:
flaglist = CONF.libvirt.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
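            # OR the configured VIR_MIGRATE_* flag names (e.g.
            # VIR_MIGRATE_LIVE, VIR_MIGRATE_PEER2PEER) together into the
            # single bitmask that the libvirt migration API expects.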
logical_sum = reduce(lambda x, y: x | y, flagvals)
pre_live_migrate_data = (migrate_data or {}).get(
'pre_live_migration_result', {})
listen_addrs = pre_live_migrate_data.get('graphics_listen_addrs')
volume = pre_live_migrate_data.get('volume')
migratable_flag = getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE',
None)
if (migratable_flag is None or
(listen_addrs is None and not volume)):
self._check_graphics_addresses_can_live_migrate(listen_addrs)
dom.migrateToURI(CONF.libvirt.live_migration_uri % dest,
logical_sum,
None,
CONF.libvirt.live_migration_bandwidth)
else:
old_xml_str = guest.get_xml_desc(dump_migratable=True)
new_xml_str = self._update_xml(old_xml_str,
volume,
listen_addrs)
try:
dom.migrateToURI2(CONF.libvirt.live_migration_uri % dest,
None,
new_xml_str,
logical_sum,
None,
CONF.libvirt.live_migration_bandwidth)
except libvirt.libvirtError as ex:
# NOTE(mriedem): There is a bug in older versions of
# libvirt where the VIR_DOMAIN_XML_MIGRATABLE flag causes
# virDomainDefCheckABIStability to not compare the source
# and target domain xml's correctly for the CPU model.
# We try to handle that error here and attempt the legacy
# migrateToURI path, which could fail if the console
# addresses are not correct, but in that case we have the
# _check_graphics_addresses_can_live_migrate check in place
# to catch it.
# TODO(mriedem): Remove this workaround when
# Red Hat BZ #1141838 is closed.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
LOG.warn(_LW('An error occurred trying to live '
'migrate. Falling back to legacy live '
'migrate flow. Error: %s'), ex,
instance=instance)
self._check_graphics_addresses_can_live_migrate(
listen_addrs)
dom.migrateToURI(
CONF.libvirt.live_migration_uri % dest,
logical_sum,
None,
CONF.libvirt.live_migration_bandwidth)
else:
raise
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Live Migration failure: %s"), e,
instance=instance)
# If 'migrateToURI' fails we don't know what state the
# VM instances on each host are in. Possibilities include
#
# 1. src==running, dst==none
#
# Migration failed & rolled back, or never started
#
# 2. src==running, dst==paused
#
# Migration started but is still ongoing
#
# 3. src==paused, dst==paused
#
# Migration data transfer completed, but switchover
# is still ongoing, or failed
#
# 4. src==paused, dst==running
#
# Migration data transfer completed, switchover
# happened but cleanup on source failed
#
# 5. src==none, dst==running
#
# Migration fully succeeded.
#
# Libvirt will aim to complete any migration operation
# or roll it back. So even if the migrateToURI call has
# returned an error, if the migration was not finished
# libvirt should clean up.
#
        # So we take the error raised here with a pinch of salt
# and rely on the domain job info status to figure out
# what really happened to the VM, which is a much more
# reliable indicator.
#
# In particular we need to try very hard to ensure that
# Nova does not "forget" about the guest. ie leaving it
# running on a different host to the one recorded in
# the database, as that would be a serious resource leak
LOG.debug("Migration operation thread has finished",
instance=instance)
def _live_migration_monitor(self, context, instance, dest, post_method,
recover_method, block_migration,
migrate_data, dom, finish_event):
n = 0
while True:
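            # Poll the libvirt job info roughly every 0.5 seconds (see the
            # time.sleep(0.5) at the bottom of this loop) until the migration
            # completes, fails or is cancelled.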
info = host.DomainJobInfo.for_domain(dom)
if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
# Annoyingly this could indicate many possible
# states, so we must fix the mess:
#
# 1. Migration has not yet begun
# 2. Migration has stopped due to failure
# 3. Migration has stopped due to completion
#
# We can detect option 1 by seeing if thread is still
# running. We can distinguish 2 vs 3 by seeing if the
# VM still exists & running on the current host
#
if not finish_event.ready():
LOG.debug("Operation thread is still running",
instance=instance)
# Leave type untouched
else:
try:
if dom.isActive():
LOG.debug("VM running on src, migration failed",
instance=instance)
info.type = libvirt.VIR_DOMAIN_JOB_FAILED
else:
LOG.debug("VM is shutoff, migration finished",
instance=instance)
info.type = libvirt.VIR_DOMAIN_JOB_COMPLETED
except libvirt.libvirtError as ex:
LOG.debug("Error checking domain status %(ex)s",
ex, instance=instance)
if ex.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
LOG.debug("VM is missing, migration finished",
instance=instance)
info.type = libvirt.VIR_DOMAIN_JOB_COMPLETED
else:
LOG.info(_LI("Error %(ex)s, migration failed"),
instance=instance)
info.type = libvirt.VIR_DOMAIN_JOB_FAILED
if info.type != libvirt.VIR_DOMAIN_JOB_NONE:
LOG.debug("Fixed incorrect job type to be %d",
info.type, instance=instance)
if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
# Migration is not yet started
LOG.debug("Migration not running yet",
instance=instance)
elif info.type == libvirt.VIR_DOMAIN_JOB_UNBOUNDED:
# We loop every 500ms, so don't log on every
# iteration to avoid spamming logs for long
# running migrations. Just once every 5 secs
# is sufficient for developers to debug problems.
# We log once every 30 seconds at info to help
# admins see slow running migration operations
# when debug logs are off.
if (n % 10) == 0:
# Ignoring memory_processed, as due to repeated
# dirtying of data, this can be way larger than
# memory_total. Best to just look at what's
# remaining to copy and ignore what's done already
#
# TODO(berrange) perhaps we could include disk
# transfer stats in the progress too, but it
# might make memory info more obscure as large
# disk sizes might dwarf memory size
remaining = 100
if info.memory_total != 0:
remaining = round(info.memory_remaining *
100 / info.memory_total)
instance.progress = 100 - remaining
instance.save()
lg = LOG.debug
if (n % 60) == 0:
lg = LOG.info
lg(_LI("Migration running for %(secs)d secs, "
"memory %(remaining)d%% remaining; "
"(bytes processed=%(processed_memory)d, "
"remaining=%(remaining_memory)d, "
"total=%(total_memory)d)"),
{"secs": n / 2, "remaining": remaining,
"processed_memory": info.memory_processed,
"remaining_memory": info.memory_remaining,
"total_memory": info.memory_total}, instance=instance)
# Migration is still running
#
# This is where we'd wire up calls to change live
# migration status. eg change max downtime, cancel
# the operation, change max bandwidth
n = n + 1
elif info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED:
# Migration is all done
LOG.info(_LI("Migration operation has completed"),
instance=instance)
post_method(context, instance, dest, block_migration,
migrate_data)
break
elif info.type == libvirt.VIR_DOMAIN_JOB_FAILED:
# Migration did not succeed
LOG.error(_LE("Migration operation has aborted"),
instance=instance)
recover_method(context, instance, dest, block_migration,
migrate_data)
break
elif info.type == libvirt.VIR_DOMAIN_JOB_CANCELLED:
# Migration was stopped by admin
LOG.warn(_LW("Migration operation was cancelled"),
instance=instance)
recover_method(context, instance, dest, block_migration,
migrate_data)
break
else:
LOG.warn(_LW("Unexpected migration job type: %d"),
info.type, instance=instance)
time.sleep(0.5)
def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration,
migrate_data):
"""Do live migration.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
This fires off a new thread to run the blocking migration
operation, and then this thread monitors the progress of
migration and controls its operation
"""
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove dom at the end.
dom = guest._domain
opthread = utils.spawn(self._live_migration_operation,
context, instance, dest,
block_migration,
migrate_data, dom)
finish_event = eventlet.event.Event()
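        # The monitor uses this event to tell whether the migration operation
        # thread is still running: thread_finished() below fires it, via
        # opthread.link(), once the operation thread exits.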
def thread_finished(thread, event):
LOG.debug("Migration operation thread notification",
instance=instance)
event.send()
opthread.link(thread_finished, finish_event)
# Let eventlet schedule the new thread right away
time.sleep(0)
try:
LOG.debug("Starting monitoring of live migration",
instance=instance)
self._live_migration_monitor(context, instance, dest,
post_method, recover_method,
block_migration, migrate_data,
dom, finish_event)
except Exception as ex:
LOG.warn(_LW("Error monitoring migration: %(ex)s"),
{"ex": ex}, instance=instance, exc_info=True)
raise
finally:
LOG.debug("Live migration monitoring is all done",
instance=instance)
def _try_fetch_image(self, context, path, image_id, instance,
fallback_from_host=None):
try:
libvirt_utils.fetch_image(context, path,
image_id,
instance.user_id,
instance.project_id)
except exception.ImageNotFound:
if not fallback_from_host:
raise
LOG.debug("Image %(image_id)s doesn't exist anymore on "
"image service, attempting to copy image "
"from %(host)s",
{'image_id': image_id, 'host': fallback_from_host})
libvirt_utils.copy_image(src=path, dest=path,
host=fallback_from_host,
receive=True)
def _fetch_instance_kernel_ramdisk(self, context, instance,
fallback_from_host=None):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance.kernel_id:
kernel_path = os.path.join(instance_dir, 'kernel')
# NOTE(dsanders): only fetch image if it's not available at
# kernel_path. This also avoids ImageNotFound exception if
# the image has been deleted from glance
if not os.path.exists(kernel_path):
self._try_fetch_image(context,
kernel_path,
instance.kernel_id,
instance, fallback_from_host)
if instance.ramdisk_id:
ramdisk_path = os.path.join(instance_dir, 'ramdisk')
# NOTE(dsanders): only fetch image if it's not available at
# ramdisk_path. This also avoids ImageNotFound exception if
# the image has been deleted from glance
if not os.path.exists(ramdisk_path):
self._try_fetch_image(context,
ramdisk_path,
instance.ramdisk_id,
instance, fallback_from_host)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Clean up destination node after a failed live migration."""
try:
self.destroy(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
finally:
# NOTE(gcb): Failed block live migration may leave instance
# directory at destination node, ensure it is always deleted.
is_shared_instance_path = True
if migrate_data:
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', True)
if not is_shared_instance_path:
instance_dir = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
if os.path.exists(instance_dir):
shutil.rmtree(instance_dir)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
"""Preparation live migration."""
if disk_info is not None:
disk_info = jsonutils.loads(disk_info)
# Steps for volume backed instance live migration w/o shared storage.
is_shared_block_storage = True
is_shared_instance_path = True
is_block_migration = True
if migrate_data:
LOG.debug('migrate_data in pre_live_migration: %s', migrate_data,
instance=instance)
is_shared_block_storage = migrate_data.get(
'is_shared_block_storage', True)
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', True)
is_block_migration = migrate_data.get('block_migration', True)
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
if not (is_shared_instance_path and is_shared_block_storage):
# NOTE(dims): Using config drive with iso format does not work
# because of a bug in libvirt with read only devices. However
# one can use vfat as config_drive_format which works fine.
# Please see bug/1246201 for details on the libvirt bug.
if CONF.config_drive_format != 'vfat':
if configdrive.required_by(instance):
raise exception.NoLiveMigrationForConfigDriveInLibVirt()
if not is_shared_instance_path:
instance_dir = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
LOG.debug('Creating instance directory: %s', instance_dir,
instance=instance)
os.mkdir(instance_dir)
if not is_shared_block_storage:
# Ensure images and backing files are present.
LOG.debug('Checking to make sure images and backing files are '
'present before live migration.', instance=instance)
self._create_images_and_backing(
context, instance, instance_dir, disk_info,
fallback_from_host=instance.host)
if not (is_block_migration or is_shared_instance_path):
# NOTE(angdraug): when block storage is shared between source and
# destination and instance path isn't (e.g. volume backed or rbd
# backed instance), instance path on destination has to be prepared
# Touch the console.log file, required by libvirt.
console_file = self._get_console_log_path(instance)
LOG.debug('Touch instance console log: %s', console_file,
instance=instance)
libvirt_utils.file_open(console_file, 'a').close()
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if len(block_device_mapping):
LOG.debug('Connecting volumes before live migration.',
instance=instance)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, image_meta, vol)
self._connect_volume(connection_info, disk_info)
if is_block_migration and len(block_device_mapping):
# NOTE(stpierre): if this instance has mapped volumes,
# we can't do a block migration, since that will
# result in volumes being copied from themselves to
# themselves, which is a recipe for disaster.
LOG.error(
_LE('Cannot block migrate instance %s with mapped volumes'),
instance.uuid, instance=instance)
msg = (_('Cannot block migrate instance %s with mapped volumes') %
instance.uuid)
raise exception.MigrationError(reason=msg)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
        # Retrying is necessary because requests arrive continuously;
        # when concurrent requests hit iptables, it complains.
LOG.debug('Plugging VIFs before live migration.', instance=instance)
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_LW('plug_vifs() failed %(cnt)d. Retry up to '
'%(max_retry)d.'),
{'cnt': cnt,
'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
# Store vncserver_listen and latest disk device info
res_data = {'graphics_listen_addrs': {}, 'volume': {}}
res_data['graphics_listen_addrs']['vnc'] = CONF.vnc.vncserver_listen
res_data['graphics_listen_addrs']['spice'] = CONF.spice.server_listen
for vol in block_device_mapping:
connection_info = vol['connection_info']
if connection_info.get('serial'):
serial = connection_info['serial']
res_data['volume'][serial] = {'connection_info': {},
'disk_info': {}}
res_data['volume'][serial]['connection_info'] = \
connection_info
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, image_meta, vol)
res_data['volume'][serial]['disk_info'] = disk_info
return res_data
def _try_fetch_image_cache(self, image, fetch_func, context, filename,
image_id, instance, size,
fallback_from_host=None):
try:
image.cache(fetch_func=fetch_func,
context=context,
filename=filename,
image_id=image_id,
user_id=instance.user_id,
project_id=instance.project_id,
size=size)
except exception.ImageNotFound:
if not fallback_from_host:
raise
LOG.debug("Image %(image_id)s doesn't exist anymore "
"on image service, attempting to copy "
"image from %(host)s",
{'image_id': image_id, 'host': fallback_from_host},
instance=instance)
def copy_from_host(target, max_size):
libvirt_utils.copy_image(src=target,
dest=target,
host=fallback_from_host,
receive=True)
image.cache(fetch_func=copy_from_host,
filename=filename)
def _create_images_and_backing(self, context, instance, instance_dir,
disk_info, fallback_from_host=None):
""":param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param instance_dir:
instance path to use, calculated externally to handle block
migrating an instance with an old style instance path
:param disk_info:
disk info specified in _get_instance_disk_info
:param fallback_from_host:
host where we can retrieve images if the glance images are
not available.
"""
if not disk_info:
disk_info = []
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['virt_disk_size'])
elif info['backing_file']:
# Creating backing file follows same way as spawning instances.
cache_name = os.path.basename(info['backing_file'])
image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt.images_type)
if cache_name.startswith('ephemeral'):
image.cache(fetch_func=self._create_ephemeral,
fs_label=cache_name,
os_type=instance.os_type,
filename=cache_name,
size=info['virt_disk_size'],
ephemeral_size=instance.ephemeral_gb)
elif cache_name.startswith('swap'):
inst_type = instance.get_flavor()
swap_mb = inst_type.swap
image.cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=swap_mb * units.Mi,
swap_mb=swap_mb)
else:
self._try_fetch_image_cache(image,
libvirt_utils.fetch_image,
context, cache_name,
instance.image_ref,
instance,
info['virt_disk_size'],
fallback_from_host)
# if image has kernel and ramdisk, just download
# following normal way.
self._fetch_instance_kernel_ramdisk(
context, instance, fallback_from_host=fallback_from_host)
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
# Disconnect from volume server
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
def post_live_migration_at_source(self, context, instance, network_info):
"""Unplug VIFs from networks at source.
:param context: security context
:param instance: instance object reference
:param network_info: instance network information
"""
self.unplug_vifs(instance, network_info)
def post_live_migration_at_destination(self, context,
instance,
network_info,
block_migration=False,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance,
image_meta, block_device_info)
xml = self._get_guest_xml(context, instance,
network_info, disk_info,
image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._host.write_instance_config(xml)
def _get_instance_disk_info(self, instance_name, xml,
block_device_info=None):
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
target_nodes = doc.findall('.//devices/disk/target')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file') or path_node.get('dev')
target = target_nodes[cnt].attrib['dev']
if not path:
LOG.debug('skipping disk for %s as it does not have a path',
instance_name)
continue
if disk_type not in ['file', 'block']:
                LOG.debug('skipping disk %s as it looks like a volume', path)
continue
if target in volume_devices:
LOG.debug('skipping disk %(path)s (%(target)s) as it is a '
'volume', {'path': path, 'target': target})
continue
# get the real disk size or
# raise a localized error if image is unavailable
if disk_type == 'file':
dk_size = int(os.path.getsize(path))
elif disk_type == 'block' and block_device_info:
dk_size = lvm.get_volume_size(path)
else:
LOG.debug('skipping disk %(path)s (%(target)s) - unable to '
'determine if volume',
{'path': path, 'target': target})
continue
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
else:
backing_file = ""
virt_size = dk_size
over_commit_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
return disk_info
def get_instance_disk_info(self, instance,
block_device_info=None):
try:
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warn(_LW('Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] '
'%(ex)s'),
{'instance_name': instance.name,
'error_code': error_code,
'ex': ex},
instance=instance)
raise exception.InstanceNotFound(instance_id=instance.uuid)
return jsonutils.dumps(
self._get_instance_disk_info(instance.name, xml,
block_device_info))
def _get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
        # Disk size that all instances use: virtual_size - disk_size
disk_over_committed_size = 0
for dom in self._host.list_instance_domains():
try:
# TODO(sahid): list_instance_domain should
# be renamed as list_guest and so returning
# Guest objects.
guest = libvirt_guest.Guest(dom)
xml = guest.get_xml_desc()
disk_infos = self._get_instance_disk_info(guest.name, xml)
for info in disk_infos:
disk_over_committed_size += int(
info['over_committed_disk_size'])
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warn(_LW(
'Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] %(ex)s'
), {'instance_name': guest.name,
'error_code': error_code,
'ex': ex})
except OSError as e:
if e.errno == errno.ENOENT:
LOG.warn(_LW('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but disk file was removed by concurrent '
'operations such as resize.'),
{'i_name': guest.name})
elif e.errno == errno.EACCES:
LOG.warn(_LW('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but access is denied. It is most likely '
'due to a VM that exists on the compute '
'node but is not managed by Nova.'),
{'i_name': guest.name})
else:
raise
except exception.VolumeBDMPathNotFound as e:
LOG.warn(_LW('Periodic task is updating the host stats, '
'it is trying to get disk info for %(i_name)s, '
'but the backing volume block device was removed '
'by concurrent operations such as resize. '
'Error: %(error)s'),
{'i_name': guest.name,
'error': e})
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_available_nodes(self, refresh=False):
return [self._host.get_hostname()]
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
return self._host.get_cpu_stats()
def get_host_uptime(self):
"""Returns the result of calling "uptime"."""
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.update(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
shared_storage=False):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
if not shared_storage:
utils.ssh_execute(dest, 'rm', '-rf', inst_base)
except Exception:
pass
def _is_storage_shared_with(self, dest, inst_base):
# NOTE (rmk): There are two methods of determining whether we are
# on the same filesystem: the source and dest IP are the
# same, or we create a file on the dest system via SSH
# and check whether the source system can also see it.
shared_storage = (dest == self.get_host_ip_addr())
if not shared_storage:
tmp_file = uuid.uuid4().hex + '.tmp'
tmp_path = os.path.join(inst_base, tmp_file)
try:
utils.ssh_execute(dest, 'touch', tmp_path)
if os.path.exists(tmp_path):
shared_storage = True
os.unlink(tmp_path)
else:
utils.ssh_execute(dest, 'rm', tmp_path)
except Exception:
pass
return shared_storage
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
LOG.debug("Starting migrate_disk_and_power_off",
instance=instance)
ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
# get_bdm_ephemeral_disk_size() will return 0 if the new
# instance's requested block device mapping contain no
# ephemeral devices. However, we still want to check if
# the original instance's ephemeral_gb property was set and
# ensure that the new requested flavor ephemeral size is greater
eph_size = (block_device.get_bdm_ephemeral_disk_size(ephemerals) or
instance.ephemeral_gb)
# Checks if the migration needs a disk resize down.
root_down = flavor.root_gb < instance.root_gb
ephemeral_down = flavor.ephemeral_gb < eph_size
disk_info_text = self.get_instance_disk_info(
instance, block_device_info=block_device_info)
booted_from_volume = self._is_booted_from_volume(instance,
disk_info_text)
if (root_down and not booted_from_volume) or ephemeral_down:
reason = _("Unable to resize disk down.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
disk_info = jsonutils.loads(disk_info_text)
# NOTE(dgenin): Migration is not implemented for LVM backed instances.
if CONF.libvirt.images_type == 'lvm' and not booted_from_volume:
reason = _("Migration is not supported for LVM backed instances")
raise exception.InstanceFaultRollback(
exception.MigrationPreCheckError(reason=reason))
# copy disks to destination
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
shared_storage = self._is_storage_shared_with(dest, inst_base)
# try to create the directory on the remote compute node
# if this fails we pass the exception up the stack so we can catch
# failures here earlier
if not shared_storage:
try:
utils.ssh_execute(dest, 'mkdir', '-p', inst_base)
except processutils.ProcessExecutionError as e:
reason = _("not able to execute ssh command: %s") % e
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
self.power_off(instance, timeout, retry_interval)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
try:
utils.execute('mv', inst_base, inst_base_resize)
# if we are migrating the instance with shared storage then
# create the directory. If it is a remote node the directory
# has already been created
if shared_storage:
dest = None
utils.execute('mkdir', '-p', inst_base)
active_flavor = instance.get_flavor()
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if (fname == 'disk.swap' and
active_flavor.get('swap', 0) != flavor.get('swap', 0)):
# To properly resize the swap partition, it must be
# re-created with the proper size. This is acceptable
# because when an OS is shut down, the contents of the
# swap space are just garbage, the OS doesn't bother about
# what is in it.
# We will not copy over the swap disk here, and rely on
# finish_migration/_create_image to re-create it for us.
continue
on_execute = lambda process: self.job_tracker.add_job(
instance, process.pid)
on_completion = lambda process: self.job_tracker.remove_job(
instance, process.pid)
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
if shared_storage:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest,
on_execute=on_execute,
on_completion=on_completion)
utils.execute('rm', '-f', tmp_path)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest,
on_execute=on_execute,
on_completion=on_completion)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize,
shared_storage)
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info(_LI("Instance running successfully."), instance=instance)
raise loopingcall.LoopingCallDone()
@staticmethod
def _disk_size_from_instance(instance, info):
"""Determines the disk size from instance properties
Returns the disk size by using the disk name to determine whether it
is a root or an ephemeral disk, then by checking properties of the
instance returns the size converted to bytes.
        Returns 0 if the disk name does not match (disk, disk.local).
"""
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance.root_gb
elif fname == 'disk.local':
size = instance.ephemeral_gb
else:
size = 0
return size * units.Gi
@staticmethod
def _disk_raw_to_qcow2(path):
"""Converts a raw disk to qcow2."""
path_qcow = path + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', path, path_qcow)
utils.execute('mv', path_qcow, path)
@staticmethod
def _disk_qcow2_to_raw(path):
"""Converts a qcow2 disk to raw."""
path_raw = path + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', path, path_raw)
utils.execute('mv', path_raw, path)
def _disk_resize(self, image, size):
"""Attempts to resize a disk to size
:param image: an instance of nova.virt.image.model.Image
Attempts to resize a disk by checking the capabilities and
preparing the format, then calling disk.api.extend.
        Note: Currently only disk extension is supported.
"""
if not isinstance(image, imgmodel.LocalFileImage):
LOG.debug("Skipping resize of non-local image")
return
# If we have a non partitioned image that we can extend
# then ensure we're in 'raw' format so we can extend file system.
converted = False
if (size and
image.format == imgmodel.FORMAT_QCOW2 and
disk.can_resize_image(image.path, size) and
disk.is_image_extendable(image)):
self._disk_qcow2_to_raw(image.path)
converted = True
image = imgmodel.LocalFileImage(image.path,
imgmodel.FORMAT_RAW)
if size:
disk.extend(image, size)
if converted:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
self._disk_raw_to_qcow2(image.path)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_migration", instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
size = self._disk_size_from_instance(instance, info)
if resize_instance:
image = imgmodel.LocalFileImage(info['path'],
info['type'])
self._disk_resize(image, size)
if info['type'] == 'raw' and CONF.use_cow_images:
self._disk_raw_to_qcow2(info['path'])
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
# assume _create_image do nothing if a target file exists.
self._create_image(context, instance, disk_info['mapping'],
network_info=network_info,
block_device_info=None, inject_files=False,
fallback_from_host=migration.source_compute)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE(mriedem): vifs_already_plugged=True here, regardless of whether
# or not we've migrated to another host, because we unplug VIFs locally
# and the status change in the port might go undetected by the neutron
# L2 agent (or neutron server) so neutron may not know that the VIF was
# unplugged in the first place and never send an event.
self._create_domain_and_network(context, xml, instance, network_info,
disk_info,
block_device_info=block_device_info,
power_on=power_on,
vifs_already_plugged=True)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_revert_migration",
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta,
block_device_info=block_device_info)
self._create_domain_and_network(context, xml, instance, network_info,
disk_info,
block_device_info=block_device_info,
power_on=power_on,
vifs_already_plugged=True)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
LOG.debug("finish_revert_migration finished successfully.",
instance=instance)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
@staticmethod
def _get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
def get_diagnostics(self, instance):
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove domain at the end.
domain = guest._domain
output = {}
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
for vcpu in guest.get_vcpus_info():
output["cpu" + str(vcpu.id) + "_time"] = vcpu.time
except libvirt.libvirtError:
pass
# get io status
xml = guest.get_xml_desc()
dom_io = LibvirtDriver._get_io_devices(xml)
for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(guest_disk)
output[guest_disk + "_read_req"] = stats[0]
output[guest_disk + "_read"] = stats[1]
output[guest_disk + "_write_req"] = stats[2]
output[guest_disk + "_write"] = stats[3]
output[guest_disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
# memoryStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
def get_instance_diagnostics(self, instance):
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove domain at the end.
domain = guest._domain
xml = guest.get_xml_desc()
xml_doc = etree.fromstring(xml)
(state, max_mem, mem, num_cpu, cpu_time) = \
self._host.get_domain_info(domain)
config_drive = configdrive.required_by(instance)
launched_at = timeutils.normalize_time(instance.launched_at)
uptime = timeutils.delta_seconds(launched_at,
timeutils.utcnow())
diags = diagnostics.Diagnostics(state=power_state.STATE_MAP[state],
driver='libvirt',
config_drive=config_drive,
hypervisor_os='linux',
uptime=uptime)
diags.memory_details.maximum = max_mem / units.Mi
diags.memory_details.used = mem / units.Mi
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
for vcpu in guest.get_vcpus_info():
diags.add_cpu(time=vcpu.time)
except libvirt.libvirtError:
pass
# get io status
dom_io = LibvirtDriver._get_io_devices(xml)
for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(guest_disk)
diags.add_disk(read_bytes=stats[1],
read_requests=stats[0],
write_bytes=stats[3],
write_requests=stats[2])
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
diags.add_nic(rx_octets=stats[0],
rx_errors=stats[2],
rx_drop=stats[3],
rx_packets=stats[1],
tx_octets=stats[4],
tx_errors=stats[6],
tx_drop=stats[7],
tx_packets=stats[5])
except libvirt.libvirtError:
pass
# Update mac addresses of interface if stats have been reported
if diags.nic_details:
nodes = xml_doc.findall('./devices/interface/mac')
for index, node in enumerate(nodes):
diags.nic_details[index].mac_address = node.get('address')
return diags
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug('Checking instance files accessibility %s', instance_path,
instance=instance)
shared_instance_path = os.access(instance_path, os.W_OK)
# NOTE(flwang): For shared block storage scenario, the file system is
# not really shared by the two hosts, but the volume of evacuated
# instance is reachable.
shared_block_storage = (self.image_backend.backend().
is_shared_block_storage())
return shared_instance_path or shared_block_storage
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
def delete_instance_files(self, instance):
target = libvirt_utils.get_instance_path(instance)
# A resize may be in progress
target_resize = target + '_resize'
        # Other threads may attempt to rename the path, so we rename it to
        # target + '_del' (because the rename is atomic) and iterate through
        # twice in the unlikely event that a concurrent rename occurs between
        # the two rename attempts in this method. In general this method
# should be fairly thread-safe without these additional checks, since
# other operations involving renames are not permitted when the task
# state is not None and the task state should be set to something
# other than None by the time this method is invoked.
target_del = target + '_del'
for i in six.moves.range(2):
try:
utils.execute('mv', target, target_del)
break
except Exception:
pass
try:
utils.execute('mv', target_resize, target_del)
break
except Exception:
pass
# Either the target or target_resize path may still exist if all
# rename attempts failed.
remaining_path = None
for p in (target, target_resize):
if os.path.exists(p):
remaining_path = p
break
# A previous delete attempt may have been interrupted, so target_del
# may exist even if all rename attempts during the present method
# invocation failed due to the absence of both target and
# target_resize.
if not remaining_path and os.path.exists(target_del):
self.job_tracker.terminate_jobs(instance)
LOG.info(_LI('Deleting instance files %s'), target_del,
instance=instance)
remaining_path = target_del
try:
shutil.rmtree(target_del)
except OSError as e:
LOG.error(_LE('Failed to cleanup directory %(target)s: '
'%(e)s'), {'target': target_del, 'e': e},
instance=instance)
# It is possible that the delete failed, if so don't mark the instance
# as cleaned.
if remaining_path and os.path.exists(remaining_path):
LOG.info(_LI('Deletion of %s failed'), remaining_path,
instance=instance)
return False
LOG.info(_LI('Deletion of %s complete'), target_del, instance=instance)
return True
@property
def need_legacy_block_device_info(self):
return False
def default_root_device_name(self, instance, image_meta, root_bdm):
disk_bus = blockinfo.get_disk_bus_for_device_type(
CONF.libvirt.virt_type, image_meta, "disk")
cdrom_bus = blockinfo.get_disk_bus_for_device_type(
CONF.libvirt.virt_type, image_meta, "cdrom")
root_info = blockinfo.get_root_info(
CONF.libvirt.virt_type, image_meta, root_bdm, disk_bus,
cdrom_bus)
return block_device.prepend_dev(root_info['dev'])
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
block_device_mapping = list(itertools.chain(*block_device_lists))
# NOTE(ndipanov): Null out the device names so that blockinfo code
# will assign them
for bdm in block_device_mapping:
if bdm.device_name is not None:
LOG.warn(_LW("Ignoring supplied device name: %(device_name)s. "
"Libvirt can't honour user-supplied dev names"),
{'device_name': bdm.device_name}, instance=instance)
bdm.device_name = None
block_device_info = driver.get_block_device_info(instance,
block_device_mapping)
blockinfo.default_device_names(CONF.libvirt.virt_type,
nova_context.get_admin_context(),
instance,
block_device_info,
image_meta)
def get_device_name_for_instance(self, instance, bdms, block_device_obj):
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
block_device_info = driver.get_block_device_info(instance, bdms)
instance_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance,
image_meta, block_device_info=block_device_info)
suggested_dev_name = block_device_obj.device_name
if suggested_dev_name is not None:
LOG.warn(_LW('Ignoring supplied device name: %(suggested_dev)s'),
{'suggested_dev': suggested_dev_name}, instance=instance)
# NOTE(ndipanov): get_info_from_bdm will generate the new device name
# only when it's actually not set on the bd object
block_device_obj.device_name = None
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, image_meta, block_device_obj,
mapping=instance_info['mapping'])
return block_device.prepend_dev(disk_info['dev'])
def is_supported_fs_format(self, fs_type):
return fs_type in [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
def _get_power_state(self, virt_dom):
dom_info = self._host.get_domain_info(virt_dom)
return LIBVIRT_POWER_STATE[dom_info[0]]
|
apache-2.0
|
BonexGu/Blik2D-SDK
|
Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/layers/python/layers/utils_test.py
|
65
|
10705
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
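# NOTE: utils.constant_value returns the underlying Python value when it can
# be determined statically at graph-construction time (plain Python values and
# tf constants) and None otherwise (variables and placeholders), which is what
# the tests below exercise.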
class ConstantValueTest(test.TestCase):
def test_value(self):
for v in [True, False, 1, 0, 1.0]:
value = utils.constant_value(v)
self.assertEqual(value, v)
def test_constant(self):
for v in [True, False, 1, 0, 1.0]:
c = constant_op.constant(v)
value = utils.constant_value(c)
self.assertEqual(value, v)
with self.test_session():
self.assertEqual(c.eval(), v)
def test_variable(self):
for v in [True, False, 1, 0, 1.0]:
with ops.Graph().as_default() as g, self.test_session(g) as sess:
x = variables.Variable(v)
value = utils.constant_value(x)
self.assertEqual(value, None)
sess.run(variables.global_variables_initializer())
self.assertEqual(x.eval(), v)
def test_placeholder(self):
for v in [True, False, 1, 0, 1.0]:
p = array_ops.placeholder(np.dtype(type(v)), [])
x = array_ops.identity(p)
value = utils.constant_value(p)
self.assertEqual(value, None)
with self.test_session():
self.assertEqual(x.eval(feed_dict={p: v}), v)
class StaticCondTest(test.TestCase):
def test_value(self):
fn1 = lambda: 'fn1'
fn2 = lambda: 'fn2'
expected = lambda v: 'fn1' if v else 'fn2'
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
self.assertEqual(o, expected(v))
def test_constant(self):
fn1 = lambda: constant_op.constant('fn1')
fn2 = lambda: constant_op.constant('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(), expected(v))
def test_variable(self):
fn1 = lambda: variables.Variable('fn1')
fn2 = lambda: variables.Variable('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(o.eval(), expected(v))
def test_tensors(self):
fn1 = lambda: constant_op.constant(0) - constant_op.constant(1)
fn2 = lambda: constant_op.constant(0) - constant_op.constant(2)
expected = lambda v: -1 if v else -2
for v in [True, False, 1, 0]:
o = utils.static_cond(v, fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(), expected(v))
class SmartCondStaticTest(test.TestCase):
def test_value(self):
fn1 = lambda: 'fn1'
fn2 = lambda: 'fn2'
expected = lambda v: 'fn1' if v else 'fn2'
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
self.assertEqual(o, expected(v))
def test_constant(self):
fn1 = lambda: constant_op.constant('fn1')
fn2 = lambda: constant_op.constant('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(), expected(v))
def test_variable(self):
fn1 = lambda: variables.Variable('fn1')
fn2 = lambda: variables.Variable('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(o.eval(), expected(v))
def test_tensors(self):
fn1 = lambda: constant_op.constant(0) - constant_op.constant(1)
fn2 = lambda: constant_op.constant(0) - constant_op.constant(2)
expected = lambda v: -1 if v else -2
for v in [True, False, 1, 0]:
o = utils.smart_cond(constant_op.constant(v), fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(), expected(v))
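# NOTE: smart_cond dispatches at graph-construction time when the predicate is
# statically known (the tests above), and falls back to a runtime conditional
# when it is not, e.g. for a placeholder predicate (the tests below).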
class SmartCondDynamicTest(test.TestCase):
def test_value(self):
fn1 = lambda: ops.convert_to_tensor('fn1')
fn2 = lambda: ops.convert_to_tensor('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
def test_constant(self):
fn1 = lambda: constant_op.constant('fn1')
fn2 = lambda: constant_op.constant('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
def test_variable(self):
fn1 = lambda: variables.Variable('fn1')
fn2 = lambda: variables.Variable('fn2')
expected = lambda v: b'fn1' if v else b'fn2'
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
def test_tensors(self):
fn1 = lambda: constant_op.constant(0) - constant_op.constant(1)
fn2 = lambda: constant_op.constant(0) - constant_op.constant(2)
expected = lambda v: -1 if v else -2
p = array_ops.placeholder(dtypes.bool, [])
for v in [True, False, 1, 0]:
o = utils.smart_cond(p, fn1, fn2)
with self.test_session():
self.assertEqual(o.eval(feed_dict={p: v}), expected(v))
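# NOTE: collect_named_outputs adds the tensor to the named collection and
# records the alias on the tensor's `aliases` attribute, as exercised by the
# tests below.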
class CollectNamedOutputsTest(test.TestCase):
def test_collect(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a2', t2)
self.assertEqual(ops.get_collection('end_points'), [t1, t2])
def test_aliases(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a2', t2)
self.assertEqual(t1.aliases, ['a1'])
self.assertEqual(t2.aliases, ['a2'])
def test_multiple_aliases(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a11', t1)
utils.collect_named_outputs('end_points', 'a12', t1)
utils.collect_named_outputs('end_points', 'a21', t2)
utils.collect_named_outputs('end_points', 'a22', t2)
self.assertEqual(t1.aliases, ['a11', 'a12'])
self.assertEqual(t2.aliases, ['a21', 'a22'])
def test_gather_aliases(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
t3 = constant_op.constant(2.0, name='t3')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a2', t2)
ops.add_to_collection('end_points', t3)
aliases = utils.gather_tensors_aliases(ops.get_collection('end_points'))
self.assertEqual(aliases, ['a1', 'a2', 't3'])
def test_convert_collection_to_dict(self):
t1 = constant_op.constant(1.0, name='t1')
t2 = constant_op.constant(2.0, name='t2')
utils.collect_named_outputs('end_points', 'a1', t1)
utils.collect_named_outputs('end_points', 'a21', t2)
utils.collect_named_outputs('end_points', 'a22', t2)
end_points = utils.convert_collection_to_dict('end_points')
self.assertEqual(end_points['a1'], t1)
self.assertEqual(end_points['a21'], t2)
self.assertEqual(end_points['a22'], t2)
class NPositiveIntegersTest(test.TestCase):
def test_invalid_input(self):
with self.assertRaises(ValueError):
utils.n_positive_integers('3', [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(3.3, [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(-1, [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(0, [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(1, [1, 2])
with self.assertRaises(ValueError):
utils.n_positive_integers(1, [-1])
with self.assertRaises(ValueError):
utils.n_positive_integers(1, [0])
with self.assertRaises(ValueError):
utils.n_positive_integers(1, [0])
with self.assertRaises(ValueError):
utils.n_positive_integers(2, [1])
with self.assertRaises(ValueError):
utils.n_positive_integers(2, [1, 2, 3])
with self.assertRaises(ValueError):
utils.n_positive_integers(2, ['hello', 2])
with self.assertRaises(ValueError):
utils.n_positive_integers(2, tensor_shape.TensorShape([2, 3, 1]))
with self.assertRaises(ValueError):
utils.n_positive_integers(3, tensor_shape.TensorShape([2, None, 1]))
with self.assertRaises(ValueError):
utils.n_positive_integers(3, tensor_shape.TensorShape(None))
def test_valid_input(self):
self.assertEqual(utils.n_positive_integers(1, 2), (2,))
self.assertEqual(utils.n_positive_integers(2, 2), (2, 2))
self.assertEqual(utils.n_positive_integers(2, (2, 3)), (2, 3))
self.assertEqual(utils.n_positive_integers(3, (2, 3, 1)), (2, 3, 1))
self.assertEqual(utils.n_positive_integers(3, (2, 3, 1)), (2, 3, 1))
self.assertEqual(
utils.n_positive_integers(3, tensor_shape.TensorShape([2, 3, 1])),
(2, 3, 1))
if __name__ == '__main__':
test.main()
|
mit
|
sophgn/aosc-os-abbs
|
extra-libs/dbus-factory/autobuild/json_build.py
|
1
|
1605
|
#!/usr/bin/env python2
import os
PROXYER = '/usr/bin/dbus-generator'
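# Helper for the autobuild step: for every *.in.json interface description in
# the current directory, run dbus-generator (PROXYER) to emit QML and Go
# bindings, then build the generated sources.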
def generate_source(json,target):
json_header = json.split('.in.json')[0]
outdir = ''
install_dir = ''
json_meta = []
if target == 'qml':
outdir += 'qml/'
        t_list = []
for i in json_header.split('.'):
t_list.append(i.capitalize())
path = '_'.join(t_list)
elif target == 'golang':
outdir += 'go/src/'
path = json_header.split('.')[-1].lower()
json_meta = json_header.split('.')
json_meta.pop()
for i in json_meta:
install_dir += i.lower() + '/'
outdir += install_dir
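        # For golang the output directory mirrors the dotted interface name,
        # e.g. a (hypothetical) "org.example.Foo.in.json" ends up under
        # "go/src/org/example/foo".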
else:
print ("%s is not supported yet!" % target)
exit()
outdir += path
cmd = PROXYER+' -in '+json+' -out '+outdir+' -target '+target
os.system(cmd)
print install_dir
return path,install_dir
def build(path,target,installdir):
owd = os.getcwd()
work = ''
try:
if target == 'qml':
work = work + target +'/'+path
os.chdir(work)
os.system('qmake')
os.system('make')
elif target == 'golang':
target = 'go'
work = work + target + '/'
finally:
os.chdir(owd)
if __name__ == '__main__':
files = os.listdir('.')
json_in = []
for i in files:
if '.in.json' in i:
json_in.append(i)
for each_json_in in json_in:
for target in ['qml','golang']:
path,install_dir = generate_source(each_json_in,target)
build(path,target,install_dir)
|
gpl-2.0
|
kakunbsc/enigma2
|
lib/python/Screens/MovieSelection.py
|
10
|
14103
|
from Screen import Screen
from Components.Button import Button
from Components.ActionMap import HelpableActionMap, ActionMap
from Components.MenuList import MenuList
from Components.MovieList import MovieList
from Components.DiskInfo import DiskInfo
from Components.Pixmap import Pixmap
from Components.Label import Label
from Components.PluginComponent import plugins
from Components.config import config, ConfigSubsection, ConfigText, ConfigInteger, ConfigLocations, ConfigSet
from Components.Sources.ServiceEvent import ServiceEvent
from Components.UsageConfig import defaultMoviePath
from Plugins.Plugin import PluginDescriptor
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Screens.LocationBox import MovieLocationBox
from Screens.HelpMenu import HelpableScreen
from Tools.Directories import *
from Tools.BoundFunction import boundFunction
from enigma import eServiceReference, eServiceCenter, eTimer, eSize
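# Persistent movie list settings (sort order, list style, description
# visibility, last used directories and tag selections), stored via enigma2's
# config framework.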
config.movielist = ConfigSubsection()
config.movielist.moviesort = ConfigInteger(default=MovieList.SORT_RECORDED)
config.movielist.listtype = ConfigInteger(default=MovieList.LISTTYPE_ORIGINAL)
config.movielist.description = ConfigInteger(default=MovieList.HIDE_DESCRIPTION)
config.movielist.last_videodir = ConfigText(default=resolveFilename(SCOPE_HDD))
config.movielist.last_timer_videodir = ConfigText(default=resolveFilename(SCOPE_HDD))
config.movielist.videodirs = ConfigLocations(default=[resolveFilename(SCOPE_HDD)])
config.movielist.first_tags = ConfigText(default="")
config.movielist.second_tags = ConfigText(default="")
config.movielist.last_selected_tags = ConfigSet([], default=[])
def setPreferredTagEditor(te):
global preferredTagEditor
try:
if preferredTagEditor == None:
preferredTagEditor = te
print "Preferred tag editor changed to ", preferredTagEditor
else:
print "Preferred tag editor already set to ", preferredTagEditor
print "ignoring ", te
except:
preferredTagEditor = te
print "Preferred tag editor set to ", preferredTagEditor
def getPreferredTagEditor():
global preferredTagEditor
return preferredTagEditor
setPreferredTagEditor(None)
class MovieContextMenu(Screen):
def __init__(self, session, csel, service):
Screen.__init__(self, session)
self.csel = csel
self.service = service
self["actions"] = ActionMap(["OkCancelActions"],
{
"ok": self.okbuttonClick,
"cancel": self.cancelClick
})
menu = [(_("delete..."), self.delete)]
menu.extend([(p.description, boundFunction(self.execPlugin, p)) for p in plugins.getPlugins(PluginDescriptor.WHERE_MOVIELIST)])
if config.movielist.moviesort.value == MovieList.SORT_ALPHANUMERIC:
menu.append((_("sort by date"), boundFunction(self.sortBy, MovieList.SORT_RECORDED)))
else:
menu.append((_("alphabetic sort"), boundFunction(self.sortBy, MovieList.SORT_ALPHANUMERIC)))
menu.extend((
(_("list style default"), boundFunction(self.listType, MovieList.LISTTYPE_ORIGINAL)),
(_("list style compact with description"), boundFunction(self.listType, MovieList.LISTTYPE_COMPACT_DESCRIPTION)),
(_("list style compact"), boundFunction(self.listType, MovieList.LISTTYPE_COMPACT)),
(_("list style single line"), boundFunction(self.listType, MovieList.LISTTYPE_MINIMAL))
))
if config.movielist.description.value == MovieList.SHOW_DESCRIPTION:
menu.append((_("hide extended description"), boundFunction(self.showDescription, MovieList.HIDE_DESCRIPTION)))
else:
menu.append((_("show extended description"), boundFunction(self.showDescription, MovieList.SHOW_DESCRIPTION)))
self["menu"] = MenuList(menu)
def okbuttonClick(self):
self["menu"].getCurrent()[1]()
def cancelClick(self):
self.close(False)
def sortBy(self, newType):
config.movielist.moviesort.value = newType
self.csel.setSortType(newType)
self.csel.reloadList()
self.close()
def listType(self, newType):
config.movielist.listtype.value = newType
self.csel.setListType(newType)
self.csel.list.redrawList()
self.close()
def showDescription(self, newType):
config.movielist.description.value = newType
self.csel.setDescriptionState(newType)
self.csel.updateDescription()
self.close()
def execPlugin(self, plugin):
plugin(session=self.session, service=self.service)
def delete(self):
serviceHandler = eServiceCenter.getInstance()
offline = serviceHandler.offlineOperations(self.service)
info = serviceHandler.info(self.service)
name = info and info.getName(self.service) or _("this recording")
result = False
if offline is not None:
# simulate first
if not offline.deleteFromDisk(1):
result = True
if result == True:
self.session.openWithCallback(self.deleteConfirmed, MessageBox, _("Do you really want to delete %s?") % (name))
else:
self.session.openWithCallback(self.close, MessageBox, _("You cannot delete this!"), MessageBox.TYPE_ERROR)
def deleteConfirmed(self, confirmed):
if not confirmed:
return self.close()
serviceHandler = eServiceCenter.getInstance()
offline = serviceHandler.offlineOperations(self.service)
result = False
if offline is not None:
# really delete!
if not offline.deleteFromDisk(0):
result = True
if result == False:
self.session.openWithCallback(self.close, MessageBox, _("Delete failed!"), MessageBox.TYPE_ERROR)
else:
self.csel["list"].removeService(self.service)
self.csel["freeDiskSpace"].update()
self.close()
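# Mixin for MovieSelection: feeds the currently selected recording into the
# "Service" source (debounced by a 100 ms timer) so the skin can display the
# extended event description when it is enabled.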
class SelectionEventInfo:
def __init__(self):
self["Service"] = ServiceEvent()
self.list.connectSelChanged(self.__selectionChanged)
self.timer = eTimer()
self.timer.callback.append(self.updateEventInfo)
self.onShown.append(self.__selectionChanged)
def __selectionChanged(self):
if self.execing and config.movielist.description.value == MovieList.SHOW_DESCRIPTION:
self.timer.start(100, True)
def updateEventInfo(self):
serviceref = self.getCurrent()
self["Service"].newService(serviceref)
class MovieSelection(Screen, HelpableScreen, SelectionEventInfo):
def __init__(self, session, selectedmovie = None):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
self.tags = [ ]
if selectedmovie:
self.selected_tags = config.movielist.last_selected_tags.value
else:
self.selected_tags = None
self.selected_tags_ele = None
self.movemode = False
self.bouquet_mark_edit = False
self.delayTimer = eTimer()
self.delayTimer.callback.append(self.updateHDDData)
self["waitingtext"] = Label(_("Please wait... Loading list..."))
# create optional description border and hide immediately
self["DescriptionBorder"] = Pixmap()
self["DescriptionBorder"].hide()
if not fileExists(config.movielist.last_videodir.value):
config.movielist.last_videodir.value = defaultMoviePath()
config.movielist.last_videodir.save()
self.current_ref = eServiceReference("2:0:1:0:0:0:0:0:0:0:" + config.movielist.last_videodir.value)
self["list"] = MovieList(None,
config.movielist.listtype.value,
config.movielist.moviesort.value,
config.movielist.description.value)
self.list = self["list"]
self.selectedmovie = selectedmovie
# Need list for init
SelectionEventInfo.__init__(self)
self["key_red"] = Button(_("All"))
self["key_green"] = Button("")
self["key_yellow"] = Button("")
self["key_blue"] = Button("")
self["freeDiskSpace"] = self.diskinfo = DiskInfo(config.movielist.last_videodir.value, DiskInfo.FREE, update=False)
if config.usage.setup_level.index >= 2: # expert+
self["InfobarActions"] = HelpableActionMap(self, "InfobarActions",
{
"showMovies": (self.doPathSelect, _("select the movie path")),
})
self["MovieSelectionActions"] = HelpableActionMap(self, "MovieSelectionActions",
{
"contextMenu": (self.doContext, _("menu")),
"showEventInfo": (self.showEventInformation, _("show event details")),
})
self["ColorActions"] = HelpableActionMap(self, "ColorActions",
{
"red": (self.showAll, _("show all")),
"green": (self.showTagsFirst, _("show first selected tag")),
"yellow": (self.showTagsSecond, _("show second selected tag")),
"blue": (self.showTagsSelect, _("show tag menu")),
})
self["OkCancelActions"] = HelpableActionMap(self, "OkCancelActions",
{
"cancel": (self.abort, _("exit movielist")),
"ok": (self.movieSelected, _("select movie")),
})
self.onShown.append(self.go)
self.onLayoutFinish.append(self.saveListsize)
self.inited = False
def updateDescription(self):
if config.movielist.description.value == MovieList.SHOW_DESCRIPTION:
self["DescriptionBorder"].show()
self["list"].instance.resize(eSize(self.listWidth, self.listHeight-self["DescriptionBorder"].instance.size().height()))
else:
self["Service"].newService(None)
self["DescriptionBorder"].hide()
self["list"].instance.resize(eSize(self.listWidth, self.listHeight))
def showEventInformation(self):
from Screens.EventView import EventViewSimple
from ServiceReference import ServiceReference
evt = self["list"].getCurrentEvent()
if evt:
self.session.open(EventViewSimple, evt, ServiceReference(self.getCurrent()))
def go(self):
if not self.inited:
# ouch. this should redraw our "Please wait..."-text.
# this is of course not the right way to do this.
self.delayTimer.start(10, 1)
self.inited=True
def saveListsize(self):
listsize = self["list"].instance.size()
self.listWidth = listsize.width()
self.listHeight = listsize.height()
self.updateDescription()
def updateHDDData(self):
self.reloadList(self.selectedmovie)
self["waitingtext"].visible = False
def moveTo(self):
self["list"].moveTo(self.selectedmovie)
def getCurrent(self):
return self["list"].getCurrent()
def movieSelected(self):
current = self.getCurrent()
if current is not None:
self.saveconfig()
self.close(current)
def doContext(self):
current = self.getCurrent()
if current is not None:
self.session.open(MovieContextMenu, self, current)
def abort(self):
self.saveconfig()
self.close(None)
def saveconfig(self):
config.movielist.last_selected_tags.value = self.selected_tags
config.movielist.moviesort.save()
config.movielist.listtype.save()
config.movielist.description.save()
def getTagDescription(self, tag):
# TODO: access the tag database
return tag
def updateTags(self):
# get a list of tags available in this list
self.tags = list(self["list"].tags)
if not self.tags:
# by default, we do not display any filtering options
self.tag_first = ""
self.tag_second = ""
else:
tmp = config.movielist.first_tags.value
if tmp in self.tags:
self.tag_first = tmp
else:
self.tag_first = "<"+_("Tag 1")+">"
tmp = config.movielist.second_tags.value
if tmp in self.tags:
self.tag_second = tmp
else:
self.tag_second = "<"+_("Tag 2")+">"
self["key_green"].text = self.tag_first
self["key_yellow"].text = self.tag_second
# the rest is presented in a list, available on the
# fourth ("blue") button
if self.tags:
self["key_blue"].text = _("Tags")+"..."
else:
self["key_blue"].text = ""
def setListType(self, type):
self["list"].setListType(type)
def setDescriptionState(self, val):
self["list"].setDescriptionState(val)
def setSortType(self, type):
self["list"].setSortType(type)
def reloadList(self, sel = None, home = False):
if not fileExists(config.movielist.last_videodir.value):
path = defaultMoviePath()
config.movielist.last_videodir.value = path
config.movielist.last_videodir.save()
self.current_ref = eServiceReference("2:0:1:0:0:0:0:0:0:0:" + path)
self["freeDiskSpace"].path = path
if sel is None:
sel = self.getCurrent()
self["list"].reload(self.current_ref, self.selected_tags)
title = _("Recorded files...")
if config.usage.setup_level.index >= 2: # expert+
title += " " + config.movielist.last_videodir.value
if self.selected_tags is not None:
title += " - " + ','.join(self.selected_tags)
self.setTitle(title)
if not (sel and self["list"].moveTo(sel)):
if home:
self["list"].moveToIndex(0)
self.updateTags()
self["freeDiskSpace"].update()
def doPathSelect(self):
self.session.openWithCallback(
self.gotFilename,
MovieLocationBox,
_("Please select the movie path..."),
config.movielist.last_videodir.value
)
def gotFilename(self, res):
		if res is not None and res != config.movielist.last_videodir.value:
if fileExists(res):
config.movielist.last_videodir.value = res
config.movielist.last_videodir.save()
self.current_ref = eServiceReference("2:0:1:0:0:0:0:0:0:0:" + res)
self["freeDiskSpace"].path = res
self.reloadList(home = True)
else:
self.session.open(
MessageBox,
_("Directory %s nonexistent.") % (res),
type = MessageBox.TYPE_ERROR,
timeout = 5
)
def showAll(self):
self.selected_tags_ele = None
self.selected_tags = None
self.reloadList(home = True)
def showTagsN(self, tagele):
if not self.tags:
self.showTagWarning()
elif not tagele or (self.selected_tags and tagele.value in self.selected_tags) or not tagele.value in self.tags:
self.showTagsMenu(tagele)
else:
self.selected_tags_ele = tagele
self.selected_tags = set([tagele.value])
self.reloadList(home = True)
def showTagsFirst(self):
self.showTagsN(config.movielist.first_tags)
def showTagsSecond(self):
self.showTagsN(config.movielist.second_tags)
def showTagsSelect(self):
self.showTagsN(None)
def tagChosen(self, tag):
if tag is not None:
self.selected_tags = set([tag[0]])
if self.selected_tags_ele:
self.selected_tags_ele.value = tag[0]
self.selected_tags_ele.save()
self.reloadList(home = True)
def showTagsMenu(self, tagele):
self.selected_tags_ele = tagele
list = [(tag, self.getTagDescription(tag)) for tag in self.tags ]
self.session.openWithCallback(self.tagChosen, ChoiceBox, title=_("Please select tag to filter..."), list = list)
def showTagWarning(self):
self.session.open(MessageBox, _("No tags are set on these movies."), MessageBox.TYPE_ERROR)
|
gpl-2.0
|
rhndg/openedx
|
openedx/core/djangoapps/credit/migrations/0001_initial.py
|
85
|
8897
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
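# South schema migration: creates the CreditCourse, CreditProvider,
# CreditRequirement, CreditRequirementStatus and CreditEligibility tables
# together with their unique constraints.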
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CreditCourse'
db.create_table('credit_creditcourse', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('course_key', self.gf('xmodule_django.models.CourseKeyField')(unique=True, max_length=255, db_index=True)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('credit', ['CreditCourse'])
# Adding model 'CreditProvider'
db.create_table('credit_creditprovider', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('provider_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255, db_index=True)),
('display_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('credit', ['CreditProvider'])
# Adding model 'CreditRequirement'
db.create_table('credit_creditrequirement', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('course', self.gf('django.db.models.fields.related.ForeignKey')(related_name='credit_requirements', to=orm['credit.CreditCourse'])),
('namespace', self.gf('django.db.models.fields.CharField')(max_length=255)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('configuration', self.gf('jsonfield.fields.JSONField')()),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('credit', ['CreditRequirement'])
# Adding unique constraint on 'CreditRequirement', fields ['namespace', 'name', 'course']
db.create_unique('credit_creditrequirement', ['namespace', 'name', 'course_id'])
# Adding model 'CreditRequirementStatus'
db.create_table('credit_creditrequirementstatus', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('username', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('requirement', self.gf('django.db.models.fields.related.ForeignKey')(related_name='statuses', to=orm['credit.CreditRequirement'])),
('status', self.gf('django.db.models.fields.CharField')(max_length=32)),
))
db.send_create_signal('credit', ['CreditRequirementStatus'])
# Adding model 'CreditEligibility'
db.create_table('credit_crediteligibility', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('username', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('course', self.gf('django.db.models.fields.related.ForeignKey')(related_name='eligibilities', to=orm['credit.CreditCourse'])),
('provider', self.gf('django.db.models.fields.related.ForeignKey')(related_name='eligibilities', to=orm['credit.CreditProvider'])),
))
db.send_create_signal('credit', ['CreditEligibility'])
# Adding unique constraint on 'CreditEligibility', fields ['username', 'course']
db.create_unique('credit_crediteligibility', ['username', 'course_id'])
def backwards(self, orm):
# Removing unique constraint on 'CreditEligibility', fields ['username', 'course']
db.delete_unique('credit_crediteligibility', ['username', 'course_id'])
# Removing unique constraint on 'CreditRequirement', fields ['namespace', 'name', 'course']
db.delete_unique('credit_creditrequirement', ['namespace', 'name', 'course_id'])
# Deleting model 'CreditCourse'
db.delete_table('credit_creditcourse')
# Deleting model 'CreditProvider'
db.delete_table('credit_creditprovider')
# Deleting model 'CreditRequirement'
db.delete_table('credit_creditrequirement')
# Deleting model 'CreditRequirementStatus'
db.delete_table('credit_creditrequirementstatus')
# Deleting model 'CreditEligibility'
db.delete_table('credit_crediteligibility')
models = {
'credit.creditcourse': {
'Meta': {'object_name': 'CreditCourse'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'credit.crediteligibility': {
'Meta': {'unique_together': "(('username', 'course'),)", 'object_name': 'CreditEligibility'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'eligibilities'", 'to': "orm['credit.CreditCourse']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'eligibilities'", 'to': "orm['credit.CreditProvider']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'credit.creditprovider': {
'Meta': {'object_name': 'CreditProvider'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'provider_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'credit.creditrequirement': {
'Meta': {'unique_together': "(('namespace', 'name', 'course'),)", 'object_name': 'CreditRequirement'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'configuration': ('jsonfield.fields.JSONField', [], {}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credit_requirements'", 'to': "orm['credit.CreditCourse']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'namespace': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'credit.creditrequirementstatus': {
'Meta': {'object_name': 'CreditRequirementStatus'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'requirement': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': "orm['credit.CreditRequirement']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
}
}
complete_apps = ['credit']
|
agpl-3.0
|
dreamsxin/kbengine
|
kbe/src/lib/python/Lib/test/test_tokenize.py
|
80
|
48357
|
doctests = """
Tests for the tokenize module.
The tests can be really simple. Given a small fragment of source
code, print out a table with tokens. The ENDMARKER is omitted for
brevity.
>>> dump_tokens("1 + 1")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '1' (1, 0) (1, 1)
OP '+' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
>>> dump_tokens("if False:\\n"
... " # NL\\n"
... " True = False # NEWLINE\\n")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'if' (1, 0) (1, 2)
NAME 'False' (1, 3) (1, 8)
OP ':' (1, 8) (1, 9)
NEWLINE '\\n' (1, 9) (1, 10)
COMMENT '# NL' (2, 4) (2, 8)
NL '\\n' (2, 8) (2, 9)
INDENT ' ' (3, 0) (3, 4)
NAME 'True' (3, 4) (3, 8)
OP '=' (3, 9) (3, 10)
NAME 'False' (3, 11) (3, 16)
COMMENT '# NEWLINE' (3, 17) (3, 26)
NEWLINE '\\n' (3, 26) (3, 27)
DEDENT '' (4, 0) (4, 0)
>>> indent_error_file = \"""
... def k(x):
... x += 2
... x += 5
... \"""
>>> readline = BytesIO(indent_error_file.encode('utf-8')).readline
>>> for tok in tokenize(readline): pass
Traceback (most recent call last):
...
IndentationError: unindent does not match any outer indentation level
There are some standard formatting practices that are easy to get right.
>>> roundtrip("if x == 1:\\n"
... " print(x)\\n")
True
>>> roundtrip("# This is a comment\\n# This also")
True
Some people use different formatting conventions, which makes
untokenize a little trickier. Note that this test involves trailing
whitespace after the colon. Note that we use hex escapes to make the
two trailing blanks apparent in the expected output.
>>> roundtrip("if x == 1 : \\n"
... " print(x)\\n")
True
>>> f = support.findfile("tokenize_tests.txt")
>>> roundtrip(open(f, 'rb'))
True
>>> roundtrip("if x == 1:\\n"
... " # A comment by itself.\\n"
... " print(x) # Comment here, too.\\n"
... " # Another comment.\\n"
... "after_if = True\\n")
True
>>> roundtrip("if (x # The comments need to go in the right place\\n"
... " == 1):\\n"
... " print('x==1')\\n")
True
>>> roundtrip("class Test: # A comment here\\n"
... " # A comment with weird indent\\n"
... " after_com = 5\\n"
... " def x(m): return m*5 # a one liner\\n"
... " def y(m): # A whitespace after the colon\\n"
... " return y*4 # 3-space indent\\n")
True
Some error-handling code
>>> roundtrip("try: import somemodule\\n"
... "except ImportError: # comment\\n"
... " print('Can not import' # comment2\\n)"
... "else: print('Loaded')\\n")
True
Balancing continuation
>>> roundtrip("a = (3,4, \\n"
... "5,6)\\n"
... "y = [3, 4,\\n"
... "5]\\n"
... "z = {'a': 5,\\n"
... "'b':15, 'c':True}\\n"
... "x = len(y) + 5 - a[\\n"
... "3] - a[2]\\n"
... "+ len(z) - z[\\n"
... "'b']\\n")
True
Ordinary integers and binary operators
>>> dump_tokens("0xff <= 255")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '0xff' (1, 0) (1, 4)
OP '<=' (1, 5) (1, 7)
NUMBER '255' (1, 8) (1, 11)
>>> dump_tokens("0b10 <= 255")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '0b10' (1, 0) (1, 4)
OP '<=' (1, 5) (1, 7)
NUMBER '255' (1, 8) (1, 11)
>>> dump_tokens("0o123 <= 0O123")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '0o123' (1, 0) (1, 5)
OP '<=' (1, 6) (1, 8)
NUMBER '0O123' (1, 9) (1, 14)
>>> dump_tokens("1234567 > ~0x15")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '1234567' (1, 0) (1, 7)
OP '>' (1, 8) (1, 9)
OP '~' (1, 10) (1, 11)
NUMBER '0x15' (1, 11) (1, 15)
>>> dump_tokens("2134568 != 1231515")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '2134568' (1, 0) (1, 7)
OP '!=' (1, 8) (1, 10)
NUMBER '1231515' (1, 11) (1, 18)
>>> dump_tokens("(-124561-1) & 200000000")
ENCODING 'utf-8' (0, 0) (0, 0)
OP '(' (1, 0) (1, 1)
OP '-' (1, 1) (1, 2)
NUMBER '124561' (1, 2) (1, 8)
OP '-' (1, 8) (1, 9)
NUMBER '1' (1, 9) (1, 10)
OP ')' (1, 10) (1, 11)
OP '&' (1, 12) (1, 13)
NUMBER '200000000' (1, 14) (1, 23)
>>> dump_tokens("0xdeadbeef != -1")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '0xdeadbeef' (1, 0) (1, 10)
OP '!=' (1, 11) (1, 13)
OP '-' (1, 14) (1, 15)
NUMBER '1' (1, 15) (1, 16)
>>> dump_tokens("0xdeadc0de & 12345")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '0xdeadc0de' (1, 0) (1, 10)
OP '&' (1, 11) (1, 12)
NUMBER '12345' (1, 13) (1, 18)
>>> dump_tokens("0xFF & 0x15 | 1234")
ENCODING 'utf-8' (0, 0) (0, 0)
NUMBER '0xFF' (1, 0) (1, 4)
OP '&' (1, 5) (1, 6)
NUMBER '0x15' (1, 7) (1, 11)
OP '|' (1, 12) (1, 13)
NUMBER '1234' (1, 14) (1, 18)
Long integers
>>> dump_tokens("x = 0")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '0' (1, 4) (1, 5)
>>> dump_tokens("x = 0xfffffffffff")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '0xffffffffff (1, 4) (1, 17)
>>> dump_tokens("x = 123141242151251616110")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '123141242151 (1, 4) (1, 25)
>>> dump_tokens("x = -15921590215012591")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
OP '-' (1, 4) (1, 5)
NUMBER '159215902150 (1, 5) (1, 22)
Floating point numbers
>>> dump_tokens("x = 3.14159")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3.14159' (1, 4) (1, 11)
>>> dump_tokens("x = 314159.")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '314159.' (1, 4) (1, 11)
>>> dump_tokens("x = .314159")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '.314159' (1, 4) (1, 11)
>>> dump_tokens("x = 3e14159")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3e14159' (1, 4) (1, 11)
>>> dump_tokens("x = 3E123")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3E123' (1, 4) (1, 9)
>>> dump_tokens("x+y = 3e-1230")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '+' (1, 1) (1, 2)
NAME 'y' (1, 2) (1, 3)
OP '=' (1, 4) (1, 5)
NUMBER '3e-1230' (1, 6) (1, 13)
>>> dump_tokens("x = 3.14e159")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '3.14e159' (1, 4) (1, 12)
String literals
>>> dump_tokens("x = ''; y = \\\"\\\"")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING "''" (1, 4) (1, 6)
OP ';' (1, 6) (1, 7)
NAME 'y' (1, 8) (1, 9)
OP '=' (1, 10) (1, 11)
STRING '""' (1, 12) (1, 14)
>>> dump_tokens("x = '\\\"'; y = \\\"'\\\"")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING '\\'"\\'' (1, 4) (1, 7)
OP ';' (1, 7) (1, 8)
NAME 'y' (1, 9) (1, 10)
OP '=' (1, 11) (1, 12)
STRING '"\\'"' (1, 13) (1, 16)
>>> dump_tokens("x = \\\"doesn't \\\"shrink\\\", does it\\\"")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING '"doesn\\'t "' (1, 4) (1, 14)
NAME 'shrink' (1, 14) (1, 20)
STRING '", does it"' (1, 20) (1, 31)
>>> dump_tokens("x = 'abc' + 'ABC'")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING "'abc'" (1, 4) (1, 9)
OP '+' (1, 10) (1, 11)
STRING "'ABC'" (1, 12) (1, 17)
>>> dump_tokens('y = "ABC" + "ABC"')
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'y' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING '"ABC"' (1, 4) (1, 9)
OP '+' (1, 10) (1, 11)
STRING '"ABC"' (1, 12) (1, 17)
>>> dump_tokens("x = r'abc' + r'ABC' + R'ABC' + R'ABC'")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING "r'abc'" (1, 4) (1, 10)
OP '+' (1, 11) (1, 12)
STRING "r'ABC'" (1, 13) (1, 19)
OP '+' (1, 20) (1, 21)
STRING "R'ABC'" (1, 22) (1, 28)
OP '+' (1, 29) (1, 30)
STRING "R'ABC'" (1, 31) (1, 37)
>>> dump_tokens('y = r"abc" + r"ABC" + R"ABC" + R"ABC"')
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'y' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
STRING 'r"abc"' (1, 4) (1, 10)
OP '+' (1, 11) (1, 12)
STRING 'r"ABC"' (1, 13) (1, 19)
OP '+' (1, 20) (1, 21)
STRING 'R"ABC"' (1, 22) (1, 28)
OP '+' (1, 29) (1, 30)
STRING 'R"ABC"' (1, 31) (1, 37)
>>> dump_tokens("u'abc' + U'abc'")
ENCODING 'utf-8' (0, 0) (0, 0)
STRING "u'abc'" (1, 0) (1, 6)
OP '+' (1, 7) (1, 8)
STRING "U'abc'" (1, 9) (1, 15)
>>> dump_tokens('u"abc" + U"abc"')
ENCODING 'utf-8' (0, 0) (0, 0)
STRING 'u"abc"' (1, 0) (1, 6)
OP '+' (1, 7) (1, 8)
STRING 'U"abc"' (1, 9) (1, 15)
>>> dump_tokens("b'abc' + B'abc'")
ENCODING 'utf-8' (0, 0) (0, 0)
STRING "b'abc'" (1, 0) (1, 6)
OP '+' (1, 7) (1, 8)
STRING "B'abc'" (1, 9) (1, 15)
>>> dump_tokens('b"abc" + B"abc"')
ENCODING 'utf-8' (0, 0) (0, 0)
STRING 'b"abc"' (1, 0) (1, 6)
OP '+' (1, 7) (1, 8)
STRING 'B"abc"' (1, 9) (1, 15)
>>> dump_tokens("br'abc' + bR'abc' + Br'abc' + BR'abc'")
ENCODING 'utf-8' (0, 0) (0, 0)
STRING "br'abc'" (1, 0) (1, 7)
OP '+' (1, 8) (1, 9)
STRING "bR'abc'" (1, 10) (1, 17)
OP '+' (1, 18) (1, 19)
STRING "Br'abc'" (1, 20) (1, 27)
OP '+' (1, 28) (1, 29)
STRING "BR'abc'" (1, 30) (1, 37)
>>> dump_tokens('br"abc" + bR"abc" + Br"abc" + BR"abc"')
ENCODING 'utf-8' (0, 0) (0, 0)
STRING 'br"abc"' (1, 0) (1, 7)
OP '+' (1, 8) (1, 9)
STRING 'bR"abc"' (1, 10) (1, 17)
OP '+' (1, 18) (1, 19)
STRING 'Br"abc"' (1, 20) (1, 27)
OP '+' (1, 28) (1, 29)
STRING 'BR"abc"' (1, 30) (1, 37)
>>> dump_tokens("rb'abc' + rB'abc' + Rb'abc' + RB'abc'")
ENCODING 'utf-8' (0, 0) (0, 0)
STRING "rb'abc'" (1, 0) (1, 7)
OP '+' (1, 8) (1, 9)
STRING "rB'abc'" (1, 10) (1, 17)
OP '+' (1, 18) (1, 19)
STRING "Rb'abc'" (1, 20) (1, 27)
OP '+' (1, 28) (1, 29)
STRING "RB'abc'" (1, 30) (1, 37)
>>> dump_tokens('rb"abc" + rB"abc" + Rb"abc" + RB"abc"')
ENCODING 'utf-8' (0, 0) (0, 0)
STRING 'rb"abc"' (1, 0) (1, 7)
OP '+' (1, 8) (1, 9)
STRING 'rB"abc"' (1, 10) (1, 17)
OP '+' (1, 18) (1, 19)
STRING 'Rb"abc"' (1, 20) (1, 27)
OP '+' (1, 28) (1, 29)
STRING 'RB"abc"' (1, 30) (1, 37)
Operators
>>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'def' (1, 0) (1, 3)
NAME 'd22' (1, 4) (1, 7)
OP '(' (1, 7) (1, 8)
NAME 'a' (1, 8) (1, 9)
OP ',' (1, 9) (1, 10)
NAME 'b' (1, 11) (1, 12)
OP ',' (1, 12) (1, 13)
NAME 'c' (1, 14) (1, 15)
OP '=' (1, 15) (1, 16)
NUMBER '2' (1, 16) (1, 17)
OP ',' (1, 17) (1, 18)
NAME 'd' (1, 19) (1, 20)
OP '=' (1, 20) (1, 21)
NUMBER '2' (1, 21) (1, 22)
OP ',' (1, 22) (1, 23)
OP '*' (1, 24) (1, 25)
NAME 'k' (1, 25) (1, 26)
OP ')' (1, 26) (1, 27)
OP ':' (1, 27) (1, 28)
NAME 'pass' (1, 29) (1, 33)
>>> dump_tokens("def d01v_(a=1, *k, **w): pass")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'def' (1, 0) (1, 3)
NAME 'd01v_' (1, 4) (1, 9)
OP '(' (1, 9) (1, 10)
NAME 'a' (1, 10) (1, 11)
OP '=' (1, 11) (1, 12)
NUMBER '1' (1, 12) (1, 13)
OP ',' (1, 13) (1, 14)
OP '*' (1, 15) (1, 16)
NAME 'k' (1, 16) (1, 17)
OP ',' (1, 17) (1, 18)
OP '**' (1, 19) (1, 21)
NAME 'w' (1, 21) (1, 22)
OP ')' (1, 22) (1, 23)
OP ':' (1, 23) (1, 24)
NAME 'pass' (1, 25) (1, 29)
Comparison
>>> dump_tokens("if 1 < 1 > 1 == 1 >= 5 <= 0x15 <= 0x12 != " +
... "1 and 5 in 1 not in 1 is 1 or 5 is not 1: pass")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'if' (1, 0) (1, 2)
NUMBER '1' (1, 3) (1, 4)
OP '<' (1, 5) (1, 6)
NUMBER '1' (1, 7) (1, 8)
OP '>' (1, 9) (1, 10)
NUMBER '1' (1, 11) (1, 12)
OP '==' (1, 13) (1, 15)
NUMBER '1' (1, 16) (1, 17)
OP '>=' (1, 18) (1, 20)
NUMBER '5' (1, 21) (1, 22)
OP '<=' (1, 23) (1, 25)
NUMBER '0x15' (1, 26) (1, 30)
OP '<=' (1, 31) (1, 33)
NUMBER '0x12' (1, 34) (1, 38)
OP '!=' (1, 39) (1, 41)
NUMBER '1' (1, 42) (1, 43)
NAME 'and' (1, 44) (1, 47)
NUMBER '5' (1, 48) (1, 49)
NAME 'in' (1, 50) (1, 52)
NUMBER '1' (1, 53) (1, 54)
NAME 'not' (1, 55) (1, 58)
NAME 'in' (1, 59) (1, 61)
NUMBER '1' (1, 62) (1, 63)
NAME 'is' (1, 64) (1, 66)
NUMBER '1' (1, 67) (1, 68)
NAME 'or' (1, 69) (1, 71)
NUMBER '5' (1, 72) (1, 73)
NAME 'is' (1, 74) (1, 76)
NAME 'not' (1, 77) (1, 80)
NUMBER '1' (1, 81) (1, 82)
OP ':' (1, 82) (1, 83)
NAME 'pass' (1, 84) (1, 88)
Shift
>>> dump_tokens("x = 1 << 1 >> 5")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
OP '<<' (1, 6) (1, 8)
NUMBER '1' (1, 9) (1, 10)
OP '>>' (1, 11) (1, 13)
NUMBER '5' (1, 14) (1, 15)
Additive
>>> dump_tokens("x = 1 - y + 15 - 1 + 0x124 + z + a[5]")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
OP '-' (1, 6) (1, 7)
NAME 'y' (1, 8) (1, 9)
OP '+' (1, 10) (1, 11)
NUMBER '15' (1, 12) (1, 14)
OP '-' (1, 15) (1, 16)
NUMBER '1' (1, 17) (1, 18)
OP '+' (1, 19) (1, 20)
NUMBER '0x124' (1, 21) (1, 26)
OP '+' (1, 27) (1, 28)
NAME 'z' (1, 29) (1, 30)
OP '+' (1, 31) (1, 32)
NAME 'a' (1, 33) (1, 34)
OP '[' (1, 34) (1, 35)
NUMBER '5' (1, 35) (1, 36)
OP ']' (1, 36) (1, 37)
Multiplicative
>>> dump_tokens("x = 1//1*1/5*12%0x12")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'x' (1, 0) (1, 1)
OP '=' (1, 2) (1, 3)
NUMBER '1' (1, 4) (1, 5)
OP '//' (1, 5) (1, 7)
NUMBER '1' (1, 7) (1, 8)
OP '*' (1, 8) (1, 9)
NUMBER '1' (1, 9) (1, 10)
OP '/' (1, 10) (1, 11)
NUMBER '5' (1, 11) (1, 12)
OP '*' (1, 12) (1, 13)
NUMBER '12' (1, 13) (1, 15)
OP '%' (1, 15) (1, 16)
NUMBER '0x12' (1, 16) (1, 20)
Unary
>>> dump_tokens("~1 ^ 1 & 1 |1 ^ -1")
ENCODING 'utf-8' (0, 0) (0, 0)
OP '~' (1, 0) (1, 1)
NUMBER '1' (1, 1) (1, 2)
OP '^' (1, 3) (1, 4)
NUMBER '1' (1, 5) (1, 6)
OP '&' (1, 7) (1, 8)
NUMBER '1' (1, 9) (1, 10)
OP '|' (1, 11) (1, 12)
NUMBER '1' (1, 12) (1, 13)
OP '^' (1, 14) (1, 15)
OP '-' (1, 16) (1, 17)
NUMBER '1' (1, 17) (1, 18)
>>> dump_tokens("-1*1/1+1*1//1 - ---1**1")
ENCODING 'utf-8' (0, 0) (0, 0)
OP '-' (1, 0) (1, 1)
NUMBER '1' (1, 1) (1, 2)
OP '*' (1, 2) (1, 3)
NUMBER '1' (1, 3) (1, 4)
OP '/' (1, 4) (1, 5)
NUMBER '1' (1, 5) (1, 6)
OP '+' (1, 6) (1, 7)
NUMBER '1' (1, 7) (1, 8)
OP '*' (1, 8) (1, 9)
NUMBER '1' (1, 9) (1, 10)
OP '//' (1, 10) (1, 12)
NUMBER '1' (1, 12) (1, 13)
OP '-' (1, 14) (1, 15)
OP '-' (1, 16) (1, 17)
OP '-' (1, 17) (1, 18)
OP '-' (1, 18) (1, 19)
NUMBER '1' (1, 19) (1, 20)
OP '**' (1, 20) (1, 22)
NUMBER '1' (1, 22) (1, 23)
Selector
>>> dump_tokens("import sys, time\\nx = sys.modules['time'].time()")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'import' (1, 0) (1, 6)
NAME 'sys' (1, 7) (1, 10)
OP ',' (1, 10) (1, 11)
NAME 'time' (1, 12) (1, 16)
NEWLINE '\\n' (1, 16) (1, 17)
NAME 'x' (2, 0) (2, 1)
OP '=' (2, 2) (2, 3)
NAME 'sys' (2, 4) (2, 7)
OP '.' (2, 7) (2, 8)
NAME 'modules' (2, 8) (2, 15)
OP '[' (2, 15) (2, 16)
STRING "'time'" (2, 16) (2, 22)
OP ']' (2, 22) (2, 23)
OP '.' (2, 23) (2, 24)
NAME 'time' (2, 24) (2, 28)
OP '(' (2, 28) (2, 29)
OP ')' (2, 29) (2, 30)
Methods
>>> dump_tokens("@staticmethod\\ndef foo(x,y): pass")
ENCODING 'utf-8' (0, 0) (0, 0)
OP '@' (1, 0) (1, 1)
NAME 'staticmethod (1, 1) (1, 13)
NEWLINE '\\n' (1, 13) (1, 14)
NAME 'def' (2, 0) (2, 3)
NAME 'foo' (2, 4) (2, 7)
OP '(' (2, 7) (2, 8)
NAME 'x' (2, 8) (2, 9)
OP ',' (2, 9) (2, 10)
NAME 'y' (2, 10) (2, 11)
OP ')' (2, 11) (2, 12)
OP ':' (2, 12) (2, 13)
NAME 'pass' (2, 14) (2, 18)
Backslash means line continuation, except for comments
>>> roundtrip("x=1+\\\\n"
... "1\\n"
... "# This is a comment\\\\n"
... "# This also\\n")
True
>>> roundtrip("# Comment \\\\nx = 0")
True
Two string literals on the same line
>>> roundtrip("'' ''")
True
Test roundtrip on random python modules.
pass the '-ucpu' option to process the full directory.
>>> import random
>>> tempdir = os.path.dirname(f) or os.curdir
>>> testfiles = glob.glob(os.path.join(tempdir, "test*.py"))
Tokenize is broken on test_pep3131.py because regular expressions are
broken on the obscure unicode identifiers in it. *sigh*
With roundtrip extended to test the 5-tuple mode of untokenize,
7 more testfiles fail. Remove them also until the failure is diagnosed.
>>> testfiles.remove(os.path.join(tempdir, "test_pep3131.py"))
>>> for f in ('buffer', 'builtin', 'fileio', 'inspect', 'os', 'platform', 'sys'):
... testfiles.remove(os.path.join(tempdir, "test_%s.py") % f)
...
>>> if not support.is_resource_enabled("cpu"):
... testfiles = random.sample(testfiles, 10)
...
>>> for testfile in testfiles:
... if not roundtrip(open(testfile, 'rb')):
... print("Roundtrip failed for file %s" % testfile)
... break
... else: True
True
Evil tabs
>>> dump_tokens("def f():\\n\\tif x\\n \\tpass")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'def' (1, 0) (1, 3)
NAME 'f' (1, 4) (1, 5)
OP '(' (1, 5) (1, 6)
OP ')' (1, 6) (1, 7)
OP ':' (1, 7) (1, 8)
NEWLINE '\\n' (1, 8) (1, 9)
INDENT '\\t' (2, 0) (2, 1)
NAME 'if' (2, 1) (2, 3)
NAME 'x' (2, 4) (2, 5)
NEWLINE '\\n' (2, 5) (2, 6)
INDENT ' \\t' (3, 0) (3, 9)
NAME 'pass' (3, 9) (3, 13)
DEDENT '' (4, 0) (4, 0)
DEDENT '' (4, 0) (4, 0)
Non-ascii identifiers
>>> dump_tokens("Örter = 'places'\\ngrün = 'green'")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'Örter' (1, 0) (1, 5)
OP '=' (1, 6) (1, 7)
STRING "'places'" (1, 8) (1, 16)
NEWLINE '\\n' (1, 16) (1, 17)
NAME 'grün' (2, 0) (2, 4)
OP '=' (2, 5) (2, 6)
STRING "'green'" (2, 7) (2, 14)
Legacy unicode literals:
>>> dump_tokens("Örter = u'places'\\ngrün = U'green'")
ENCODING 'utf-8' (0, 0) (0, 0)
NAME 'Örter' (1, 0) (1, 5)
OP '=' (1, 6) (1, 7)
STRING "u'places'" (1, 8) (1, 17)
NEWLINE '\\n' (1, 17) (1, 18)
NAME 'grün' (2, 0) (2, 4)
OP '=' (2, 5) (2, 6)
STRING "U'green'" (2, 7) (2, 15)
"""
from test import support
from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
open as tokenize_open, Untokenizer)
from io import BytesIO
from unittest import TestCase
import os, sys, glob
import token
def dump_tokens(s):
"""Print out the tokens in s in a table format.
The ENDMARKER is omitted.
"""
f = BytesIO(s.encode('utf-8'))
for type, token, start, end, line in tokenize(f.readline):
if type == ENDMARKER:
break
type = tok_name[type]
print("%(type)-10.10s %(token)-13.13r %(start)s %(end)s" % locals())
def roundtrip(f):
"""
Test roundtrip for `untokenize`. `f` is an open file or a string.
The source code in f is tokenized to both 5- and 2-tuples.
Both sequences are converted back to source code via
tokenize.untokenize(), and the latter tokenized again to 2-tuples.
The test fails if the 3 pair tokenizations do not match.
When untokenize bugs are fixed, untokenize with 5-tuples should
reproduce code that does not contain a backslash continuation
following spaces. A proper test should test this.
This function would be more useful for correcting bugs if it reported
the first point of failure, like assertEqual, rather than just
returning False -- or if it were only used in unittests and not
doctest and actually used assertEqual.
"""
# Get source code and original tokenizations
if isinstance(f, str):
code = f.encode('utf-8')
else:
code = f.read()
f.close()
readline = iter(code.splitlines(keepends=True)).__next__
tokens5 = list(tokenize(readline))
tokens2 = [tok[:2] for tok in tokens5]
# Reproduce tokens2 from pairs
bytes_from2 = untokenize(tokens2)
readline2 = iter(bytes_from2.splitlines(keepends=True)).__next__
tokens2_from2 = [tok[:2] for tok in tokenize(readline2)]
# Reproduce tokens2 from 5-tuples
bytes_from5 = untokenize(tokens5)
readline5 = iter(bytes_from5.splitlines(keepends=True)).__next__
tokens2_from5 = [tok[:2] for tok in tokenize(readline5)]
# Compare 3 versions
return tokens2 == tokens2_from2 == tokens2_from5
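# A minimal sketch of the assertEqual-style variant suggested in the roundtrip()
# docstring above; assert_roundtrip is a hypothetical helper, not part of the
# original test suite. It redoes the 2-tuple round trip but raises at the first
# mismatching token instead of just returning False.
def assert_roundtrip(f):
    if isinstance(f, str):
        code = f.encode('utf-8')
    else:
        code = f.read()
        f.close()
    readline = iter(code.splitlines(keepends=True)).__next__
    tokens2 = [tok[:2] for tok in tokenize(readline)]
    readline2 = iter(untokenize(tokens2).splitlines(keepends=True)).__next__
    tokens2_again = [tok[:2] for tok in tokenize(readline2)]
    for pos, (orig, new) in enumerate(zip(tokens2, tokens2_again)):
        assert orig == new, "first mismatch at token %d: %r != %r" % (pos, orig, new)
    assert len(tokens2) == len(tokens2_again), "token streams differ in length"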
# This is an example from the docs, set up as a doctest.
def decistmt(s):
"""Substitute Decimals for floats in a string of statements.
>>> from decimal import Decimal
>>> s = 'print(+21.3e-5*-.1234/81.7)'
>>> decistmt(s)
"print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))"
The format of the exponent is inherited from the platform C library.
Known cases are "e-007" (Windows) and "e-07" (not Windows). Since
we're only showing 11 digits, and the 12th isn't close to 5, the
rest of the output should be platform-independent.
>>> exec(s) #doctest: +ELLIPSIS
-3.2171603427...e-0...7
Output from calculations with Decimal should be identical across all
platforms.
>>> exec(decistmt(s))
-3.217160342717258261933904529E-7
"""
result = []
g = tokenize(BytesIO(s.encode('utf-8')).readline) # tokenize the string
for toknum, tokval, _, _, _ in g:
if toknum == NUMBER and '.' in tokval: # replace NUMBER tokens
result.extend([
(NAME, 'Decimal'),
(OP, '('),
(STRING, repr(tokval)),
(OP, ')')
])
else:
result.append((toknum, tokval))
return untokenize(result).decode('utf-8')
class TestTokenizerAdheresToPep0263(TestCase):
"""
Test that tokenizer adheres to the coding behaviour stipulated in PEP 0263.
"""
def _testFile(self, filename):
path = os.path.join(os.path.dirname(__file__), filename)
return roundtrip(open(path, 'rb'))
def test_utf8_coding_cookie_and_no_utf8_bom(self):
f = 'tokenize_tests-utf8-coding-cookie-and-no-utf8-bom-sig.txt'
self.assertTrue(self._testFile(f))
def test_latin1_coding_cookie_and_utf8_bom(self):
"""
As per PEP 0263, if a file starts with a utf-8 BOM signature, the only
allowed encoding for the comment is 'utf-8'. The text file used in
this test starts with a BOM signature, but specifies latin1 as the
coding, so verify that a SyntaxError is raised, which matches the
behaviour of the interpreter when it encounters a similar condition.
"""
f = 'tokenize_tests-latin1-coding-cookie-and-utf8-bom-sig.txt'
self.assertRaises(SyntaxError, self._testFile, f)
def test_no_coding_cookie_and_utf8_bom(self):
f = 'tokenize_tests-no-coding-cookie-and-utf8-bom-sig-only.txt'
self.assertTrue(self._testFile(f))
def test_utf8_coding_cookie_and_utf8_bom(self):
f = 'tokenize_tests-utf8-coding-cookie-and-utf8-bom-sig.txt'
self.assertTrue(self._testFile(f))
def test_bad_coding_cookie(self):
self.assertRaises(SyntaxError, self._testFile, 'bad_coding.py')
self.assertRaises(SyntaxError, self._testFile, 'bad_coding2.py')
class Test_Tokenize(TestCase):
def test__tokenize_decodes_with_specified_encoding(self):
literal = '"ЉЊЈЁЂ"'
line = literal.encode('utf-8')
first = False
def readline():
nonlocal first
if not first:
first = True
return line
else:
return b''
# skip the initial encoding token and the end token
tokens = list(_tokenize(readline, encoding='utf-8'))[1:-1]
expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
self.assertEqual(tokens, expected_tokens,
"bytes not decoded with encoding")
def test__tokenize_does_not_decode_with_encoding_none(self):
literal = '"ЉЊЈЁЂ"'
first = False
def readline():
nonlocal first
if not first:
first = True
return literal
else:
return b''
# skip the end token
tokens = list(_tokenize(readline, encoding=None))[:-1]
expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
self.assertEqual(tokens, expected_tokens,
"string not tokenized when encoding is None")
class TestDetectEncoding(TestCase):
def get_readline(self, lines):
index = 0
def readline():
nonlocal index
if index == len(lines):
raise StopIteration
line = lines[index]
index += 1
return line
return readline
def test_no_bom_no_encoding_cookie(self):
lines = (
b'# something\n',
b'print(something)\n',
b'do_something(else)\n'
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'utf-8')
self.assertEqual(consumed_lines, list(lines[:2]))
def test_bom_no_cookie(self):
lines = (
b'\xef\xbb\xbf# something\n',
b'print(something)\n',
b'do_something(else)\n'
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'utf-8-sig')
self.assertEqual(consumed_lines,
[b'# something\n', b'print(something)\n'])
def test_cookie_first_line_no_bom(self):
lines = (
b'# -*- coding: latin-1 -*-\n',
b'print(something)\n',
b'do_something(else)\n'
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'iso-8859-1')
self.assertEqual(consumed_lines, [b'# -*- coding: latin-1 -*-\n'])
def test_matched_bom_and_cookie_first_line(self):
lines = (
b'\xef\xbb\xbf# coding=utf-8\n',
b'print(something)\n',
b'do_something(else)\n'
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'utf-8-sig')
self.assertEqual(consumed_lines, [b'# coding=utf-8\n'])
def test_mismatched_bom_and_cookie_first_line_raises_syntaxerror(self):
lines = (
b'\xef\xbb\xbf# vim: set fileencoding=ascii :\n',
b'print(something)\n',
b'do_something(else)\n'
)
readline = self.get_readline(lines)
self.assertRaises(SyntaxError, detect_encoding, readline)
def test_cookie_second_line_no_bom(self):
lines = (
b'#! something\n',
b'# vim: set fileencoding=ascii :\n',
b'print(something)\n',
b'do_something(else)\n'
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'ascii')
expected = [b'#! something\n', b'# vim: set fileencoding=ascii :\n']
self.assertEqual(consumed_lines, expected)
def test_matched_bom_and_cookie_second_line(self):
lines = (
b'\xef\xbb\xbf#! something\n',
b'f# coding=utf-8\n',
b'print(something)\n',
b'do_something(else)\n'
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'utf-8-sig')
self.assertEqual(consumed_lines,
[b'#! something\n', b'f# coding=utf-8\n'])
def test_mismatched_bom_and_cookie_second_line_raises_syntaxerror(self):
lines = (
b'\xef\xbb\xbf#! something\n',
b'# vim: set fileencoding=ascii :\n',
b'print(something)\n',
b'do_something(else)\n'
)
readline = self.get_readline(lines)
self.assertRaises(SyntaxError, detect_encoding, readline)
def test_cookie_second_line_noncommented_first_line(self):
lines = (
b"print('\xc2\xa3')\n",
b'# vim: set fileencoding=iso8859-15 :\n',
b"print('\xe2\x82\xac')\n"
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'utf-8')
expected = [b"print('\xc2\xa3')\n"]
self.assertEqual(consumed_lines, expected)
def test_cookie_second_line_commented_first_line(self):
lines = (
b"#print('\xc2\xa3')\n",
b'# vim: set fileencoding=iso8859-15 :\n',
b"print('\xe2\x82\xac')\n"
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'iso8859-15')
expected = [b"#print('\xc2\xa3')\n", b'# vim: set fileencoding=iso8859-15 :\n']
self.assertEqual(consumed_lines, expected)
def test_cookie_second_line_empty_first_line(self):
lines = (
b'\n',
b'# vim: set fileencoding=iso8859-15 :\n',
b"print('\xe2\x82\xac')\n"
)
encoding, consumed_lines = detect_encoding(self.get_readline(lines))
self.assertEqual(encoding, 'iso8859-15')
expected = [b'\n', b'# vim: set fileencoding=iso8859-15 :\n']
self.assertEqual(consumed_lines, expected)
def test_latin1_normalization(self):
# See get_normal_name() in tokenizer.c.
encodings = ("latin-1", "iso-8859-1", "iso-latin-1", "latin-1-unix",
"iso-8859-1-unix", "iso-latin-1-mac")
for encoding in encodings:
for rep in ("-", "_"):
enc = encoding.replace("-", rep)
lines = (b"#!/usr/bin/python\n",
b"# coding: " + enc.encode("ascii") + b"\n",
b"print(things)\n",
b"do_something += 4\n")
rl = self.get_readline(lines)
found, consumed_lines = detect_encoding(rl)
self.assertEqual(found, "iso-8859-1")
def test_syntaxerror_latin1(self):
# Issue 14629: need to raise SyntaxError if the first
# line(s) have non-UTF-8 characters
lines = (
b'print("\xdf")', # Latin-1: LATIN SMALL LETTER SHARP S
)
readline = self.get_readline(lines)
self.assertRaises(SyntaxError, detect_encoding, readline)
def test_utf8_normalization(self):
# See get_normal_name() in tokenizer.c.
encodings = ("utf-8", "utf-8-mac", "utf-8-unix")
for encoding in encodings:
for rep in ("-", "_"):
enc = encoding.replace("-", rep)
lines = (b"#!/usr/bin/python\n",
b"# coding: " + enc.encode("ascii") + b"\n",
b"1 + 3\n")
rl = self.get_readline(lines)
found, consumed_lines = detect_encoding(rl)
self.assertEqual(found, "utf-8")
def test_short_files(self):
readline = self.get_readline((b'print(something)\n',))
encoding, consumed_lines = detect_encoding(readline)
self.assertEqual(encoding, 'utf-8')
self.assertEqual(consumed_lines, [b'print(something)\n'])
encoding, consumed_lines = detect_encoding(self.get_readline(()))
self.assertEqual(encoding, 'utf-8')
self.assertEqual(consumed_lines, [])
readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',))
encoding, consumed_lines = detect_encoding(readline)
self.assertEqual(encoding, 'utf-8-sig')
self.assertEqual(consumed_lines, [b'print(something)\n'])
readline = self.get_readline((b'\xef\xbb\xbf',))
encoding, consumed_lines = detect_encoding(readline)
self.assertEqual(encoding, 'utf-8-sig')
self.assertEqual(consumed_lines, [])
readline = self.get_readline((b'# coding: bad\n',))
self.assertRaises(SyntaxError, detect_encoding, readline)
def test_false_encoding(self):
# Issue 18873: "Encoding" detected in non-comment lines
readline = self.get_readline((b'print("#coding=fake")',))
encoding, consumed_lines = detect_encoding(readline)
self.assertEqual(encoding, 'utf-8')
self.assertEqual(consumed_lines, [b'print("#coding=fake")'])
def test_open(self):
filename = support.TESTFN + '.py'
self.addCleanup(support.unlink, filename)
# test coding cookie
for encoding in ('iso-8859-15', 'utf-8'):
with open(filename, 'w', encoding=encoding) as fp:
print("# coding: %s" % encoding, file=fp)
print("print('euro:\u20ac')", file=fp)
with tokenize_open(filename) as fp:
self.assertEqual(fp.encoding, encoding)
self.assertEqual(fp.mode, 'r')
# test BOM (no coding cookie)
with open(filename, 'w', encoding='utf-8-sig') as fp:
print("print('euro:\u20ac')", file=fp)
with tokenize_open(filename) as fp:
self.assertEqual(fp.encoding, 'utf-8-sig')
self.assertEqual(fp.mode, 'r')
def test_filename_in_exception(self):
# When possible, include the file name in the exception.
path = 'some_file_path'
lines = (
b'print("\xdf")', # Latin-1: LATIN SMALL LETTER SHARP S
)
class Bunk:
def __init__(self, lines, path):
self.name = path
self._lines = lines
self._index = 0
def readline(self):
if self._index == len(lines):
raise StopIteration
line = lines[self._index]
self._index += 1
return line
with self.assertRaises(SyntaxError):
ins = Bunk(lines, path)
# Make sure lacking a name isn't an issue.
del ins.name
detect_encoding(ins.readline)
with self.assertRaisesRegex(SyntaxError, '.*{}'.format(path)):
ins = Bunk(lines, path)
detect_encoding(ins.readline)
class TestTokenize(TestCase):
def test_tokenize(self):
import tokenize as tokenize_module
encoding = object()
encoding_used = None
def mock_detect_encoding(readline):
return encoding, ['first', 'second']
def mock__tokenize(readline, encoding):
nonlocal encoding_used
encoding_used = encoding
out = []
while True:
next_line = readline()
if next_line:
out.append(next_line)
continue
return out
counter = 0
def mock_readline():
nonlocal counter
counter += 1
if counter == 5:
return b''
return counter
orig_detect_encoding = tokenize_module.detect_encoding
orig__tokenize = tokenize_module._tokenize
tokenize_module.detect_encoding = mock_detect_encoding
tokenize_module._tokenize = mock__tokenize
try:
results = tokenize(mock_readline)
self.assertEqual(list(results), ['first', 'second', 1, 2, 3, 4])
finally:
tokenize_module.detect_encoding = orig_detect_encoding
tokenize_module._tokenize = orig__tokenize
self.assertTrue(encoding_used, encoding)
def assertExactTypeEqual(self, opstr, *optypes):
tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline))
num_optypes = len(optypes)
self.assertEqual(len(tokens), 2 + num_optypes)
self.assertEqual(token.tok_name[tokens[0].exact_type],
token.tok_name[ENCODING])
for i in range(num_optypes):
self.assertEqual(token.tok_name[tokens[i + 1].exact_type],
token.tok_name[optypes[i]])
self.assertEqual(token.tok_name[tokens[1 + num_optypes].exact_type],
token.tok_name[token.ENDMARKER])
def test_exact_type(self):
self.assertExactTypeEqual('()', token.LPAR, token.RPAR)
self.assertExactTypeEqual('[]', token.LSQB, token.RSQB)
self.assertExactTypeEqual(':', token.COLON)
self.assertExactTypeEqual(',', token.COMMA)
self.assertExactTypeEqual(';', token.SEMI)
self.assertExactTypeEqual('+', token.PLUS)
self.assertExactTypeEqual('-', token.MINUS)
self.assertExactTypeEqual('*', token.STAR)
self.assertExactTypeEqual('/', token.SLASH)
self.assertExactTypeEqual('|', token.VBAR)
self.assertExactTypeEqual('&', token.AMPER)
self.assertExactTypeEqual('<', token.LESS)
self.assertExactTypeEqual('>', token.GREATER)
self.assertExactTypeEqual('=', token.EQUAL)
self.assertExactTypeEqual('.', token.DOT)
self.assertExactTypeEqual('%', token.PERCENT)
self.assertExactTypeEqual('{}', token.LBRACE, token.RBRACE)
self.assertExactTypeEqual('==', token.EQEQUAL)
self.assertExactTypeEqual('!=', token.NOTEQUAL)
self.assertExactTypeEqual('<=', token.LESSEQUAL)
self.assertExactTypeEqual('>=', token.GREATEREQUAL)
self.assertExactTypeEqual('~', token.TILDE)
self.assertExactTypeEqual('^', token.CIRCUMFLEX)
self.assertExactTypeEqual('<<', token.LEFTSHIFT)
self.assertExactTypeEqual('>>', token.RIGHTSHIFT)
self.assertExactTypeEqual('**', token.DOUBLESTAR)
self.assertExactTypeEqual('+=', token.PLUSEQUAL)
self.assertExactTypeEqual('-=', token.MINEQUAL)
self.assertExactTypeEqual('*=', token.STAREQUAL)
self.assertExactTypeEqual('/=', token.SLASHEQUAL)
self.assertExactTypeEqual('%=', token.PERCENTEQUAL)
self.assertExactTypeEqual('&=', token.AMPEREQUAL)
self.assertExactTypeEqual('|=', token.VBAREQUAL)
self.assertExactTypeEqual('^=', token.CIRCUMFLEXEQUAL)
self.assertExactTypeEqual('^=', token.CIRCUMFLEXEQUAL)
self.assertExactTypeEqual('<<=', token.LEFTSHIFTEQUAL)
self.assertExactTypeEqual('>>=', token.RIGHTSHIFTEQUAL)
self.assertExactTypeEqual('**=', token.DOUBLESTAREQUAL)
self.assertExactTypeEqual('//', token.DOUBLESLASH)
self.assertExactTypeEqual('//=', token.DOUBLESLASHEQUAL)
self.assertExactTypeEqual('@', token.AT)
self.assertExactTypeEqual('a**2+b**2==c**2',
NAME, token.DOUBLESTAR, NUMBER,
token.PLUS,
NAME, token.DOUBLESTAR, NUMBER,
token.EQEQUAL,
NAME, token.DOUBLESTAR, NUMBER)
self.assertExactTypeEqual('{1, 2, 3}',
token.LBRACE,
token.NUMBER, token.COMMA,
token.NUMBER, token.COMMA,
token.NUMBER,
token.RBRACE)
self.assertExactTypeEqual('^(x & 0x1)',
token.CIRCUMFLEX,
token.LPAR,
token.NAME, token.AMPER, token.NUMBER,
token.RPAR)
def test_pathological_trailing_whitespace(self):
# See http://bugs.python.org/issue16152
self.assertExactTypeEqual('@ ', token.AT)
class UntokenizeTest(TestCase):
def test_bad_input_order(self):
# raise if previous row
u = Untokenizer()
u.prev_row = 2
u.prev_col = 2
with self.assertRaises(ValueError) as cm:
u.add_whitespace((1,3))
self.assertEqual(cm.exception.args[0],
'start (1,3) precedes previous end (2,2)')
# raise if previous column in row
self.assertRaises(ValueError, u.add_whitespace, (2,1))
def test_backslash_continuation(self):
# The problem is that <whitespace>\<newline> leaves no token
u = Untokenizer()
u.prev_row = 1
u.prev_col = 1
u.tokens = []
u.add_whitespace((2, 0))
self.assertEqual(u.tokens, ['\\\n'])
u.prev_row = 2
u.add_whitespace((4, 4))
self.assertEqual(u.tokens, ['\\\n', '\\\n\\\n', ' '])
self.assertTrue(roundtrip('a\n b\n c\n \\\n c\n'))
def test_iter_compat(self):
u = Untokenizer()
token = (NAME, 'Hello')
tokens = [(ENCODING, 'utf-8'), token]
u.compat(token, iter([]))
self.assertEqual(u.tokens, ["Hello "])
u = Untokenizer()
self.assertEqual(u.untokenize(iter([token])), 'Hello ')
u = Untokenizer()
self.assertEqual(u.untokenize(iter(tokens)), 'Hello ')
self.assertEqual(u.encoding, 'utf-8')
self.assertEqual(untokenize(iter(tokens)), b'Hello ')
__test__ = {"doctests" : doctests, 'decistmt': decistmt}
def test_main():
from test import test_tokenize
support.run_doctest(test_tokenize, True)
support.run_unittest(TestTokenizerAdheresToPep0263)
support.run_unittest(Test_Tokenize)
support.run_unittest(TestDetectEncoding)
support.run_unittest(TestTokenize)
support.run_unittest(UntokenizeTest)
if __name__ == "__main__":
test_main()
|
lgpl-3.0
|
t0x0/random
|
unlockerv.py
|
2
|
3216
|
#!/usr/bin/env python
# MIT License, Copyright 2015 t0x0
# Full text in 'LICENSE' file
# Will not work for any ransomware other than "Locker v*" by Poka Brightminds.
# Untested. Alpha code. Use at your own risk.
# Do not drink, recycled code in use. Code green. Feedback: [email protected]
# Prerequisite: pycrypto - https://www.dlitz.net/software/pycrypto/
# Python 2.7.9 and pycrypto 2.6.1
import sys, os, struct, string, httplib, copy
import pyaes
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Hash import SHA
from Crypto.PublicKey import RSA
from xml.dom import minidom
from base64 import b64decode
if len(sys.argv) < 3:
sys.exit("Error: incorrect arguments. Usage: unlockerv.py <encrypted file name> <bitcoin address> [decrypted file name]\nWarning, will overwrite output file without prior permission.")
encpath = sys.argv[1]
btcaddress = sys.argv[2]
if len(sys.argv) == 4:
decpath = sys.argv[3]
else:
splitencpath = string.rsplit(encpath, '.', 1)
decpath = splitencpath[0] + '.decrypted.' + splitencpath[1]
print 'Input File: ' + encpath
print 'Output File: ' + decpath
print 'Bitcoin Address: ' + btcaddress
encfp = open(encpath, 'rb')
decfp = open(decpath, 'wb')
#Get btc address/keys via HTTP
conn = httplib.HTTPConnection("www.t0x0.com")
conn.request("GET", "/lockervkey/" + btcaddress)
res = conn.getresponse()
if res.status != 200:
sys.exit("Error: bitcoin address not found. Please check the address and try again.\n If it is still not found, the keys are not available and decryption can not proceed.")
keystring = "<x>" + string.translate(res.read() + "</x>", None, ",")
xmldoc = minidom.parseString(keystring)
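# The response body holds two .NET-style RSAKeyValue blobs; the child indices
# used below assume the usual element order (Modulus, Exponent, P, Q, DP, DQ,
# InverseQ, D), each value base64-encoded big-endian.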
(key1, key2) = xmldoc.getElementsByTagName('RSAKeyValue')
modulusraw = b64decode(key1.childNodes[0].childNodes[0].nodeValue)
modulus = long(eval('0x' + ''.join(['%02X' % struct.unpack('B', x)[0] for x in modulusraw])))
exponentraw = b64decode(key1.childNodes[1].childNodes[0].nodeValue)
exponent = long(eval('0x' + ''.join(['%02X' % struct.unpack('B', x)[0] for x in exponentraw])))
praw = b64decode(key2.childNodes[2].childNodes[0].nodeValue)
p = long(eval('0x' + ''.join(['%02X' % struct.unpack('B', x)[0] for x in praw])))
draw = b64decode(key2.childNodes[7].childNodes[0].nodeValue)
d = long(eval('0x' + ''.join(['%02X' % struct.unpack('B', x)[0] for x in draw])))
qraw = b64decode(key2.childNodes[3].childNodes[0].nodeValue)
q = long(eval('0x' + ''.join(['%02X' % struct.unpack('B', x)[0] for x in qraw])))
qinvraw = b64decode(key2.childNodes[6].childNodes[0].nodeValue)
qinv = long(eval('0x' + ''.join(['%02X' % struct.unpack('B', x)[0] for x in qinvraw])))
r = RSA.construct((modulus, exponent, d, p, q))
h = SHA.new()
cipher = PKCS1_OAEP.new(r, h)
# header: 4-byte little-endian length, then an RSA-OAEP encrypted blob laid out
# as <ivlen><iv><keylen><key> (lengths assumed to be little-endian uint32)
(headerlen,) = struct.unpack("<L", encfp.read(4))
header = encfp.read(headerlen)
decryptedheader = cipher.decrypt(header)
(ivlen,) = struct.unpack("<L", decryptedheader[0:4])
ivlen = int(ivlen)
iv = decryptedheader[4:ivlen+4]
(keylen,) = struct.unpack("<L", decryptedheader[ivlen+4:ivlen+8])
keylen = int(keylen)
key = decryptedheader[ivlen+8:ivlen+8+keylen]  # key follows its own 4-byte length field
ciphertext = encfp.read()
# Decrypt using Rijndael 256 CBC. No confirmed working implementation was found; the sketch below is a best-effort stand-in.
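# Minimal sketch, assuming the payload is standard AES-256-CBC with PKCS#7
# padding (32-byte key, 16-byte IV): pyaes implements AES (128-bit blocks),
# not true Rijndael with 256-bit blocks, so this only works under that assumption.
decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(key, iv=iv))
plaintext = decrypter.feed(ciphertext)
plaintext += decrypter.feed()  # flush the final block and strip the padding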
decfp.write(plaintext)
|
mit
|
0Chencc/CTFCrackTools
|
Lib/distutils/tests/test_bdist_wininst.py
|
99
|
1044
|
"""Tests for distutils.command.bdist_wininst."""
import unittest
from test.test_support import run_unittest
from distutils.command.bdist_wininst import bdist_wininst
from distutils.tests import support
class BuildWinInstTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_get_exe_bytes(self):
# issue5731: command was broken on non-windows platforms
# this test makes sure it works now for every platform
# let's create a command
pkg_pth, dist = self.create_dist()
cmd = bdist_wininst(dist)
cmd.ensure_finalized()
# let's run the code that finds the right wininst*.exe file
# and make sure it finds it and returns its content
# no matter what platform we have
exe_file = cmd.get_exe_bytes()
self.assertTrue(len(exe_file) > 10)
def test_suite():
return unittest.makeSuite(BuildWinInstTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
|
gpl-3.0
|
ict-felix/stack
|
modules/resource/utilities/rspecs/serm/request_formatter.py
|
1
|
4625
|
from core.utils.urns import URNUtils
from rspecs.tnrm.request_formatter import DEFAULT_XS,\
TNRMv3RequestFormatter, DEFAULT_XMLNS, DEFAULT_SHARED_VLAN,\
DEFAULT_REQ_SCHEMA_LOCATION
from lxml import etree
DEFAULT_FELIX = "http://ict-felix.eu/serm_request"
class SERMv3RequestFormatter(TNRMv3RequestFormatter):
def __init__(self, xmlns=DEFAULT_XMLNS, xs=DEFAULT_XS,
sharedvlan=DEFAULT_SHARED_VLAN,
schema_location=DEFAULT_REQ_SCHEMA_LOCATION):
super(SERMv3RequestFormatter, self).__init__(
xmlns, xs, sharedvlan, DEFAULT_FELIX, schema_location)
self.__sv = sharedvlan
def node(self, n, sharedvlan_ns_in_ifaces=True):
"""
Same logic as in TNRM, but with ability to avoid including
'sharedvlan' namespace for RSPecs with new format
"""
node_ = etree.SubElement(self.rspec, "{%s}node" % (self.xmlns))
node_.attrib["client_id"] = n.get("component_id")
node_.attrib["component_manager_id"] = n.get("component_manager_id")
if n.get("exclusive") is not None:
node_.attrib["exclusive"] = n.get("exclusive")
if n.get("sliver_type_name") is not None:
sliver_ = etree.SubElement(node_, "{%s}sliver_type" % (self.xmlns))
sliver_.attrib["name"] = n.get("sliver_type_name")
for i in n.get("interfaces"):
intf_ = etree.SubElement(node_, "{%s}interface" % (self.xmlns))
intf_.attrib["client_id"] = i.get("component_id")
if sharedvlan_ns_in_ifaces:
for v in i.get("vlan"):
svlan_ = etree.SubElement(intf_,
"{%s}link_shared_vlan" % (self.__sv))
svlan_.attrib["vlantag"] = v.get("tag")
if v.get("name") is not None:
svlan_.attrib["name"] = v.get("name")
if v.get("description") is not None:
svlan_.attrib["description"] = v.get("description")
def link(self, link):
l = etree.SubElement(self.rspec, "{%s}link" % (self.xmlns))
link_cid = link.get("component_id")
l.attrib["client_id"] = link_cid
if link.get("component_manager_name") is not None:
m = etree.SubElement(l, "{%s}component_manager" % (self.xmlns))
m.attrib["name"] = link.get("component_manager_name")
t = etree.SubElement(l, "{%s}link_type" % (self.xmlns))
t.attrib["name"] = link.get("link_type")
for i in link.get("interface_ref"):
interface = etree.SubElement(l, "{%s}interface_ref" % (self.xmlns))
if_cid = i.get("component_id")
# NOTE: commented due to failure on internal DB retrieval
# New RSpec style (including VLANs in link) =>
# add "+vlan=<VLAN>" to the comp. ID of each interface
# if_cid = add_vlan_to_link(link_cid, iface_cid)
interface.attrib["client_id"] = if_cid
# Note: vlantag attribute used only in original RSpec format
# (where VLANs are not defined in the interface's component ID)
if "vlan=" not in i.get("component_id") and \
i.get('vlantag') is not None:
interface.attrib["{%s}vlan" % (DEFAULT_FELIX)] =\
i.get('vlantag')
# NOTE: property tag not used
# for p in link.get("property"):
# prop = etree.SubElement(l, "{%s}property" % (self.xmlns))
# prop.attrib["source_id"] = p.get("source_id")
# prop.attrib["dest_id"] = p.get("dest_id")
# prop.attrib["capacity"] = p.get("capacity")
def add_vlan_to_link(self, link_cid, iface_cid):
"""
Add the VLAN tag to the component ID of a link's interface
when using a newly formatted RSpec.
The format used is "urn+...+datapath+<dpid>_<port>+vlan=<vlan>".
"""
if "vlan=" in link_cid:
urn_src, vlan_src, urn_dst, vlan_dst = \
URNUtils.get_fields_from_domain_link_id(link_cid)
if_vlan_pairs = {urn_src: vlan_src, urn_dst: vlan_dst}
# if_dpid = URNUtils.get_datapath_from_datapath_id(if_cid)
if_dpid, if_port = \
URNUtils.get_datapath_and_port_from_datapath_id(iface_cid)
if_dpid_port = "%s_%s" % (if_dpid, if_port)
for if_vlan in if_vlan_pairs.keys():
if if_dpid_port in if_vlan:
iface_cid += "+vlan=%s" % if_vlan_pairs[if_vlan]
break
return iface_cid
|
apache-2.0
|
allwinnertech/platform_external_chromium
|
chrome/common/extensions/docs/server/chromeextensionsdocs.py
|
64
|
15204
|
#!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cgi
import logging
import re
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import memcache
from google.appengine.api import urlfetch
# TODO(nickbaum): unit tests
# TODO(nickbaum): is this the right way to do constants?
class Channel():
def __init__(self, name, tag):
self.name = name
self.tag = tag
# TODO(nickbaum): unit test this
def matchPath(self, path):
match = "/" + self.name + "/"
if path[0:len(match)] == match:
return True
else:
return False
Channel.DEV = Channel("dev", "2.0-dev")
Channel.BETA = Channel("beta", "1.1-beta")
Channel.STABLE = Channel("stable", "")
Channel.CHANNELS = [Channel.DEV, Channel.BETA, Channel.STABLE]
Channel.TRUNK = Channel("trunk", "")
Channel.DEFAULT = Channel.STABLE
DEFAULT_CACHE_TIME = 300
class MainPage(webapp.RequestHandler):
# get page from memcache, or else fetch it from src
def get(self):
path = os.path.realpath(os.path.join('/', self.request.path))
# special path to invoke the unit tests
# TODO(nickbaum): is there a less ghetto way to invoke the unit test?
if path == "/test":
self.unitTest()
return
# if root, redirect to index.html
# TODO(nickbaum): this doesn't handle /chrome/extensions/trunk, etc
if (path == "/chrome/extensions") or (path == "chrome/extensions/"):
self.redirect("/chrome/extensions/index.html")
return
# else remove prefix
if(path[:18] == "/chrome/extensions"):
path = path[18:]
# TODO(nickbaum): there's a subtle bug here: if there are two instances of the app,
# their default caches will override each other. This is bad!
result = memcache.get(path)
if result is None:
logging.info("Cache miss: " + path)
url = self.getSrcUrl(path)
if (url[1] is not Channel.TRUNK) and (url[0] != "http://src.chromium.org/favicon.ico"):
branch = self.getBranch(url[1])
url = url[0] % branch
else:
url = url[0]
logging.info("Path: " + self.request.path)
logging.info("Url: " + url)
try:
result = urlfetch.fetch(url + self.request.query_string)
if result.status_code != 200:
logging.error("urlfetch failed: " + url)
# TODO(nickbaum): what should we do when the urlfetch fails?
except:
logging.error("urlfetch failed: " + url)
# TODO(nickbaum): what should we do when the urlfetch fails?
try:
if not memcache.add(path, result, DEFAULT_CACHE_TIME):
logging.error("Memcache set failed.")
except:
logging.error("Memcache set failed.")
for key in result.headers:
self.response.headers[key] = result.headers[key]
self.response.out.write(result.content)
def head(self):
self.get()
# get the src url corresponding to the request
# returns a tuple of the url and the branch
# this function is the only part that is unit tested
def getSrcUrl(self, path):
# from the path they provided, figure out which channel they requested
# TODO(nickbaum) clean this logic up
# find the first subdirectory of the path
path = path.split('/', 2)
url = "http://src.chromium.org/viewvc/chrome/"
channel = None
# if there's no subdirectory, choose the default channel
# otherwise, figure out if the subdirectory corresponds to a channel
if len(path) == 2:
path.append("")
if path[1] == "":
channel = Channel.DEFAULT
if(Channel.DEFAULT == Channel.TRUNK):
url = url + "trunk/src/chrome/"
else:
url = url + "branches/%s/src/chrome/"
path = ""
elif path[1] == Channel.TRUNK.name:
url = url + "trunk/src/chrome/"
channel = Channel.TRUNK
path = path[2]
else:
# otherwise, run through the different channel options
for c in Channel.CHANNELS:
if(path[1] == c.name):
channel = c
url = url + "branches/%s/src/chrome/"
path = path[2]
break
# if the subdirectory doesn't correspond to a channel, use the default
if channel is None:
channel = Channel.DEFAULT
if(Channel.DEFAULT == Channel.TRUNK):
url = url + "trunk/src/chrome/"
else:
url = url + "branches/%s/src/chrome/"
if path[2] != "":
path = path[1] + "/" + path[2]
else:
path = path[1]
# special cases
# TODO(nickbaum): this is super cumbersome to maintain
if path == "third_party/jstemplate/jstemplate_compiled.js":
url = url + path
elif path == "api/extension_api.json":
url = url + "common/extensions/" + path
elif path == "favicon.ico":
url = "http://src.chromium.org/favicon.ico"
else:
if path == "":
path = "index.html"
url = url + "common/extensions/docs/" + path
return [url, channel]
# get the current version number for the channel requested (dev, beta or stable)
# TODO(nickbaum): move to Channel object
def getBranch(self, channel):
branch = memcache.get(channel.name)
if branch is None:
# query Omaha to figure out which version corresponds to this channel
postdata = """<?xml version="1.0" encoding="UTF-8"?>
<o:gupdate xmlns:o="http://www.google.com/update2/request" protocol="2.0" testsource="crxdocs">
<o:app appid="{8A69D345-D564-463C-AFF1-A69D9E530F96}" version="0.0.0.0" lang="">
<o:updatecheck tag="%s" installsource="ondemandcheckforupdates" />
</o:app>
</o:gupdate>
""" % channel.tag
result = urlfetch.fetch(url="https://tools.google.com/service/update2",
payload=postdata,
method=urlfetch.POST,
headers={'Content-Type': 'application/x-www-form-urlencoded',
'X-USER-IP': '72.1.1.1'})
if result.status_code != 200:
logging.error("urlfetch failed.")
# TODO(nickbaum): what should we do when the urlfetch fails?
# find branch in response
match = re.search(r'<updatecheck Version="\d+\.\d+\.(\d+)\.\d+"', result.content)
if match is None:
logging.error("Version number not found: " + result.content)
#TODO(nickbaum): should we fall back on trunk in this case?
branch = match.group(1)
# TODO(nickbaum): make cache time a constant
if not memcache.add(channel.name, branch, DEFAULT_CACHE_TIME):
logging.error("Memcache set failed.")
return branch
# TODO(nickbaum): is there a more elegant way to write this unit test?
# I deliberately kept it dumb to avoid errors sneaking in, but it's so verbose...
# TODO(nickbaum): should I break this up into multiple files?
def unitTest(self):
self.response.out.write("Testing TRUNK<br/>")
self.check("/trunk/", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
self.check("/trunk/index.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
self.check("/trunk/getstarted.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/getstarted.html", Channel.TRUNK)
self.check("/trunk/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.TRUNK)
self.response.out.write("<br/>Testing DEV<br/>")
self.check("/dev/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
self.check("/dev/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
self.check("/dev/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.DEV)
self.check("/dev/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.DEV)
self.response.out.write("<br/>Testing BETA<br/>")
self.check("/beta/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.BETA)
self.check("/beta/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.BETA)
self.check("/beta/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.BETA)
self.check("/beta/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.BETA)
self.response.out.write("<br/>Testing STABLE<br/>")
self.check("/stable/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.STABLE)
self.check("/stable/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.STABLE)
self.check("/stable/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.STABLE)
self.check("/stable/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.STABLE)
self.response.out.write("<br/>Testing jstemplate_compiled.js<br/>")
self.check("/trunk/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.TRUNK)
self.check("/dev/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.DEV)
self.check("/beta/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.BETA)
self.check("/stable/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.STABLE)
self.response.out.write("<br/>Testing extension_api.json<br/>")
self.check("/trunk/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/api/extension_api.json", Channel.TRUNK)
self.check("/dev/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.DEV)
self.check("/beta/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.BETA)
self.check("/stable/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.STABLE)
self.response.out.write("<br/>Testing favicon.ico<br/>")
self.check("/trunk/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.TRUNK)
self.check("/dev/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.DEV)
self.check("/beta/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.BETA)
self.check("/stable/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.STABLE)
self.response.out.write("<br/>Testing DEFAULT<br/>")
temp = Channel.DEFAULT
Channel.DEFAULT = Channel.DEV
self.check("/", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
self.check("/index.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/index.html", Channel.DEV)
self.check("/getstarted.html", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/getstarted.html", Channel.DEV)
self.check("/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.DEV)
self.check("/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.DEV)
self.check("/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/api/extension_api.json", Channel.DEV)
self.check("/css/ApiRefStyles.css", "http://src.chromium.org/viewvc/chrome/branches/%s/src/chrome/common/extensions/docs/css/ApiRefStyles.css", Channel.DEV)
self.check("/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.DEV)
self.response.out.write("<br/>Testing DEFAULT (trunk)<br/>")
Channel.DEFAULT = Channel.TRUNK
self.check("/", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
self.check("/index.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/index.html", Channel.TRUNK)
self.check("/getstarted.html", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/getstarted.html", Channel.TRUNK)
self.check("/images/toolstrip.png", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/images/toolstrip.png", Channel.TRUNK)
self.check("/third_party/jstemplate/jstemplate_compiled.js", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/third_party/jstemplate/jstemplate_compiled.js", Channel.TRUNK)
self.check("/api/extension_api.json", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/api/extension_api.json", Channel.TRUNK)
self.check("/css/ApiRefStyles.css", "http://src.chromium.org/viewvc/chrome/trunk/src/chrome/common/extensions/docs/css/ApiRefStyles.css", Channel.TRUNK)
self.check("/favicon.ico", "http://src.chromium.org/favicon.ico", Channel.TRUNK)
Channel.DEFAULT = temp
return
# utility function for my unit test
# checks that getSrcUrl(path) returns the expected values
# TODO(nickbaum): can this be replaced by assert or something similar?
def check(self, path, expectedUrl, expectedChannel):
actual = self.getSrcUrl(path)
if (actual[0] != expectedUrl):
self.response.out.write('<span style="color:#f00;">Failure:</span> path ' + path + " gave url " + actual[0] + "<br/>")
elif (actual[1] != expectedChannel):
self.response.out.write('<span style="color:#f00;">Failure:</span> path ' + path + " gave branch " + actual[1].name + "<br/>")
else:
self.response.out.write("Path " + path + ' <span style="color:#0f0;">OK</span><br/>')
return
application = webapp.WSGIApplication([
('/.*', MainPage),
], debug=False)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
|
bsd-3-clause
|