desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
@property
def is_public(self):
    """Tell whether this function is part of the public interface."""
    # An explicit __all__ takes precedence over naming conventions.
    if self.all is not None:
        return self.name in self.all
    return not self.name.startswith('_')
@property
def is_test(self):
    """Tell whether this function is a test function/method.

    Tests are excluded from the imperative mood check: phrasing their
    docstrings imperatively would force a redundant "Test that ...".
    """
    return self.name.startswith('test') or self.name == 'runTest'
@property
def is_magic(self):
    """Tell whether this method is a magic method (e.g., `__str__`)."""
    return (self.name.startswith('__') and
            self.name.endswith('__') and
            self.name not in VARIADIC_MAGIC_METHODS)
@property
def is_public(self):
    """Tell whether this method is part of the public interface."""
    # A decorator named `<method>.something` (e.g. a property setter)
    # marks the method as non-public.
    for decorator in self.decorators:
        if re('^{}\\.'.format(self.name)).match(decorator.name):
            return False
    name_is_public = (not self.name.startswith('_') or
                      self.name in VARIADIC_MAGIC_METHODS or
                      self.is_magic)
    return self.parent.is_public and name_is_public
@property
def is_public(self):
    """Tell whether this nested class is part of the public interface."""
    return (not self.name.startswith('_') and
            self.parent.is_class and
            self.parent.is_public)
def __init__(self, message):
    """Initialize the error with a more specific message."""
    Exception.__init__(self, (message + textwrap.dedent("\n That means pydocstyle cannot decide which definitions are\n public. Variable __all__ should be present at most once in\n each file, in form\n `__all__ = ('a_public_function', 'APublicClass', ...)`.\n More info on __all__: http://stackoverflow.com/q/44834/. ')\n ")))
def parse(self, filelike, filename):
    """Parse the given file-like object and return its Module object."""
    self.log = log
    self.source = filelike.readlines()
    src = ''.join(self.source)
    # Compile first so syntax errors surface as ParseError before we
    # start token-level parsing.
    try:
        compile(src, filename, 'exec')
    except SyntaxError as error:
        six.raise_from(ParseError(), error)
    self.stream = TokenStream(StringIO(src))
    self.filename = filename
    self.all = None
    self.future_imports = set()
    self._accumulated_decorators = []
    return self.parse_module()
def __call__(self, *args, **kwargs):
    """Delegate calls to the parse method."""
    return self.parse(*args, **kwargs)
def consume(self, kind):
    """Consume one token and verify it is of the expected kind."""
    next_token = self.stream.move()
    assert next_token.kind == kind
def leapfrog(self, kind, value=None):
    """Skip tokens in the stream until a certain token kind is reached.

    If `value` is specified, tokens whose values are different will also
    be skipped.
    """
    while self.current is not None:
        if (self.current.kind == kind and
                (value is None or self.current.value == value)):
            self.consume(kind)
            return
        self.stream.move()
def parse_docstring(self):
    """Parse a single docstring and return its value."""
    self.log.debug('parsing docstring, token is %r (%s)', self.current.kind, self.current.value)
    # Skip noise tokens before the (possible) docstring.
    while self.current.kind in (tk.COMMENT, tk.NEWLINE, tk.NL):
        self.stream.move()
        self.log.debug('parsing docstring, token is %r (%s)', self.current.kind, self.current.value)
    if self.current.kind == tk.STRING:
        docstring = self.current.value
        self.stream.move()
        return docstring
    return None
def parse_decorators(self):
    """Parse decorators into self._accumulated_decorators.

    Called after the first @ is found.  Continues until the 'def' or
    'class' start token is encountered.
    """
    name = []
    arguments = []
    at_arguments = False
    while self.current is not None:
        self.log.debug('parsing decorators, current token is %r (%s)', self.current.kind, self.current.value)
        if self.current.kind == tk.NAME and self.current.value in ['def', 'class']:
            # Hit the decorated definition itself; stop here.
            break
        elif self.current.kind == tk.OP and self.current.value == '@':
            # A new decorator starts; flush the one accumulated so far.
            self._accumulated_decorators.append(
                Decorator(''.join(name), ''.join(arguments)))
            name = []
            arguments = []
            at_arguments = False
        elif self.current.kind == tk.OP and self.current.value == '(':
            at_arguments = True
        elif self.current.kind == tk.OP and self.current.value == ')':
            pass
        elif self.current.kind == tk.NEWLINE or self.current.kind == tk.NL:
            pass
        elif not at_arguments:
            name.append(self.current.value)
        else:
            arguments.append(self.current.value)
        self.stream.move()
    # Flush the decorator that was being accumulated when we stopped.
    self._accumulated_decorators.append(
        Decorator(''.join(name), ''.join(arguments)))
def parse_definitions(self, class_, all=False):
    """Parse multiple definitions and yield them."""
    while self.current is not None:
        self.log.debug('parsing definition list, current token is %r (%s)', self.current.kind, self.current.value)
        self.log.debug('got_newline: %s', self.stream.got_logical_newline)
        if all and self.current.value == '__all__':
            self.parse_all()
        elif (self.current.kind == tk.OP and
                self.current.value == '@' and
                self.stream.got_logical_newline):
            self.consume(tk.OP)
            self.parse_decorators()
        elif self.current.value in ['def', 'class']:
            yield self.parse_definition(class_._nest(self.current.value))
        elif self.current.kind == tk.INDENT:
            self.consume(tk.INDENT)
            for definition in self.parse_definitions(class_):
                yield definition
        elif self.current.kind == tk.DEDENT:
            self.consume(tk.DEDENT)
            return
        elif self.current.value == 'from':
            self.parse_from_import_statement()
        else:
            self.stream.move()
def parse_all(self):
    """Parse the __all__ definition in a module."""
    assert self.current.value == '__all__'
    self.consume(tk.NAME)
    if self.current.value != '=':
        raise AllError('Could not evaluate contents of __all__. ')
    self.consume(tk.OP)
    if self.current.value not in '([':
        raise AllError('Could not evaluate contents of __all__. ')
    self.consume(tk.OP)
    self.all = []
    all_content = '('
    # Collect string/comma tokens up to the closing bracket; anything
    # else means __all__ is too dynamic to evaluate.
    while self.current.kind != tk.OP or self.current.value not in ')]':
        if self.current.kind in (tk.NL, tk.COMMENT):
            pass
        elif self.current.kind == tk.STRING or self.current.value == ',':
            all_content += self.current.value
        else:
            raise AllError('Unexpected token kind in __all__: {!r}. '.format(self.current.kind))
        self.stream.move()
    self.consume(tk.OP)
    all_content += ')'
    try:
        self.all = eval(all_content, {})
    except BaseException as e:
        raise AllError('Could not evaluate contents of __all__.\x08The value was {}. The exception was:\n{}'.format(all_content, e))
def parse_module(self):
    """Parse a module (and its children) and return a Module object."""
    self.log.debug('parsing module.')
    start = self.line
    docstring = self.parse_docstring()
    children = list(self.parse_definitions(Module, all=True))
    assert self.current is None, self.current
    end = self.line
    # __init__.py files are modelled as packages, not plain modules.
    cls = Module
    if self.filename.endswith('__init__.py'):
        cls = Package
    module = cls(self.filename, self.source, start, end,
                 [], docstring, children, None, self.all, None, '')
    for child in module.children:
        child.parent = module
    module.future_imports = self.future_imports
    self.log.debug('finished parsing module.')
    return module
def parse_definition(self, class_):
    """Parse a definition and return its value in a `class_` object."""
    start = self.line
    self.consume(tk.NAME)
    name = self.current.value
    self.log.debug("parsing %s '%s'", class_.__name__, name)
    self.stream.move()
    if self.current.kind == tk.OP and self.current.value == '(':
        # Skip over the (possibly nested) parenthesized argument list.
        parenthesis_level = 0
        while True:
            if self.current.kind == tk.OP:
                if self.current.value == '(':
                    parenthesis_level += 1
                elif self.current.value == ')':
                    parenthesis_level -= 1
                    if parenthesis_level == 0:
                        break
            self.stream.move()
    if self.current.kind != tk.OP or self.current.value != ':':
        self.leapfrog(tk.OP, value=':')
    else:
        self.consume(tk.OP)
    if self.current.kind in (tk.NEWLINE, tk.COMMENT):
        # Multi-line definition body.
        skipped_error_codes = self.parse_skip_comment()
        self.leapfrog(tk.INDENT)
        assert self.current.kind != tk.INDENT
        docstring = self.parse_docstring()
        decorators = self._accumulated_decorators
        self.log.debug('current accumulated decorators: %s', decorators)
        self._accumulated_decorators = []
        self.log.debug('parsing nested definitions.')
        children = list(self.parse_definitions(class_))
        self.log.debug("finished parsing nested definitions for '%s'", name)
        end = self.line - 1
    else:
        # One-liner definition.
        skipped_error_codes = ''
        docstring = self.parse_docstring()
        decorators = []
        children = []
        end = self.line
        self.leapfrog(tk.NEWLINE)
    definition = class_(name, self.source, start, end, decorators,
                        docstring, children, None, skipped_error_codes)
    for child in definition.children:
        child.parent = definition
    self.log.debug("finished parsing %s '%s'. Next token is %r (%s)", class_.__name__, name, self.current.kind, self.current.value)
    return definition
def parse_skip_comment(self):
    """Parse a definition comment for noqa skips."""
    skipped_error_codes = ''
    if self.current.kind == tk.COMMENT:
        if 'noqa: ' in self.current.value:
            # Specific codes listed after 'noqa: '.
            skipped_error_codes = ''.join(self.current.value.split('noqa: ')[1:])
        elif self.current.value.startswith('# noqa'):
            skipped_error_codes = 'all'
    return skipped_error_codes
def check_current(self, kind=None, value=None):
    """Verify the current token is of type `kind` and equals `value`."""
    msg = textwrap.dedent('\n Unexpected token at line {self.line}:\n\n In file: {self.filename}\n\n Got kind {self.current.kind!r}\n Got value {self.current.value}\n '.format(self=self))
    kind_valid = self.current.kind == kind if kind else True
    value_valid = self.current.value == value if value else True
    assert kind_valid and value_valid, msg
def parse_from_import_statement(self):
    """Parse a 'from x import y' statement.

    The purpose is to find __future__ statements.
    """
    self.log.debug('parsing from/import statement.')
    is_future_import = self._parse_from_import_source()
    self._parse_from_import_names(is_future_import)
def _parse_from_import_source(self):
    """Parse the 'from x import' part in a 'from x import y' statement.

    Return true iff `x` is __future__.
    """
    assert self.current.value == 'from', self.current.value
    self.stream.move()
    is_future_import = self.current.value == '__future__'
    self.stream.move()
    # Skip the remainder of the (possibly dotted) module path.
    while (self.current is not None and
            self.current.kind in (tk.DOT, tk.NAME, tk.OP) and
            self.current.value != 'import'):
        self.stream.move()
    if self.current is None or self.current.value != 'import':
        return False
    self.check_current(value='import')
    assert self.current.value == 'import', self.current.value
    self.stream.move()
    return is_future_import
def _parse_from_import_names(self, is_future_import):
    """Parse the 'y' part in a 'from x import y' statement."""
    if self.current.value == '(':
        self.consume(tk.OP)
        expected_end_kinds = (tk.OP,)
    else:
        expected_end_kinds = (tk.NEWLINE, tk.ENDMARKER)
    while (self.current.kind not in expected_end_kinds and
            not (self.current.kind == tk.OP and self.current.value == ';')):
        if self.current.kind != tk.NAME:
            self.stream.move()
            continue
        self.log.debug('parsing import, token is %r (%s)', self.current.kind, self.current.value)
        if is_future_import:
            self.log.debug('found future import: %s', self.current.value)
            self.future_imports.add(self.current.value)
        self.consume(tk.NAME)
        self.log.debug('parsing import, token is %r (%s)', self.current.kind, self.current.value)
        if self.current.kind == tk.NAME and self.current.value == 'as':
            self.consume(tk.NAME)
            if self.current.kind == tk.NAME:
                self.consume(tk.NAME)
        if self.current.value == ',':
            self.consume(tk.OP)
        self.log.debug('parsing import, token is %r (%s)', self.current.kind, self.current.value)
def __init__(self, *parameters):
    """Initialize the object.

    `parameters` are specific to the created error.
    """
    self.parameters = parameters
    self.definition = None
    self.explanation = None
def set_context(self, definition, explanation):
    """Set the source code context for this error."""
    self.definition = definition
    self.explanation = explanation
@property
def message(self):
    """Return the message to print to the user."""
    ret = '{}: {}'.format(self.code, self.short_desc)
    if self.context is not None:
        ret += ' (' + self.context.format(*self.parameters) + ')'
    return ret
@property
def lines(self):
    """Return the numbered source code listing for this error.

    The listing is truncated after six lines; line numbers are padded
    to a fixed width of 6.
    """
    # Fix: the previous version emitted every line TWICE (once via a
    # str.format template and once via '%*d'), and computed a width
    # that was immediately overwritten by the fixed value 6.
    source = ''
    lines = self.definition._source[self.definition._slice]
    offset = self.definition.start
    # Drop trailing blank lines from the listing.
    lines_stripped = list(reversed(list(dropwhile(is_blank, reversed(lines)))))
    numbers_width = 6
    for n, line in enumerate(lines_stripped):
        source += '%*d: %s' % (numbers_width, n + offset, line)
        if n > 5:
            source += ' ...\n'
            break
    return source
def __init__(self, prefix, name):
    """Initialize the object.

    `Prefix` should be the common prefix for errors in this group,
    e.g., "D1". `name` is the name of the group (its subject).
    """
    self.prefix = prefix
    self.name = name
    self.errors = []
def create_error(self, error_code, error_desc, error_context=None):
    """Create an error, register it to this group and return it."""
    # Each registered error is a distinct Error subclass carrying its
    # code/description/context as class attributes.
    class _Error(Error):
        code = error_code
        short_desc = error_desc
        context = error_context

    self.errors.append(_Error)
    return _Error
@classmethod
def create_group(cls, prefix, name):
    """Create a new error group and return it."""
    group = cls.ErrorGroup(prefix, name)
    cls.groups.append(group)
    return group
@classmethod
def get_error_codes(cls):
    """Yield all registered codes."""
    for group in cls.groups:
        for error in group.errors:
            yield error.code
@classmethod
def to_rst(cls):
    """Output the registry as reStructuredText, for documentation."""
    sep_line = '+' + 6 * '-' + '+' + '-' * 71 + '+\n'
    blank_line = '|' + 78 * ' ' + '|\n'
    table = ''
    for group in cls.groups:
        table += sep_line
        table += blank_line
        table += '|' + '**{}**'.format(group.name).center(78) + '|\n'
        table += blank_line
        for error in group.errors:
            table += sep_line
            table += ('|' + error.code.center(6) + '| ' +
                      error.short_desc.ljust(70) + '|\n')
    table += sep_line
    return table
def preorder(self, tree, visitor, *args):
    """Do a preorder walk of `tree` using `visitor`."""
    self.visitor = visitor
    # Route the visitor's own visits back through our dispatcher.
    visitor.visit = self.dispatch
    self.dispatch(tree, *args)
def complexity(self):
    """Return the McCabe complexity for the graph: V - E + 2."""
    num_edges = sum(len(n) for n in self.nodes.values())
    num_nodes = len(self.nodes)
    return num_edges - num_nodes + 2
def _subgraph(self, node, name, extra_blocks=()):
    """Create the subgraphs representing any `if` and `for` statements."""
    if self.graph is None:
        # Global loop/if at module level: give it its own graph.
        self.graph = PathGraph(name, name, node.lineno, node.col_offset)
        pathnode = PathNode(name)
        self._subgraph_parse(node, pathnode, extra_blocks)
        self.graphs['%s%s' % (self.classname, name)] = self.graph
        self.reset()
    else:
        pathnode = self.appendPathNode(name)
        self._subgraph_parse(node, pathnode, extra_blocks)
def _subgraph_parse(self, node, pathnode, extra_blocks):
    """Parse the body and any `else` block of `if` and `for` statements."""
    loose_ends = []
    self.tail = pathnode
    self.dispatch_list(node.body)
    loose_ends.append(self.tail)
    for extra in extra_blocks:
        self.tail = pathnode
        self.dispatch_list(extra.body)
        loose_ends.append(self.tail)
    if node.orelse:
        self.tail = pathnode
        self.dispatch_list(node.orelse)
        loose_ends.append(self.tail)
    else:
        loose_ends.append(pathnode)
    if pathnode:
        # Join every dangling branch into a common exit point.
        bottom = PathNode('', look='point')
        for le in loose_ends:
            self.graph.connect(le, bottom)
        self.tail = bottom
'Check if the syntax is valid.'
def report_invalid_syntax(self):
(exc_type, exc) = sys.exc_info()[:2] if (len(exc.args) > 1): offset = exc.args[1] if (len(offset) > 2): offset = offset[1:3] else: offset = (1, 0) self.report_error(offset[0], (offset[1] or 0), ('E901 %s: %s' % (exc_type.__name__, exc.args[0])), self.report_invalid_syntax)
def readline(self):
    """Get the next line from the input buffer."""
    if self.line_number >= self.total_lines:
        return ''
    line = self.lines[self.line_number]
    self.line_number += 1
    # Remember which character (space/tab) starts indentation.
    if self.indent_char is None and line[:1] in WHITESPACE:
        self.indent_char = line[0]
    return line
def run_check(self, check, argument_names):
    """Run a check plugin."""
    arguments = [getattr(self, name) for name in argument_names]
    return check(*arguments)
def init_checker_state(self, name, argument_names):
    """Prepare custom state for the specific checker plugin."""
    # Only checkers that declare a 'checker_state' argument get one.
    if 'checker_state' in argument_names:
        self.checker_state = self._checker_states.setdefault(name, {})
def check_physical(self, line):
    """Run all physical checks on a raw input line."""
    self.physical_line = line
    for name, check, argument_names in self._physical_checks:
        self.init_checker_state(name, argument_names)
        result = self.run_check(check, argument_names)
        if result is not None:
            offset, text = result
            self.report_error(self.line_number, offset, text, check)
            if text[:4] == 'E101':
                # E101 reset: follow the line's actual indent character.
                self.indent_char = line[0]
def build_tokens_line(self):
    """Build a logical line from tokens."""
    logical = []
    comments = []
    length = 0
    prev_row = prev_col = mapping = None
    for token_type, text, start, end, line in self.tokens:
        if token_type in SKIP_TOKENS:
            continue
        if not mapping:
            mapping = [(0, start)]
        if token_type == tokenize.COMMENT:
            comments.append(text)
            continue
        if token_type == tokenize.STRING:
            text = mute_string(text)
        if prev_row:
            (start_row, start_col) = start
            if prev_row != start_row:
                # Token spans a physical line break.
                prev_text = self.lines[prev_row - 1][prev_col - 1]
                if prev_text == ',' or (prev_text not in '{[(' and
                                        text not in '}])'):
                    text = ' ' + text
            elif prev_col != start_col:
                # Preserve the whitespace between tokens.
                text = line[prev_col:start_col] + text
        logical.append(text)
        length += len(text)
        mapping.append((length, end))
        (prev_row, prev_col) = end
    self.logical_line = ''.join(logical)
    self.noqa = comments and noqa(''.join(comments))
    return mapping
def check_logical(self):
    """Build a line from tokens and run all logical checks on it.

    Fix: the two verbose diagnostics used Python 2 `print` statements,
    inconsistent with the `print(...)` calls used elsewhere in this
    file and a syntax error under Python 3.
    """
    self.report.increment_logical_line()
    mapping = self.build_tokens_line()
    if not mapping:
        return
    (start_row, start_col) = mapping[0][1]
    start_line = self.lines[start_row - 1]
    self.indent_level = expand_indent(start_line[:start_col])
    if self.blank_before < self.blank_lines:
        self.blank_before = self.blank_lines
    if self.verbose >= 2:
        print(self.logical_line[:80].rstrip())
    for name, check, argument_names in self._logical_checks:
        if self.verbose >= 4:
            print(' ' + name)
        self.init_checker_state(name, argument_names)
        for offset, text in self.run_check(check, argument_names) or ():
            if not isinstance(offset, tuple):
                # Map a flat logical-line offset back to (row, col).
                for token_offset, pos in mapping:
                    if offset <= token_offset:
                        break
                offset = (pos[0], pos[1] + offset - token_offset)
            self.report_error(offset[0], offset[1], text, check)
    if self.logical_line:
        self.previous_indent_level = self.indent_level
        self.previous_logical = self.logical_line
        if not self.indent_level:
            self.previous_unindented_logical_line = self.logical_line
    self.blank_lines = 0
    self.tokens = []
def check_ast(self):
    """Build the file's AST and run all AST checks."""
    try:
        tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
    except (ValueError, SyntaxError, TypeError):
        return self.report_invalid_syntax()
    for name, cls, __ in self._ast_checks:
        checker = cls(tree, self.filename)
        for lineno, offset, text, check in checker.run():
            # Respect per-line noqa suppression.
            if not self.lines or not noqa(self.lines[lineno - 1]):
                self.report_error(lineno, offset, text, check)
def generate_tokens(self):
    """Tokenize the file, run physical line checks and yield tokens."""
    if self._io_error:
        self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
    tokengen = tokenize.generate_tokens(self.readline)
    try:
        for token in tokengen:
            if token[2][0] > self.total_lines:
                return
            self.noqa = token[4] and noqa(token[4])
            self.maybe_check_physical(token)
            yield token
    except (SyntaxError, tokenize.TokenError):
        self.report_invalid_syntax()
def maybe_check_physical(self, token):
    """If appropriate (based on token), check current physical line(s)."""
    if _is_eol_token(token):
        self.check_physical(token[4])
    elif token[0] == tokenize.STRING and '\n' in token[1]:
        # Multiline string: check each of its physical lines except the
        # last (that one is checked when the EOL token arrives).
        if noqa(token[4]):
            return
        self.multiline = True
        self.line_number = token[2][0]
        for line in token[1].split('\n')[:-1]:
            self.check_physical(line + '\n')
            self.line_number += 1
        self.multiline = False
def check_all(self, expected=None, line_offset=0):
    """Run all checks on the input file.

    Fixes: the verbose trace used a Python 2 `print` statement, and its
    format string contained the mangled literal 'DCTB' where tab
    characters belong.
    """
    self.report.init_file(self.filename, self.lines, expected, line_offset)
    self.total_lines = len(self.lines)
    if self._ast_checks:
        self.check_ast()
    self.line_number = 0
    self.indent_char = None
    self.indent_level = self.previous_indent_level = 0
    self.previous_logical = ''
    self.previous_unindented_logical_line = ''
    self.tokens = []
    self.blank_lines = self.blank_before = 0
    parens = 0
    for token in self.generate_tokens():
        self.tokens.append(token)
        token_type, text = token[0:2]
        if self.verbose >= 3:
            if token[2][0] == token[3][0]:
                pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
            else:
                pos = 'l.%s' % token[3][0]
            print('l.%s\t%s\t%s\t%r' %
                  (token[2][0], pos, tokenize.tok_name[token[0]], text))
        if token_type == tokenize.OP:
            if text in '([{':
                parens += 1
            elif text in '}])':
                parens -= 1
        elif not parens:
            # Only outside brackets do NEWLINE/COMMENT end a logical line.
            if token_type in NEWLINE:
                if token_type == tokenize.NEWLINE:
                    self.check_logical()
                    self.blank_before = 0
                elif len(self.tokens) == 1:
                    # A blank line on its own.
                    self.blank_lines += 1
                    del self.tokens[0]
                else:
                    self.check_logical()
            elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                if len(self.tokens) == 1:
                    # The comment also ends a physical line; strip the
                    # trailing newline so it reads as a logical line.
                    token = list(token)
                    token[1] = text.rstrip('\r\n')
                    token[3] = (token[2][0], token[2][1] + len(token[1]))
                    self.tokens = [tuple(token)]
                    self.check_logical()
    if self.tokens:
        self.check_physical(self.lines[-1])
        self.check_logical()
    return self.report.get_file_results()
def start(self):
    """Start the timer."""
    self._start_time = time.time()
def stop(self):
    """Stop the timer and record the elapsed time."""
    self.elapsed = time.time() - self._start_time
def init_file(self, filename, lines, expected, line_offset):
    """Signal a new file."""
    self.filename = filename
    self.lines = lines
    self.expected = expected or ()
    self.line_offset = line_offset
    self.file_errors = 0
    self.counters['files'] += 1
    self.counters['physical lines'] += len(lines)
def increment_logical_line(self):
    """Signal a new logical line."""
    self.counters['logical lines'] += 1
def error(self, line_number, offset, text, check):
    """Report an error, according to options.

    Returns the error code, or None if the error was ignored/expected.
    Fix: `print self.filename` was a Python 2 `print` statement,
    inconsistent with the `print(...)` calls used elsewhere in this
    file and a syntax error under Python 3.
    """
    code = text[:4]
    if self._ignore_code(code):
        return
    if code in self.counters:
        self.counters[code] += 1
    else:
        self.counters[code] = 1
        self.messages[code] = text[5:]
    # Don't care about expected errors or warnings.
    if code in self.expected:
        return
    if self.print_filename and not self.file_errors:
        print(self.filename)
    self.file_errors += 1
    self.total_errors += 1
    return code
def get_file_results(self):
    """Return the count of errors and warnings for this file."""
    return self.file_errors
def get_count(self, prefix=''):
    """Return the total count of errors and warnings."""
    return sum(self.counters[key]
               for key in self.messages if key.startswith(prefix))
def get_statistics(self, prefix=''):
    """Get statistics for message codes that start with the prefix.

    prefix='' matches all errors and warnings
    prefix='E' matches all errors
    prefix='W' matches all warnings
    prefix='E4' matches all errors that have to do with imports
    """
    return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
            for key in sorted(self.messages) if key.startswith(prefix)]
def print_statistics(self, prefix=''):
    """Print overall statistics (number of errors and warnings).

    Fix: `print line` was a Python 2 `print` statement, inconsistent
    with the `print(...)` calls used elsewhere in this file and a
    syntax error under Python 3.
    """
    for line in self.get_statistics(prefix):
        print(line)
def print_benchmark(self):
    """Print benchmark numbers."""
    print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
    if self.elapsed:
        for key in self._benchmark_keys:
            print('%-7d %s per second (%d total)' %
                  (self.counters[key] / self.elapsed, key, self.counters[key]))
def init_file(self, filename, lines, expected, line_offset):
    """Signal a new file."""
    # Reset the per-file queue of messages awaiting printing.
    self._deferred_print = []
    return super(StandardReport, self).init_file(
        filename, lines, expected, line_offset)
def error(self, line_number, offset, text, check):
    """Report an error, according to options."""
    code = super(StandardReport, self).error(
        line_number, offset, text, check)
    if code and (self.counters[code] == 1 or self._repeat):
        self._deferred_print.append(
            (line_number, offset, code, text[5:], check.__doc__))
    return code
def get_file_results(self):
    """Print the result and return the overall count for this file.

    Fix: `print line.rstrip()` was a Python 2 `print` statement,
    inconsistent with the `print(...)` calls used elsewhere in this
    file and a syntax error under Python 3.
    """
    self._deferred_print.sort()
    for line_number, offset, code, text, doc in self._deferred_print:
        print(self._fmt % {
            'path': self.filename,
            'row': self.line_offset + line_number,
            'col': offset + 1,
            'code': code,
            'text': text,
        })
        if self._show_source:
            if line_number > len(self.lines):
                line = ''
            else:
                line = self.lines[line_number - 1]
            print(line.rstrip())
            # Caret under the offending column.
            print(re.sub('\\S', ' ', line[:offset]) + '^')
        if self._show_pep8 and doc:
            print(' ' + doc.strip())
        sys.stdout.flush()
    return self.file_errors
def init_report(self, reporter=None):
    """Initialize the report instance."""
    self.options.report = (reporter or self.options.reporter)(self.options)
    return self.options.report
def check_files(self, paths=None):
    """Run all checks on the paths.

    Fix: `print '... stopped'` was a Python 2 `print` statement,
    inconsistent with the `print(...)` calls used elsewhere in this
    file and a syntax error under Python 3.
    """
    if paths is None:
        paths = self.paths
    report = self.options.report
    runner = self.runner
    report.start()
    try:
        for path in paths:
            if os.path.isdir(path):
                self.input_dir(path)
            elif not self.excluded(path):
                runner(path)
    except KeyboardInterrupt:
        print('... stopped')
    report.stop()
    return report
def input_file(self, filename, lines=None, expected=None, line_offset=0):
    """Run all checks on a Python source file."""
    if self.options.verbose:
        print('checking %s' % filename)
    fchecker = self.checker_class(filename, lines=lines, options=self.options)
    return fchecker.check_all(expected=expected, line_offset=line_offset)
def input_dir(self, dirname):
    """Check all files in this directory and all subdirectories."""
    dirname = dirname.rstrip('/')
    if self.excluded(dirname):
        return 0
    counters = self.options.report.counters
    verbose = self.options.verbose
    filepatterns = self.options.filename
    runner = self.runner
    for root, dirs, files in os.walk(dirname):
        if verbose:
            print('directory ' + root)
        counters['directories'] += 1
        # Prune excluded subdirectories in place so os.walk skips them.
        for subdir in sorted(dirs):
            if self.excluded(subdir, root):
                dirs.remove(subdir)
        for filename in sorted(files):
            if (filename_match(filename, filepatterns) and
                    not self.excluded(filename, root)):
                runner(os.path.join(root, filename))
def excluded(self, filename, parent=None):
    """Check if the file should be excluded.

    Check if 'options.exclude' contains a pattern that matches filename.
    """
    if not self.options.exclude:
        return False
    basename = os.path.basename(filename)
    if filename_match(basename, self.options.exclude):
        return True
    if parent:
        filename = os.path.join(parent, filename)
    filename = os.path.abspath(filename)
    return filename_match(filename, self.options.exclude)
def ignore_code(self, code):
    """Check if the error code should be ignored.

    If 'options.select' contains a prefix of the error code, return
    False.  Else, if 'options.ignore' contains a prefix of the error
    code, return True.
    """
    if len(code) < 4 and any(s.startswith(code)
                             for s in self.options.select):
        return False
    return (code.startswith(self.options.ignore) and
            not code.startswith(self.options.select))
def get_checks(self, argument_name):
    """Get all the checks for this category.

    Find all globally visible functions where the first argument name
    starts with argument_name and which contain selected tests.
    """
    checks = []
    for check, attrs in _checks[argument_name].items():
        codes, args = attrs
        if any(not (code and self.ignore_code(code)) for code in codes):
            checks.append((check.__name__, check, args))
    return sorted(checks)
def set_output(self, output=None):
    """Set the output stream (defaults to stdout)."""
    self.out = output or sys.stdout
def writeln(self, string=''):
    """Write a line in the output buffer."""
    print(self.encode(string), file=self.out)
def display_reports(self, layout):
    """Display results encapsulated in the layout tree."""
    self.section = 0
    if hasattr(layout, 'report_id'):
        layout.children[0].children[0].data += ' (%s)' % layout.report_id
    self._display(layout)
def _display(self, layout):
    """Display the layout (must be implemented by subclasses)."""
    raise NotImplementedError()
def handle_message(self, msg):
    """Manage message of different type and in the context of path.

    Fix: `cgi.escape` was deprecated in Python 3.2 and later removed
    from the standard library; `html.escape(..., quote=False)` is the
    documented drop-in replacement.
    """
    import html  # local import: drop-in replacement for removed cgi.escape

    self.messages.append({
        'type': msg.category,
        'module': msg.module,
        'obj': msg.obj,
        'line': msg.line,
        'column': msg.column,
        'path': msg.path,
        'symbol': msg.symbol,
        'message': html.escape(msg.msg or '', quote=False),
    })
def display_messages(self, layout):
    """Launch layouts display."""
    if self.messages:
        print(json.dumps(self.messages, indent=4), file=self.out)
def write_message(self, msg):
    """Write a formatted message using the class default template."""
    self.writeln(msg.format(self._template))
def handle_message(self, msg):
    """Manage message of different type and in the context of path."""
    # Print a module header once per module.
    if msg.module not in self._modules:
        if msg.module:
            self.writeln('************* Module %s' % msg.module)
            self._modules.add(msg.module)
        else:
            self.writeln('************* ')
    self.write_message(msg)
def _display(self, layout):
    """Launch layouts display."""
    print(file=self.out)
    TextWriter().format(layout, self.out)
def _get_decoration(self, msg_id):
    """Return the (color, style) tuple associated with msg_id.

    Looks up the first character of msg_id in self.color_mapping.
    """
    try:
        return self.color_mapping[msg_id[0]]
    except KeyError:
        return (None, None)
def handle_message(self, msg):
    """Manage messages of different types, colorizing output with ANSI
    escape codes."""
    if msg.module not in self._modules:
        color, style = self._get_decoration('S')
        if msg.module:
            modsep = colorize_ansi('************* Module %s' % msg.module,
                                   color, style)
        else:
            modsep = colorize_ansi('************* %s' % msg.module,
                                   color, style)
        self.writeln(modsep)
        self._modules.add(msg.module)
    color, style = self._get_decoration(msg.C)
    msg = msg._replace(
        **{attr: colorize_ansi(getattr(msg, attr), color, style)
           for attr in ('msg', 'symbol', 'category', 'C')})
    self.write_message(msg)
def format(self, layout, stream=None, encoding=None):
    """Format and write the given layout into the stream object.

    Unicode policy: unicode strings may be found in the layout; try to
    call stream.write with it, but give it back encoded using the given
    encoding if it fails.
    """
    if stream is None:
        stream = sys.stdout
    if not encoding:
        encoding = getattr(stream, 'encoding', 'UTF-8')
    self.encoding = encoding or 'UTF-8'
    self.out = stream
    self.begin_format()
    layout.accept(self)
    self.end_format()
def format_children(self, layout):
    """Recurse on the layout children and call their accept method
    (see the Visitor pattern)."""
    for child in getattr(layout, 'children', ()):
        child.accept(self)
def writeln(self, string=u''):
    """Write a line in the output buffer."""
    self.write(string + os.linesep)
def write(self, string):
    """Write a string in the output buffer."""
    self.out.write(string)
def begin_format(self):
    """Begin to format a layout."""
    self.section = 0
'trick to get table content without actually writing it return an aligned list of lists containing table cells values as string'
def get_table_content(self, table):
result = [[]] cols = table.cols for cell in self.compute_content(table): if (cols == 0): result.append([]) cols = table.cols cols -= 1 result[(-1)].append(cell) while (len(result[(-1)]) < cols): result[(-1)].append(u'') return result
def compute_content(self, layout):
    """Trick to compute the formatting of children layout before
    actually writing it.

    Return an iterator on strings (one for each child element).
    """
    # Temporarily redirect self.out so each child renders into its own
    # buffer; always restore the original stream.
    out = self.out
    try:
        for child in layout.children:
            stream = six.StringIO()
            self.out = stream
            child.accept(self)
            yield stream.getvalue()
    finally:
        self.out = out
def append(self, child):
    """Add a node to children."""
    self.children.append(child)
    child.parent = self
def insert(self, index, child):
    """Insert a child node at the given index."""
    self.children.insert(index, child)
    child.parent = self
def _get_visit_name(self):
    """Return the visit name for the mixed class.

    When calling 'accept', the method <'visit_' + name returned by this
    method> will be called on the visitor.
    """
    try:
        return self.TYPE.replace('-', '_')
    except Exception:
        return self.__class__.__name__.lower()
def append(self, child):
    """Add a child node; overridden to detect cycles easily."""
    assert child not in self.parents()
    VNode.append(self, child)
def parents(self):
    """Return the ancestor nodes."""
    assert self.parent is not self
    if self.parent is None:
        return []
    return [self.parent] + self.parent.parents()
def add_text(self, text):
    """Shortcut to append a Text leaf node holding *text*."""
    self.children.append(Text(text))
def visit_section(self, layout):
    """Display a section as text, framed by blank lines."""
    self.section += 1
    self.writeln()
    self.format_children(layout)
    self.section -= 1
    self.writeln()
def visit_evaluationsection(self, layout):
    """Display an evaluation section as text."""
    self.section += 1
    self.format_children(layout)
    self.section -= 1
    self.writeln()
def visit_paragraph(self, layout):
    """Enter a paragraph: format the children, then end the line."""
    self.format_children(layout)
    self.writeln()
def visit_table(self, layout):
    """Display a table as text."""
    rows = self.get_table_content(layout)
    # Column width = widest cell in that column over all rows
    # (rows are assumed rectangular, as produced by get_table_content).
    widths = [0] * len(rows[0])
    for row in rows:
        for col_index, cell in enumerate(row):
            widths[col_index] = max(widths[col_index], len(cell))
    self.default_table(layout, rows, widths)
    self.writeln()
def default_table(self, layout, table_content, cols_width):
    """Format a table with ``+---+``-style separators.

    The first row is underlined with ``=`` when the layout declares
    row headers (``layout.rheaders``); otherwise every row gets the
    plain dashed separator.
    """
    # One extra space of padding per column.
    padded = [width + 1 for width in cols_width]
    # Left-aligned %-format per column, e.g. u'%-4s'.
    cell_formats = [u'%%-%ss' % width for width in padded]
    row_sep = (u'\n+' + u'+'.join(u'-' * width for width in padded) + u'+\n')
    head_sep = (u'\n+' + u'+'.join(u'=' * width for width in padded) + u'+\n')
    self.write(row_sep)
    for row_index, row in enumerate(table_content):
        self.write(u'|')
        for col_index, cell in enumerate(row):
            self.write(cell_formats[col_index] % cell)
            self.write(u'|')
        if row_index == 0 and layout.rheaders:
            self.write(head_sep)
        else:
            self.write(row_sep)
def visit_verbatimtext(self, layout):
    """Display a verbatim layout as text (reST literal block style)."""
    self.writeln(u'::\n')
    for line in layout.data.splitlines():
        self.writeln(u'    ' + line)
    self.writeln()
def visit_text(self, layout):
    """Add some text to the output."""
    # %s conversion tolerates non-string data payloads.
    self.write(u'%s' % layout.data)
def _match_long_opt(self, opt):
    """Disable abbreviations: accept only exact long-option matches."""
    if opt not in self._long_opt:
        raise optparse.BadOptionError(opt)
    return opt
def register_options_provider(self, provider, own_group=True):
    """Register an options provider.

    Providers are kept sorted by decreasing ``priority`` (priorities
    are zero or negative).  Options carrying a 'group' entry in their
    option dict are registered in that named group; the remaining
    options go either in a group named after the provider (when
    *own_group* is true) or directly on the command line parser.
    """
    # The check accepts 0, so the message must say "> 0" (the previous
    # message claimed ">= 0" was forbidden, contradicting the check).
    assert provider.priority <= 0, "provider's priority can't be > 0"
    # Insert the provider keeping the list sorted by priority.
    for i in range(len(self.options_providers)):
        if provider.priority > self.options_providers[i].priority:
            self.options_providers.insert(i, provider)
            break
    else:
        self.options_providers.append(provider)
    non_group_spec_options = [option for option in provider.options
                              if 'group' not in option[1]]
    groups = getattr(provider, 'option_groups', ())
    if own_group and non_group_spec_options:
        self.add_option_group(provider.name.upper(), provider.__doc__,
                              non_group_spec_options, provider)
    else:
        for opt, optdict in non_group_spec_options:
            self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
    for gname, gdoc in groups:
        gname = gname.upper()
        goptions = [option for option in provider.options
                    if option[1].get('group', '').upper() == gname]
        self.add_option_group(gname, gdoc, goptions, provider)
def optik_option(self, provider, opt, optdict):
    """Get our personal option definition and return a suitable form
    for use with optik/optparse.

    Return an ``(args, optdict)`` pair where *args* holds the long
    (and optional short) option strings and *optdict* only keeps keys
    optparse understands.
    """
    optdict = copy.copy(optdict)
    if 'action' in optdict:
        # The provider handles this option directly.
        self._nocallback_options[provider] = opt
    else:
        # Route the option through our generic provider callback.
        optdict['action'] = 'callback'
        optdict['callback'] = self.cb_set_provider_option
    if 'default' in optdict:
        if ('help' in optdict
                and optdict.get('default') is not None
                and optdict['action'] not in ('store_true', 'store_false')):
            optdict['help'] += ' [current: %default]'
        # Defaults are managed by the providers, not by optparse.
        del optdict['default']
    args = ['--' + str(opt)]
    if 'short' in optdict:
        self._short_options[optdict['short']] = opt
        args.append('-' + optdict['short'])
        del optdict['short']
    # Strip every key optparse would reject.
    for key in list(optdict.keys()):
        if key not in self._optik_option_attrs:
            optdict.pop(key)
    return args, optdict
def cb_set_provider_option(self, option, opt, value, parser):
    """optik callback for option setting."""
    if opt.startswith('--'):
        # Long option: strip the '--' prefix.
        opt = opt[2:]
    else:
        # Short option: map '-x' back to its long name.
        opt = self._short_options[opt[1:]]
    if value is None:
        # No explicit value supplied; store 1 instead.
        value = 1
    self.global_set_option(opt, value)