desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def get_indicator(self, resource):
    """Return the modification time and size of a `Resource`."""
    path = resource.real_path
    # on non-POSIX systems a directory's mtime does not change when its
    # contents change, so include the entry count in the indicator
    if os.name != 'posix' and os.path.isdir(path):
        return (os.path.getmtime(path),
                len(os.listdir(path)),
                os.path.getsize(path))
    return (os.path.getmtime(path), os.path.getsize(path))
def move(self, new_location):
    """Move resource to `new_location`."""
    self._perform_change(change.MoveResource(self, new_location),
                         'Moving <%s> to <%s>' % (self.path, new_location))
def remove(self):
    """Remove resource from the project."""
    self._perform_change(change.RemoveResource(self),
                         'Removing <%s>' % self.path)
@property
def path(self):
    """Return the path of this resource relative to the project root.

    The path is the list of parent directories separated by '/'
    followed by the resource name.
    """
    return self._path
@property
def name(self):
    """Return the name of this resource (the last path component)."""
    return self.path.split('/')[-1]
@property
def real_path(self):
    """Return the file system path of this resource."""
    return self.project._get_resource_path(self.path)
def get_children(self):
    """Return the non-ignored children of this folder.

    Returns an empty list when the directory cannot be listed.
    """
    try:
        children = os.listdir(self.real_path)
    except OSError:
        return []
    result = []
    for name in children:
        try:
            child = self.get_child(name)
        except exceptions.ResourceNotFoundError:
            continue
        if not self.project.is_ignored(child):
            # reuse `child` instead of calling get_child a second time
            result.append(child)
    return result
def set_patterns(self, patterns):
    """Specify which resources to match.

    `patterns` is a `list` of `str`\\s that can contain ``*`` and ``?``
    signs for matching resource names.
    """
    # invalidate the compiled-pattern cache; it is rebuilt lazily
    self._compiled_patterns = None
    self.patterns = patterns
def get_definition_location(self):
    """Return a (module, lineno) tuple.

    The line number is resolved lazily from the first assignment when
    it has not been computed yet.
    """
    if self.lineno is None and self.assignments:
        self.lineno = self.assignments[0].get_lineno()
    return (self.module, self.lineno)
def invalidate(self):
    """Forget the `PyObject` this `PyName` holds."""
    self.pyobject.set(None)
def get_objects(self):
    """Return the list of objects passed as this parameter."""
    return rope.base.oi.soi.get_passed_objects(self.pyfunction, self.index)
def do(self, changes, task_handle=taskhandle.NullTaskHandle()):
    """Perform the change and add it to the `self.undo_list`.

    Note that uninteresting changes (changes to ignored files) will
    not be appended to `self.undo_list`.
    """
    try:
        self.current_change = changes
        changes.do(change.create_job_set(task_handle, changes))
    finally:
        # always clear the in-progress marker, even on failure
        self.current_change = None
    if self._is_change_interesting(changes):
        self.undo_list.append(changes)
        self._remove_extra_items()
    # any new change invalidates the redo history
    del self.redo_list[:]
def undo(self, change=None, drop=False,
         task_handle=taskhandle.NullTaskHandle()):
    """Undo done changes from the history.

    When `change` is `None`, the last done change will be undone.  If
    change is not `None` it should be an item from `self.undo_list`;
    this change and all changes that depend on it will be undone.  In
    both cases the list of undone changes will be returned.

    If `drop` is `True`, the undone change will not be appended to the
    redo list.
    """
    if not self._undo_list:
        raise exceptions.HistoryError('Undo list is empty')
    if change is None:
        change = self.undo_list[-1]
    dependencies = self._find_dependencies(self.undo_list, change)
    self._move_front(self.undo_list, dependencies)
    self._perform_undos(len(dependencies), task_handle)
    result = self.redo_list[-len(dependencies):]
    if drop:
        del self.redo_list[-len(dependencies):]
    return result
def redo(self, change=None, task_handle=taskhandle.NullTaskHandle()):
    """Redo undone changes from the history.

    When `change` is `None`, the last undone change will be redone.  If
    change is not `None` it should be an item from `self.redo_list`;
    this change and all changes that depend on it will be redone.  In
    both cases the list of redone changes will be returned.
    """
    if not self.redo_list:
        raise exceptions.HistoryError('Redo list is empty')
    if change is None:
        change = self.redo_list[-1]
    dependencies = self._find_dependencies(self.redo_list, change)
    self._move_front(self.redo_list, dependencies)
    self._perform_redos(len(dependencies), task_handle)
    return self.undo_list[-len(dependencies):]
@property
def tobe_undone(self):
    """The last done change if available, `None` otherwise."""
    if self.undo_list:
        return self.undo_list[-1]
@property
def tobe_redone(self):
    """The last undone change if available, `None` otherwise."""
    if self.redo_list:
        return self.redo_list[-1]
def clear(self):
    """Forget all undo and redo information."""
    # clear in place so aliases of these lists observe the reset
    del self.undo_list[:]
    del self.redo_list[:]
def _find_primary_without_dot_start(self, offset):
    """Try to find the undotted primary start.

    It is different from `self._get_atom_start()` in that it follows
    function calls, too; such as in ``f(x)``.
    """
    last_atom = offset
    offset = self._find_last_non_space_char(last_atom)
    # walk backwards over trailing call/subscript parentheses
    while offset > 0 and self.code[offset] in ')]':
        last_atom = self._find_parens_start(offset)
        offset = self._find_last_non_space_char(last_atom - 1)
    if offset >= 0 and (self.code[offset] in '"\'})]' or
                        self._is_id_char(offset)):
        atom_start = self._find_atom_start(offset)
        # keywords (e.g. ``return``) are not part of the primary
        if not keyword.iskeyword(self.code[atom_start:offset + 1]):
            return atom_start
    return last_atom
def get_splitted_primary_before(self, offset):
    """Return an (expression, starting, starting_offset) tuple.

    This function is used in `rope.codeassist.assist` function.
    """
    if offset == 0:
        return ('', '', 0)
    end = offset - 1
    word_start = self._find_atom_start(end)
    real_start = self._find_primary_start(end)
    if self.code[word_start:offset].strip() == '':
        word_start = end
    if self.code[end].isspace():
        word_start = end
    if self.code[real_start:word_start].strip() == '':
        real_start = word_start
    if real_start == word_start == end and not self._is_id_char(end):
        return ('', '', offset)
    if real_start == word_start:
        return ('', self.raw[word_start:offset], word_start)
    else:
        if self.code[end] == '.':
            return (self.raw[real_start:end], '', offset)
        last_dot_position = word_start
        if self.code[word_start] != '.':
            last_dot_position = \
                self._find_last_non_space_char(word_start - 1)
        last_char_position = \
            self._find_last_non_space_char(last_dot_position - 1)
        if self.code[word_start].isspace():
            word_start = offset
        return (self.raw[real_start:last_char_position + 1],
                self.raw[word_start:offset], word_start)
def __init__(self, name='Task', interrupts=True):
    """Construct a TaskHandle.

    If `interrupts` is `False` the task won't be interrupted by
    calling `TaskHandle.stop()`.
    """
    self.name = name
    self.interrupts = interrupts
    self.stopped = False
    self.job_sets = []
    self.observers = []
def stop(self):
    """Interrupt the refactoring, if interruption is allowed."""
    if self.interrupts:
        self.stopped = True
        self._inform_observers()
def current_jobset(self):
    """Return the current `JobSet`, or `None` if there is none."""
    if self.job_sets:
        return self.job_sets[-1]
def add_observer(self, observer):
    """Register an observer for this task handle.

    The observer is notified whenever the task is stopped or a job
    gets finished.
    """
    self.observers.append(observer)
@utils.deprecated('Use `project.get_module` instead')
def get_module(self, name, folder=None):
    """Return a `PyObject` if the module was found."""
    return self.project.get_module(name, folder)
@utils.deprecated('Use `libutils.get_string_module` instead')
def get_string_module(self, code, resource=None, force_errors=False):
    """Return a `PyObject` object for the given code.

    If `force_errors` is `True`, `exceptions.ModuleSyntaxError` is
    raised if module has syntax errors.  This overrides
    ``ignore_syntax_errors`` project config.
    """
    return PyModule(self, code, resource, force_errors=force_errors)
@utils.deprecated('Use `libutils.get_string_scope` instead')
def get_string_scope(self, code, resource=None):
    """Return a `Scope` object for the given code."""
    return rope.base.libutils.get_string_scope(code, resource)
@utils.deprecated('Use `project.find_module` instead')
def find_module(self, modname, folder=None):
    """Return a resource corresponding to the given module.

    Returns None if it can not be found.
    """
    return self.project.find_module(modname, folder)
@utils.deprecated('Use `project.get_source_folders` instead')
def get_source_folders(self):
    """Return project source folders."""
    return self.project.get_source_folders()
@utils.deprecated('Use `project.get_python_files` instead')
def get_python_files(self):
    """Return all python files available in the project."""
    return self.project.get_python_files()
def run_module(self, resource, args=None, stdin=None, stdout=None):
    """Run `resource` module.

    Returns a `rope.base.oi.doa.PythonFileRunner` object for
    controlling the process.
    """
    # 'perform_doi' is the legacy spelling of the 'perform_doa' pref
    perform_doa = self.project.prefs.get('perform_doi', True)
    perform_doa = self.project.prefs.get('perform_doa', perform_doa)
    receiver = self.object_info.doa_data_received
    if not perform_doa:
        receiver = None
    runner = rope.base.oi.doa.PythonFileRunner(
        self, resource, args, stdin, stdout, receiver)
    runner.add_finishing_observer(self.module_cache.forget_all_data)
    runner.run()
    return runner
def analyze_module(self, resource, should_analyze=lambda py: True,
                   search_subscopes=lambda py: True, followed_calls=None):
    """Analyze `resource` module for static object inference.

    This function forces rope to analyze this module to collect
    information about function calls.  `should_analyze` is a function
    that is called with a `PyDefinedObject` argument.  If it returns
    `True` the element is analyzed.  If it is `None` or returns
    `False` the element is not analyzed.

    `search_subscopes` is like `should_analyze`; the difference is
    that if it returns `False` the sub-scopes are all ignored.  That
    is it is assumed that `should_analyze` returns `False` for all of
    its subscopes.

    `followed_calls` override the value of ``soa_followed_calls``
    project config.
    """
    if followed_calls is None:
        followed_calls = self.project.prefs.get('soa_followed_calls', 0)
    pymodule = self.resource_to_pyobject(resource)
    self.module_cache.forget_all_data()
    rope.base.oi.soa.analyze_module(self, pymodule, should_analyze,
                                    search_subscopes, followed_calls)
def is_changed(self, start, end):
    """Tell whether any of start till end lines have changed.

    The end points are inclusive and indices start from 1.
    """
    left, right = self._get_changed(start, end)
    # a non-empty changed interval means something changed
    return left < right
def consume_changes(self, start, end):
    """Clear the changed status of lines from start till end.

    Returns whether anything was actually consumed.
    """
    left, right = self._get_changed(start, end)
    if left < right:
        del self.lines[left:right]
    return left < right
def get_description(self):
    """Return the description of this change.

    This can be used for previewing the changes.
    """
    return str(self)
def get_changed_resources(self):
    """Return the list of resources that will be changed.

    Base implementation: no resources are changed.
    """
    return []
@property
def parameters(self):
    """The names of the parameters the function takes.

    Returns None if this completion is not a function.
    """
    pyname = self.pyname
    # follow an import to the name it refers to
    if isinstance(pyname, pynames.ImportedName):
        pyname = pyname._get_imported_pyname()
    if isinstance(pyname, pynames.DefinedName):
        pyobject = pyname.get_object()
        if isinstance(pyobject, pyobjects.AbstractFunction):
            return pyobject.get_param_names()
def get_doc(self):
    """Get the proposed object's docstring.

    Returns None if it can not be get.
    """
    if not self.pyname:
        return None
    pyobject = self.pyname.get_object()
    if not hasattr(pyobject, 'get_doc'):
        return None
    # reuse the object fetched above instead of a second get_object()
    return pyobject.get_doc()
def get_default(self):
    """Get a string representation of a param's default value.

    Returns None if there is no default value for this param.
    """
    definfo = functionutils.DefinitionInfo.read(self._function)
    for arg, default in definfo.args_with_defaults:
        if self.argname == arg:
            return default
    return None
def get_sorted_proposal_list(self):
    """Return a list of `CodeAssistProposal`, grouped and sorted.

    Proposals are grouped by scope (ordered by `self.scopepref`),
    filtered by known types, and sorted within each group.
    """
    by_scope = {}
    for proposal in self.proposals:
        by_scope.setdefault(proposal.scope, []).append(proposal)
    result = []
    for scope in self.scopepref:
        scope_proposals = [p for p in by_scope.get(scope, [])
                           if p.type in self.typerank]
        scope_proposals.sort(key=self._proposal_key)
        result.extend(scope_proposals)
    return result
def _trim_docstring(self, docstring, indents=0):
    """Trim a docstring; the sample code from :PEP:`257`.

    Common leading whitespace (from the second line on) is removed,
    surrounding blank lines dropped, and `indents` spaces of uniform
    indentation re-applied to every line.
    """
    if not docstring:
        return ''
    lines = docstring.expandtabs().splitlines()
    # find the minimal indentation of the non-blank continuation lines
    indent = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        for line in lines[1:]:
            trimmed.append(line[indent:].rstrip())
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    return '\n'.join(' ' * indents + line for line in trimmed)
@utils.saveit
def get_pymodule(self):
    """Get a `PyModule`.

    Retries up to `self.maxfixes` times, commenting out the offending
    line after each syntax error.
    """
    msg = None
    code = self.code
    tries = 0
    while True:
        try:
            # first try the untouched resource if its contents match
            if tries == 0 and self.resource is not None and \
                    self.resource.read() == code:
                return self.project.get_pymodule(self.resource,
                                                 force_errors=True)
            return libutils.get_string_module(
                self.project, code, resource=self.resource,
                force_errors=True)
        except exceptions.ModuleSyntaxError as e:
            if msg is None:
                msg = '%s:%s %s' % (e.filename, e.lineno, e.message_)
            if tries < self.maxfixes:
                tries += 1
                self.commenter.comment(e.lineno)
                code = '\n'.join(self.commenter.lines)
            else:
                raise exceptions.ModuleSyntaxError(
                    e.filename, e.lineno,
                    'Failed to fix error: {0}'.format(msg))
def __init__(self, project, observe=True, underlined=False):
    """Construct an AutoImport object.

    If `observe` is `True`, listen for project changes and update the
    cache.  If `underlined` is `True`, underlined names are cached,
    too.
    """
    self.project = project
    self.underlined = underlined
    self.names = project.data_files.read_data('globalnames')
    if self.names is None:
        self.names = {}
    project.data_files.add_write_hook(self._write)
    observer = resourceobserver.ResourceObserver(
        changed=self._changed, moved=self._moved, removed=self._removed)
    if observe:
        project.add_observer(observer)
def import_assist(self, starting):
    """Return a list of ``(name, module)`` tuples.

    This function tries to find modules that have a global name that
    starts with `starting`.
    """
    return [(global_name, module)
            for module in self.names
            for global_name in self.names[module]
            if global_name.startswith(starting)]
def get_modules(self, name):
    """Return the list of modules that have global `name`."""
    return [module for module in self.names
            if name in self.names[module]]
def get_all_names(self):
    """Return the set of all cached global names."""
    result = set()
    for module in self.names:
        result.update(self.names[module])
    return result
def get_name_locations(self, name):
    """Return a list of ``(resource, lineno)`` tuples for `name`."""
    result = []
    for modname in self.names:
        if name in self.names[modname]:
            try:
                pymodule = self.project.get_module(modname)
                if name in pymodule:
                    pyname = pymodule[name]
                    # renamed from `module` to avoid shadowing the
                    # iteration variable
                    defmodule, lineno = pyname.get_definition_location()
                    if defmodule is not None:
                        resource = defmodule.get_module().get_resource()
                        if resource is not None and lineno is not None:
                            result.append((resource, lineno))
            except exceptions.ModuleNotFoundError:
                pass
    return result
def generate_cache(self, resources=None, underlined=None,
                   task_handle=taskhandle.NullTaskHandle()):
    """Generate global name cache for project files.

    If `resources` is a list of `rope.base.resource.File`\\s, only
    those files are searched; otherwise all python modules in the
    project are cached.
    """
    if resources is None:
        resources = self.project.get_python_files()
    # typo fixed: was 'Generatig autoimport cache'
    job_set = task_handle.create_jobset('Generating autoimport cache',
                                        len(resources))
    for resource in resources:
        job_set.started_job('Working on <%s>' % resource.path)
        self.update_resource(resource, underlined)
        job_set.finished_job()
def generate_modules_cache(self, modules, underlined=None,
                           task_handle=taskhandle.NullTaskHandle()):
    """Generate global name cache for modules listed in `modules`."""
    # typo fixed: was 'Generatig autoimport cache for modules'
    job_set = task_handle.create_jobset(
        'Generating autoimport cache for modules', len(modules))
    for modname in modules:
        job_set.started_job('Working on <%s>' % modname)
        if modname.endswith('.*'):
            # a trailing '.*' means: cache every submodule of the package
            mod = self.project.find_module(modname[:-2])
            if mod:
                for sub in submodules(mod):
                    self.update_resource(sub, underlined)
        else:
            self.update_module(modname, underlined)
        job_set.finished_job()
def clear_cache(self):
    """Clear all entries in global-name cache.

    It might be a good idea to use this function before regenerating
    global names.
    """
    self.names.clear()
def find_insertion_line(self, code):
    """Guess at what line the new import should be inserted."""
    # only consider the header before the first def/class
    match = re.search(r'^(def|class)\s+', code)
    if match is not None:
        code = code[:match.start()]
    try:
        pymodule = libutils.get_string_module(self.project, code)
    except exceptions.ModuleSyntaxError:
        return 1
    # insert a sentinel import and see where rope places it
    testmodname = '__rope_testmodule_rope'
    importinfo = importutils.NormalImport(((testmodname, None),))
    module_imports = importutils.get_module_imports(self.project, pymodule)
    module_imports.add_import(importinfo)
    code = module_imports.get_changed_source()
    offset = code.index(testmodname)
    lineno = code.count('\n', 0, offset) + 1
    return lineno
def update_resource(self, resource, underlined=None):
    """Update the cache for global names in `resource`.

    Modules with syntax errors are silently skipped.
    """
    try:
        pymodule = self.project.get_pymodule(resource)
        modname = self._module_name(resource)
        self._add_names(pymodule, modname, underlined)
    except exceptions.ModuleSyntaxError:
        pass
def update_module(self, modname, underlined=None):
    """Update the cache for global names in `modname` module.

    `modname` is the name of a module.  Missing modules are silently
    skipped.
    """
    try:
        pymodule = self.project.get_module(modname)
        self._add_names(pymodule, modname, underlined)
    except exceptions.ModuleNotFoundError:
        pass
def get_changes(self, fixer=str.lower,
                task_handle=taskhandle.NullTaskHandle()):
    """Fix module names.

    `fixer` is a function that takes and returns a `str`.  Given the
    name of a module, it should return the fixed name.
    """
    stack = changestack.ChangeStack(self.project, 'Fixing module names')
    jobset = task_handle.create_jobset('Fixing module names',
                                       self._count_fixes(fixer) + 1)
    try:
        while True:
            # rename one module at a time; _tobe_fixed is recomputed
            # after each rename because paths may have changed
            for resource in self._tobe_fixed(fixer):
                jobset.started_job(resource.path)
                renamer = rename.Rename(self.project, resource)
                changes = renamer.get_changes(fixer(self._name(resource)))
                stack.push(changes)
                jobset.finished_job()
                break
            else:
                break
    finally:
        jobset.started_job('Reverting to original state')
        stack.pop_all()
        jobset.finished_job()
    return stack.merged()
@check_for(Definition, terminal=True)
def check_docstring_missing(self, definition, docstring):
    """D10{0,1,2,3}: Public definitions should have docstrings.

    All modules should normally have docstrings.  [...] all functions
    and classes exported by a module should also have docstrings.
    Public methods (including the __init__ constructor) should also
    have docstrings.

    Note: Public (exported) definitions are either those with names
    listed in __all__ variable (if present), or those that do not
    start with a single underscore.
    """
    if (not docstring and definition.is_public) or \
            (docstring and is_blank(ast.literal_eval(docstring))):
        codes = {Module: violations.D100,
                 Class: violations.D101,
                 NestedClass: violations.D101,
                 Method: lambda: (violations.D105() if definition.is_magic
                                  else violations.D102()),
                 Function: violations.D103,
                 NestedFunction: violations.D103,
                 Package: violations.D104}
        return codes[type(definition)]()
@check_for(Definition)
def check_one_liners(self, definition, docstring):
    """D200: One-liner docstrings should fit on one line with quotes.

    The closing quotes are on the same line as the opening quotes.
    This looks better for one-liners.
    """
    if docstring:
        lines = ast.literal_eval(docstring).split('\n')
        if len(lines) > 1:
            non_empty_lines = sum(1 for l in lines if not is_blank(l))
            if non_empty_lines == 1:
                return violations.D200(len(lines))
@check_for(Function)
def check_no_blank_before(self, function, docstring):
    """D20{1,2}: No blank lines allowed around function/method docstring.

    There's no blank line either before or after the docstring.
    """
    if docstring:
        before, _, after = function.source.partition(docstring)
        blanks_before = list(map(is_blank, before.split('\n')[:-1]))
        blanks_after = list(map(is_blank, after.split('\n')[1:]))
        blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
        blanks_after_count = sum(takewhile(bool, blanks_after))
        if blanks_before_count != 0:
            yield violations.D201(blanks_before_count)
        if not all(blanks_after) and blanks_after_count != 0:
            yield violations.D202(blanks_after_count)
@check_for(Class)
def check_blank_before_after_class(self, class_, docstring):
    """D20{3,4}: Class docstring should have 1 blank line around them.

    Insert a blank line before and after all docstrings (one-line or
    multi-line) that document a class -- generally speaking, the
    class's methods are separated from each other by a single blank
    line, and the docstring needs to be offset from the first method
    by a blank line; for symmetry, put a blank line between the class
    header and the docstring.
    """
    if docstring:
        before, _, after = class_.source.partition(docstring)
        blanks_before = list(map(is_blank, before.split('\n')[:-1]))
        blanks_after = list(map(is_blank, after.split('\n')[1:]))
        blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
        blanks_after_count = sum(takewhile(bool, blanks_after))
        if blanks_before_count != 0:
            yield violations.D211(blanks_before_count)
        if blanks_before_count != 1:
            yield violations.D203(blanks_before_count)
        if not all(blanks_after) and blanks_after_count != 1:
            yield violations.D204(blanks_after_count)
@check_for(Definition)
def check_blank_after_summary(self, definition, docstring):
    """D205: Put one blank line between summary line and description.

    Multi-line docstrings consist of a summary line just like a
    one-line docstring, followed by a blank line, followed by a more
    elaborate description.  The summary line may be used by automatic
    indexing tools; it is important that it fits on one line and is
    separated from the rest of the docstring by a blank line.
    """
    if docstring:
        lines = ast.literal_eval(docstring).strip().split('\n')
        if len(lines) > 1:
            post_summary_blanks = list(map(is_blank, lines[1:]))
            blanks_count = sum(takewhile(bool, post_summary_blanks))
            if blanks_count != 1:
                return violations.D205(blanks_count)
@staticmethod
def _get_docstring_indent(definition, docstring):
    """Return the indentation of the docstring's opening quotes."""
    before_docstring, _, _ = definition.source.partition(docstring)
    # the indent is whatever follows the last newline before the quotes
    _, _, indent = before_docstring.rpartition('\n')
    return indent
@check_for(Definition)
def check_indent(self, definition, docstring):
    """D20{6,7,8}: The entire docstring should be indented same as code.

    The entire docstring is indented the same as the quotes at its
    first line.
    """
    if docstring:
        indent = self._get_docstring_indent(definition, docstring)
        lines = docstring.split('\n')
        if len(lines) > 1:
            lines = lines[1:]  # first line does not need indent
            indents = [leading_space(l) for l in lines if not is_blank(l)]
            # ' \t' restored here; SOURCE carried a mangled tab
            # placeholder ('DCTB') from a lossy extraction
            if set(' \t') == set(''.join(indents) + indent):
                yield violations.D206()
            if (len(indents) > 1 and min(indents[:-1]) > indent) or \
                    indents[-1] > indent:
                yield violations.D208()
            if min(indents) < indent:
                yield violations.D207()
@check_for(Definition)
def check_newline_after_last_paragraph(self, definition, docstring):
    """D209: Put multi-line docstring closing quotes on separate line.

    Unless the entire docstring fits on a line, place the closing
    quotes on a line by themselves.
    """
    if docstring:
        lines = [l for l in ast.literal_eval(docstring).split('\n')
                 if not is_blank(l)]
        if len(lines) > 1:
            if docstring.split('\n')[-1].strip() not in ['"""', "'''"]:
                return violations.D209()
@check_for(Definition)
def check_surrounding_whitespaces(self, definition, docstring):
    """D210: No whitespaces allowed surrounding docstring text."""
    if docstring:
        lines = ast.literal_eval(docstring).split('\n')
        if lines[0].startswith(' ') or \
                (len(lines) == 1 and lines[0].endswith(' ')):
            return violations.D210()
@check_for(Definition)
def check_multi_line_summary_start(self, definition, docstring):
    """D21{2,3}: Multi-line docstring summary style check.

    A multi-line docstring summary should start either at the first,
    or separately at the second line of a docstring.
    """
    if docstring:
        start_triple = ['"""', "'''", 'u"""', "u'''",
                        'r"""', "r'''", 'ur"""', "ur'''"]
        lines = ast.literal_eval(docstring).split('\n')
        if len(lines) > 1:
            first = docstring.split('\n')[0].strip().lower()
            if first in start_triple:
                # the first line holds only the opening quotes
                return violations.D212()
            else:
                return violations.D213()
@check_for(Definition)
def check_triple_double_quotes(self, definition, docstring):
    r'''D300: Use """triple double quotes""".

    For consistency, always use """triple double quotes""" around
    docstrings.  Use r"""raw triple double quotes""" if you use any
    backslashes in your docstrings.  For Unicode docstrings, use
    u"""Unicode triple-quoted strings""".

    Note: Exception to this is made if the docstring contains """
    quotes in its body.
    '''
    if docstring:
        if '"""' in ast.literal_eval(docstring):
            # docstring body contains """; single quotes are allowed
            regex = re("[uU]?[rR]?'''[^'].*")
        else:
            regex = re('[uU]?[rR]?"""[^"].*')
        if not regex.match(docstring):
            illegal_matcher = re('[uU]?[rR]?("+|\'+).*')
            illegal_quotes = illegal_matcher.match(docstring).group(1)
            return violations.D300(illegal_quotes)
@check_for(Definition)
def check_backslashes(self, definition, docstring):
    r'''D301: Use r""" if any backslashes in a docstring.

    Use r"""raw triple double quotes""" if you use any backslashes
    (\) in your docstrings.
    '''
    if docstring and '\\' in docstring and \
            not docstring.startswith(('r', 'ur')):
        return violations.D301()
@check_for(Definition)
def check_unicode_docstring(self, definition, docstring):
    '''D302: Use u""" for docstrings with Unicode.

    For Unicode docstrings, use u"""Unicode triple-quoted strings""".
    '''
    # with unicode_literals every string is already unicode
    if 'unicode_literals' in definition.module.future_imports:
        return
    # the u-prefix only matters on Python 2
    if docstring and sys.version_info[0] <= 2:
        if not is_ascii(docstring) and \
                not docstring.startswith(('u', 'ur')):
            return violations.D302()
@check_for(Definition)
def check_ends_with_period(self, definition, docstring):
    """D400: First line should end with a period.

    The [first line of a] docstring is a phrase ending in a period.
    """
    if docstring:
        summary_line = ast.literal_eval(docstring).strip().split('\n')[0]
        if not summary_line.endswith('.'):
            return violations.D400(summary_line[-1])
@check_for(Function)
def check_imperative_mood(self, function, docstring):
    """D401: First line should be in imperative mood: 'Do', not 'Does'.

    [Docstring] prescribes the function or method's effect as a
    command: ("Do this", "Return that"), not as a description; e.g.
    don't write "Returns the pathname ...".
    """
    if docstring and not function.is_test:
        stripped = ast.literal_eval(docstring).strip()
        if stripped:
            first_word = stripped.split()[0]
            check_word = first_word.lower()
            if check_word in IMPERATIVE_BLACKLIST:
                return violations.D401b(first_word)
            # stem the word and look up its imperative form
            correct_form = IMPERATIVE_VERBS.get(stem(check_word))
            if correct_form and correct_form != check_word:
                return violations.D401(correct_form.capitalize(),
                                       first_word)
@check_for(Function)
def check_no_signature(self, function, docstring):
    """D402: First line should not be function's or method's "signature".

    The one-line docstring should NOT be a "signature" reiterating the
    function/method parameters (which can be obtained by
    introspection).
    """
    if docstring:
        first_line = ast.literal_eval(docstring).strip().split('\n')[0]
        if function.name + '(' in first_line.replace(' ', ''):
            return violations.D402()
@check_for(Function)
def check_capitalized(self, function, docstring):
    """D403: First word of the first line should be properly capitalized.

    The [first line of a] docstring is a phrase ending in a period.
    """
    if docstring:
        first_word = ast.literal_eval(docstring).split()[0]
        # all-caps words (acronyms) are fine as-is
        if first_word == first_word.upper():
            return
        # skip words containing non-letters (e.g. code identifiers)
        for char in first_word:
            if char not in string.ascii_letters and char != "'":
                return
        if first_word != first_word.capitalize():
            return violations.D403(first_word.capitalize(), first_word)
@check_for(Definition)
def check_starts_with_this(self, function, docstring):
    """D404: First word of the docstring should not be `This`.

    Docstrings should use short, simple language.  They should not
    begin with "This class is [..]" or "This module contains [..]".
    """
    if docstring:
        first_word = ast.literal_eval(docstring).split()[0]
        if first_word.lower() == 'this':
            return violations.D404()
@staticmethod
def _get_leading_words(line):
    """Return any leading set of words from `line`.

    For example, if `line` is "  Hello world!!!", returns
    "Hello world".
    """
    result = re('[A-Za-z ]+').match(line.strip())
    if result is not None:
        return result.group()
@staticmethod
def _is_a_docstring_section(context):
    """Check if the suspected context is really a section header.

    To make sure this is really a section we check these conditions:
      * There's no suffix to the section name.
      * The previous line ends with punctuation.
      * The previous line is empty.
    If one of the conditions is true, we will consider the line as a
    section name.
    """
    # NOTE(review): str.lstrip strips a *character set*, not a prefix,
    # so this may strip too much -- kept to match upstream behavior.
    section_name_suffix = \
        context.line.lstrip(context.section_name).strip()
    punctuation = [',', ';', '.', '-', '\\', '/', ']', '}', ')']
    prev_line_ends_with_punctuation = any(
        context.previous_line.strip().endswith(x) for x in punctuation)
    return (is_blank(section_name_suffix) or
            prev_line_ends_with_punctuation or
            is_blank(context.previous_line))
@classmethod
def _check_section_underline(cls, section_name, context, indentation):
    """D4{07,08,09,12}, D215: Section underline checks.

    Check for correct formatting for docstring sections.  Checks that:
      * The line that follows the section name contains dashes
        (D40{7,8}).
      * The amount of dashes is equal to the length of the section
        name (D409).
      * The section's content does not begin in the line that follows
        the section header (D412).
      * The indentation of the dashed line is equal to the docstring's
        indentation (D215).
    """
    blank_lines_after_header = 0
    for line in context.following_lines:
        if not is_blank(line):
            break
        blank_lines_after_header += 1
    else:
        # the section is the last thing in the docstring
        yield violations.D407(section_name)
        return
    non_empty_line = context.following_lines[blank_lines_after_header]
    dash_line_found = ''.join(set(non_empty_line.strip())) == '-'
    if not dash_line_found:
        yield violations.D407(section_name)
        if blank_lines_after_header > 0:
            yield violations.D412(section_name)
    else:
        if blank_lines_after_header > 0:
            yield violations.D408(section_name)
        if non_empty_line.strip() != '-' * len(section_name):
            yield violations.D409(len(section_name), section_name,
                                  len(non_empty_line.strip()))
        if leading_space(non_empty_line) > indentation:
            yield violations.D215(section_name)
        line_after_dashes_index = blank_lines_after_header + 1
        if line_after_dashes_index < len(context.following_lines):
            line_after_dashes = \
                context.following_lines[line_after_dashes_index]
            if is_blank(line_after_dashes):
                rest_of_lines = \
                    context.following_lines[line_after_dashes_index:]
                if not is_blank(''.join(rest_of_lines)):
                    yield violations.D412(section_name)
                else:
                    yield violations.D414(section_name)
        else:
            yield violations.D414(section_name)
@classmethod
def _check_section(cls, docstring, definition, context):
    """D21{4,5}, D4{05,06,10,11,13}: Section name checks.

    Check for valid section names.  Checks that:
      * The section name is properly capitalized (D405).
      * The section is not over-indented (D214).
      * The section name has no superfluous suffix to it (D406).
      * There's a blank line after the section (D410, D413).
      * There's a blank line before the section (D411).
    Also yields all the errors from `_check_section_underline`.
    """
    capitalized_section = context.section_name.title()
    indentation = cls._get_docstring_indent(definition, docstring)
    if (context.section_name not in cls.SECTION_NAMES and
            capitalized_section in cls.SECTION_NAMES):
        yield violations.D405(capitalized_section, context.section_name)
    if leading_space(context.line) > indentation:
        yield violations.D214(capitalized_section)
    suffix = context.line.strip().lstrip(context.section_name)
    if suffix:
        yield violations.D406(capitalized_section, context.line.strip())
    if not context.following_lines or \
            not is_blank(context.following_lines[-1]):
        if context.is_last_section:
            yield violations.D413(capitalized_section)
        else:
            yield violations.D410(capitalized_section)
    if not is_blank(context.previous_line):
        yield violations.D411(capitalized_section)
    for err in cls._check_section_underline(capitalized_section,
                                            context, indentation):
        yield err
@check_for(Definition)
def check_docstring_sections(self, definition, docstring):
    """D21{4,5}, D4{05,06,07,08,09,10}: Docstring sections checks.

    Check the general format of a sectioned docstring, e.g.::

        '''This is my one-liner.

        Short Summary
        -------------
        This is my summary.

        Returns
        -------
        None.

        '''

    Section names appear in `SECTION_NAMES`.
    """
    if not docstring:
        return
    lines = docstring.split('\n')
    if len(lines) < 2:
        return

    lower_section_names = [s.lower() for s in self.SECTION_NAMES]

    def _looks_like_section(line):
        """Return True when the line's leading words name a known section."""
        return self._get_leading_words(line.lower()) in lower_section_names

    candidate_indices = [i for i, line in enumerate(lines)
                         if _looks_like_section(line)]

    SectionContext = namedtuple('SectionContext',
                                ('section_name', 'previous_line', 'line',
                                 'following_lines', 'original_index',
                                 'is_last_section'))

    # Build a context for each candidate line, then keep only the ones
    # that really are docstring sections.
    candidates = (
        SectionContext(self._get_leading_words(lines[i].strip()),
                       lines[i - 1],
                       lines[i],
                       lines[i + 1:],
                       i,
                       False)
        for i in candidate_indices)
    sections = (c for c in candidates if self._is_a_docstring_section(c))

    # Each section's following_lines end where the next section begins.
    for current, following in pairwise(sections, None):
        end = -1 if following is None else following.original_index
        context = SectionContext(current.section_name,
                                 current.previous_line,
                                 current.line,
                                 lines[current.original_index + 1:end],
                                 current.original_index,
                                 following is None)
        for err in self._check_section(docstring, definition, context):
            yield err
def __init__(self):
    """Create a configuration parser."""
    self._cache = {}
    self._override_by_cli = None
    # All three are set by `parse()`; `None` marks "not parsed yet".
    self._options = self._arguments = self._run_conf = None
    self._parser = self._create_option_parser()
def get_default_run_configuration(self):
    """Return a `RunConfiguration` object set with default values."""
    options, _ = self._parse_args([])
    return self._create_run_config(options)
def parse(self):
    """Parse the configuration.

    If one of `BASE_ERROR_SELECTION_OPTIONS` was selected, override all
    error codes to check and disregard any error-code-related settings
    from the configuration files.
    """
    self._options, self._arguments = self._parse_args()
    # No positional arguments means "check the current directory".
    if not self._arguments:
        self._arguments = ['.']

    if not self._validate_options(self._options):
        raise IllegalConfiguration()

    self._run_conf = self._create_run_config(self._options)

    # Keep the CLI check-config around; it wins over file configs later.
    self._override_by_cli = self._create_check_config(self._options,
                                                      use_defaults=False)
@check_initialized
def get_user_run_configuration(self):
    """Return the run configuration for the script.

    Only valid after `parse()` has run (enforced by `check_initialized`).
    """
    return self._run_conf
@check_initialized
def get_files_to_check(self):
    """Generate files and error codes to check on each one.

    Walk dir trees under `self._arguments` and yield file names that
    `match` under each directory that `match_dir`. The method locates
    the configuration for each file name and yields a tuple of
    (filename, [error_codes], ignore_decorators).

    With every discovery of a new configuration file
    `IllegalConfiguration` might be raised.
    """
    def _compile_matchers(config):
        """Return the `match` and `match_dir` predicate functions."""
        return (re(config.match + '$').match,
                re(config.match_dir + '$').match)

    def _decorator_pattern(config):
        """Return the `ignore_decorators` regex, or None when unset."""
        if config.ignore_decorators:
            return re(config.ignore_decorators)
        return None

    for name in self._arguments:
        if os.path.isdir(name):
            for root, dirs, filenames in os.walk(name):
                config = self._get_config(root)
                match, match_dir = _compile_matchers(config)
                ignore_decorators = _decorator_pattern(config)
                # Prune non-matching directories in place so os.walk
                # never descends into them.
                dirs[:] = [d for d in dirs if match_dir(d)]
                for filename in filenames:
                    if match(filename):
                        full_path = os.path.join(root, filename)
                        yield (full_path,
                               list(config.checked_codes),
                               ignore_decorators)
        else:
            config = self._get_config(name)
            match, _ = _compile_matchers(config)
            ignore_decorators = _decorator_pattern(config)
            if match(name):
                yield (name, list(config.checked_codes), ignore_decorators)
def _get_config_by_discovery(self, node):
    """Get a configuration for checking `node` by config discovery.

    Config discovery happens when no explicit config file is specified.
    The file system is searched for config files starting from the
    directory containing the file being checked, and up until the root
    directory of the project. See `_get_config` for further details.
    """
    path = self._get_node_dir(node)
    if path in self._cache:
        return self._cache[path]

    config_file = self._get_config_file_in_folder(path)
    parent_dir, tail = os.path.split(path)

    if config_file is None:
        if tail:
            # No config here: inherit the parent directory's config.
            return self._get_config(parent_dir)
        # Reached the filesystem root: fall back to the CLI options.
        return self._create_check_config(self._options)

    options, inherit = self._read_configuration_file(config_file)
    if tail and inherit:
        parent_config = self._get_config(parent_dir)
        return self._merge_configuration(parent_config, options)
    return self._create_check_config(options)
def _get_config(self, node):
    """Get and cache the run configuration for `node`.

    If an explicit config file was passed via ``--config``, it is read
    once and cached under the key ``None``; otherwise the configuration
    is found by directory discovery (see `_get_config_by_discovery`)
    and cached per directory. In both cases, values that the user set
    on the CLI override values from configuration files, and the
    ``--add-select`` / ``--add-ignore`` options are applied last.
    """
    if self._run_conf.config is None:
        log.debug('No config file specified, discovering.')
        config = self._get_config_by_discovery(node)
    else:
        log.debug('Using config file %r', self._run_conf.config)
        if not os.path.exists(self._run_conf.config):
            raise IllegalConfiguration('Configuration file {!r} specified '
                                       'via --config was not found.'
                                       .format(self._run_conf.config))
        if None in self._cache:
            return self._cache[None]
        options, _ = self._read_configuration_file(self._run_conf.config)
        config = self._create_check_config(options)

    # CLI-provided values take precedence over file-provided ones.
    merged = {}
    for field in CheckConfiguration._fields:
        from_cli = getattr(self._override_by_cli, field)
        merged[field] = (from_cli if from_cli is not None
                         else getattr(config, field))
    config = CheckConfiguration(**merged)

    self._set_add_options(config.checked_codes, self._options)

    # An explicit config file applies everywhere: cache it under None.
    if self._run_conf.config is not None:
        self._cache[None] = config
    else:
        self._cache[self._get_node_dir(node)] = config
    return config
@staticmethod
def _get_node_dir(node):
    """Return the absolute path of the directory of a filesystem node."""
    path = os.path.abspath(node)
    if os.path.isdir(path):
        return path
    return os.path.dirname(path)
def _read_configuration_file(self, path):
    """Try to read and parse `path` as a configuration file.

    If the configuration is illegal (checked with
    `self._validate_options`), raise `IllegalConfiguration`.

    Return a (options, should_inherit) tuple; `options` is None when the
    file could not be read or has no relevant section.
    """
    parser = RawConfigParser()
    options = None
    should_inherit = True

    if parser.read(path) and self._get_section_name(parser):
        # Map each option's dest name to its optparse type/action so we
        # know how to read its value from the config file.
        opt_kinds = {o.dest: (o.type or o.action)
                     for o in self._parser.option_list}
        new_options, _ = self._parse_args([])
        section = self._get_section_name(parser)

        for opt in parser.options(section):
            if opt == 'inherit':
                should_inherit = parser.getboolean(section, opt)
                continue

            if opt.replace('_', '-') not in self.CONFIG_FILE_OPTIONS:
                log.warning("Unknown option '{}' ignored".format(opt))
                continue

            normalized_opt = opt.replace('-', '_')
            kind = opt_kinds[normalized_opt]
            if kind in ('int', 'count'):
                value = parser.getint(section, opt)
            elif kind == 'string':
                value = parser.get(section, opt)
            else:
                assert kind in ('store_true', 'store_false')
                value = parser.getboolean(section, opt)
            setattr(new_options, normalized_opt, value)

        options = self._fix_set_options(new_options)

    if options is not None and not self._validate_options(options):
        raise IllegalConfiguration('in file: {}'.format(path))

    return options, should_inherit
def _merge_configuration(self, parent_config, child_options):
    """Merge parent config into the child options.

    The merge requires an `options` object for the child in order to
    distinguish between mutually exclusive codes, add-select and
    add-ignore error codes.
    """
    # Deep-copy so mutating the child's codes never touches the parent.
    error_codes = copy.deepcopy(parent_config.checked_codes)
    if self._has_exclusive_option(child_options):
        error_codes = self._get_exclusive_error_codes(child_options)

    self._set_add_options(error_codes, child_options)

    inherited = {
        key: getattr(child_options, key) or getattr(parent_config, key)
        for key in ('match', 'match_dir', 'ignore_decorators')
    }
    return CheckConfiguration(checked_codes=error_codes, **inherited)
def _parse_args(self, args=None, values=None):
    """Parse the options using `self._parser` and reformat the options."""
    options, arguments = self._parser.parse_args(args, values)
    return self._fix_set_options(options), arguments
@staticmethod
def _create_run_config(options):
    """Create a `RunConfiguration` object from `options`."""
    values = {field: getattr(options, field)
              for field in RunConfiguration._fields}
    return RunConfiguration(**values)
@classmethod
def _create_check_config(cls, options, use_defaults=True):
    """Create a `CheckConfiguration` object from `options`.

    If `use_defaults`, any match option that is `None` is replaced with
    its default value and the default convention is set for the checked
    codes.
    """
    checked_codes = None
    if use_defaults or cls._has_exclusive_option(options):
        checked_codes = cls._get_checked_errors(options)

    kwargs = {'checked_codes': checked_codes}
    for key in ('match', 'match_dir', 'ignore_decorators'):
        value = getattr(options, key)
        if value is None and use_defaults:
            value = getattr(cls, 'DEFAULT_{0}_RE'.format(key.upper()))
        kwargs[key] = value
    return CheckConfiguration(**kwargs)
@classmethod
def _get_section_name(cls, parser):
    """Return the first relevant section name in `parser`, or None."""
    return next((name for name in cls.POSSIBLE_SECTION_NAMES
                 if parser.has_section(name)),
                None)
@classmethod
def _get_config_file_in_folder(cls, path):
    """Look for a configuration file in `path`.

    Return its full path if one exists, otherwise None.
    """
    if os.path.isfile(path):
        path = os.path.dirname(path)

    for candidate in cls.PROJECT_CONFIG_FILES:
        full_path = os.path.join(path, candidate)
        config = RawConfigParser()
        # Only count the file when it parses AND has a relevant section.
        if config.read(full_path) and cls._get_section_name(config):
            return full_path
    return None
@classmethod
def _get_exclusive_error_codes(cls, options):
    """Extract the error codes from the selected exclusive option.

    Return None when no exclusive option (--ignore/--select/--convention)
    was given.
    """
    all_codes = set(ErrorRegistry.get_error_codes())
    checked_codes = None

    if options.ignore is not None:
        checked_codes = all_codes - cls._expand_error_codes(options.ignore)
    elif options.select is not None:
        checked_codes = cls._expand_error_codes(options.select)
    elif options.convention is not None:
        checked_codes = getattr(conventions, options.convention)

    # Copy so later modifications cannot alter the shared convention sets.
    return copy.deepcopy(checked_codes)
@classmethod
def _set_add_options(cls, checked_codes, options):
    """Mutate `checked_codes` per the `add_select`/`add_ignore` options."""
    checked_codes |= cls._expand_error_codes(options.add_select)
    checked_codes -= cls._expand_error_codes(options.add_ignore)
@staticmethod
def _expand_error_codes(code_parts):
    """Return an expanded set of error codes.

    Parts shorter than a full code (e.g. 'D1') act as prefixes and are
    expanded to every registered code that starts with them.
    """
    known_codes = set(ErrorRegistry.get_error_codes())
    expanded = set()
    try:
        for part in code_parts:
            if len(part) >= 4:
                expanded.add(part)
            else:
                # Prefix: select every registered code matching it.
                expanded.update(code for code in known_codes
                                if code.startswith(part))
    except TypeError as err:
        raise IllegalConfiguration(err)
    return expanded
@classmethod
def _get_checked_errors(cls, options):
    """Extract the codes needed to be checked from `options`."""
    checked_codes = cls._get_exclusive_error_codes(options)
    if checked_codes is None:
        # No exclusive option was passed: start from the default convention.
        checked_codes = cls.DEFAULT_CONVENTION
    cls._set_add_options(checked_codes, options)
    return checked_codes
@classmethod
def _validate_options(cls, options):
    """Validate the mutually exclusive options.

    Return `True` iff at most one of `BASE_ERROR_SELECTION_OPTIONS` was
    selected and any given convention is known.
    """
    # combinations (not permutations): the test is symmetric in
    # (opt1, opt2), so permutations checked every pair twice.
    for opt1, opt2 in \
            itertools.combinations(cls.BASE_ERROR_SELECTION_OPTIONS, 2):
        if getattr(options, opt1) and getattr(options, opt2):
            log.error('Cannot pass both {} and {}. They are '
                      'mutually exclusive.'.format(opt1, opt2))
            return False

    if options.convention and options.convention not in conventions:
        log.error("Illegal convention '{}'. Possible conventions: {}"
                  .format(options.convention, ', '.join(conventions.keys())))
        return False

    return True
@classmethod
def _has_exclusive_option(cls, options):
    """Return `True` iff one or more exclusive options were selected."""
    return any(getattr(options, opt) is not None
               for opt in cls.BASE_ERROR_SELECTION_OPTIONS)
@staticmethod
def _fix_set_options(options):
    """Alter the set options from None/strings to sets in place."""
    optional_set_options = ('ignore', 'select')
    mandatory_set_options = ('add_ignore', 'add_select')

    def _to_set(value_str):
        """Split `value_str` on ',' into a set, dropping empty entries."""
        return set(value_str.split(',')) - {''}

    # Optional options stay None when unset.
    for opt in optional_set_options:
        value = getattr(options, opt)
        if value is not None:
            setattr(options, opt, _to_set(value))

    # Mandatory options always end up as a set (possibly empty).
    for opt in mandatory_set_options:
        value = getattr(options, opt)
        if value is None:
            value = ''
        if not isinstance(value, Set):
            value = _to_set(value)
        setattr(options, opt, value)

    return options
@classmethod
def _create_option_parser(cls):
    """Return an option parser to parse the command line arguments.

    FIX: the ``--convention`` help text was corrupted (split by a stray
    embedded newline); it is reconstructed as a single sentence.
    """
    from optparse import OptionParser

    parser = OptionParser(
        version=__version__,
        usage='Usage: pydocstyle [options] [<file|dir>...]')

    option = parser.add_option

    # Run configuration options.
    option('-e', '--explain', action='store_true', default=False,
           help='show explanation of each error')
    option('-s', '--source', action='store_true', default=False,
           help='show source for each error')
    option('-d', '--debug', action='store_true', default=False,
           help='print debug information')
    option('-v', '--verbose', action='store_true', default=False,
           help='print status information')
    option('--count', action='store_true', default=False,
           help='print total number of errors to stdout')
    option('--config', metavar='<path>', default=None,
           help='use given config file and disable config discovery')

    # Error-check options (mutually exclusive base selections).
    option('--select', metavar='<codes>', default=None,
           help='choose the basic list of checked errors by specifying '
                'which errors to check for (with a list of comma-separated '
                'error codes or prefixes). for example: --select=D101,D2')
    option('--ignore', metavar='<codes>', default=None,
           help='choose the basic list of checked errors by specifying '
                'which errors to ignore (with a list of comma-separated '
                'error codes or prefixes). for example: --ignore=D101,D2')
    option('--convention', metavar='<name>', default=None,
           help='choose the basic list of checked errors by specifying an '
                'existing convention. Possible conventions: {}'
                .format(', '.join(conventions)))
    option('--add-select', metavar='<codes>', default=None,
           help='amend the list of errors to check for by specifying more '
                'error codes to check.')
    option('--add-ignore', metavar='<codes>', default=None,
           help='amend the list of errors to check for by specifying more '
                'error codes to ignore.')

    # Match clauses.
    option('--match', metavar='<pattern>', default=None,
           help="check only files that exactly match <pattern> regular "
                "expression; default is --match='{}' which matches files "
                "that don't start with 'test_' but end with '.py'"
                .format(cls.DEFAULT_MATCH_RE))
    option('--match-dir', metavar='<pattern>', default=None,
           help="search only dirs that exactly match <pattern> regular "
                "expression; default is --match-dir='{}', which matches "
                "all dirs that don't start with a dot"
                .format(cls.DEFAULT_MATCH_DIR_RE))

    # Decorators.
    option('--ignore-decorators', metavar='<decorators>', default=None,
           help="ignore any functions or methods that are decorated by a "
                "function with a name fitting the <decorators> regular "
                "expression; default is --ignore-decorators='{0}' which "
                "does not ignore any decorated functions."
                .format(cls.DEFAULT_IGNORE_DECORATORS_RE))

    return parser
@property
def source(self):
    """Return the source code for the definition.

    Trailing blank and comment-only lines are stripped from the end of
    the definition's slice of the module source.
    """
    lines = self._source[self._slice]

    def _ignorable(line):
        stripped = line.strip()
        return stripped == '' or stripped.startswith('#')

    # Drop trailing blank/comment lines by scanning from the end.
    trimmed = dropwhile(_ignorable, reversed(lines))
    return ''.join(reversed(list(trimmed)))