'Inferred lookup.
Return an iterator on the inferred values of the statements returned by
the lookup method.'
| def ilookup(self, name):
| frame, stmts = self.lookup(name)
context = contextmod.InferenceContext()
return bases._infer_stmts(stmts, context, frame)
|
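A minimal usage sketch of what ilookup enables, assuming the astroid package and its extract_node helper (the `#@` marker selects the node to return; the printed value is indicative):

    import astroid

    node = astroid.extract_node('''
    x = 1
    x  #@
    ''')
    for inferred in node.ilookup('x'):
        print(inferred)  # e.g. a Const node wrapping the value 1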
'Filter statements to remove ignorable statements.
If self is not itself a frame and the name is found in the enclosing
frame's locals, statements will be filtered to remove ignorable
statements according to self's location.'
| def _filter_stmts(self, stmts, frame, offset):
| if offset == -1:
    myframe = self.frame().parent.frame()
else:
    myframe = self.frame()
    # if the frame of this node is its own statement, the node is part
    # of a class or function definition; use the upper frame instead
    if self.statement() is myframe and myframe.parent:
        myframe = myframe.parent.frame()
mystmt = self.statement()
# line filtering if we are in the same frame
if myframe is frame and mystmt.fromlineno is not None:
    assert mystmt.fromlineno is not None, mystmt
    mylineno = mystmt.fromlineno + offset
else:
    # disable lineno filtering
    mylineno = 0
_stmts = []
_stmt_parents = []
for node in stmts:
    stmt = node.statement()
    # line filtering is on and we have reached our location: stop
    if mylineno > 0 and stmt.fromlineno > mylineno:
        break
    assert hasattr(node, 'assign_type'), (node, node.scope(), node.scope().locals)
    assign_type = node.assign_type()
    if node.has_base(self):
        break
    _stmts, done = assign_type._get_filtered_stmts(self, node, _stmts, mystmt)
    if done:
        break
    optional_assign = assign_type.optional_assign
    if optional_assign and assign_type.parent_of(self):
        # we are inside a loop; the loop variable assignment hides any
        # previous assignment
        _stmts = [node]
        _stmt_parents = [stmt.parent]
        continue
    try:
        pindex = _stmt_parents.index(stmt.parent)
    except ValueError:
        pass
    else:
        if _stmts[pindex].assign_type().parent_of(assign_type):
            continue
        if not (optional_assign or are_exclusive(_stmts[pindex], node)):
            del _stmt_parents[pindex]
            del _stmts[pindex]
    if isinstance(node, AssignName):
        if not optional_assign and stmt.parent is mystmt.parent:
            _stmts = []
            _stmt_parents = []
    elif isinstance(node, DelName):
        _stmts = []
        _stmt_parents = []
        continue
    if not are_exclusive(self, node):
        _stmts.append(node)
        _stmt_parents.append(stmt.parent)
return _stmts
|
'Return arguments formatted as a string.'
| def format_args(self):
| result = []
if self.args:
    result.append(_format_args(self.args, self.defaults,
                               getattr(self, 'annotations', None)))
if self.vararg:
    result.append('*%s' % self.vararg)
if self.kwonlyargs:
    if not self.vararg:
        result.append('*')
    result.append(_format_args(self.kwonlyargs, self.kw_defaults,
                               self.kwonlyargs_annotations))
if self.kwarg:
    result.append('**%s' % self.kwarg)
return ', '.join(result)
|
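To make the branches above concrete, here is a hedged sketch of calling format_args on the Arguments node of a parsed function (assumes astroid; the exact output string is indicative):

    import astroid

    func = astroid.extract_node('def f(a, b=2, *args, c, **kw): pass')
    print(func.args.format_args())  # roughly: 'a, b=2, *args, c, **kw'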
'Return the default value for an argument.
:raise `NoDefault`: if there is no default value defined'
| def default_value(self, argname):
| i = _find_arg(argname, self.args)[0]
if i is not None:
    idx = i - (len(self.args) - len(self.defaults))
    if idx >= 0:
        return self.defaults[idx]
i = _find_arg(argname, self.kwonlyargs)[0]
if i is not None and self.kw_defaults[i] is not None:
    return self.kw_defaults[i]
raise exceptions.NoDefault(func=self.parent, name=argname)
|
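The index arithmetic is easiest to see on a concrete signature, since positional defaults align with the tail of the argument list:

    # def f(a, b, c=3, d=4): args = [a, b, c, d], defaults = [3, 4]
    # 'd': i = 3, idx = 3 - (4 - 2) = 1  -> defaults[1] is 4
    # 'b': i = 1, idx = 1 - (4 - 2) = -1 -> negative, no positional
    #      default; the keyword-only branch also fails, so NoDefault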
'Return True if the name is defined in arguments.'
| def is_argument(self, name):
| if name == self.vararg:
    return True
if name == self.kwarg:
    return True
return (self.find_argname(name, True)[1] is not None or
        (self.kwonlyargs and
         _find_arg(name, self.kwonlyargs, True)[1] is not None))
|
'Return index and Name node with given name.'
| def find_argname(self, argname, rec=False):
| if self.args:
    return _find_arg(argname, self.args, rec)
return (None, None)
|
'Override get_children to skip over None elements in kw_defaults.'
| def get_children(self):
| for child in super(Arguments, self).get_children():
    if child is not None:
        yield child
|
'Return a list of TypeErrors which can occur during inference.
Each TypeError is represented by a :class:`BadBinaryOperationMessage`,
which holds the original exception.'
| def type_errors(self, context=None):
| try:
    results = self._infer_augassign(context=context)
    return [result for result in results
            if isinstance(result, util.BadBinaryOperationMessage)]
except exceptions.InferenceError:
    return []
|
'Return a list of TypeErrors which can occur during inference.
Each TypeError is represented by a :class:`BadBinaryOperationMessage`,
which holds the original exception.'
| def type_errors(self, context=None):
| try:
    results = self._infer_binop(context=context)
    return [result for result in results
            if isinstance(result, util.BadBinaryOperationMessage)]
except exceptions.InferenceError:
    return []
|
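A hedged sketch of how these type_errors methods surface bad operands through inference (assumes astroid; extract_node yields a BinOp here, and the message text is indicative):

    import astroid

    node = astroid.extract_node("1 + 'a'")
    for error in node.type_errors():
        print(error)  # e.g. unsupported operand type(s) for +: 'int' and 'str'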
'Override get_children for tuple fields.'
| def get_children(self):
| yield self.left
for _, comparator in self.ops:
    yield comparator
|
'Override last_child.'
| def last_child(self):
| return self.ops[-1][1]
|
'Method used in filter_stmts.'
| def _get_filtered_stmts(self, lookup_node, node, stmts, mystmt):
| if self is mystmt:
    if isinstance(lookup_node, (Const, Name)):
        return [lookup_node], True
elif self.statement() is mystmt:
    return [node], True
return stmts, False
|
'Get children of a Dict node.'
| def get_children(self):
| for key, value in self.items:
    yield key
    yield value
|
'Override last_child.'
| def last_child(self):
| if self.items:
    return self.items[-1][1]
return None
|
'Handle block line number ranges for if statements.'
| def block_range(self, lineno):
| if lineno == self.body[0].fromlineno:
    return lineno, lineno
if lineno <= self.body[-1].tolineno:
    return lineno, self.body[-1].tolineno
return self._elsed_block_range(lineno, self.orelse,
                               self.body[0].fromlineno - 1)
|
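An illustration of the three branches above on a small `if` statement (line numbers refer to the snippet; results are indicative):

    # 1  if cond:        block_range(1) -> (1, 1)   the test line itself
    # 2      a = 1       block_range(2) -> (2, 3)   rest of the body
    # 3      b = 2
    # 4  else:           lines past the body are delegated to
    # 5      c = 3       _elsed_block_range(lineno, self.orelse, 0)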
'Wrap the empty attributes of the Slice in a Const node.'
| def _wrap_attribute(self, attr):
| if not attr:
    const = const_factory(attr)
    const.parent = self
    return const
return attr
|
'Handle block line number ranges for try/except statements.'
| def block_range(self, lineno):
| last = None
for exhandler in self.handlers:
    if exhandler.type and lineno == exhandler.type.fromlineno:
        return lineno, lineno
    if exhandler.body[0].fromlineno <= lineno <= exhandler.body[-1].tolineno:
        return lineno, exhandler.body[-1].tolineno
    if last is None:
        last = exhandler.body[0].fromlineno - 1
return self._elsed_block_range(lineno, self.orelse, last)
|
'Handle block line number ranges for try/finally statements.'
| def block_range(self, lineno):
| child = self.body[0]
if (isinstance(child, TryExcept) and child.fromlineno == self.fromlineno
        and lineno > self.fromlineno and lineno <= child.tolineno):
    return child.block_range(lineno)
return self._elsed_block_range(lineno, self.finalbody)
|
'Return a list of TypeErrors which can occur during inference.
Each TypeError is represented by a :class:`BadUnaryOperationMessage`,
which holds the original exception.'
| def type_errors(self, context=None):
| try:
    results = self._infer_unaryop(context=context)
    return [result for result in results
            if isinstance(result, util.BadUnaryOperationMessage)]
except exceptions.InferenceError:
    return []
|
'Handle block line number ranges for `for` and `while` statements.'
| def block_range(self, lineno):
| return self._elsed_block_range(lineno, self.orelse)
|
'Inference on an Unknown node immediately terminates.'
| def infer(self, context=None, **kwargs):
| yield util.Uninferable
|
'Build astroid from a living module (i.e. using inspect).
This is used when there is no python source code available (either
because it's a built-in module or because the .py file is not available).'
| def inspect_build(self, module, modname=None, path=None):
| self._module = module
if modname is None:
    modname = module.__name__
try:
    node = build_module(modname, module.__doc__)
except AttributeError:
    # some modules (e.g. java modules under jython) have no __doc__
    node = build_module(modname)
node.file = node.path = os.path.abspath(path) if path else path
node.name = modname
MANAGER.cache_module(node)
node.package = hasattr(module, '__path__')
self._done = {}
self.object_build(node, module)
return node
|
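A hedged usage sketch: building an astroid module from a compiled module that has no source. InspectBuilder lives in astroid.raw_building in recent releases; treat the import path as an assumption of this sketch:

    import math
    from astroid.raw_building import InspectBuilder

    node = InspectBuilder().inspect_build(math)
    print(node.name)              # 'math'
    print('sqrt' in node.locals)  # True, populated by object_build below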
'Recursive method which creates a partial AST from real objects
(only functions, classes, and methods are handled).'
| def object_build(self, node, obj):
| if obj in self._done:
    return self._done[obj]
self._done[obj] = node
for name in dir(obj):
    try:
        member = getattr(obj, name)
    except AttributeError:
        # some attributes advertised by dir() may not be retrievable
        attach_dummy_node(node, name)
        continue
    if inspect.ismethod(member):
        member = six.get_method_function(member)
    if inspect.isfunction(member):
        _build_from_function(node, name, member, self._module)
    elif inspect.isbuiltin(member):
        if not _io_discrepancy(member) and self.imported_member(node, member, name):
            continue
        object_build_methoddescriptor(node, member, name)
    elif inspect.isclass(member):
        if self.imported_member(node, member, name):
            continue
        if member in self._done:
            class_node = self._done[member]
            if class_node not in node.locals.get(name, ()):
                node.add_local_node(class_node, name)
        else:
            class_node = object_build_class(node, member, name)
            # recursion
            self.object_build(class_node, member)
        if name == '__class__' and class_node.parent is None:
            class_node.parent = self._done[self._module]
    elif inspect.ismethoddescriptor(member):
        assert isinstance(member, object)
        object_build_methoddescriptor(node, member, name)
    elif inspect.isdatadescriptor(member):
        assert isinstance(member, object)
        object_build_datadescriptor(node, member, name)
    elif isinstance(member, _CONSTANTS):
        attach_const_node(node, name, member)
    elif inspect.isroutine(member):
        # some builtin methods (e.g. under jython) are not caught by
        # the isbuiltin branch above
        _build_from_function(node, name, member, self._module)
    else:
        # create an empty node so that the name is actually defined
        attach_dummy_node(node, name, member)
|
'Verify this is not an imported class or handle it.'
| def imported_member(self, node, member, name):
| try:
    modname = getattr(member, '__module__', None)
except:
    _LOG.exception('unexpected error while building astroid from living object')
    modname = None
if modname is None:
    if name in ('__new__', '__subclasshook__') or (name in _BUILTINS and _JYTHON):
        modname = six.moves.builtins.__name__
    else:
        attach_dummy_node(node, name, member)
        return True
real_name = {'gtk': 'gtk_gtk', '_io': 'io'}.get(modname, modname)
if real_name != self._module.__name__:
    # check if it sounds valid and then add an import node, else use a
    # dummy node
    try:
        getattr(sys.modules[modname], name)
    except (KeyError, AttributeError):
        attach_dummy_node(node, name, member)
    else:
        attach_import_node(node, modname, name)
    return True
return False
|
'Call matching transforms for the given node, if any, and return the
transformed node.'
| def _transform(self, node):
| cls = node.__class__
if cls not in self.transforms:
    # no transform registered for this class of node
    return node
transforms = self.transforms[cls]
orig_node = node  # copy the reference
for transform_func, predicate in transforms:
    if predicate is None or predicate(node):
        ret = transform_func(node)
        # if the transformation function returns something, it is
        # expected to be a replacement for the node
        if ret is not None:
            if node is not orig_node:
                # node has already been substituted; warn about it
                warnings.warn('node %s substituted multiple times' % node)
            node = ret
return node
|
'Register `transform(node)` function to be applied on the given
astroid's `node_class` if `predicate` is None or returns true
when called with the node as argument.
The transform function may return a value which is then used to
substitute the original node in the tree.'
| def register_transform(self, node_class, transform, predicate=None):
| self.transforms[node_class].append((transform, predicate))
|
'Unregister the given transform.'
| def unregister_transform(self, node_class, transform, predicate=None):
| self.transforms[node_class].remove((transform, predicate))
|
'Walk the given astroid *tree* and transform each encountered node.
Only the nodes which have transforms registered will actually
be replaced or changed.'
| def visit(self, module):
| module.body = [self._visit(child) for child in module.body]
return self._transform(module)
|
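A hedged sketch of the register_transform/visit workflow (assumes astroid's TransformVisitor from astroid.transforms; the FunctionDef node class matches current astroid):

    import astroid
    from astroid.transforms import TransformVisitor

    def upcase_name(node):
        node.name = node.name.upper()
        return node  # the returned value substitutes the original node

    visitor = TransformVisitor()
    visitor.register_transform(astroid.FunctionDef, upcase_name,
                               predicate=lambda n: n.name.startswith('f'))
    module = visitor.visit(astroid.parse('def foo(): pass'))
    print(module.body[0].name)  # 'FOO'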
'Determine if this path should be linted.'
| def allow(self, path):
| return path.endswith('.py')
|
'Lint the file. Return an array of error dicts if appropriate.'
| def run(self, path, **meta):
| old_stdout = sys.stdout
with open(os.devnull, 'w') as devnull:
    sys.stdout = devnull  # silence isort's console output while checking
    try:
        if SortImports(path, check=True).incorrectly_sorted:
            return [{'lnum': 0, 'col': 0,
                     'text': 'Incorrectly sorted imports.',
                     'type': 'ISORT'}]
        return []
    finally:
        sys.stdout = old_stdout  # restore stdout even if SortImports raises
|
'Get options from config files.'
| def finalize_options(self):
| self.arguments = {}
computed_settings = from_path(os.getcwd())
for key, value in itemsview(computed_settings):
    self.arguments[key] = value
|
'Find distribution packages.'
| def distribution_files(self):
| if self.distribution.packages:
    package_dirs = self.distribution.package_dir or {}
    for package in self.distribution.packages:
        pkg_dir = package
        if package in package_dirs:
            pkg_dir = package_dirs[package]
        elif u'' in package_dirs:
            pkg_dir = package_dirs[u''] + os.path.sep + pkg_dir
        yield pkg_dir.replace(u'.', os.path.sep)
if self.distribution.py_modules:
    for filename in self.distribution.py_modules:
        yield u'%s.py' % filename
yield u'setup.py'
|
'Strips # comments that exist at the top of the given lines.'
| @staticmethod
def _strip_top_comments(lines):
| lines = copy.copy(lines)
while lines and lines[0].startswith(u'#'):
    lines = lines[1:]
return u'\n'.join(lines)
|
'Tries to determine whether a module is a python std import, a third-party
import, or project code; if it can't tell, it assumes project code.'
| def place_module(self, module_name):
| for forced_separate in self.config[u'forced_separate']:
    # forced_separate entries are treated as fnmatch globs
    path_glob = forced_separate
    if not forced_separate.endswith(u'*'):
        path_glob = u'%s*' % forced_separate
    if fnmatch(module_name, path_glob) or fnmatch(module_name, u'.' + path_glob):
        return forced_separate
if module_name.startswith(u'.'):
    return self.sections.LOCALFOLDER
# try to find the most specific placement instruction match (if any)
parts = module_name.split(u'.')
module_names_to_check = [u'.'.join(parts[:first_k])
                         for first_k in range(len(parts), 0, -1)]
for module_name_to_check in module_names_to_check:
    for pattern, placement in self.known_patterns:
        if pattern.match(module_name_to_check):
            return placement
# use a copy of sys.path to avoid any unwanted modifications
paths = list(sys.path)
virtual_env = self.config.get(u'virtual_env') or os.environ.get(u'VIRTUAL_ENV')
virtual_env_src = False
if virtual_env:
    paths += [path for path in glob(u'{0}/lib/python*/site-packages'.format(virtual_env))
              if path not in paths]
    paths += [path for path in glob(u'{0}/src/*'.format(virtual_env))
              if os.path.isdir(path)]
    virtual_env_src = u'{0}/src/'.format(virtual_env)
stdlib_lib_prefix = os.path.normcase(get_stdlib_path())
for prefix in paths:
    module_path = u'/'.join((prefix, module_name.replace(u'.', u'/')))
    package_path = u'/'.join((prefix, module_name.split(u'.')[0]))
    is_module = (exists_case_sensitive(module_path + u'.py') or
                 exists_case_sensitive(module_path + u'.so'))
    is_package = exists_case_sensitive(package_path) and os.path.isdir(package_path)
    if is_module or is_package:
        if (u'site-packages' in prefix or u'dist-packages' in prefix or
                (virtual_env and virtual_env_src in prefix)):
            return self.sections.THIRDPARTY
        elif os.path.normcase(prefix).startswith(stdlib_lib_prefix):
            return self.sections.STDLIB
        else:
            return self.config[u'default_section']
return self.config[u'default_section']
|
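A hedged sketch of the resulting behavior using isort 4.x's SortImports object; constructing it with empty file_contents just to get a configured instance, and the method's exact location, are assumptions (this moved around between isort versions):

    from isort import SortImports

    sorter = SortImports(file_contents=u'')
    print(sorter.place_module(u'os'))        # 'STDLIB'
    print(sorter.place_module(u'requests'))  # usually 'THIRDPARTY'
    print(sorter.place_module(u'.utils'))    # 'LOCALFOLDER'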
'Returns the current line from the file while incrementing the index.'
| def _get_line(self):
| line = self.in_lines[self.index]
self.index += 1
return line
|
'If the current line is an import line, return its type (from or straight).'
| @staticmethod
def _import_type(line):
| if u'isort:skip' in line:
    return
elif line.startswith(u'import '):
    return u'straight'
elif line.startswith(u'from '):
    return u'from'
|
'Returns True if we are at the end of the file.'
| def _at_end(self):
| return self.index == self.number_of_lines
|
'Returns a string with comments added.'
| def _add_comments(self, comments, original_string=u''):
| if not comments:
    return original_string
return u'{0} # {1}'.format(self._strip_comments(original_string)[0],
                           u'; '.join(comments))
|
'Returns an import wrapped to the specified line-length, if possible.'
| def _wrap(self, line):
| wrap_mode = self.config[u'multi_line_output']
if len(line) > self.config[u'line_length'] and wrap_mode != settings.WrapModes.NOQA:
    for splitter in (u'import', u'.', u'as'):
        exp = u'\\b' + re.escape(splitter) + u'\\b'
        if re.search(exp, line) and not line.strip().startswith(splitter):
            line_parts = re.split(exp, line)
            next_line = []
            while (len(line) + 2) > (self.config[u'wrap_length'] or
                                     self.config[u'line_length']) and line_parts:
                next_line.append(line_parts.pop())
                line = splitter.join(line_parts)
            if not line:
                line = next_line.pop()
            cont_line = self._wrap(self.config[u'indent'] +
                                   splitter.join(next_line).lstrip())
            if self.config[u'use_parentheses']:
                output = u'{0}{1} (\n{2}{3}{4})'.format(
                    line, splitter, cont_line,
                    u',' if self.config[u'include_trailing_comma'] else u'',
                    u'\n' if wrap_mode in (settings.WrapModes.VERTICAL_HANGING_INDENT,
                                           settings.WrapModes.VERTICAL_GRID_GROUPED)
                    else u'')
                lines = output.split(u'\n')
                if u' #' in lines[-1] and lines[-1].endswith(u')'):
                    line, comment = lines[-1].split(u' #', 1)
                    lines[-1] = line + u') #' + comment[:-1]
                return u'\n'.join(lines)
            return u'{0}{1} \\\n{2}'.format(line, splitter, cont_line)
elif len(line) > self.config[u'line_length'] and wrap_mode == settings.WrapModes.NOQA:
    if u'# NOQA' not in line:
        return u'{0} # NOQA'.format(line)
return line
|
'Adds the imports back to the file (at the index of the first import),
sorted alphabetically and split between groups.'
| def _add_formatted_imports(self):
| sort_ignore_case = self.config[u'force_alphabetical_sort_within_sections']
sections = itertools.chain(self.sections, self.config[u'forced_separate'])
if self.config[u'no_sections']:
    self.imports[u'no_sections'] = {u'straight': [], u'from': {}}
    for section in sections:
        self.imports[u'no_sections'][u'straight'].extend(
            self.imports[section].get(u'straight', []))
        self.imports[u'no_sections'][u'from'].update(
            self.imports[section].get(u'from', {}))
    sections = (u'no_sections',)
output = []
for section in sections:
    straight_modules = self.imports[section][u'straight']
    straight_modules = nsorted(straight_modules,
                               key=lambda key: self._module_key(key, self.config))
    from_modules = self.imports[section][u'from']
    from_modules = nsorted(from_modules,
                           key=lambda key: self._module_key(key, self.config))
    section_output = []
    if self.config[u'from_first']:
        self._add_from_imports(from_modules, section, section_output, sort_ignore_case)
        if self.config[u'lines_between_types'] and from_modules and straight_modules:
            section_output.extend([u''] * self.config[u'lines_between_types'])
        self._add_straight_imports(straight_modules, section, section_output)
    else:
        self._add_straight_imports(straight_modules, section, section_output)
        if self.config[u'lines_between_types'] and from_modules and straight_modules:
            section_output.extend([u''] * self.config[u'lines_between_types'])
        self._add_from_imports(from_modules, section, section_output, sort_ignore_case)
    if self.config[u'force_sort_within_sections']:
        def by_module(line):
            section = u'B'
            if line.startswith(u'#'):
                return u'AA'
            line = re.sub(u'^from ', u'', line)
            line = re.sub(u'^import ', u'', line)
            if line.split(u' ')[0] in self.config[u'force_to_top']:
                section = u'A'
            if not self.config[u'order_by_type']:
                line = line.lower()
            return u'{0}{1}'.format(section, line)
        section_output = nsorted(section_output, key=by_module)
    if section_output:
        section_name = section
        if section_name in self.place_imports:
            self.place_imports[section_name] = section_output
            continue
        section_title = self.config.get(u'import_heading_' + str(section_name).lower(), u'')
        if section_title:
            section_comment = u'# {0}'.format(section_title)
            if (section_comment not in self.out_lines[0:1] and
                    section_comment not in self.in_lines[0:1]):
                section_output.insert(0, section_comment)
        output += section_output + [u''] * self.config[u'lines_between_sections']
while [character.strip() for character in output[-1:]] == [u'']:
    output.pop()
output_at = 0
if self.import_index < self.original_length:
    output_at = self.import_index
elif self._first_comment_index_end != -1 and self._first_comment_index_start <= 2:
    output_at = self._first_comment_index_end
self.out_lines[output_at:0] = output
imports_tail = output_at + len(output)
while [character.strip() for character in
       self.out_lines[imports_tail:imports_tail + 1]] == [u'']:
    self.out_lines.pop(imports_tail)
if len(self.out_lines) > imports_tail:
    next_construct = u''
    self._in_quote = False
    tail = self.out_lines[imports_tail:]
    for index, line in enumerate(tail):
        if not self._skip_line(line) and line.strip():
            if (line.strip().startswith(u'#') and len(tail) > index + 1 and
                    tail[index + 1].strip()):
                continue
            next_construct = line
            break
    if self.config[u'lines_after_imports'] != -1:
        self.out_lines[imports_tail:0] = [u'' for line in
                                          range(self.config[u'lines_after_imports'])]
    elif (next_construct.startswith(u'def') or next_construct.startswith(u'class') or
          next_construct.startswith(u'@') or next_construct.startswith(u'async def')):
        self.out_lines[imports_tail:0] = [u'', u'']
    else:
        self.out_lines[imports_tail:0] = [u'']
if self.place_imports:
    new_out_lines = []
    for index, line in enumerate(self.out_lines):
        new_out_lines.append(line)
        if line in self.import_placements:
            new_out_lines.extend(self.place_imports[self.import_placements[line]])
            if len(self.out_lines) <= index or self.out_lines[index + 1].strip() != u'':
                new_out_lines.append(u'')
    self.out_lines = new_out_lines
|
'Removes comments from import line.'
| @staticmethod
def _strip_comments(line, comments=None):
| if comments is None:
    comments = []
new_comments = False
comment_start = line.find(u'#')
if comment_start != -1:
    comments.append(line[comment_start + 1:].strip())
    new_comments = True
    line = line[:comment_start]
return line, comments, new_comments
|
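A worked example of the return contract above (a tuple of the stripped line, the accumulated comment list, and whether anything new was found):

    # _strip_comments(u'from os import path  # posix only')
    #   -> (u'from os import path  ', [u'posix only'], True)
    # _strip_comments(u'import sys')
    #   -> (u'import sys', [], False)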
'Parses a python file, taking out and categorizing imports.'
| def _parse(self):
| self._in_quote = False
self._in_top_comment = False
while not self._at_end():
    line = self._get_line()
    statement_index = self.index
    skip_line = self._skip_line(line)
    if line in self._section_comments and not skip_line:
        if self.import_index == -1:
            self.import_index = self.index - 1
        continue
    if u'isort:imports-' in line and line.startswith(u'#'):
        section = line.split(u'isort:imports-')[-1].split()[0].upper()
        self.place_imports[section] = []
        self.import_placements[line] = section
    if u';' in line:
        for part in (part.strip() for part in line.split(u';')):
            if part and not part.startswith(u'from ') and not part.startswith(u'import '):
                skip_line = True
    import_type = self._import_type(line)
    if not import_type or skip_line:
        self.out_lines.append(line)
        continue
    for line in (line.strip() for line in line.split(u';')):
        import_type = self._import_type(line)
        if not import_type:
            self.out_lines.append(line)
            continue
        line = line.replace(u'\t', u' ').replace(u'import*', u'import *')
        if self.import_index == -1:
            self.import_index = self.index - 1
        nested_comments = {}
        import_string, comments, new_comments = self._strip_comments(line)
        stripped_line = [part for part in
                         self._strip_syntax(import_string).strip().split(u' ') if part]
        if (import_type == u'from' and len(stripped_line) == 2 and
                stripped_line[1] != u'*' and new_comments):
            nested_comments[stripped_line[-1]] = comments[0]
        if u'(' in line.split(u'#')[0] and not self._at_end():
            while not line.strip().endswith(u')') and not self._at_end():
                line, comments, new_comments = self._strip_comments(self._get_line(), comments)
                stripped_line = self._strip_syntax(line).strip()
                if (import_type == u'from' and stripped_line and
                        u' ' not in stripped_line and new_comments):
                    nested_comments[stripped_line] = comments[-1]
                import_string += u'\n' + line
        else:
            while line.strip().endswith(u'\\'):
                line, comments, new_comments = self._strip_comments(self._get_line(), comments)
                stripped_line = self._strip_syntax(line).strip()
                if (import_type == u'from' and stripped_line and
                        u' ' not in stripped_line and new_comments):
                    nested_comments[stripped_line] = comments[-1]
                if import_string.strip().endswith(u' import') or line.strip().startswith(u'import '):
                    import_string += u'\n' + line
                else:
                    import_string = import_string.rstrip().rstrip(u'\\') + u' ' + line.lstrip()
        if import_type == u'from':
            import_string = import_string.replace(u'import(', u'import (')
            parts = import_string.split(u' import ')
            from_import = parts[0].split(u' ')
            import_string = u' import '.join(
                [from_import[0] + u' ' + u''.join(from_import[1:])] + parts[1:])
        imports = [item.replace(u'{|', u'{ ').replace(u'|}', u' }')
                   for item in self._strip_syntax(import_string).split()]
        if u'as' in imports and (imports.index(u'as') + 1) < len(imports):
            while u'as' in imports:
                index = imports.index(u'as')
                if import_type == u'from':
                    module = imports[0] + u'.' + imports[index - 1]
                    self.as_map[module] = imports[index + 1]
                else:
                    module = imports[index - 1]
                    self.as_map[module] = imports[index + 1]
                if not self.config[u'combine_as_imports']:
                    self.comments[u'straight'][module] = comments
                    comments = []
                del imports[index:index + 2]
        if import_type == u'from':
            import_from = imports.pop(0)
            placed_module = self.place_module(import_from)
            if placed_module == u'':
                print(u'WARNING: could not place module {0} of line {1} --'
                      u' Do you need to define a default section?'.format(import_from, line))
            root = self.imports[placed_module][import_type]
            for import_name in imports:
                associated_comment = nested_comments.get(import_name)
                if associated_comment:
                    self.comments[u'nested'].setdefault(import_from, {})[import_name] = \
                        associated_comment
                    comments.pop(comments.index(associated_comment))
            if comments:
                self.comments[u'from'].setdefault(import_from, []).extend(comments)
            if len(self.out_lines) > max(self.import_index,
                                         self._first_comment_index_end + 1, 1) - 1:
                last = (self.out_lines and self.out_lines[-1].rstrip()) or u''
                while (last.startswith(u'#') and not last.endswith(u'"""') and
                       not last.endswith(u"'''") and u'isort:imports-' not in last):
                    self.comments[u'above'][u'from'].setdefault(import_from, []).insert(
                        0, self.out_lines.pop(-1))
                    if len(self.out_lines) > max(self.import_index - 1,
                                                 self._first_comment_index_end + 1, 1) - 1:
                        last = self.out_lines[-1].rstrip()
                    else:
                        last = u''
                if statement_index - 1 == self.import_index:
                    self.import_index -= len(self.comments[u'above'][u'from'].get(import_from, []))
            if root.get(import_from, False):
                root[import_from].update(imports)
            else:
                root[import_from] = OrderedSet(imports)
        else:
            for module in imports:
                if comments:
                    self.comments[u'straight'][module] = comments
                    comments = None
                if len(self.out_lines) > max(self.import_index,
                                             self._first_comment_index_end + 1, 1) - 1:
                    last = (self.out_lines and self.out_lines[-1].rstrip()) or u''
                    while (last.startswith(u'#') and not last.endswith(u'"""') and
                           not last.endswith(u"'''") and u'isort:imports-' not in last):
                        self.comments[u'above'][u'straight'].setdefault(module, []).insert(
                            0, self.out_lines.pop(-1))
                        if len(self.out_lines) > 0:
                            last = self.out_lines[-1].rstrip()
                        else:
                            last = u''
                    if self.index - 1 == self.import_index:
                        self.import_index -= len(
                            self.comments[u'above'][u'straight'].get(module, []))
                placed_module = self.place_module(module)
                if placed_module == u'':
                    print(u'WARNING: could not place module {0} of line {1} --'
                          u' Do you need to define a default section?'.format(module, line))
                self.imports[placed_module][import_type].add(module)
|
'@ivar s_size: search string size
@ivar s: search string
@ivar substring_i: index to longest matching substring
@ivar result: result of the lookup
@ivar method: method to use if substring matches'
| def __init__(self, s, substring_i, result, method=None):
| self.s_size = len(s)
self.s = s
self.substring_i = substring_i
self.result = result
self.method = method
|
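A hedged illustration of how stemmer tables use Among entries: substring_i links an entry to the index of the entry holding its longest matching substring (here entry 1 falls back to entry 0), and -1 means no such link; the suffixes and result codes are made up for illustration:

    a_0 = [
        Among(u's', -1, 1),    # bare suffix 's' -> result code 1
        Among(u'ies', 0, 2),   # 'ies' links back to entry 0 ('s')
    ]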
'Set the self.current string.'
| def set_current(self, value):
| self.current = value
self.cursor = 0
self.limit = len(self.current)
self.limit_backward = 0
self.bra = self.cursor
self.ket = self.limit
|
'Get the self.current string.'
| def get_current(self):
| return self.current
|
'find_among_b is for backwards processing. Same comments apply.'
| def find_among_b(self, v, v_size):
| i = 0
j = v_size
c = self.cursor
lb = self.limit_backward
common_i = 0
common_j = 0
first_key_inspected = False
while True:
    k = i + ((j - i) >> 1)
    diff = 0
    common = min(common_i, common_j)
    w = v[k]
    for i2 in range(w.s_size - 1 - common, -1, -1):
        if c - common == lb:
            diff = -1
            break
        diff = ord(self.current[c - 1 - common]) - ord(w.s[i2])
        if diff != 0:
            break
        common += 1
    if diff < 0:
        j = k
        common_j = common
    else:
        i = k
        common_i = common
    if j - i <= 1:
        if i > 0:
            break
        if j == i:
            break
        if first_key_inspected:
            break
        first_key_inspected = True
while True:
    w = v[i]
    if common_i >= w.s_size:
        self.cursor = c - w.s_size
        if w.method is None:
            return w.result
        method = getattr(self, w.method)
        res = method()
        self.cursor = c - w.s_size
        if res:
            return w.result
    i = w.substring_i
    if i < 0:
        return 0
return -1
|
'Replace the chars between c_bra and c_ket in self.current by the
chars in s.
@type c_bra: int
@type c_ket: int
@type s: string'
| def replace_s(self, c_bra, c_ket, s):
| adjustment = len(s) - (c_ket - c_bra)
self.current = self.current[0:c_bra] + s + self.current[c_ket:]
self.limit += adjustment
if self.cursor >= c_ket:
    self.cursor += adjustment
elif self.cursor > c_bra:
    self.cursor = c_bra
return adjustment
|
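A worked example of the bookkeeping above, removing a suffix from current = 'running' with c_bra=4, c_ket=7, s='':

    # adjustment = 0 - (7 - 4) = -3
    # current becomes 'runn' and limit shrinks by 3
    # a cursor at or past c_ket shifts left by 3;
    # one strictly inside (c_bra, c_ket) snaps back to c_bra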
'@type s: string'
| def slice_from(self, s):
| result = False
if self.slice_check():
    self.replace_s(self.bra, self.ket, s)
    result = True
return result
|
'@type c_bra: int
@type c_ket: int
@type s: string'
| def insert(self, c_bra, c_ket, s):
| adjustment = self.replace_s(c_bra, c_ket, s)
if c_bra <= self.bra:
    self.bra += adjustment
if c_bra <= self.ket:
    self.ket += adjustment
|
'Return the current slice (between self.bra and self.ket).
@type s: string'
| def slice_to(self, s):
| result = ''
if self.slice_check():
    result = self.current[self.bra:self.ket]
return result
|
'@type s: string'
| def assign_to(self, s):
| return self.current[0:self.limit]
|
'Return whether importation needs an as clause.'
| def _has_alias(self):
| return self.fullName.split('.')[-1] != self.name
|
'Generate a source statement equivalent to the import.'
| @property
def source_statement(self):
| if self._has_alias():
    return 'import %s as %s' % (self.fullName, self.name)
else:
    return 'import %s' % self.fullName
|
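A worked illustration of the alias rule: the import needs an `as` clause exactly when the binding name differs from the last dotted segment of the full name.

    # fullName = 'numpy', name = 'np'     -> _has_alias() is True
    #   source_statement == 'import numpy as np'
    # fullName = 'os.path', name = 'path' -> _has_alias() is False
    #   source_statement == 'import os.path'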
'Return import full name with alias.'
| def __str__(self):
| if self._has_alias():
    return self.fullName + ' as ' + self.name
else:
    return self.fullName
|
'Return import full name with alias.'
| def __str__(self):
| if self.real_name != self.name:
    return self.fullName + ' as ' + self.name
else:
    return self.fullName
|
'Return a generator for the assignments which have not been used.'
| def unusedAssignments(self):
| for name, binding in self.items():
    if (not binding.used and name not in self.globals and
            not self.usesLocals and isinstance(binding, Assignment)):
        yield name, binding
|
'Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred
because code later in the file might modify the global scope. When
`callable` is called, the scope at the time this is called will be
restored; however, it will contain any new bindings added to it.'
| def deferFunction(self, callable):
| self._deferredFunctions.append((callable, self.scopeStack[:], self.offset))
|
'Schedule an assignment handler to be called just after deferred
function handlers.'
| def deferAssignment(self, callable):
| self._deferredAssignments.append((callable, self.scopeStack[:], self.offset))
|
'Run the callables in C{deferred} using their associated scope stack.'
| def runDeferred(self, deferred):
| for handler, scope, offset in deferred:
    self.scopeStack = scope
    self.offset = offset
    handler()
|
'Look at scopes which have been fully examined and report names in them
which were imported but unused.'
| def checkDeadScopes(self):
| for scope in self.deadScopes:
    # imports in classes are public members
    if isinstance(scope, ClassScope):
        continue
    all_binding = scope.get('__all__')
    if all_binding and not isinstance(all_binding, ExportBinding):
        all_binding = None
    if all_binding:
        all_names = set(all_binding.names)
        undefined = all_names.difference(scope)
    else:
        all_names = undefined = []
    if undefined:
        if not scope.importStarred and os.path.basename(self.filename) != '__init__.py':
            # look for possible mistakes in the export list
            for name in undefined:
                self.report(messages.UndefinedExport, scope['__all__'].source, name)
        # mark all 'import *' as used by the undefined names in __all__
        if scope.importStarred:
            for binding in scope.values():
                if isinstance(binding, StarImportation):
                    binding.used = all_binding
    # look for import statements which were never used
    for value in scope.values():
        if isinstance(value, Importation):
            used = value.used or value.name in all_names
            if not used:
                messg = messages.UnusedImport
                self.report(messg, value.source, str(value))
            for node in value.redefined:
                if isinstance(self.getParent(node), ast.For):
                    messg = messages.ImportShadowedByLoopVar
                elif used:
                    continue
                else:
                    messg = messages.RedefinedWhileUnused
                self.report(messg, node, value.name, value.source)
|
'True if `lnode` and `rnode` are located on different forks of an if/try
statement.'
| def differentForks(self, lnode, rnode):
| ancestor = self.getCommonAncestor(lnode, rnode, self.root)
parts = getAlternatives(ancestor)
if parts:
    for items in parts:
        if (self.descendantOf(lnode, items, ancestor) ^
                self.descendantOf(rnode, items, ancestor)):
            return True
return False
|
'Called when a binding is altered.
- `node` is the statement responsible for the change
- `value` is the new value, a Binding instance'
| def addBinding(self, node, value):
| for scope in self.scopeStack[::-1]:
    if value.name in scope:
        break
existing = scope.get(value.name)
if existing and not self.differentForks(node, existing.source):
    parent_stmt = self.getParent(value.source)
    if isinstance(existing, Importation) and isinstance(parent_stmt, ast.For):
        self.report(messages.ImportShadowedByLoopVar,
                    node, value.name, existing.source)
    elif scope is self.scope:
        if (isinstance(parent_stmt, ast.comprehension) and
                not isinstance(self.getParent(existing.source),
                               (ast.For, ast.comprehension))):
            self.report(messages.RedefinedInListComp,
                        node, value.name, existing.source)
        elif not existing.used and value.redefines(existing):
            self.report(messages.RedefinedWhileUnused,
                        node, value.name, existing.source)
    elif isinstance(existing, Importation) and value.redefines(existing):
        existing.redefined.append(node)
if value.name in self.scope:
    # then we have a more recent binding
    value.used = self.scope[value.name].used
self.scope[value.name] = value
|
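The observable effect of this bookkeeping is what pyflakes reports on input like the following (the message names match the classes referenced above):

    # import os
    # os = 1            # RedefinedWhileUnused: 'os' rebound before use
    #
    # for fh in xs:     # had 'fh' previously been bound by an import,
    #     pass          # ImportShadowedByLoopVar would fire instead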
'Determine if the given node is a docstring, as long as it is at the
correct place in the node tree.'
| def isDocstring(self, node):
| return (isinstance(node, ast.Str) or (isinstance(node, ast.Expr) and isinstance(node.value, ast.Str)))
|
'Keep track of globals declarations.'
| def GLOBAL(self, node):
| global_scope_index = (1 if self._in_doctest() else 0)
global_scope = self.scopeStack[global_scope_index]
if (self.scope is not global_scope):
for node_name in node.names:
node_value = Assignment(node_name, node)
self.messages = [m for m in self.messages if ((not isinstance(m, messages.UndefinedName)) or (m.message_args[0] != node_name))]
global_scope.setdefault(node_name, node_value)
node_value.used = (global_scope, node)
for scope in self.scopeStack[(global_scope_index + 1):]:
scope[node_name] = node_value
|
'Handle occurrence of Name (which can be a load/store/delete access).'
| def NAME(self, node):
| if isinstance(node.ctx, (ast.Load, ast.AugLoad)):
    self.handleNodeLoad(node)
    if (node.id == 'locals' and isinstance(self.scope, FunctionScope) and
            isinstance(node.parent, ast.Call)):
        # we are doing a locals() call in the current scope
        self.scope.usesLocals = True
elif isinstance(node.ctx, (ast.Store, ast.AugStore)):
    self.handleNodeStore(node)
elif isinstance(node.ctx, ast.Del):
    self.handleNodeDelete(node)
else:
    raise RuntimeError('Got impossible expression context: %r' % (node.ctx,))
|
'Check names used in a class definition, including its decorators, base
classes, and the body of its definition. Additionally, add its name to
the current scope.'
| def CLASSDEF(self, node):
| for deco in node.decorator_list:
    self.handleNode(deco, node)
for baseNode in node.bases:
    self.handleNode(baseNode, node)
if not PY2:
    for keywordNode in node.keywords:
        self.handleNode(keywordNode, node)
self.pushScope(ClassScope)
if (self.withDoctest and not self._in_doctest() and
        not isinstance(self.scope, FunctionScope)):
    self.deferFunction(lambda: self.handleDoctests(node))
for stmt in node.body:
    self.handleNode(stmt, node)
self.popScope()
self.addBinding(node, ClassDefinition(node.name, node))
|
'Annotated assignments don't have annotations evaluated on function
scope, hence the custom implementation.
See: PEP 526.'
| def ANNASSIGN(self, node):
| if node.value:
    # only bind the target if the assignment has a value
    self.handleNode(node.target, node)
if not isinstance(self.scope, FunctionScope):
    self.handleNode(node.annotation, node)
if node.value:
    self.handleNode(node.value, node)
|
'Construct a L{Reporter}.
@param warningStream: A file-like object where warnings will be
written. The stream's C{write} method must accept unicode.
C{sys.stdout} is a good value.
@param errorStream: A file-like object where error output will be
written. The stream's C{write} method must accept unicode.
C{sys.stderr} is a good value.'
| def __init__(self, warningStream, errorStream):
| self._stdout = warningStream
self._stderr = errorStream
|
'An unexpected error occurred trying to process C{filename}.
@param filename: The path to a file that we could not process.
@type filename: C{unicode}
@param msg: A message explaining the problem.
@type msg: C{unicode}'
| def unexpectedError(self, filename, msg):
| self._stderr.write('%s: %s\n' % (filename, msg))
|
'There was a syntax error in C{filename}.
@param filename: The path to the file with the syntax error.
@type filename: C{unicode}
@param msg: An explanation of the syntax error.
@type msg: C{unicode}
@param lineno: The line number where the syntax error occurred.
@type lineno: C{int}
@param offset: The column on which the syntax error occurred, or None.
@type offset: C{int}
@param text: The source code containing the syntax error.
@type text: C{unicode}'
| def syntaxError(self, filename, msg, lineno, offset, text):
| line = text.splitlines()[-1]
if offset is not None:
    offset = offset - (len(text) - len(line))
    self._stderr.write('%s:%d:%d: %s\n' % (filename, lineno, offset + 1, msg))
else:
    self._stderr.write('%s:%d: %s\n' % (filename, lineno, msg))
self._stderr.write(line)
self._stderr.write('\n')
if offset is not None:
    self._stderr.write(re.sub(r'\S', ' ', line[:offset]) + '^\n')
|
'pyflakes found something wrong with the code.
@param message: A L{pyflakes.messages.Message}.'
| def flake(self, message):
| self._stdout.write(str(message))
self._stdout.write('\n')
|
'Construct a restructuring.
See class pydoc for more info about the arguments.'
| def __init__(self, project, pattern, goal, args=None, imports=None, wildcards=None):
| self.project = project
self.pattern = pattern
self.goal = goal
self.args = args
if self.args is None:
    self.args = {}
self.imports = imports
if self.imports is None:
    self.imports = []
self.wildcards = wildcards
self.template = similarfinder.CodeTemplate(self.goal)
|
'Get the changes needed by this restructuring.
`resources` can be a list of `rope.base.resources.File`s to
apply the restructuring on. If `None`, the restructuring will
be applied to all python files.
The `checks` argument has been deprecated. Use the `args` argument
of the constructor. The usage of::
    strchecks = {'obj1.type': 'mod.A', 'obj2': 'mod.B',
                 'obj3.object': 'mod.C'}
    checks = restructuring.make_checks(strchecks)
can be replaced with::
    args = {'obj1': 'type=mod.A', 'obj2': 'name=mod.B',
            'obj3': 'object=mod.C'}
where obj1, obj2 and obj3 are wildcard names that appear
in the restructuring pattern.'
| def get_changes(self, checks=None, imports=None, resources=None, task_handle=taskhandle.NullTaskHandle()):
| if checks is not None:
    warnings.warn('The use of checks parameter is deprecated; '
                  'use the args parameter of the constructor instead.',
                  DeprecationWarning, stacklevel=2)
    for name, value in checks.items():
        self.args[name] = similarfinder._pydefined_to_str(value)
if imports is not None:
    warnings.warn('The use of imports parameter is deprecated; '
                  'use imports parameter of the constructor, instead.',
                  DeprecationWarning, stacklevel=2)
    self.imports = imports
changes = change.ChangeSet('Restructuring <%s> to <%s>' %
                           (self.pattern, self.goal))
if resources is not None:
    files = [resource for resource in resources
             if libutils.is_python_file(self.project, resource)]
else:
    files = self.project.get_python_files()
job_set = task_handle.create_jobset('Collecting Changes', len(files))
for resource in files:
    job_set.started_job(resource.path)
    pymodule = self.project.get_pymodule(resource)
    finder = similarfinder.SimilarFinder(pymodule, wildcards=self.wildcards)
    matches = list(finder.get_matches(self.pattern, self.args))
    computer = self._compute_changes(matches, pymodule)
    result = computer.get_changed()
    if result is not None:
        imported_source = self._add_imports(resource, result, self.imports)
        changes.add_change(change.ChangeContents(resource, imported_source))
    job_set.finished_job()
return changes
|
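A hedged usage sketch of a restructuring, following rope's documented pattern/goal mini-language (the project path is hypothetical):

    from rope.base.project import Project
    from rope.refactor import restructure

    project = Project('/path/to/project')
    pattern = '${a}.f(${b})'
    goal = '${a}.g(${b})'
    restructuring = restructure.Restructure(project, pattern, goal)
    project.do(restructuring.get_changes())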
'Convert str-to-str dicts to str-to-PyObject dicts.
This function is here to ease writing a UI.'
| def make_checks(self, string_checks):
| checks = {}
for key, value in string_checks.items():
    is_pyname = not key.endswith('.object') and not key.endswith('.type')
    evaluated = self._evaluate(value, is_pyname=is_pyname)
    if evaluated is not None:
        checks[key] = evaluated
return checks
|
'Generate `Occurrence` instances.'
| def find_occurrences(self, resource=None, pymodule=None):
| tools = _OccurrenceToolsCreator(self.project, resource=resource,
                                pymodule=pymodule, docs=self.docs)
for offset in self._textual_finder.find_offsets(tools.source_code):
    occurrence = Occurrence(tools, offset)
    for filter in self.filters:
        result = filter(occurrence)
        if result is None:
            continue
        if result:
            yield occurrence
        break
|
'Get the changes this refactoring makes.
:parameters:
- `similar`: if `True`, similar expressions/statements are also
  replaced.
- `global_`: if `True`, the extracted method/variable will
  be global.'
| def get_changes(self, extracted_name, similar=False, global_=False):
| info = _ExtractInfo(self.project, self.resource, self.start_offset,
                    self.end_offset, extracted_name,
                    variable=(self.kind == 'variable'),
                    similar=similar, make_global=global_)
new_contents = _ExtractPerformer(info).extract()
changes = ChangeSet('Extract %s <%s>' % (self.kind, extracted_name))
changes.add_change(ChangeContents(self.resource, new_contents))
return changes
|
'Does the extracted piece contain a return statement?'
| @property
def returned(self):
| if self._returned is None:
    node = _parse_text(self.extracted)
    self._returned = usefunction._returns_last(node)
return self._returned
|
'Force a single import per statement.'
| def force_single_imports(self):
| for import_stmt in self.imports[:]:
    import_info = import_stmt.import_info
    if import_info.is_empty() or import_stmt.readonly:
        continue
    if len(import_info.names_and_aliases) > 1:
        for name_and_alias in import_info.names_and_aliases:
            if hasattr(import_info, 'module_name'):
                new_import = importinfo.FromImport(
                    import_info.module_name, import_info.level,
                    [name_and_alias])
            else:
                new_import = importinfo.NormalImport([name_and_alias])
            self.add_import(new_import)
        import_stmt.empty_import()
|
'Removes pyname when imported in ``from mod import x``.'
| def remove_pyname(self, pyname):
| visitor = actions.RemovePyNameVisitor(self.project, self.pymodule,
                                      pyname, self._current_folder())
for import_stmt in self.imports:
    import_stmt.accept(visitor)
|
'Get the imported resource.
Returns `None` if the module was not found.'
| def get_imported_resource(self, context):
| if self.level == 0:
    return context.project.find_module(self.module_name, folder=context.folder)
else:
    return context.project.find_relative_module(
        self.module_name, context.folder, self.level)
|
'Get the imported `PyModule`.
Raises `rope.base.exceptions.ModuleNotFoundError` if the module
could not be found.'
| def get_imported_module(self, context):
| if self.level == 0:
    return context.project.get_module(self.module_name, context.folder)
else:
    return context.project.get_relative_module(
        self.module_name, context.folder, self.level)
|
'The import statement for `resource`'
| def get_import(self, resource):
| module_name = libutils.modname(resource)
return NormalImport(((module_name, None),))
|
'The from import statement for `name` in `resource`.'
| def get_from_import(self, resource, name):
| module_name = libutils.modname(resource)
names = []
if isinstance(name, list):
    names = [(imported, None) for imported in name]
else:
    names = [(name, None)]
return FromImport(module_name, 0, tuple(names))
|
'Create a multiproject proxy for the main refactoring.
`projects` are the other projects.'
| def __init__(self, refactoring, projects, addpath=True):
| self.refactoring = refactoring
self.projects = projects
self.addpath = addpath
|
'Create the refactoring'
| def __call__(self, project, *args, **kwds):
| return _MultiRefactoring(self.refactoring, self.projects, self.addpath, project, *args, **kwds)
|
'Get the changes for each project, as a list of ``(project, changes)``
pairs.'
| def get_all_changes(self, *args, **kwds):
| result = []
for project, refactoring in zip(self.projects, self.refactorings):
    args, kwds = self._resources_for_args(project, args, kwds)
    result.append((project, refactoring.get_changes(*args, **kwds)))
return result
|
'Construct a SimilarFinder.'
| def __init__(self, pymodule, wildcards=None):
| self.source = pymodule.source_code
try:
    self.raw_finder = RawSimilarFinder(
        pymodule.source_code, pymodule.get_ast(), self._does_match)
except MismatchedTokenError:
    print('in file %s' % pymodule.resource.path)
    raise
self.pymodule = pymodule
if wildcards is None:
    self.wildcards = {}
    for wildcard in [rope.refactor.wildcards.DefaultWildcard(pymodule.pycore.project)]:
        self.wildcards[wildcard.get_name()] = wildcard
else:
    self.wildcards = wildcards
|
'Search for `code` in source and return a list of `Match`es.
`code` can contain wildcards. ``${name}`` matches normal
names and ``${?name}`` can match any expression. You can use
`Match.get_ast()` for getting the node that has matched a
given pattern.'
| def get_matches(self, code, start=0, end=None, skip=None):
| if end is None:
    end = len(self.source)
for match in self._get_matched_asts(code):
    match_start, match_end = match.get_region()
    if start <= match_start and match_end <= end:
        if skip is not None and skip[0] < match_end and skip[1] > match_start:
            continue
        yield match
|
'Searches for the given pattern in the body AST.
`body` is an AST node and `pattern` can be either an AST node or
a list of AST nodes.'
| def __init__(self, body, pattern, does_match):
| self.body = body
self.pattern = pattern
self.matches = None
self.ropevar = _RopeVariable()
self.matches_callback = does_match
|
'Return the children of `node` that are not `ast.expr_context` nodes.'
| def _get_children(self, node):
| children = ast.get_children(node)
return [child for child in children
        if not isinstance(child, ast.expr_context)]
|
'Return the ast node that has matched rope variables'
| def get_ast(self, name):
| return self.mapping.get(name, None)
|
'Get the changes this refactoring makes.
`factory_name` indicates the name of the factory function to
be added. If `global_factory` is `True` the factory will be
global, otherwise a static method is added to the class.
`resources` can be a list of `rope.base.resource.File`s that
this refactoring should be applied on; if `None`, all python
files in the project are searched.'
| def get_changes(self, factory_name, global_factory=False, resources=None, task_handle=taskhandle.NullTaskHandle()):
| if resources is None:
    resources = self.project.get_python_files()
changes = ChangeSet('Introduce factory method <%s>' % factory_name)
job_set = task_handle.create_jobset('Collecting Changes', len(resources))
self._change_module(resources, changes, factory_name, global_factory, job_set)
return changes
|
'Return the name of the class'
| def get_name(self):
| return self.old_name
|
'Get function arguments.
Return a list of ``(name, default)`` tuples for all but star
and double star arguments. For arguments that don't have a
default, `None` will be used.'
| def get_args(self):
| return self._definfo().args_with_defaults
|
'Get changes caused by this refactoring.
`changers` is a list of `_ArgumentChanger`s. If `in_hierarchy`
is `True` the changers are applied to all matching methods in
the class hierarchy.
`resources` can be a list of `rope.base.resource.File`s that
should be searched for occurrences; if `None`, all python files
in the project are searched.'
| def get_changes(self, changers, in_hierarchy=False, resources=None, task_handle=taskhandle.NullTaskHandle()):
| function_changer = _FunctionChangers(self.pyname.get_object(),
                                     self._definfo(), changers)
return self._change_calls(function_changer, in_hierarchy, resources,
                          task_handle)
|
'Construct an `ArgumentReorderer`
Note that the `new_order` is a list containing the new
position of parameters; not the position each parameter
is going to be moved to. (changed in ``0.5m4``)
For example changing ``f(a, b, c)`` to ``f(c, a, b)``
requires passing ``[2, 0, 1]`` and *not* ``[1, 2, 0]``.
The `autodef` (automatic default) argument, forces rope to use
it as a default if a default is needed after the change. That
happens when an argument without default is moved after
another that has a default value. Note that `autodef` should
be a string or `None`; the latter disables adding automatic
default.'
| def __init__(self, new_order, autodef=None):
| self.new_order = new_order
self.autodef = autodef
|
'Changes `children` and returns the new start.'
| def _handle_parens(self, children, start, formats):
| opens, closes = self._count_needed_parens(formats)
old_end = self.source.offset
new_end = None
for i in range(closes):
    new_end = self.source.consume(')')[1]
if new_end is not None:
    if self.children:
        children.append(self.source[old_end:new_end])
new_start = start
for i in range(opens):
    new_start = self.source.rfind_token('(', 0, new_start)
if new_start != start:
    if self.children:
        children.appendleft(self.source[new_start:start])
    start = new_start
return start
|
'Checks whether the consumed token is inside a comment.'
| def _good_token(self, token, offset, start=None):
| if start is None:
    start = self.offset
try:
    comment_index = self.source.rindex('#', start, offset)
except ValueError:
    return True
try:
    new_line_index = self.source.rindex('\n', start, offset)
except ValueError:
    return False
return comment_index < new_line_index
|
'If `offset` is None, the `resource` itself will be renamed.'
| def __init__(self, project, resource, offset=None):
| self.project = project
self.resource = resource
if offset is not None:
    self.old_name = worder.get_name_at(self.resource, offset)
    this_pymodule = self.project.get_pymodule(self.resource)
    self.old_instance, self.old_pyname = \
        evaluate.eval_location2(this_pymodule, offset)
    if self.old_pyname is None:
        raise exceptions.RefactoringError(
            'Rename refactoring should be performed'
            ' on resolvable python identifiers.')
else:
    if not resource.is_folder() and resource.name == '__init__.py':
        resource = resource.parent
    dummy_pymodule = libutils.get_string_module(self.project, '')
    self.old_instance = None
    self.old_pyname = pynames.ImportedModule(dummy_pymodule, resource=resource)
    if resource.is_folder():
        self.old_name = resource.name
    else:
        self.old_name = resource.name[:-3]
|
'Get the changes needed for this refactoring.
Parameters:
- `in_hierarchy`: when renaming a method, this keyword forces
  renaming all matching methods in the hierarchy
- `docs`: when `True`, rename refactoring will rename
  occurrences in comments and strings where the name is
  visible. Setting it will make renames faster, too.
- `unsure`: decides what to do about unsure occurrences.
  If `None`, they are ignored. Otherwise `unsure` is
  called with an instance of `occurrence.Occurrence` as
  parameter. If it returns `True`, the occurrence is
  considered to be a match.
- `resources` can be a list of `rope.base.resources.File`s to
  apply this refactoring on. If `None`, the refactoring
  will be applied to all python files.
- `in_file`: this argument has been deprecated; use
  `resources` instead.'
| def get_changes(self, new_name, in_file=None, in_hierarchy=False, unsure=None, docs=False, resources=None, task_handle=taskhandle.NullTaskHandle()):
| if unsure in (True, False):
    warnings.warn('unsure parameter should be a function that returns '
                  'True or False', DeprecationWarning, stacklevel=2)
    def unsure_func(value=unsure):
        return value
    unsure = unsure_func
if in_file is not None:
    warnings.warn('`in_file` argument has been deprecated; use `resources` '
                  'instead. ', DeprecationWarning, stacklevel=2)
    if in_file:
        resources = [self.resource]
if _is_local(self.old_pyname):
    resources = [self.resource]
if resources is None:
    resources = self.project.get_python_files()
changes = ChangeSet('Renaming <%s> to <%s>' % (self.old_name, new_name))
finder = occurrences.create_finder(
    self.project, self.old_name, self.old_pyname, unsure=unsure,
    docs=docs, instance=self.old_instance,
    in_hierarchy=in_hierarchy and self.is_method())
job_set = task_handle.create_jobset('Collecting Changes', len(resources))
for file_ in resources:
    job_set.started_job(file_.path)
    new_content = rename_in_module(finder, new_name, resource=file_)
    if new_content is not None:
        changes.add_change(ChangeContents(file_, new_content))
    job_set.finished_job()
if self._is_renaming_a_module():
    resource = self.old_pyname.get_object().get_resource()
    if self._is_allowed_to_move(resources, resource):
        self._rename_module(resource, new_name, changes)
return changes
|
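A hedged end-to-end sketch of the Rename refactoring (the path, offset, and names are hypothetical; the calls follow rope's documented usage):

    from rope.base.project import Project
    from rope.refactor.rename import Rename

    project = Project('/path/to/project')
    resource = project.get_resource('mod.py')
    changes = Rename(project, resource, offset=42).get_changes('new_name')
    project.do(changes)
    project.close()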
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.