Dataset columns and value statistics:

code: string, lengths 66 to 870k
docstring: string, lengths 19 to 26.7k
func_name: string, lengths 1 to 138
language: string, 1 distinct value
repo: string, lengths 7 to 68
path: string, lengths 5 to 324
url: string, lengths 46 to 389
license: string, 7 distinct values
def test_update_waste_free_multivariate_particles(self): """ Given resampled multivariate particles, when updating with waste free, they are joined by the result of iterating the MCMC chain to get a bigger set of particles. """ resampled_particles = np.ones((50, 3)) n_particles = 100 def normal_logdensity(x): return jnp.log( jax.scipy.stats.multivariate_normal.pdf( x, mean=np.zeros(3), cov=np.diag(np.ones(3)) ) ) def rmh_proposal_distribution(rng_key, position): return position + jax.random.normal(rng_key, (3,)) * 25.0 kernel = functools.partial( blackjax.rmh.build_kernel(), transition_generator=rmh_proposal_distribution ) init = blackjax.rmh.init update, _ = waste_free_smc(n_particles, 2)( init, normal_logdensity, kernel, n_particles ) updated_particles, infos = self.variant(update)( jax.random.split(jax.random.PRNGKey(10), 50), resampled_particles, {} ) assert updated_particles.shape == (n_particles, 3)
Given resampled multivariate particles, when updating with waste free, they are joined by the result of iterating the MCMC chain to get a bigger set of particles.
test_update_waste_free_multivariate_particles
python
blackjax-devs/blackjax
tests/smc/test_waste_free_smc.py
https://github.com/blackjax-devs/blackjax/blob/master/tests/smc/test_waste_free_smc.py
Apache-2.0
def detect_language(cls, code: str) -> str: """Scan the Mnemonic until the language becomes unambiguous, including as abbreviation prefixes. Unfortunately, there are valid words that are ambiguous between languages, which are complete words in one language and are prefixes in another: english: abandon ... about french: abandon ... aboutir If prefixes remain ambiguous, require exactly one language where word(s) match exactly. """ code = cls.normalize_string(code) possible = set(cls(lang) for lang in cls.list_languages()) words = set(code.split()) for word in words: # possible languages have candidate(s) starting with the word/prefix possible = set( p for p in possible if any(c.startswith(word) for c in p.wordlist) ) if not possible: raise ConfigurationError(f"Language unrecognized for {word!r}") if len(possible) == 1: return possible.pop().language # Multiple languages match: A prefix in many, but an exact match in one determines language. complete = set() for word in words: exact = set(p for p in possible if word in p.wordlist) if len(exact) == 1: complete.update(exact) if len(complete) == 1: return complete.pop().language raise ConfigurationError( f"Language ambiguous between {', '.join(p.language for p in possible)}" )
Scan the Mnemonic until the language becomes unambiguous, including as abbreviation prefixes. Unfortunately, there are valid words that are ambiguous between languages, which are complete words in one language and are prefixes in another: english: abandon ... about french: abandon ... aboutir If prefixes remain ambiguous, require exactly one language where word(s) match exactly.
detect_language
python
trezor/python-mnemonic
src/mnemonic/mnemonic.py
https://github.com/trezor/python-mnemonic/blob/master/src/mnemonic/mnemonic.py
MIT
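A minimal usage sketch for the detect_language classmethod above, assuming the mnemonic package is installed and that detect_language is exposed as a classmethod (as its cls-first signature suggests); the example words are the ones cited in the docstring:

from mnemonic import Mnemonic

# "about" is a complete English word but only a prefix of the French word
# "aboutir", so the exact-match tie-breaker resolves to English.
print(Mnemonic.detect_language("about"))  # -> "english"

# "abandon" is a complete word in both the English and French wordlists,
# so on its own it stays ambiguous and a ConfigurationError is raised.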
def generate(self, strength: int = 128) -> str: """ Create a new mnemonic using a random generated number as entropy. As defined in BIP39, the entropy must be a multiple of 32 bits, and its size must be between 128 and 256 bits. Therefore the possible values for `strength` are 128, 160, 192, 224 and 256. If not provided, the default entropy length will be set to 128 bits. The return is a list of words that encodes the generated entropy. :param strength: Number of bytes used as entropy :type strength: int :return: A randomly generated mnemonic :rtype: str """ if strength not in [128, 160, 192, 224, 256]: raise ValueError( "Invalid strength value. Allowed values are [128, 160, 192, 224, 256]." ) return self.to_mnemonic(secrets.token_bytes(strength // 8))
Create a new mnemonic using a random generated number as entropy. As defined in BIP39, the entropy must be a multiple of 32 bits, and its size must be between 128 and 256 bits. Therefore the possible values for `strength` are 128, 160, 192, 224 and 256. If not provided, the default entropy length will be set to 128 bits. The return is a list of words that encodes the generated entropy. :param strength: Number of bytes used as entropy :type strength: int :return: A randomly generated mnemonic :rtype: str
generate
python
trezor/python-mnemonic
src/mnemonic/mnemonic.py
https://github.com/trezor/python-mnemonic/blob/master/src/mnemonic/mnemonic.py
MIT
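A minimal usage sketch for generate, assuming the Mnemonic constructor takes a wordlist language name as in the package README. Note that strength is interpreted in bits (strength // 8 bytes of entropy are drawn), despite the ":param strength: Number of bytes" wording in the docstring:

from mnemonic import Mnemonic

m = Mnemonic("english")
words = m.generate(strength=256)   # 256 bits of entropy -> 24-word mnemonic
assert len(words.split()) == 24
print(m.generate())                # default strength=128 -> 12 words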
def open_with_encoding(filename, mode='r', encoding=None, limit_byte_check=-1): """Return opened file with a specific encoding.""" if not encoding: encoding = detect_encoding(filename, limit_byte_check=limit_byte_check) return io.open(filename, mode=mode, encoding=encoding, newline='') # Preserve line endings
Return opened file with a specific encoding.
open_with_encoding
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def extended_blank_lines(logical_line, blank_lines, blank_before, indent_level, previous_logical): """Check for missing blank lines after class declaration.""" if previous_logical.startswith(('def ', 'async def ')): if blank_lines and pycodestyle.DOCSTRING_REGEX.match(logical_line): yield (0, 'E303 too many blank lines ({})'.format(blank_lines)) elif pycodestyle.DOCSTRING_REGEX.match(previous_logical): # Missing blank line between class docstring and method declaration. if ( indent_level and not blank_lines and not blank_before and logical_line.startswith(('def ', 'async def ')) and '(self' in logical_line ): yield (0, 'E301 expected 1 blank line, found 0')
Check for missing blank lines after class declaration.
extended_blank_lines
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def continued_indentation(logical_line, tokens, indent_level, hang_closing, indent_char, noqa): """Override pycodestyle's function to provide indentation information.""" first_row = tokens[0][2][0] nrows = 1 + tokens[-1][2][0] - first_row if noqa or nrows == 1: return # indent_next tells us whether the next block is indented. Assuming # that it is indented by 4 spaces, then we should not allow 4-space # indents on the final continuation line. In turn, some other # indents are allowed to have an extra 4 spaces. indent_next = logical_line.endswith(':') row = depth = 0 valid_hangs = ( (DEFAULT_INDENT_SIZE,) if indent_char != '\t' else (DEFAULT_INDENT_SIZE, 2 * DEFAULT_INDENT_SIZE) ) # Remember how many brackets were opened on each line. parens = [0] * nrows # Relative indents of physical lines. rel_indent = [0] * nrows # For each depth, collect a list of opening rows. open_rows = [[0]] # For each depth, memorize the hanging indentation. hangs = [None] # Visual indents. indent_chances = {} last_indent = tokens[0][2] indent = [last_indent[1]] last_token_multiline = None line = None last_line = '' last_line_begins_with_multiline = False for token_type, text, start, end, line in tokens: newline = row < start[0] - first_row if newline: row = start[0] - first_row newline = (not last_token_multiline and token_type not in (tokenize.NL, tokenize.NEWLINE)) last_line_begins_with_multiline = last_token_multiline if newline: # This is the beginning of a continuation line. last_indent = start # Record the initial indent. rel_indent[row] = pycodestyle.expand_indent(line) - indent_level # Identify closing bracket. close_bracket = (token_type == tokenize.OP and text in ']})') # Is the indent relative to an opening bracket line? for open_row in reversed(open_rows[depth]): hang = rel_indent[row] - rel_indent[open_row] hanging_indent = hang in valid_hangs if hanging_indent: break if hangs[depth]: hanging_indent = (hang == hangs[depth]) visual_indent = (not close_bracket and hang > 0 and indent_chances.get(start[1])) if close_bracket and indent[depth]: # Closing bracket for visual indent. if start[1] != indent[depth]: yield (start, 'E124 {}'.format(indent[depth])) elif close_bracket and not hang: # closing bracket matches indentation of opening bracket's line if hang_closing: yield (start, 'E133 {}'.format(indent[depth])) elif indent[depth] and start[1] < indent[depth]: if visual_indent is not True: # Visual indent is broken. yield (start, 'E128 {}'.format(indent[depth])) elif (hanging_indent or (indent_next and rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)): # Hanging indent is verified. if close_bracket and not hang_closing: yield (start, 'E123 {}'.format(indent_level + rel_indent[open_row])) hangs[depth] = hang elif visual_indent is True: # Visual indent is verified. indent[depth] = start[1] elif visual_indent in (text, str): # Ignore token lined up with matching one from a previous line. pass else: one_indented = (indent_level + rel_indent[open_row] + DEFAULT_INDENT_SIZE) # Indent is broken. if hang <= 0: error = ('E122', one_indented) elif indent[depth]: error = ('E127', indent[depth]) elif not close_bracket and hangs[depth]: error = ('E131', one_indented) elif hang > DEFAULT_INDENT_SIZE: error = ('E126', one_indented) else: hangs[depth] = hang error = ('E121', one_indented) yield (start, '{} {}'.format(*error)) # Look for visual indenting. 
if ( parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT) and not indent[depth] ): indent[depth] = start[1] indent_chances[start[1]] = True # Deal with implicit string concatenation. elif (token_type in (tokenize.STRING, tokenize.COMMENT) or text in ('u', 'ur', 'b', 'br')): indent_chances[start[1]] = str # Special case for the "if" statement because len("if (") is equal to # 4. elif not indent_chances and not row and not depth and text == 'if': indent_chances[end[1] + 1] = True elif text == ':' and line[end[1]:].isspace(): open_rows[depth].append(row) # Keep track of bracket depth. if token_type == tokenize.OP: if text in '([{': depth += 1 indent.append(0) hangs.append(None) if len(open_rows) == depth: open_rows.append([]) open_rows[depth].append(row) parens[row] += 1 elif text in ')]}' and depth > 0: # Parent indents should not be more than this one. prev_indent = indent.pop() or last_indent[1] hangs.pop() for d in range(depth): if indent[d] > prev_indent: indent[d] = 0 for ind in list(indent_chances): if ind >= prev_indent: del indent_chances[ind] del open_rows[depth + 1:] depth -= 1 if depth: indent_chances[indent[depth]] = True for idx in range(row, -1, -1): if parens[idx]: parens[idx] -= 1 break assert len(indent) == depth + 1 if ( start[1] not in indent_chances and # This is for purposes of speeding up E121 (GitHub #90). not last_line.rstrip().endswith(',') ): # Allow to line up tokens. indent_chances[start[1]] = text last_token_multiline = (start[0] != end[0]) if last_token_multiline: rel_indent[end[0] - first_row] = rel_indent[row] last_line = line if ( indent_next and not last_line_begins_with_multiline and pycodestyle.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE ): pos = (start[0], indent[0] + 4) desired_indent = indent_level + 2 * DEFAULT_INDENT_SIZE if visual_indent: yield (pos, 'E129 {}'.format(desired_indent)) else: yield (pos, 'E125 {}'.format(desired_indent))
Override pycodestyle's function to provide indentation information.
continued_indentation
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _check_affected_anothers(self, result) -> bool: """Check if the fix affects the number of lines of another remark.""" line_index = result['line'] - 1 target = self.source[line_index] original_target = self.original_source[line_index] return target != original_target
Check if the fix affects the number of lines of another remark.
_check_affected_anothers
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix(self): """Return a version of the source code with PEP 8 violations fixed.""" pep8_options = { 'ignore': self.options.ignore, 'select': self.options.select, 'max_line_length': self.options.max_line_length, 'hang_closing': self.options.hang_closing, } results = _execute_pep8(pep8_options, self.source) if self.options.verbose: progress = {} for r in results: if r['id'] not in progress: progress[r['id']] = set() progress[r['id']].add(r['line']) print('---> {n} issue(s) to fix {progress}'.format( n=len(results), progress=progress), file=sys.stderr) if self.options.line_range: start, end = self.options.line_range results = [r for r in results if start <= r['line'] <= end] self._fix_source(filter_results(source=''.join(self.source), results=results, aggressive=self.options.aggressive)) if self.options.line_range: # If number of lines has changed then change line_range. count = sum(sline.count('\n') for sline in self.source[start - 1:end]) self.options.line_range[1] = start + count - 1 return ''.join(self.source)
Return a version of the source code with PEP 8 violations fixed.
fix
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
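The fix method above does the per-file work; a short sketch of the module-level entry point documented in the autopep8 README that ultimately drives it (the output shown is the expected result, assuming the default E225 and E702 fixes apply):

import autopep8

source = "x=1;  y = 2\n"
print(autopep8.fix_code(source, options={"aggressive": 1}))
# x = 1
# y = 2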
def _fix_reindent(self, result): """Fix a badly indented line. This is done by adding or removing from its initial indent only. """ num_indent_spaces = int(result['info'].split()[1]) line_index = result['line'] - 1 target = self.source[line_index] self.source[line_index] = ' ' * num_indent_spaces + target.lstrip()
Fix a badly indented line. This is done by adding or removing from its initial indent only.
_fix_reindent
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e125(self, result): """Fix indentation undistinguish from the next logical line.""" num_indent_spaces = int(result['info'].split()[1]) line_index = result['line'] - 1 target = self.source[line_index] spaces_to_add = num_indent_spaces - len(_get_indentation(target)) indent = len(_get_indentation(target)) modified_lines = [] while len(_get_indentation(self.source[line_index])) >= indent: self.source[line_index] = (' ' * spaces_to_add + self.source[line_index]) modified_lines.append(1 + line_index) # Line indexed at 1. line_index -= 1 return modified_lines
Fix indentation undistinguish from the next logical line.
fix_e125
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e131(self, result): """Fix indentation undistinguish from the next logical line.""" num_indent_spaces = int(result['info'].split()[1]) line_index = result['line'] - 1 target = self.source[line_index] spaces_to_add = num_indent_spaces - len(_get_indentation(target)) indent_length = len(_get_indentation(target)) spaces_to_add = num_indent_spaces - indent_length if num_indent_spaces == 0 and indent_length == 0: spaces_to_add = 4 if spaces_to_add >= 0: self.source[line_index] = (' ' * spaces_to_add + self.source[line_index]) else: offset = abs(spaces_to_add) self.source[line_index] = self.source[line_index][offset:]
Fix indentation undistinguish from the next logical line.
fix_e131
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e262(self, result): """Fix spacing after inline comment hash.""" target = self.source[result['line'] - 1] offset = result['column'] code = target[:offset].rstrip(' \t#') comment = target[offset:].lstrip(' \t#') fixed = code + (' # ' + comment if comment.strip() else '\n') self.source[result['line'] - 1] = fixed
Fix spacing after inline comment hash.
fix_e262
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e265(self, result): """Fix spacing after block comment hash.""" target = self.source[result['line'] - 1] indent = _get_indentation(target) line = target.lstrip(' \t') pos = next((index for index, c in enumerate(line) if c != '#')) hashes = line[:pos] comment = line[pos:].lstrip(' \t') # Ignore special comments, even in the middle of the file. if comment.startswith('!'): return fixed = indent + hashes + (' ' + comment if comment.strip() else '\n') self.source[result['line'] - 1] = fixed
Fix spacing after block comment hash.
fix_e265
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e266(self, result): """Fix too many block comment hashes.""" target = self.source[result['line'] - 1] # Leave stylistic outlined blocks alone. if target.strip().endswith('#'): return indentation = _get_indentation(target) fixed = indentation + '# ' + target.lstrip('# \t') self.source[result['line'] - 1] = fixed
Fix too many block comment hashes.
fix_e266
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e304(self, result): """Remove blank line following function decorator.""" line = result['line'] - 2 if not self.source[line].strip(): self.source[line] = ''
Remove blank line following function decorator.
fix_e304
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e305(self, result): """Add missing 2 blank lines after end of function or class.""" add_delete_linenum = 2 - int(result['info'].split()[-1]) cnt = 0 offset = result['line'] - 2 modified_lines = [] if add_delete_linenum < 0: # delete cr add_delete_linenum = abs(add_delete_linenum) while cnt < add_delete_linenum and offset >= 0: if not self.source[offset].strip(): self.source[offset] = '' modified_lines.append(1 + offset) # Line indexed at 1 cnt += 1 offset -= 1 else: # add cr cr = '\n' # check comment line while True: if offset < 0: break line = self.source[offset].lstrip() if not line: break if line[0] != '#': break offset -= 1 offset += 1 self.source[offset] = cr + self.source[offset] modified_lines.append(1 + offset) # Line indexed at 1. return modified_lines
Add missing 2 blank lines after end of function or class.
fix_e305
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_long_line_logically(self, result, logical): """Try to make lines fit within --max-line-length characters.""" if ( not logical or len(logical[2]) == 1 or self.source[result['line'] - 1].lstrip().startswith('#') ): return self.fix_long_line_physically(result) start_line_index = logical[0][0] end_line_index = logical[1][0] logical_lines = logical[2] previous_line = get_item(self.source, start_line_index - 1, default='') next_line = get_item(self.source, end_line_index + 1, default='') single_line = join_logical_line(''.join(logical_lines)) try: fixed = self.fix_long_line( target=single_line, previous_line=previous_line, next_line=next_line, original=''.join(logical_lines)) except (SyntaxError, tokenize.TokenError): return self.fix_long_line_physically(result) if fixed: for line_index in range(start_line_index, end_line_index + 1): self.source[line_index] = '' self.source[start_line_index] = fixed return range(start_line_index + 1, end_line_index + 1) return []
Try to make lines fit within --max-line-length characters.
fix_long_line_logically
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_long_line_physically(self, result): """Try to make lines fit within --max-line-length characters.""" line_index = result['line'] - 1 target = self.source[line_index] previous_line = get_item(self.source, line_index - 1, default='') next_line = get_item(self.source, line_index + 1, default='') try: fixed = self.fix_long_line( target=target, previous_line=previous_line, next_line=next_line, original=target) except (SyntaxError, tokenize.TokenError): return [] if fixed: self.source[line_index] = fixed return [line_index + 1] return []
Try to make lines fit within --max-line-length characters.
fix_long_line_physically
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e701(self, result): """Put colon-separated compound statement on separate lines.""" line_index = result['line'] - 1 target = self.source[line_index] c = result['column'] fixed_source = (target[:c] + '\n' + _get_indentation(target) + self.indent_word + target[c:].lstrip('\n\r \t\\')) self.source[result['line'] - 1] = fixed_source return [result['line'], result['line'] + 1]
Put colon-separated compound statement on separate lines.
fix_e701
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e702(self, result, logical): """Put semicolon-separated compound statement on separate lines.""" if not logical: return [] # pragma: no cover logical_lines = logical[2] # Avoid applying this when indented. # https://docs.python.org/reference/compound_stmts.html for line in logical_lines: if ( result['id'] == 'E702' and ':' in line and pycodestyle.STARTSWITH_INDENT_STATEMENT_REGEX.match(line) ): if self.options.verbose: print( '---> avoid fixing {error} with ' 'other compound statements'.format(error=result['id']), file=sys.stderr ) return [] line_index = result['line'] - 1 target = self.source[line_index] if target.rstrip().endswith('\\'): # Normalize '1; \\\n2' into '1; 2'. self.source[line_index] = target.rstrip('\n \r\t\\') self.source[line_index + 1] = self.source[line_index + 1].lstrip() return [line_index + 1, line_index + 2] if target.rstrip().endswith(';'): self.source[line_index] = target.rstrip('\n \r\t;') + '\n' return [line_index + 1] offset = result['column'] - 1 first = target[:offset].rstrip(';').rstrip() second = (_get_indentation(logical_lines[0]) + target[offset:].lstrip(';').lstrip()) # Find inline comment. inline_comment = None if target[offset:].lstrip(';').lstrip()[:2] == '# ': inline_comment = target[offset:].lstrip(';') if inline_comment: self.source[line_index] = first + inline_comment else: self.source[line_index] = first + '\n' + second return [line_index + 1]
Put semicolon-separated compound statement on separate lines.
fix_e702
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e704(self, result): """Fix multiple statements on one line def""" (line_index, _, target) = get_index_offset_contents(result, self.source) match = STARTSWITH_DEF_REGEX.match(target) if match: self.source[line_index] = '{}\n{}{}'.format( match.group(0), _get_indentation(target) + self.indent_word, target[match.end(0):].lstrip())
Fix multiple statements on one line def
fix_e704
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e712(self, result): """Fix (trivial case of) comparison with boolean.""" (line_index, offset, target) = get_index_offset_contents(result, self.source) # Handle very easy "not" special cases. if re.match(r'^\s*if [\w."\'\[\]]+ == False:$', target): self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) == False:', r'if not \1:', target, count=1) elif re.match(r'^\s*if [\w."\'\[\]]+ != True:$', target): self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) != True:', r'if not \1:', target, count=1) else: right_offset = offset + 2 if right_offset >= len(target): return [] left = target[:offset].rstrip() center = target[offset:right_offset] right = target[right_offset:].lstrip() # Handle simple cases only. new_right = None if center.strip() == '==': if re.match(r'\bTrue\b', right): new_right = re.sub(r'\bTrue\b *', '', right, count=1) elif center.strip() == '!=': if re.match(r'\bFalse\b', right): new_right = re.sub(r'\bFalse\b *', '', right, count=1) if new_right is None: return [] if new_right[0].isalnum(): new_right = ' ' + new_right self.source[line_index] = left + new_right
Fix (trivial case of) comparison with boolean.
fix_e712
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e713(self, result): """Fix (trivial case of) non-membership check.""" (line_index, offset, target) = get_index_offset_contents(result, self.source) # to convert once 'not in' -> 'in' before_target = target[:offset] target = target[offset:] match_notin = COMPARE_NEGATIVE_REGEX_THROUGH.search(target) notin_pos_start, notin_pos_end = 0, 0 if match_notin: notin_pos_start = match_notin.start(1) notin_pos_end = match_notin.end() target = '{}{} {}'.format( target[:notin_pos_start], 'in', target[notin_pos_end:]) # fix 'not in' match = COMPARE_NEGATIVE_REGEX.search(target) if match: if match.group(3) == 'in': pos_start = match.start(1) new_target = '{5}{0}{1} {2} {3} {4}'.format( target[:pos_start], match.group(2), match.group(1), match.group(3), target[match.end():], before_target) if match_notin: # revert 'in' -> 'not in' pos_start = notin_pos_start + offset pos_end = notin_pos_end + offset - 4 # len('not ') new_target = '{}{} {}'.format( new_target[:pos_start], 'not in', new_target[pos_end:]) self.source[line_index] = new_target
Fix (trivial case of) non-membership check.
fix_e713
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e714(self, result): """Fix object identity should be 'is not' case.""" (line_index, offset, target) = get_index_offset_contents(result, self.source) # to convert once 'is not' -> 'is' before_target = target[:offset] target = target[offset:] match_isnot = COMPARE_NEGATIVE_REGEX_THROUGH.search(target) isnot_pos_start, isnot_pos_end = 0, 0 if match_isnot: isnot_pos_start = match_isnot.start(1) isnot_pos_end = match_isnot.end() target = '{}{} {}'.format( target[:isnot_pos_start], 'in', target[isnot_pos_end:]) match = COMPARE_NEGATIVE_REGEX.search(target) if match: if match.group(3).startswith('is'): pos_start = match.start(1) new_target = '{5}{0}{1} {2} {3} {4}'.format( target[:pos_start], match.group(2), match.group(3), match.group(1), target[match.end():], before_target) if match_isnot: # revert 'is' -> 'is not' pos_start = isnot_pos_start + offset pos_end = isnot_pos_end + offset - 4 # len('not ') new_target = '{}{} {}'.format( new_target[:pos_start], 'is not', new_target[pos_end:]) self.source[line_index] = new_target
Fix object identity should be 'is not' case.
fix_e714
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_e731(self, result): """Fix do not assign a lambda expression check.""" (line_index, _, target) = get_index_offset_contents(result, self.source) match = LAMBDA_REGEX.search(target) if match: end = match.end() self.source[line_index] = '{}def {}({}): return {}'.format( target[:match.start(0)], match.group(1), match.group(2), target[end:].lstrip())
Fix do not assign a lambda expression check.
fix_e731
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def get_module_imports_on_top_of_file(source, import_line_index): """return import or from keyword position example: > 0: import sys 1: import os 2: 3: def function(): """ def is_string_literal(line): if line[0] in 'uUbB': line = line[1:] if line and line[0] in 'rR': line = line[1:] return line and (line[0] == '"' or line[0] == "'") def is_future_import(line): nodes = ast.parse(line) for n in nodes.body: if isinstance(n, ast.ImportFrom) and n.module == '__future__': return True return False def has_future_import(source): offset = 0 line = '' for _, next_line in source: for line_part in next_line.strip().splitlines(True): line = line + line_part try: return is_future_import(line), offset except SyntaxError: continue offset += 1 return False, offset allowed_try_keywords = ('try', 'except', 'else', 'finally') in_docstring = False docstring_kind = '"""' source_stream = iter(enumerate(source)) for cnt, line in source_stream: if not in_docstring: m = DOCSTRING_START_REGEX.match(line.lstrip()) if m is not None: in_docstring = True docstring_kind = m.group('kind') remain = line[m.end(): m.endpos].rstrip() if remain[-3:] == docstring_kind: # one line doc in_docstring = False continue if in_docstring: if line.rstrip()[-3:] == docstring_kind: in_docstring = False continue if not line.rstrip(): continue elif line.startswith('#'): continue if line.startswith('import '): if cnt == import_line_index: continue return cnt elif line.startswith('from '): if cnt == import_line_index: continue hit, offset = has_future_import( itertools.chain([(cnt, line)], source_stream) ) if hit: # move to the back return cnt + offset + 1 return cnt elif pycodestyle.DUNDER_REGEX.match(line): return cnt elif any(line.startswith(kw) for kw in allowed_try_keywords): continue elif is_string_literal(line): return cnt else: return cnt return 0
return import or from keyword position example: > 0: import sys 1: import os 2: 3: def function():
get_module_imports_on_top_of_file
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def get_fixed_long_line(target, previous_line, original, indent_word=' ', max_line_length=79, aggressive=0, experimental=False, verbose=False): """Break up long line and return result. Do this by generating multiple reformatted candidates and then ranking the candidates to heuristically select the best option. """ indent = _get_indentation(target) source = target[len(indent):] assert source.lstrip() == source assert not target.lstrip().startswith('#') # Check for partial multiline. tokens = list(generate_tokens(source)) candidates = shorten_line( tokens, source, indent, indent_word, max_line_length, aggressive=aggressive, experimental=experimental, previous_line=previous_line) # Also sort alphabetically as a tie breaker (for determinism). candidates = sorted( sorted(set(candidates).union([target, original])), key=lambda x: line_shortening_rank( x, indent_word, max_line_length, experimental=experimental)) if verbose >= 4: print(('-' * 79 + '\n').join([''] + candidates + ['']), file=wrap_output(sys.stderr, 'utf-8')) if candidates: best_candidate = candidates[0] # Don't allow things to get longer. if longest_line_length(best_candidate) > longest_line_length(original): return None return best_candidate
Break up long line and return result. Do this by generating multiple reformatted candidates and then ranking the candidates to heuristically select the best option.
get_fixed_long_line
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def join_logical_line(logical_line): """Return single line based on logical line input.""" indentation = _get_indentation(logical_line) return indentation + untokenize_without_newlines( generate_tokens(logical_line.lstrip())) + '\n'
Return single line based on logical line input.
join_logical_line
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def untokenize_without_newlines(tokens): """Return source code based on tokens.""" text = '' last_row = 0 last_column = -1 for t in tokens: token_string = t[1] (start_row, start_column) = t[2] (end_row, end_column) = t[3] if start_row > last_row: last_column = 0 if ( (start_column > last_column or token_string == '\n') and not text.endswith(' ') ): text += ' ' if token_string != '\n': text += token_string last_row = end_row last_column = end_column return text.rstrip()
Return source code based on tokens.
untokenize_without_newlines
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _get_logical(source_lines, result, logical_start, logical_end): """Return the logical line corresponding to the result. Assumes input is already E702-clean. """ row = result['line'] - 1 col = result['column'] - 1 ls = None le = None for i in range(0, len(logical_start), 1): assert logical_end x = logical_end[i] if x[0] > row or (x[0] == row and x[1] > col): le = x ls = logical_start[i] break if ls is None: return None original = source_lines[ls[0]:le[0] + 1] return ls, le, original
Return the logical line corresponding to the result. Assumes input is already E702-clean.
_get_logical
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def code_almost_equal(a, b): """Return True if code is similar. Ignore whitespace when comparing specific line. """ split_a = split_and_strip_non_empty_lines(a) split_b = split_and_strip_non_empty_lines(b) if len(split_a) != len(split_b): return False for (index, _) in enumerate(split_a): if ''.join(split_a[index].split()) != ''.join(split_b[index].split()): return False return True
Return True if code is similar. Ignore whitespace when comparing specific line.
code_almost_equal
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
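A comment-only illustration of what code_almost_equal treats as equal (the helper is internal to autopep8.py, so this is only a sketch of its semantics):

# Same non-empty logical lines, different intra-line whitespace -> True
code_almost_equal("x = [1,\n      2]\n", "x = [1,\n  2]\n")

# Different number of non-empty lines -> False
code_almost_equal("x = [1, 2]\n", "x = [1,\n  2]\n")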
def find_newline(source): """Return type of newline used in source. Input is a list of lines. """ assert not isinstance(source, str) counter = collections.defaultdict(int) for line in source: if line.endswith(CRLF): counter[CRLF] += 1 elif line.endswith(CR): counter[CR] += 1 elif line.endswith(LF): counter[LF] += 1 return (sorted(counter, key=counter.get, reverse=True) or [LF])[0]
Return type of newline used in source. Input is a list of lines.
find_newline
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def get_diff_text(old, new, filename): """Return text of unified diff between old and new.""" newline = '\n' diff = difflib.unified_diff( old, new, 'original/' + filename, 'fixed/' + filename, lineterm=newline) text = '' for line in diff: text += line # Work around missing newline (http://bugs.python.org/issue2142). if text and not line.endswith(newline): text += newline + r'\ No newline at end of file' + newline return text
Return text of unified diff between old and new.
get_diff_text
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _priority_key(pep8_result): """Key for sorting PEP8 results. Global fixes should be done first. This is important for things like indentation. """ priority = [ # Fix multiline colon-based before semicolon based. 'e701', # Break multiline statements early. 'e702', # Things that make lines longer. 'e225', 'e231', # Remove extraneous whitespace before breaking lines. 'e201', # Shorten whitespace in comment before resorting to wrapping. 'e262' ] middle_index = 10000 lowest_priority = [ # We need to shorten lines last since the logical fixer can get in a # loop, which causes us to exit early. 'e501', ] key = pep8_result['id'].lower() try: return priority.index(key) except ValueError: try: return middle_index + lowest_priority.index(key) + 1 except ValueError: return middle_index
Key for sorting PEP8 results. Global fixes should be done first. This is important for things like indentation.
_priority_key
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def shorten_line(tokens, source, indentation, indent_word, max_line_length, aggressive=0, experimental=False, previous_line=''): """Separate line at OPERATOR. Multiple candidates will be yielded. """ for candidate in _shorten_line(tokens=tokens, source=source, indentation=indentation, indent_word=indent_word, aggressive=aggressive, previous_line=previous_line): yield candidate if aggressive: for key_token_strings in SHORTEN_OPERATOR_GROUPS: shortened = _shorten_line_at_tokens( tokens=tokens, source=source, indentation=indentation, indent_word=indent_word, key_token_strings=key_token_strings, aggressive=aggressive) if shortened is not None and shortened != source: yield shortened if experimental: for shortened in _shorten_line_at_tokens_new( tokens=tokens, source=source, indentation=indentation, max_line_length=max_line_length): yield shortened
Separate line at OPERATOR. Multiple candidates will be yielded.
shorten_line
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _shorten_line(tokens, source, indentation, indent_word, aggressive=0, previous_line=''): """Separate line at OPERATOR. The input is expected to be free of newlines except for inside multiline strings and at the end. Multiple candidates will be yielded. """ in_string = False for (token_type, token_string, start_offset, end_offset) in token_offsets(tokens): if IS_SUPPORT_TOKEN_FSTRING: if token_type == tokenize.FSTRING_START: in_string = True elif token_type == tokenize.FSTRING_END: in_string = False if in_string: continue if ( token_type == tokenize.COMMENT and not is_probably_part_of_multiline(previous_line) and not is_probably_part_of_multiline(source) and not source[start_offset + 1:].strip().lower().startswith( ('noqa', 'pragma:', 'pylint:')) ): # Move inline comments to previous line. first = source[:start_offset] second = source[start_offset:] yield (indentation + second.strip() + '\n' + indentation + first.strip() + '\n') elif token_type == token.OP and token_string != '=': # Don't break on '=' after keyword as this violates PEP 8. assert token_type != token.INDENT first = source[:end_offset] second_indent = indentation if (first.rstrip().endswith('(') and source[end_offset:].lstrip().startswith(')')): pass elif first.rstrip().endswith('('): second_indent += indent_word elif '(' in first: second_indent += ' ' * (1 + first.find('(')) else: second_indent += indent_word second = (second_indent + source[end_offset:].lstrip()) if ( not second.strip() or second.lstrip().startswith('#') ): continue # Do not begin a line with a comma if second.lstrip().startswith(','): continue # Do end a line with a dot if first.rstrip().endswith('.'): continue if token_string in '+-*/': fixed = first + ' \\' + '\n' + second else: fixed = first + '\n' + second # Only fix if syntax is okay. if check_syntax(normalize_multiline(fixed) if aggressive else fixed): yield indentation + fixed
Separate line at OPERATOR. The input is expected to be free of newlines except for inside multiline strings and at the end. Multiple candidates will be yielded.
_shorten_line
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def current_size(self): """The size of the current line minus the indentation.""" size = 0 for item in reversed(self._lines): size += item.size if isinstance(item, self._LineBreak): break return size
The size of the current line minus the indentation.
current_size
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _add_item(self, item, indent_amt): """Add an item to the line. Reflow the line to get the best formatting after the item is inserted. The bracket depth indicates if the item is being inserted inside of a container or not. """ if item.is_fstring_start: self._in_fstring = True elif self._prev_item and self._prev_item.is_fstring_end: self._in_fstring = False if self._prev_item and self._prev_item.is_string and item.is_string: # Place consecutive string literals on separate lines. self._lines.append(self._LineBreak()) self._lines.append(self._Indent(indent_amt)) item_text = str(item) if self._lines and self._bracket_depth: # Adding the item into a container. self._prevent_default_initializer_splitting(item, indent_amt) if item_text in '.,)]}': self._split_after_delimiter(item, indent_amt) elif self._lines and not self.line_empty(): # Adding the item outside of a container. if self.fits_on_current_line(len(item_text)): self._enforce_space(item) else: # Line break for the new item. self._lines.append(self._LineBreak()) self._lines.append(self._Indent(indent_amt)) self._lines.append(item) self._prev_item, self._prev_prev_item = item, self._prev_item if item_text in '([{' and not self._in_fstring: self._bracket_depth += 1 elif item_text in '}])' and not self._in_fstring: self._bracket_depth -= 1 assert self._bracket_depth >= 0
Add an item to the line. Reflow the line to get the best formatting after the item is inserted. The bracket depth indicates if the item is being inserted inside of a container or not.
_add_item
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _prevent_default_initializer_splitting(self, item, indent_amt): """Prevent splitting between a default initializer. When there is a default initializer, it's best to keep it all on the same line. It's nicer and more readable, even if it goes over the maximum allowable line length. This goes back along the current line to determine if we have a default initializer, and, if so, to remove extraneous whitespaces and add a line break/indent before it if needed. """ if str(item) == '=': # This is the assignment in the initializer. Just remove spaces for # now. self._delete_whitespace() return if (not self._prev_item or not self._prev_prev_item or str(self._prev_item) != '='): return self._delete_whitespace() prev_prev_index = self._lines.index(self._prev_prev_item) if ( isinstance(self._lines[prev_prev_index - 1], self._Indent) or self.fits_on_current_line(item.size + 1) ): # The default initializer is already the only item on this line. # Don't insert a newline here. return # Replace the space with a newline/indent combo. if isinstance(self._lines[prev_prev_index - 1], self._Space): del self._lines[prev_prev_index - 1] self.add_line_break_at(self._lines.index(self._prev_prev_item), indent_amt)
Prevent splitting between a default initializer. When there is a default initializer, it's best to keep it all on the same line. It's nicer and more readable, even if it goes over the maximum allowable line length. This goes back along the current line to determine if we have a default initializer, and, if so, to remove extraneous whitespaces and add a line break/indent before it if needed.
_prevent_default_initializer_splitting
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _enforce_space(self, item): """Enforce a space in certain situations. There are cases where we will want a space where normally we wouldn't put one. This just enforces the addition of a space. """ if isinstance(self._lines[-1], (self._Space, self._LineBreak, self._Indent)): return if not self._prev_item: return item_text = str(item) prev_text = str(self._prev_item) # Prefer a space around a '.' in an import statement, and between the # 'import' and '('. if ( (item_text == '.' and prev_text == 'from') or (item_text == 'import' and prev_text == '.') or (item_text == '(' and prev_text == 'import') ): self._lines.append(self._Space())
Enforce a space in certain situations. There are cases where we will want a space where normally we wouldn't put one. This just enforces the addition of a space.
_enforce_space
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _delete_whitespace(self): """Delete all whitespace from the end of the line.""" while isinstance(self._lines[-1], (self._Space, self._LineBreak, self._Indent)): del self._lines[-1]
Delete all whitespace from the end of the line.
_delete_whitespace
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _get_extent(self, index): """The extent of the full element. E.g., the length of a function call or keyword. """ extent = 0 prev_item = get_item(self._items, index - 1) seen_dot = prev_item and str(prev_item) == '.' while index < len(self._items): item = get_item(self._items, index) index += 1 if isinstance(item, (ListComprehension, IfExpression)): break if isinstance(item, Container): if prev_item and prev_item.is_name: if seen_dot: extent += 1 else: extent += item.size prev_item = item continue elif (str(item) not in ['.', '=', ':', 'not'] and not item.is_name and not item.is_string): break if str(item) == '.': seen_dot = True extent += item.size prev_item = item return extent
The extent of the full element. E.g., the length of a function call or keyword.
_get_extent
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _parse_container(tokens, index, for_or_if=None): """Parse a high-level container, such as a list, tuple, etc.""" # Store the opening bracket. items = [Atom(Token(*tokens[index]))] index += 1 num_tokens = len(tokens) while index < num_tokens: tok = Token(*tokens[index]) if tok.token_string in ',)]}': # First check if we're at the end of a list comprehension or # if-expression. Don't add the ending token as part of the list # comprehension or if-expression, because they aren't part of those # constructs. if for_or_if == 'for': return (ListComprehension(items), index - 1) elif for_or_if == 'if': return (IfExpression(items), index - 1) # We've reached the end of a container. items.append(Atom(tok)) # If not, then we are at the end of a container. if tok.token_string == ')': # The end of a tuple. return (Tuple(items), index) elif tok.token_string == ']': # The end of a list. return (List(items), index) elif tok.token_string == '}': # The end of a dictionary or set. return (DictOrSet(items), index) elif tok.token_string in '([{': # A sub-container is being defined. (container, index) = _parse_container(tokens, index) items.append(container) elif tok.token_string == 'for': (container, index) = _parse_container(tokens, index, 'for') items.append(container) elif tok.token_string == 'if': (container, index) = _parse_container(tokens, index, 'if') items.append(container) else: items.append(Atom(tok)) index += 1 return (None, None)
Parse a high-level container, such as a list, tuple, etc.
_parse_container
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _parse_tokens(tokens): """Parse the tokens. This converts the tokens into a form where we can manipulate them more easily. """ index = 0 parsed_tokens = [] num_tokens = len(tokens) while index < num_tokens: tok = Token(*tokens[index]) assert tok.token_type != token.INDENT if tok.token_type == tokenize.NEWLINE: # There's only one newline and it's at the end. break if tok.token_string in '([{': (container, index) = _parse_container(tokens, index) if not container: return None parsed_tokens.append(container) else: parsed_tokens.append(Atom(tok)) index += 1 return parsed_tokens
Parse the tokens. This converts the tokens into a form where we can manipulate them more easily.
_parse_tokens
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _reflow_lines(parsed_tokens, indentation, max_line_length, start_on_prefix_line): """Reflow the lines so that it looks nice.""" if str(parsed_tokens[0]) == 'def': # A function definition gets indented a bit more. continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE else: continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE break_after_open_bracket = not start_on_prefix_line lines = ReformattedLines(max_line_length) lines.add_indent(len(indentation.lstrip('\r\n'))) if not start_on_prefix_line: # If splitting after the opening bracket will cause the first element # to be aligned weirdly, don't try it. first_token = get_item(parsed_tokens, 0) second_token = get_item(parsed_tokens, 1) if ( first_token and second_token and str(second_token)[0] == '(' and len(indentation) + len(first_token) + 1 == len(continued_indent) ): return None for item in parsed_tokens: lines.add_space_if_needed(str(item), equal=True) save_continued_indent = continued_indent if start_on_prefix_line and isinstance(item, Container): start_on_prefix_line = False continued_indent = ' ' * (lines.current_size() + 1) item.reflow(lines, continued_indent, break_after_open_bracket) continued_indent = save_continued_indent return lines.emit()
Reflow the lines so that it looks nice.
_reflow_lines
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _shorten_line_at_tokens_new(tokens, source, indentation, max_line_length): """Shorten the line taking its length into account. The input is expected to be free of newlines except for inside multiline strings and at the end. """ # Yield the original source so to see if it's a better choice than the # shortened candidate lines we generate here. yield indentation + source parsed_tokens = _parse_tokens(tokens) if parsed_tokens: # Perform two reflows. The first one starts on the same line as the # prefix. The second starts on the line after the prefix. fixed = _reflow_lines(parsed_tokens, indentation, max_line_length, start_on_prefix_line=True) if fixed and check_syntax(normalize_multiline(fixed.lstrip())): yield fixed fixed = _reflow_lines(parsed_tokens, indentation, max_line_length, start_on_prefix_line=False) if fixed and check_syntax(normalize_multiline(fixed.lstrip())): yield fixed
Shorten the line taking its length into account. The input is expected to be free of newlines except for inside multiline strings and at the end.
_shorten_line_at_tokens_new
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _shorten_line_at_tokens(tokens, source, indentation, indent_word, key_token_strings, aggressive): """Separate line by breaking at tokens in key_token_strings. The input is expected to be free of newlines except for inside multiline strings and at the end. """ offsets = [] for (index, _t) in enumerate(token_offsets(tokens)): (token_type, token_string, start_offset, end_offset) = _t assert token_type != token.INDENT if token_string in key_token_strings: # Do not break in containers with zero or one items. unwanted_next_token = { '(': ')', '[': ']', '{': '}'}.get(token_string) if unwanted_next_token: if ( get_item(tokens, index + 1, default=[None, None])[1] == unwanted_next_token or get_item(tokens, index + 2, default=[None, None])[1] == unwanted_next_token ): continue if ( index > 2 and token_string == '(' and tokens[index - 1][1] in ',(%[' ): # Don't split after a tuple start, or before a tuple start if # the tuple is in a list. continue if end_offset < len(source) - 1: # Don't split right before newline. offsets.append(end_offset) else: # Break at adjacent strings. These were probably meant to be on # separate lines in the first place. previous_token = get_item(tokens, index - 1) if ( token_type == tokenize.STRING and previous_token and previous_token[0] == tokenize.STRING ): offsets.append(start_offset) current_indent = None fixed = None for line in split_at_offsets(source, offsets): if fixed: fixed += '\n' + current_indent + line for symbol in '([{': if line.endswith(symbol): current_indent += indent_word else: # First line. fixed = line assert not current_indent current_indent = indent_word assert fixed is not None if check_syntax(normalize_multiline(fixed) if aggressive > 1 else fixed): return indentation + fixed return None
Separate line by breaking at tokens in key_token_strings. The input is expected to be free of newlines except for inside multiline strings and at the end.
_shorten_line_at_tokens
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def normalize_multiline(line): """Normalize multiline-related code that will cause syntax error. This is for purposes of checking syntax. """ if line.startswith(('def ', 'async def ')) and line.rstrip().endswith(':'): return line + ' pass' elif line.startswith('return '): return 'def _(): ' + line elif line.startswith('@'): return line + 'def _(): pass' elif line.startswith('class '): return line + ' pass' elif line.startswith(('if ', 'elif ', 'for ', 'while ')): return line + ' pass' return line
Normalize multiline-related code that will cause syntax error. This is for purposes of checking syntax.
normalize_multiline
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_whitespace(line, offset, replacement): """Replace whitespace at offset and return fixed line.""" # Replace escaped newlines too left = line[:offset].rstrip('\n\r \t\\') right = line[offset:].lstrip('\n\r \t\\') if right.startswith('#'): return line return left + replacement + right
Replace whitespace at offset and return fixed line.
fix_whitespace
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _execute_pep8(pep8_options, source): """Execute pycodestyle via python method calls.""" class QuietReport(pycodestyle.BaseReport): """Version of checker that does not print.""" def __init__(self, options): super(QuietReport, self).__init__(options) self.__full_error_results = [] def error(self, line_number, offset, text, check): """Collect errors.""" code = super(QuietReport, self).error(line_number, offset, text, check) if code: self.__full_error_results.append( {'id': code, 'line': line_number, 'column': offset + 1, 'info': text}) def full_error_results(self): """Return error results in detail. Results are in the form of a list of dictionaries. Each dictionary contains 'id', 'line', 'column', and 'info'. """ return self.__full_error_results checker = pycodestyle.Checker('', lines=source, reporter=QuietReport, **pep8_options) checker.check_all() return checker.report.full_error_results()
Execute pycodestyle via python method calls.
_execute_pep8
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def run(self, indent_size=DEFAULT_INDENT_SIZE): """Fix indentation and return modified line numbers. Line numbers are indexed at 1. """ if indent_size < 1: return self.input_text try: stats = _reindent_stats(tokenize.generate_tokens(self.getline)) except (SyntaxError, tokenize.TokenError): return self.input_text # Remove trailing empty lines. lines = self.lines # Sentinel. stats.append((len(lines), 0)) # Map count of leading spaces to # we want. have2want = {} # Program after transformation. after = [] # Copy over initial empty lines -- there's nothing to do until # we see a line with *something* on it. i = stats[0][0] after.extend(lines[1:i]) for i in range(len(stats) - 1): thisstmt, thislevel = stats[i] nextstmt = stats[i + 1][0] have = _leading_space_count(lines[thisstmt]) want = thislevel * indent_size if want < 0: # A comment line. if have: # An indented comment line. If we saw the same # indentation before, reuse what it most recently # mapped to. want = have2want.get(have, -1) if want < 0: # Then it probably belongs to the next real stmt. for j in range(i + 1, len(stats) - 1): jline, jlevel = stats[j] if jlevel >= 0: if have == _leading_space_count(lines[jline]): want = jlevel * indent_size break # Maybe it's a hanging comment like this one, if want < 0: # in which case we should shift it like its base # line got shifted. for j in range(i - 1, -1, -1): jline, jlevel = stats[j] if jlevel >= 0: want = (have + _leading_space_count( after[jline - 1]) - _leading_space_count(lines[jline])) break if want < 0: # Still no luck -- leave it alone. want = have else: want = 0 assert want >= 0 have2want[have] = want diff = want - have if diff == 0 or have == 0: after.extend(lines[thisstmt:nextstmt]) else: for line_number, line in enumerate(lines[thisstmt:nextstmt], start=thisstmt): if line_number in self.string_content_line_numbers: after.append(line) elif diff > 0: if line == '\n': after.append(line) else: after.append(' ' * diff + line) else: remove = min(_leading_space_count(line), -diff) after.append(line[remove:]) return ''.join(after)
Fix indentation and return modified line numbers. Line numbers are indexed at 1.
run
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _reindent_stats(tokens): """Return list of (lineno, indentlevel) pairs. One for each stmt and comment line. indentlevel is -1 for comment lines, as a signal that tokenize doesn't know what to do about them; indeed, they're our headache! """ find_stmt = 1 # Next token begins a fresh stmt? level = 0 # Current indent level. stats = [] for t in tokens: token_type = t[0] sline = t[2][0] line = t[4] if token_type == tokenize.NEWLINE: # A program statement, or ENDMARKER, will eventually follow, # after some (possibly empty) run of tokens of the form # (NL | COMMENT)* (INDENT | DEDENT+)? find_stmt = 1 elif token_type == tokenize.INDENT: find_stmt = 1 level += 1 elif token_type == tokenize.DEDENT: find_stmt = 1 level -= 1 elif token_type == tokenize.COMMENT: if find_stmt: stats.append((sline, -1)) # But we're still looking for a new stmt, so leave # find_stmt alone. elif token_type == tokenize.NL: pass elif find_stmt: # This is the first "real token" following a NEWLINE, so it # must be the first token of the next program statement, or an # ENDMARKER. find_stmt = 0 if line: # Not endmarker. stats.append((sline, level)) return stats
Return list of (lineno, indentlevel) pairs. One for each stmt and comment line. indentlevel is -1 for comment lines, as a signal that tokenize doesn't know what to do about them; indeed, they're our headache!
_reindent_stats
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _leading_space_count(line): """Return number of leading spaces in line.""" i = 0 while i < len(line) and line[i] == ' ': i += 1 return i
Return number of leading spaces in line.
_leading_space_count
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
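A quick illustrative sketch of _leading_space_count (assuming the single-file autopep8 module is importable); note that only literal spaces are counted, not tabs:

import autopep8

assert autopep8._leading_space_count('    x = 1\n') == 4  # four leading spaces
assert autopep8._leading_space_count('\tx = 1\n') == 0    # a leading tab is not counted as a space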
def check_syntax(code): """Return True if syntax is okay.""" try: return compile(code, '<string>', 'exec', dont_inherit=True) except (SyntaxError, TypeError, ValueError): return False
Return True if syntax is okay.
check_syntax
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
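A minimal usage sketch for check_syntax (assuming autopep8 is importable); it returns the compiled code object on success, which is truthy, and False on failure:

import autopep8

assert autopep8.check_syntax('x = 1\n')            # valid source compiles to a truthy code object
assert autopep8.check_syntax('x = (\n') is False   # SyntaxError is swallowed and reported as False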
def find_with_line_numbers(pattern, contents): """A wrapper around 're.finditer' to find line numbers. Returns a list of line numbers where pattern was found in contents. """ matches = list(re.finditer(pattern, contents)) if not matches: return [] end = matches[-1].start() # -1 so a failed `rfind` maps to the first line. newline_offsets = { -1: 0 } for line_num, m in enumerate(re.finditer(r'\n', contents), 1): offset = m.start() if offset > end: break newline_offsets[offset] = line_num def get_line_num(match, contents): """Get the line number of string in a files contents. Failing to find the newline is OK, -1 maps to 0 """ newline_offset = contents.rfind('\n', 0, match.start()) return newline_offsets[newline_offset] return [get_line_num(match, contents) + 1 for match in matches]
A wrapper around 're.finditer' to find line numbers. Returns a list of line numbers where pattern was found in contents.
find_with_line_numbers
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
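Illustrative sketch for find_with_line_numbers (assuming autopep8 is importable); the returned line numbers are 1-indexed:

import autopep8

source = 'a = 1\n# TODO: fix\nb = 2\n# TODO: later\n'
assert autopep8.find_with_line_numbers(r'TODO', source) == [2, 4]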
def get_disabled_ranges(source): """Returns a list of tuples representing the disabled ranges. If disabled and no re-enable will disable for rest of file. """ enable_line_nums = find_with_line_numbers(ENABLE_REGEX, source) disable_line_nums = find_with_line_numbers(DISABLE_REGEX, source) total_lines = len(re.findall("\n", source)) + 1 enable_commands = {} for num in enable_line_nums: enable_commands[num] = True for num in disable_line_nums: enable_commands[num] = False disabled_ranges = [] currently_enabled = True disabled_start = None for line, commanded_enabled in sorted(enable_commands.items()): if commanded_enabled is False and currently_enabled is True: disabled_start = line currently_enabled = False elif commanded_enabled is True and currently_enabled is False: disabled_ranges.append((disabled_start, line)) currently_enabled = True if currently_enabled is False: disabled_ranges.append((disabled_start, total_lines)) return disabled_ranges
Return a list of (start_line, end_line) tuples representing the disabled ranges. If a disable marker is never re-enabled, the range extends to the last line of the file.
get_disabled_ranges
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
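A hedged sketch for get_disabled_ranges; the exact ENABLE_REGEX/DISABLE_REGEX patterns are defined elsewhere in the module, so the '# fmt: off' / '# fmt: on' markers below are an assumption about what those regexes match:

import autopep8

source = 'a=1\n# fmt: off\nb  =  2\n# fmt: on\nc=3\n'
# One disabled range, expressed as (start_line, end_line); expected along the lines of [(2, 4)].
print(autopep8.get_disabled_ranges(source))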
def filter_disabled_results(result, disabled_ranges): """Filter out reports based on tuple of disabled ranges. """ line = result['line'] for disabled_range in disabled_ranges: if disabled_range[0] <= line <= disabled_range[1]: return False return True
Filter out reports based on tuple of disabled ranges.
filter_disabled_results
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
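Usage sketch for filter_disabled_results (assuming autopep8 is importable); a result whose line falls inside any disabled range is filtered out:

import autopep8

result = {'id': 'E225', 'line': 12}
assert not autopep8.filter_disabled_results(result, [(10, 20)])  # line 12 is inside the range 10-20
assert autopep8.filter_disabled_results(result, [(30, 40)])      # outside every disabled range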
def filter_results(source, results, aggressive): """Filter out spurious reports from pycodestyle. If aggressive is True, we allow possibly unsafe fixes (E711, E712). """ non_docstring_string_line_numbers = multiline_string_lines( source, include_docstrings=False) all_string_line_numbers = multiline_string_lines( source, include_docstrings=True) commented_out_code_line_numbers = commented_out_code_lines(source) # Filter out the disabled ranges disabled_ranges = get_disabled_ranges(source) if disabled_ranges: results = [ result for result in results if filter_disabled_results( result, disabled_ranges, ) ] has_e901 = any(result['id'].lower() == 'e901' for result in results) for r in results: issue_id = r['id'].lower() if r['line'] in non_docstring_string_line_numbers: if issue_id.startswith(('e1', 'e501', 'w191')): continue if r['line'] in all_string_line_numbers: if issue_id in ['e501']: continue # We must offset by 1 for lines that contain the trailing contents of # multiline strings. if not aggressive and (r['line'] + 1) in all_string_line_numbers: # Do not modify multiline strings in non-aggressive mode. Remove # trailing whitespace could break doctests. if issue_id.startswith(('w29', 'w39')): continue if aggressive <= 0: if issue_id.startswith(('e711', 'e72', 'w6')): continue if aggressive <= 1: if issue_id.startswith(('e712', 'e713', 'e714')): continue if aggressive <= 2: if issue_id.startswith(('e704')): continue if r['line'] in commented_out_code_line_numbers: if issue_id.startswith(('e261', 'e262', 'e501')): continue # Do not touch indentation if there is a token error caused by # incomplete multi-line statement. Otherwise, we risk screwing up the # indentation. if has_e901: if issue_id.startswith(('e1', 'e7')): continue yield r
Filter out spurious reports from pycodestyle. If aggressive is True, we allow possibly unsafe fixes (E711, E712).
filter_results
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def multiline_string_lines(source, include_docstrings=False): """Return line numbers that are within multiline strings. The line numbers are indexed at 1. Docstrings are ignored. """ line_numbers = set() previous_token_type = '' _check_target_tokens = [tokenize.STRING] if IS_SUPPORT_TOKEN_FSTRING: _check_target_tokens.extend([ tokenize.FSTRING_START, tokenize.FSTRING_MIDDLE, tokenize.FSTRING_END, ]) try: for t in generate_tokens(source): token_type = t[0] start_row = t[2][0] end_row = t[3][0] if token_type in _check_target_tokens and start_row != end_row: if ( include_docstrings or previous_token_type != tokenize.INDENT ): # We increment by one since we want the contents of the # string. line_numbers |= set(range(1 + start_row, 1 + end_row)) previous_token_type = token_type except (SyntaxError, tokenize.TokenError): pass return line_numbers
Return line numbers that are within multiline strings. The line numbers are indexed at 1. Docstrings are ignored.
multiline_string_lines
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
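Illustrative sketch for multiline_string_lines (assuming autopep8 is importable); the reported lines are the continuation lines of the multiline string, 1-indexed:

import autopep8

source = 'x = """first\nsecond\nthird"""\ny = 1\n'
assert autopep8.multiline_string_lines(source) == {2, 3}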
def commented_out_code_lines(source): """Return line numbers of comments that are likely code. Commented-out code is bad practice, but modifying it just adds even more clutter. """ line_numbers = [] try: for t in generate_tokens(source): token_type = t[0] token_string = t[1] start_row = t[2][0] line = t[4] # Ignore inline comments. if not line.lstrip().startswith('#'): continue if token_type == tokenize.COMMENT: stripped_line = token_string.lstrip('#').strip() with warnings.catch_warnings(): # ignore SyntaxWarning in Python3.8+ # refs: # https://bugs.python.org/issue15248 # https://docs.python.org/3.8/whatsnew/3.8.html#other-language-changes warnings.filterwarnings("ignore", category=SyntaxWarning) if ( ' ' in stripped_line and '#' not in stripped_line and check_syntax(stripped_line) ): line_numbers.append(start_row) except (SyntaxError, tokenize.TokenError): pass return line_numbers
Return line numbers of comments that are likely code. Commented-out code is bad practice, but modifying it just adds even more clutter.
commented_out_code_lines
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def shorten_comment(line, max_line_length, last_comment=False): """Return trimmed or split long comment line. If there are no comments immediately following it, do a text wrap. Doing this wrapping on all comments in general would lead to jagged comment text. """ assert len(line) > max_line_length line = line.rstrip() # PEP 8 recommends 72 characters for comment text. indentation = _get_indentation(line) + '# ' max_line_length = min(max_line_length, len(indentation) + 72) MIN_CHARACTER_REPEAT = 5 if ( len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and not line[-1].isalnum() ): # Trim comments that end with things like --------- return line[:max_line_length] + '\n' elif last_comment and re.match(r'\s*#+\s*\w+', line): split_lines = textwrap.wrap(line.lstrip(' \t#'), initial_indent=indentation, subsequent_indent=indentation, width=max_line_length, break_long_words=False, break_on_hyphens=False) return '\n'.join(split_lines) + '\n' return line + '\n'
Return trimmed or split long comment line. If there are no comments immediately following it, do a text wrap. Doing this wrapping on all comments in general would lead to jagged comment text.
shorten_comment
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def normalize_line_endings(lines, newline): """Return fixed line endings. All lines will be modified to use the most common line ending. """ line = [line.rstrip('\n\r') + newline for line in lines] if line and lines[-1] == lines[-1].rstrip('\n\r'): line[-1] = line[-1].rstrip('\n\r') return line
Return fixed line endings. All lines will be modified to use the most common line ending.
normalize_line_endings
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
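Usage sketch for normalize_line_endings (assuming autopep8 is importable); every line is rewritten with the newline the caller determined to be most common:

import autopep8

lines = ['a = 1\r\n', 'b = 2\n', 'c = 3\r\n']
assert autopep8.normalize_line_endings(lines, '\r\n') == ['a = 1\r\n', 'b = 2\r\n', 'c = 3\r\n']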
def fix_code(source, options=None, encoding=None, apply_config=False): """Return fixed source code. "encoding" will be used to decode "source" if it is a byte string. """ options = _get_options(options, apply_config) # normalize options.ignore = [opt.upper() for opt in options.ignore] options.select = [opt.upper() for opt in options.select] # check ignore args # NOTE: If W50x is not included, add W50x because the code # correction result is indefinite. ignore_opt = options.ignore if not {"W50", "W503", "W504"} & set(ignore_opt): options.ignore.append("W50") if not isinstance(source, str): source = source.decode(encoding or get_encoding()) sio = io.StringIO(source) return fix_lines(sio.readlines(), options=options)
Return fixed source code. "encoding" will be used to decode "source" if it is a byte string.
fix_code
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
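fix_code is autopep8's documented programmatic entry point; a minimal sketch:

import autopep8

print(autopep8.fix_code('x=      123\n'))   # -> 'x = 123\n'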
def apply_global_fixes(source, options, where='global', filename='', codes=None): """Run global fixes on source code. These are fixes that only need be done once (unlike those in FixPEP8, which are dependent on pycodestyle). """ if codes is None: codes = [] if any(code_match(code, select=options.select, ignore=options.ignore) for code in ['E101', 'E111']): source = reindent( source, indent_size=options.indent_size, leave_tabs=not ( code_match( 'W191', select=options.select, ignore=options.ignore ) ) ) for (code, function) in global_fixes(): if code_match(code, select=options.select, ignore=options.ignore): if options.verbose: print('---> Applying {} fix for {}'.format(where, code.upper()), file=sys.stderr) source = function(source, aggressive=options.aggressive) return source
Run global fixes on source code. These are fixes that only need be done once (unlike those in FixPEP8, which are dependent on pycodestyle).
apply_global_fixes
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def _parser_error_with_code( parser: argparse.ArgumentParser, code: int, msg: str, ) -> None: """wrap parser.error with exit code""" parser.print_usage(sys.stderr) parser.exit(code, f"{msg}\n")
wrap parser.error with exit code
_parser_error_with_code
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def read_config(args, parser): """Read both user configuration and local configuration.""" config = SafeConfigParser() try: if args.verbose and os.path.exists(args.global_config): print("read config path: {}".format(args.global_config)) config.read(args.global_config) if not args.ignore_local_config: parent = tail = args.files and os.path.abspath( os.path.commonprefix(args.files)) while tail: if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]): if args.verbose: for fn in PROJECT_CONFIG: config_file = os.path.join(parent, fn) if not os.path.exists(config_file): continue print( "read config path: {}".format( os.path.join(parent, fn) ) ) break (parent, tail) = os.path.split(parent) defaults = {} option_list = {o.dest: o.type or type(o.default) for o in parser._actions} for section in ['pep8', 'pycodestyle', 'flake8']: if not config.has_section(section): continue for norm_opt, k, value in _get_normalize_options( args, config, section, option_list ): if args.verbose: print("enable config: section={}, key={}, value={}".format( section, k, value)) defaults[norm_opt] = value parser.set_defaults(**defaults) except Error: # Ignore for now. pass return parser
Read both user configuration and local configuration.
read_config
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def read_pyproject_toml(args, parser): """Read pyproject.toml and load configuration.""" if sys.version_info >= (3, 11): import tomllib else: import tomli as tomllib config = None if os.path.exists(args.global_config): with open(args.global_config, "rb") as fp: config = tomllib.load(fp) if not args.ignore_local_config: parent = tail = args.files and os.path.abspath( os.path.commonprefix(args.files)) while tail: pyproject_toml = os.path.join(parent, "pyproject.toml") if os.path.exists(pyproject_toml): with open(pyproject_toml, "rb") as fp: config = tomllib.load(fp) break (parent, tail) = os.path.split(parent) if not config: return None if config.get("tool", {}).get("autopep8") is None: return None config = config.get("tool", {}).get("autopep8") defaults = {} option_list = {o.dest: o.type or type(o.default) for o in parser._actions} TUPLED_OPTIONS = ("ignore", "select") for (k, v) in config.items(): norm_opt = k.lstrip('-').replace('-', '_') if not option_list.get(norm_opt): continue if type(v) in (list, tuple) and norm_opt in TUPLED_OPTIONS: value = ",".join(v) else: value = v if args.verbose: print("enable pyproject.toml config: " "key={}, value={}".format(k, value)) defaults[norm_opt] = value if defaults: # set value when exists key-value in defaults dict parser.set_defaults(**defaults) return parser
Read pyproject.toml and load configuration.
read_pyproject_toml
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def supported_fixes(): """Yield pep8 error codes that autopep8 fixes. Each item we yield is a tuple of the code followed by its description. """ yield ('E101', docstring_summary(reindent.__doc__)) instance = FixPEP8(filename=None, options=None, contents='') for attribute in dir(instance): code = re.match('fix_([ew][0-9][0-9][0-9])', attribute) if code: yield ( code.group(1).upper(), re.sub(r'\s+', ' ', docstring_summary(getattr(instance, attribute).__doc__)) ) for (code, function) in sorted(global_fixes()): yield (code.upper() + (4 - len(code)) * ' ', re.sub(r'\s+', ' ', docstring_summary(function.__doc__)))
Yield pep8 error codes that autopep8 fixes. Each item we yield is a tuple of the code followed by its description.
supported_fixes
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def line_shortening_rank(candidate, indent_word, max_line_length, experimental=False): """Return rank of candidate. This is for sorting candidates. """ if not candidate.strip(): return 0 rank = 0 lines = candidate.rstrip().split('\n') offset = 0 if ( not lines[0].lstrip().startswith('#') and lines[0].rstrip()[-1] not in '([{' ): for (opening, closing) in ('()', '[]', '{}'): # Don't penalize empty containers that aren't split up. Things like # this "foo(\n )" aren't particularly good. opening_loc = lines[0].find(opening) closing_loc = lines[0].find(closing) if opening_loc >= 0: if closing_loc < 0 or closing_loc != opening_loc + 1: offset = max(offset, 1 + opening_loc) current_longest = max(offset + len(x.strip()) for x in lines) rank += 4 * max(0, current_longest - max_line_length) rank += len(lines) # Too much variation in line length is ugly. rank += 2 * standard_deviation(len(line) for line in lines) bad_staring_symbol = { '(': ')', '[': ']', '{': '}'}.get(lines[0][-1]) if len(lines) > 1: if ( bad_staring_symbol and lines[1].lstrip().startswith(bad_staring_symbol) ): rank += 20 for lineno, current_line in enumerate(lines): current_line = current_line.strip() if current_line.startswith('#'): continue for bad_start in ['.', '%', '+', '-', '/']: if current_line.startswith(bad_start): rank += 100 # Do not tolerate operators on their own line. if current_line == bad_start: rank += 1000 if ( current_line.endswith(('.', '%', '+', '-', '/')) and "': " in current_line ): rank += 1000 if current_line.endswith(('(', '[', '{', '.')): # Avoid lonely opening. They result in longer lines. if len(current_line) <= len(indent_word): rank += 100 # Avoid the ugliness of ", (\n". if ( current_line.endswith('(') and current_line[:-1].rstrip().endswith(',') ): rank += 100 # Avoid the ugliness of "something[\n" and something[index][\n. if ( current_line.endswith('[') and len(current_line) > 1 and (current_line[-2].isalnum() or current_line[-2] in ']') ): rank += 300 # Also avoid the ugliness of "foo.\nbar" if current_line.endswith('.'): rank += 100 if has_arithmetic_operator(current_line): rank += 100 # Avoid breaking at unary operators. if re.match(r'.*[(\[{]\s*[\-\+~]$', current_line.rstrip('\\ ')): rank += 1000 if re.match(r'.*lambda\s*\*$', current_line.rstrip('\\ ')): rank += 1000 if current_line.endswith(('%', '(', '[', '{')): rank -= 20 # Try to break list comprehensions at the "for". if current_line.startswith('for '): rank -= 50 if current_line.endswith('\\'): # If a line ends in \-newline, it may be part of a # multiline string. In that case, we would like to know # how long that line is without the \-newline. If it's # longer than the maximum, or has comments, then we assume # that the \-newline is an okay candidate and only # penalize it a bit. total_len = len(current_line) lineno += 1 while lineno < len(lines): total_len += len(lines[lineno]) if lines[lineno].lstrip().startswith('#'): total_len = max_line_length break if not lines[lineno].endswith('\\'): break lineno += 1 if total_len < max_line_length: rank += 10 else: rank += 100 if experimental else 1 # Prefer breaking at commas rather than colon. if ',' in current_line and current_line.endswith(':'): rank += 10 # Avoid splitting dictionaries between key and value. if current_line.endswith(':'): rank += 100 rank += 10 * count_unbalanced_brackets(current_line) return max(0, rank)
Return rank of candidate. This is for sorting candidates.
line_shortening_rank
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def has_arithmetic_operator(line): """Return True if line contains any arithmetic operators.""" for operator in pycodestyle.ARITHMETIC_OP: if operator in line: return True return False
Return True if line contains any arithmetic operators.
has_arithmetic_operator
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def count_unbalanced_brackets(line): """Return number of unmatched open/close brackets.""" count = 0 for opening, closing in ['()', '[]', '{}']: count += abs(line.count(opening) - line.count(closing)) return count
Return number of unmatched open/close brackets.
count_unbalanced_brackets
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
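Sketch for count_unbalanced_brackets (assuming autopep8 is importable); each bracket type contributes the absolute difference between its open and close counts:

import autopep8

assert autopep8.count_unbalanced_brackets('foo(bar[0]') == 1   # '(' opened but never closed
assert autopep8.count_unbalanced_brackets('{(x)}') == 0        # everything balanced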
def split_at_offsets(line, offsets): """Split line at offsets. Return list of strings. """ result = [] previous_offset = 0 current_offset = 0 for current_offset in sorted(offsets): if current_offset < len(line) and previous_offset != current_offset: result.append(line[previous_offset:current_offset].strip()) previous_offset = current_offset result.append(line[current_offset:]) return result
Split line at offsets. Return list of strings.
split_at_offsets
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
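Sketch for split_at_offsets (assuming autopep8 is importable); offsets are sorted internally before splitting:

import autopep8

assert autopep8.split_at_offsets('abcdef', [4, 2]) == ['ab', 'cd', 'ef']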
def match_file(filename, exclude): """Return True if file is okay for modifying/recursing.""" base_name = os.path.basename(filename) if base_name.startswith('.'): return False for pattern in exclude: if fnmatch.fnmatch(base_name, pattern): return False if fnmatch.fnmatch(filename, pattern): return False if not os.path.isdir(filename) and not is_python_file(filename): return False return True
Return True if file is okay for modifying/recursing.
match_file
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
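Sketch for match_file (assuming autopep8 is importable); hidden files and anything matching an exclude pattern are rejected:

import autopep8

assert autopep8.match_file('pkg/module.py', exclude=[])
assert not autopep8.match_file('pkg/.hidden.py', exclude=[])          # dotfiles are skipped
assert not autopep8.match_file('pkg/module.py', exclude=['pkg/*'])    # excluded by pattern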
def _fix_file(parameters): """Helper function for optionally running fix_file() in parallel.""" if parameters[1].verbose: print('[file:{}]'.format(parameters[0]), file=sys.stderr) try: return fix_file(*parameters) except IOError as error: print(str(error), file=sys.stderr) raise error
Helper function for optionally running fix_file() in parallel.
_fix_file
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def fix_multiple_files(filenames, options, output=None): """Fix list of files. Optionally fix files recursively. """ results = [] filenames = find_files(filenames, options.recursive, options.exclude) if options.jobs > 1: import multiprocessing pool = multiprocessing.Pool(options.jobs) rets = [] for name in filenames: ret = pool.apply_async(_fix_file, ((name, options),)) rets.append(ret) pool.close() pool.join() if options.diff: for r in rets: sys.stdout.write(r.get().decode()) sys.stdout.flush() results.extend([x.get() for x in rets if x is not None]) else: for name in filenames: ret = _fix_file((name, options, output)) if ret is None: continue if options.diff: if ret != '': results.append(ret) elif options.in_place: results.append(ret) else: original_source = readlines_from_file(name) if "".join(original_source).splitlines() != ret.splitlines(): results.append(ret) return results
Fix list of files. Optionally fix files recursively.
fix_multiple_files
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def is_python_file(filename): """Return True if filename is Python file.""" if filename.endswith('.py'): return True try: with open_with_encoding( filename, limit_byte_check=MAX_PYTHON_FILE_DETECTION_BYTES) as f: text = f.read(MAX_PYTHON_FILE_DETECTION_BYTES) if not text: return False first_line = text.splitlines()[0] except (IOError, IndexError): return False if not PYTHON_SHEBANG_REGEX.match(first_line): return False return True
Return True if filename is Python file.
is_python_file
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def is_probably_part_of_multiline(line): """Return True if line is likely part of a multiline string. When multiline strings are involved, pep8 reports the error as being at the start of the multiline string, which doesn't work for us. """ return ( '"""' in line or "'''" in line or line.rstrip().endswith('\\') )
Return True if line is likely part of a multiline string. When multiline strings are involved, pep8 reports the error as being at the start of the multiline string, which doesn't work for us.
is_probably_part_of_multiline
python
hhatto/autopep8
autopep8.py
https://github.com/hhatto/autopep8/blob/master/autopep8.py
MIT
def run(filename, command, max_line_length=79, ignore='', check_ignore='', verbose=False, comparison_function=None, aggressive=0, experimental=False, line_range=None, random_range=False, pycodestyle=True): """Run autopep8 on file at filename. Return True on success. """ if random_range: if not line_range: line_range = [1, RANDOM_MAX] first = random.randint(*line_range) line_range = [first, random.randint(first, line_range[1])] command = (shlex.split(command) + (['--verbose'] if verbose else []) + ['--max-line-length={}'.format(max_line_length), '--ignore=' + ignore, filename] + aggressive * ['--aggressive'] + (['--experimental'] if experimental else []) + (['--line-range', str(line_range[0]), str(line_range[1])] if line_range else [])) print(' '.join(command), file=sys.stderr) with tempfile.NamedTemporaryFile(suffix='.py') as tmp_file: if subprocess.call(command, stdout=tmp_file) != 0: sys.stderr.write('autopep8 crashed on ' + filename + '\n') return False if pycodestyle and subprocess.call( [pycodestyle, '--ignore=' + ','.join([x for x in ignore.split(',') + check_ignore.split(',') if x]), '--show-source', tmp_file.name], stdout=sys.stdout) != 0: sys.stderr.write('autopep8 did not completely fix ' + filename + '\n') try: if check_syntax(filename): try: check_syntax(tmp_file.name, raise_error=True) except (SyntaxError, TypeError, UnicodeDecodeError) as exception: sys.stderr.write('autopep8 broke ' + filename + '\n' + str(exception) + '\n') return False if comparison_function: if not comparison_function(filename, tmp_file.name): return False except IOError as exception: sys.stderr.write(str(exception) + '\n') return True
Run autopep8 on file at filename. Return True on success.
run
python
hhatto/autopep8
test/acid.py
https://github.com/hhatto/autopep8/blob/master/test/acid.py
MIT
def check_syntax(filename, raise_error=False): """Return True if syntax is okay.""" with autopep8.open_with_encoding(filename) as input_file: try: compile(input_file.read(), '<string>', 'exec', dont_inherit=True) return True except (SyntaxError, TypeError, UnicodeDecodeError): if raise_error: raise else: return False
Return True if syntax is okay.
check_syntax
python
hhatto/autopep8
test/acid.py
https://github.com/hhatto/autopep8/blob/master/test/acid.py
MIT
def process_args(): """Return processed arguments (options and positional arguments).""" compare_bytecode_ignore = 'E71,E721,W' parser = argparse.ArgumentParser() parser.add_argument( '--command', default='{} {}'.format(sys.executable, os.path.join(ROOT_PATH, 'autopep8.py')), help='autopep8 command (default: %(default)s)') parser.add_argument('--ignore', help='comma-separated errors to ignore', default='') parser.add_argument('--check-ignore', help='comma-separated errors to ignore when checking ' 'for completeness (default: %(default)s)', default='') parser.add_argument('--max-line-length', metavar='n', default=79, type=int, help='set maximum allowed line length ' '(default: %(default)s)') parser.add_argument('--compare-bytecode', action='store_true', help='compare bytecode before and after fixes; ' 'sets default --ignore=' + compare_bytecode_ignore) parser.add_argument('-a', '--aggressive', action='count', default=0, help='run autopep8 in aggressive mode') parser.add_argument('--experimental', action='store_true', help='run experimental fixes') parser.add_argument('--line-range', metavar='line', default=None, type=int, nargs=2, help='pass --line-range to autope8') parser.add_argument('--random-range', action='store_true', help='pass random --line-range to autope8') parser.add_argument('--pycodestyle', default='pycodestyle', help='location of pycodestyle; ' 'set to empty string to disable this check') parser.add_argument('-v', '--verbose', action='store_true', help='print verbose messages') parser.add_argument('paths', nargs='*', help='paths to use for testing') args = parser.parse_args() if args.compare_bytecode and not args.ignore: args.ignore = compare_bytecode_ignore return args
Return processed arguments (options and positional arguments).
process_args
python
hhatto/autopep8
test/acid.py
https://github.com/hhatto/autopep8/blob/master/test/acid.py
MIT
def check(paths, args): """Run recursively run autopep8 on directory of files. Return False if the fix results in broken syntax. """ if paths: dir_paths = paths else: dir_paths = [path for path in sys.path if os.path.isdir(path)] filenames = dir_paths completed_filenames = set() if args.compare_bytecode: comparison_function = compare_bytecode else: comparison_function = None while filenames: try: name = os.path.realpath(filenames.pop(0)) if not os.path.exists(name): # Invalid symlink. continue if name in completed_filenames: sys.stderr.write( colored( '---> Skipping previously tested ' + name + '\n', YELLOW)) continue else: completed_filenames.update(name) if os.path.isdir(name): for root, directories, children in os.walk(name): filenames += [os.path.join(root, f) for f in children if f.endswith('.py') and not f.startswith('.')] directories[:] = [d for d in directories if not d.startswith('.')] else: verbose_message = '---> Testing with ' + name sys.stderr.write(colored(verbose_message + '\n', YELLOW)) if not run(os.path.join(name), command=args.command, max_line_length=args.max_line_length, ignore=args.ignore, check_ignore=args.check_ignore, verbose=args.verbose, comparison_function=comparison_function, aggressive=args.aggressive, experimental=args.experimental, line_range=args.line_range, random_range=args.random_range, pycodestyle=args.pycodestyle): return False except (UnicodeDecodeError, UnicodeEncodeError) as exception: # Ignore annoying codec problems on Python 2. print(exception) continue return True
Recursively run autopep8 on a directory of files. Return False if the fix results in broken syntax.
check
python
hhatto/autopep8
test/acid.py
https://github.com/hhatto/autopep8/blob/master/test/acid.py
MIT
def latest_packages(last_hours): """Return names of latest released packages on PyPI.""" process = subprocess.Popen( ['yolk', '--latest-releases={hours}'.format(hours=last_hours)], stdout=subprocess.PIPE) for line in process.communicate()[0].decode('utf-8').split('\n'): if line: yield line.split()[0]
Return names of latest released packages on PyPI.
latest_packages
python
hhatto/autopep8
test/acid_pypi.py
https://github.com/hhatto/autopep8/blob/master/test/acid_pypi.py
MIT
def get(self): """ Returns a simple HTML form for login """ if self.user: self.redirect_to('home', id=self.user_id) params = {} return self.render_template('boilerplate_login.html', **params)
Returns a simple HTML form for login
get
python
hhatto/autopep8
test/e101_example.py
https://github.com/hhatto/autopep8/blob/master/test/e101_example.py
MIT
def post(self): """ username: Get the username from POST dict password: Get the password from POST dict """ if not self.form.validate(): return self.get() username = self.form.username.data.lower() try: if utils.is_email_valid(username): user = models.User.get_by_email(username) if user: auth_id = user.auth_ids[0] else: raise InvalidAuthIdError else: auth_id = "own:%s" % username user = models.User.get_by_auth_id(auth_id) password = self.form.password.data.strip() remember_me = True if str(self.request.POST.get('remember_me')) == 'on' else False # Password to SHA512 password = utils.encrypt(password, config.salt) # Try to login user with password # Raises InvalidAuthIdError if user is not found # Raises InvalidPasswordError if provided password # doesn't match with specified user self.auth.get_user_by_password( auth_id, password, remember=remember_me) # if user account is not activated, logout and redirect to home if (user.activated == False): # logout self.auth.unset_session() # redirect to home with error message resend_email_uri = self.uri_for('resend-account-activation', encoded_email=utils.encode(user.email)) message = _('Sorry, your account') + ' <strong>{0:>s}</strong>'.format(username) + " " +\ _('has not been activated. Please check your email to activate your account') + ". " +\ _('Or click') + " <a href='"+resend_email_uri+"'>" + _('this') + "</a> " + _('to resend the email') self.add_message(message, 'error') return self.redirect_to('home') # check twitter association in session twitter_helper = twitter.TwitterAuth(self) twitter_association_data = twitter_helper.get_association_data() if twitter_association_data is not None: if models.SocialUser.check_unique(user.key, 'twitter', str(twitter_association_data['id'])): social_user = models.SocialUser( user = user.key, provider = 'twitter', uid = str(twitter_association_data['id']), extra_data = twitter_association_data ) social_user.put() logVisit = models.LogVisit( user=user.key, uastring=self.request.user_agent, ip=self.request.remote_addr, timestamp=utils.get_date_time() ) logVisit.put() self.redirect_to('home') except (InvalidAuthIdError, InvalidPasswordError), e: # Returns error message to self.response.write in # the BaseHandler.dispatcher message = _("Login invalid, Try again.") + "<br/>" + _("Don't have an account?") + \ ' <a href="' + self.uri_for('register') + '">' + _("Sign Up") + '</a>' self.add_message(message, 'error') return self.redirect_to('login')
username: Get the username from POST dict password: Get the password from POST dict
post
python
hhatto/autopep8
test/e101_example.py
https://github.com/hhatto/autopep8/blob/master/test/e101_example.py
MIT
def get(self): """ Returns a simple HTML form for create a new user """ if self.user: self.redirect_to('home', id=self.user_id) params = {} return self.render_template('boilerplate_register.html', **params)
Returns a simple HTML form for creating a new user
get
python
hhatto/autopep8
test/e101_example.py
https://github.com/hhatto/autopep8/blob/master/test/e101_example.py
MIT
def get(self): """ Returns a simple HTML for contact form """ if self.user: user_info = models.User.get_by_id(long(self.user_id)) if user_info.name or user_info.last_name: self.form.name.data = user_info.name + " " + user_info.last_name if user_info.email: self.form.email.data = user_info.email params = { "exception" : self.request.get('exception') } return self.render_template('boilerplate_contact.html', **params)
Returns a simple HTML for contact form
get
python
hhatto/autopep8
test/e101_example.py
https://github.com/hhatto/autopep8/blob/master/test/e101_example.py
MIT
def get(self): """ Returns a simple HTML form for edit profile """ params = {} if self.user: user_info = models.User.get_by_id(long(self.user_id)) self.form.username.data = user_info.username self.form.name.data = user_info.name self.form.last_name.data = user_info.last_name self.form.country.data = user_info.country providers_info = user_info.get_social_providers_info() params['used_providers'] = providers_info['used'] params['unused_providers'] = providers_info['unused'] params['country'] = user_info.country return self.render_template('boilerplate_edit_profile.html', **params)
Returns a simple HTML form for edit profile
get
python
hhatto/autopep8
test/e101_example.py
https://github.com/hhatto/autopep8/blob/master/test/e101_example.py
MIT
def get(self): """ Returns a simple HTML form for editing password """ params = {} return self.render_template('boilerplate_edit_password.html', **params)
Returns a simple HTML form for editing password
get
python
hhatto/autopep8
test/e101_example.py
https://github.com/hhatto/autopep8/blob/master/test/e101_example.py
MIT
def get(self): """ Returns a simple HTML form for edit email """ params = {} if self.user: user_info = models.User.get_by_id(long(self.user_id)) self.form.new_email.data = user_info.email return self.render_template('boilerplate_edit_email.html', **params)
Returns a simple HTML form for edit email
get
python
hhatto/autopep8
test/e101_example.py
https://github.com/hhatto/autopep8/blob/master/test/e101_example.py
MIT
def get(self): """ Returns a simple HTML form for home """ params = {} return self.render_template('boilerplate_home.html', **params)
Returns a simple HTML form for home
get
python
hhatto/autopep8
test/e101_example.py
https://github.com/hhatto/autopep8/blob/master/test/e101_example.py
MIT
def ismethoddescriptor(object): """Return true if the object is a method descriptor. But not if ismethod() or isclass() or isfunction() are true. This is new in Python 2.2, and, for example, is true of int.__add__. An object passing this test has a __get__ attribute but not a __set__ attribute, but beyond that the set of attributes varies. __name__ is usually sensible, and __doc__ often is. Methods implemented via descriptors that also pass one of the other tests return false from the ismethoddescriptor() test, simply because the other tests promise more -- you can, e.g., count on having the __func__ attribute (etc) when an object passes ismethod().""" if isclass(object) or ismethod(object) or isfunction(object): # mutual exclusion return False tp = type(object) return hasattr(tp, "__get__") and not hasattr(tp, "__set__")
Return true if the object is a method descriptor. But not if ismethod() or isclass() or isfunction() are true. This is new in Python 2.2, and, for example, is true of int.__add__. An object passing this test has a __get__ attribute but not a __set__ attribute, but beyond that the set of attributes varies. __name__ is usually sensible, and __doc__ often is. Methods implemented via descriptors that also pass one of the other tests return false from the ismethoddescriptor() test, simply because the other tests promise more -- you can, e.g., count on having the __func__ attribute (etc) when an object passes ismethod().
ismethoddescriptor
python
hhatto/autopep8
test/inspect_example.py
https://github.com/hhatto/autopep8/blob/master/test/inspect_example.py
MIT
def isdatadescriptor(object): """Return true if the object is a data descriptor. Data descriptors have both a __get__ and a __set__ attribute. Examples are properties (defined in Python) and getsets and members (defined in C). Typically, data descriptors will also have __name__ and __doc__ attributes (properties, getsets, and members have both of these attributes), but this is not guaranteed.""" if isclass(object) or ismethod(object) or isfunction(object): # mutual exclusion return False tp = type(object) return hasattr(tp, "__set__") and hasattr(tp, "__get__")
Return true if the object is a data descriptor. Data descriptors have both a __get__ and a __set__ attribute. Examples are properties (defined in Python) and getsets and members (defined in C). Typically, data descriptors will also have __name__ and __doc__ attributes (properties, getsets, and members have both of these attributes), but this is not guaranteed.
isdatadescriptor
python
hhatto/autopep8
test/inspect_example.py
https://github.com/hhatto/autopep8/blob/master/test/inspect_example.py
MIT
def isgeneratorfunction(object): """Return true if the object is a user-defined generator function. Generator function objects provides same attributes as functions. See help(isfunction) for attributes listing.""" return bool((isfunction(object) or ismethod(object)) and object.__code__.co_flags & CO_GENERATOR)
Return true if the object is a user-defined generator function. Generator function objects provides same attributes as functions. See help(isfunction) for attributes listing.
isgeneratorfunction
python
hhatto/autopep8
test/inspect_example.py
https://github.com/hhatto/autopep8/blob/master/test/inspect_example.py
MIT
def isroutine(object): """Return true if the object is any kind of function or method.""" return (isbuiltin(object) or isfunction(object) or ismethod(object) or ismethoddescriptor(object))
Return true if the object is any kind of function or method.
isroutine
python
hhatto/autopep8
test/inspect_example.py
https://github.com/hhatto/autopep8/blob/master/test/inspect_example.py
MIT
def getmembers(object, predicate=None): """Return all members of an object as (name, value) pairs sorted by name. Optionally, only return members that satisfy a given predicate.""" if isclass(object): mro = (object,) + getmro(object) else: mro = () results = [] processed = set() names = dir(object) # :dd any DynamicClassAttributes to the list of names if object is a class; # this may result in duplicate entries if, for example, a virtual # attribute with the same name as a DynamicClassAttribute exists try: for base in object.__bases__: for k, v in base.__dict__.items(): if isinstance(v, types.DynamicClassAttribute): names.append(k) except AttributeError: pass for key in names: # First try to get the value via getattr. Some descriptors don't # like calling their __get__ (see bug #1785), so fall back to # looking in the __dict__. try: value = getattr(object, key) # handle the duplicate key if key in processed: raise AttributeError except AttributeError: for base in mro: if key in base.__dict__: value = base.__dict__[key] break else: # could be a (currently) missing slot member, or a buggy # __dir__; discard and move on continue if not predicate or predicate(value): results.append((key, value)) processed.add(key) results.sort(key=lambda pair: pair[0]) return results
Return all members of an object as (name, value) pairs sorted by name. Optionally, only return members that satisfy a given predicate.
getmembers
python
hhatto/autopep8
test/inspect_example.py
https://github.com/hhatto/autopep8/blob/master/test/inspect_example.py
MIT
def classify_class_attrs(cls): """Return list of attribute-descriptor tuples. For each name in dir(cls), the return list contains a 4-tuple with these elements: 0. The name (a string). 1. The kind of attribute this is, one of these strings: 'class method' created via classmethod() 'static method' created via staticmethod() 'property' created via property() 'method' any other flavor of method or descriptor 'data' not a method 2. The class which defined this attribute (a class). 3. The object as obtained by calling getattr; if this fails, or if the resulting object does not live anywhere in the class' mro (including metaclasses) then the object is looked up in the defining class's dict (found by walking the mro). If one of the items in dir(cls) is stored in the metaclass it will now be discovered and not have None be listed as the class in which it was defined. Any items whose home class cannot be discovered are skipped. """ mro = getmro(cls) metamro = getmro(type(cls)) # for attributes stored in the metaclass metamro = tuple([cls for cls in metamro if cls not in (type, object)]) class_bases = (cls,) + mro all_bases = class_bases + metamro names = dir(cls) # :dd any DynamicClassAttributes to the list of names; # this may result in duplicate entries if, for example, a virtual # attribute with the same name as a DynamicClassAttribute exists. for base in mro: for k, v in base.__dict__.items(): if isinstance(v, types.DynamicClassAttribute): names.append(k) result = [] processed = set() for name in names: # Get the object associated with the name, and where it was defined. # Normal objects will be looked up with both getattr and directly in # its class' dict (in case getattr fails [bug #1785], and also to look # for a docstring). # For DynamicClassAttributes on the second pass we only look in the # class's dict. # # Getting an obj from the __dict__ sometimes reveals more than # using getattr. Static and class methods are dramatic examples. homecls = None get_obj = None dict_obj = None if name not in processed: try: if name == '__dict__': raise Exception("__dict__ is special, don't want the proxy") get_obj = getattr(cls, name) except Exception as exc: pass else: homecls = getattr(get_obj, "__objclass__", homecls) if homecls not in class_bases: # if the resulting object does not live somewhere in the # mro, drop it and search the mro manually homecls = None last_cls = None # first look in the classes for srch_cls in class_bases: srch_obj = getattr(srch_cls, name, None) if srch_obj == get_obj: last_cls = srch_cls # then check the metaclasses for srch_cls in metamro: try: srch_obj = srch_cls.__getattr__(cls, name) except AttributeError: continue if srch_obj == get_obj: last_cls = srch_cls if last_cls is not None: homecls = last_cls for base in all_bases: if name in base.__dict__: dict_obj = base.__dict__[name] if homecls not in metamro: homecls = base break if homecls is None: # unable to locate the attribute anywhere, most likely due to # buggy custom __dir__; discard and move on continue obj = get_obj or dict_obj # Classify the object or its descriptor. if isinstance(dict_obj, staticmethod): kind = "static method" obj = dict_obj elif isinstance(dict_obj, classmethod): kind = "class method" obj = dict_obj elif isinstance(dict_obj, property): kind = "property" obj = dict_obj elif isroutine(obj): kind = "method" else: kind = "data" result.append(Attribute(name, kind, homecls, obj)) processed.add(name) return result
Return list of attribute-descriptor tuples. For each name in dir(cls), the return list contains a 4-tuple with these elements: 0. The name (a string). 1. The kind of attribute this is, one of these strings: 'class method' created via classmethod() 'static method' created via staticmethod() 'property' created via property() 'method' any other flavor of method or descriptor 'data' not a method 2. The class which defined this attribute (a class). 3. The object as obtained by calling getattr; if this fails, or if the resulting object does not live anywhere in the class' mro (including metaclasses) then the object is looked up in the defining class's dict (found by walking the mro). If one of the items in dir(cls) is stored in the metaclass it will now be discovered and not have None be listed as the class in which it was defined. Any items whose home class cannot be discovered are skipped.
classify_class_attrs
python
hhatto/autopep8
test/inspect_example.py
https://github.com/hhatto/autopep8/blob/master/test/inspect_example.py
MIT
def unwrap(func, *, stop=None): """Get the object wrapped by *func*. Follows the chain of :attr:`__wrapped__` attributes returning the last object in the chain. *stop* is an optional callback accepting an object in the wrapper chain as its sole argument that allows the unwrapping to be terminated early if the callback returns a true value. If the callback never returns a true value, the last object in the chain is returned as usual. For example, :func:`signature` uses this to stop unwrapping if any object in the chain has a ``__signature__`` attribute defined. :exc:`ValueError` is raised if a cycle is encountered. """ if stop is None: def _is_wrapper(f): return hasattr(f, '__wrapped__') else: def _is_wrapper(f): return hasattr(f, '__wrapped__') and not stop(f) f = func # remember the original func for error reporting memo = {id(f)} # Memoise by id to tolerate non-hashable objects while _is_wrapper(func): func = func.__wrapped__ id_func = id(func) if id_func in memo: raise ValueError('wrapper loop when unwrapping {!r}'.format(f)) memo.add(id_func) return func
Get the object wrapped by *func*. Follows the chain of :attr:`__wrapped__` attributes returning the last object in the chain. *stop* is an optional callback accepting an object in the wrapper chain as its sole argument that allows the unwrapping to be terminated early if the callback returns a true value. If the callback never returns a true value, the last object in the chain is returned as usual. For example, :func:`signature` uses this to stop unwrapping if any object in the chain has a ``__signature__`` attribute defined. :exc:`ValueError` is raised if a cycle is encountered.
unwrap
python
hhatto/autopep8
test/inspect_example.py
https://github.com/hhatto/autopep8/blob/master/test/inspect_example.py
MIT
def indentsize(line): """Return the indent size, in spaces, at the start of a line of text.""" expline = line.expandtabs() return len(expline) - len(expline.lstrip())
Return the indent size, in spaces, at the start of a line of text.
indentsize
python
hhatto/autopep8
test/inspect_example.py
https://github.com/hhatto/autopep8/blob/master/test/inspect_example.py
MIT
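A small sketch of indentsize, assuming the function is in scope (it mirrors inspect.indentsize); tabs are expanded before counting, so a leading tab reads as 8 columns by default:

assert indentsize('    return x') == 4
assert indentsize('\treturn x') == 8
assert indentsize('no indent') == 0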
def getdoc(object): """Get the documentation string for an object. All tabs are expanded to spaces. To clean up docstrings that are indented to line up with blocks of code, any whitespace than can be uniformly removed from the second line onwards is removed.""" try: doc = object.__doc__ except AttributeError: return None if not isinstance(doc, str): return None return cleandoc(doc)
Get the documentation string for an object. All tabs are expanded to spaces. To clean up docstrings that are indented to line up with blocks of code, any whitespace than can be uniformly removed from the second line onwards is removed.
getdoc
python
hhatto/autopep8
test/inspect_example.py
https://github.com/hhatto/autopep8/blob/master/test/inspect_example.py
MIT
def cleandoc(doc): """Clean up indentation from docstrings. Any whitespace that can be uniformly removed from the second line onwards is removed.""" try: lines = doc.expandtabs().split('\n') except UnicodeError: return None else: # Find minimum indentation of any non-blank lines after first line. margin = sys.maxsize for line in lines[1:]: content = len(line.lstrip()) if content: indent = len(line) - content margin = min(margin, indent) # Remove indentation. if lines: lines[0] = lines[0].lstrip() if margin < sys.maxsize: for i in range(1, len(lines)): lines[i] = lines[i][margin:] # Remove any trailing or leading blank lines. while lines and not lines[-1]: lines.pop() while lines and not lines[0]: lines.pop(0) return '\n'.join(lines)
Clean up indentation from docstrings. Any whitespace that can be uniformly removed from the second line onwards is removed.
cleandoc
python
hhatto/autopep8
test/inspect_example.py
https://github.com/hhatto/autopep8/blob/master/test/inspect_example.py
MIT
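A small sketch of cleandoc, assuming the function is in scope (it mirrors inspect.cleandoc); the common indentation of the second and later lines is removed and surrounding blank lines are dropped:

doc = 'First line.\n        Second line.\n\n'
assert cleandoc(doc) == 'First line.\nSecond line.'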