"""
Parse Python code and perform AST validation.
"""

import ast
import sys
from typing import Final, Iterable, Iterator, List, Set, Tuple

from black.mode import VERSION_TO_FEATURES, Feature, TargetVersion, supports_feature
from black.nodes import syms
from blib2to3 import pygram
from blib2to3.pgen2 import driver
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pgen2.parse import ParseError
from blib2to3.pgen2.tokenize import TokenError
from blib2to3.pytree import Leaf, Node

PY2_HINT: Final = "Python 2 support was removed in version 22.0."


class InvalidInput(ValueError):
    """Raised when input source code fails all parse attempts."""


def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
    if not target_versions:
        # No target_version specified, so try all grammars.
        return [
            # Python 3.7-3.9
            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,
            # Python 3.0-3.6
            pygram.python_grammar_no_print_statement_no_exec_statement,
            # Python 3.10+
            pygram.python_grammar_soft_keywords,
        ]

    grammars = []
    # If we have to parse both, try to parse async as a keyword first
    if not supports_feature(
        target_versions, Feature.ASYNC_IDENTIFIERS
    ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING):
        # Python 3.7-3.9
        grammars.append(
            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords
        )
    if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
        # Python 3.0-3.6
        grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)
    if any(Feature.PATTERN_MATCHING in VERSION_TO_FEATURES[v] for v in target_versions):
        # Python 3.10+
        grammars.append(pygram.python_grammar_soft_keywords)

    # At least one of the above branches must have been taken, because every Python
    # version has exactly one of the two 'ASYNC_*' flags
    return grammars
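
# Illustrative usage (not part of the upstream module): which grammars come back is
# purely a function of the features the requested target versions support. The
# TargetVersion members below are assumed to exist in black.mode.
#
#     get_grammars(set())                  # no targets: try every grammar in order
#     get_grammars({TargetVersion.PY38})   # async-keywords grammar only
#     get_grammars({TargetVersion.PY310})  # soft-keywords grammar (pattern matching)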


def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
    """Given a string with source, return the lib2to3 Node."""
    if not src_txt.endswith("\n"):
        src_txt += "\n"

    grammars = get_grammars(set(target_versions))
    errors = {}
    for grammar in grammars:
        drv = driver.Driver(grammar)
        try:
            result = drv.parse_string(src_txt, True)
            break

        except ParseError as pe:
            lineno, column = pe.context[1]
            lines = src_txt.splitlines()
            try:
                faulty_line = lines[lineno - 1]
            except IndexError:
                faulty_line = "<line number missing in source>"
            errors[grammar.version] = InvalidInput(
                f"Cannot parse: {lineno}:{column}: {faulty_line}"
            )

        except TokenError as te:
            # In edge cases these are raised, and typically don't have a "faulty_line".
            lineno, column = te.args[1]
            errors[grammar.version] = InvalidInput(
                f"Cannot parse: {lineno}:{column}: {te.args[0]}"
            )

    else:
        # Choose the latest version when raising the actual parsing error.
        assert len(errors) >= 1
        exc = errors[max(errors)]

        if matches_grammar(src_txt, pygram.python_grammar) or matches_grammar(
            src_txt, pygram.python_grammar_no_print_statement
        ):
            original_msg = exc.args[0]
            msg = f"{original_msg}\n{PY2_HINT}"
            raise InvalidInput(msg) from None

        raise exc from None

    if isinstance(result, Leaf):
        result = Node(syms.file_input, [result])
    return result
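
# Illustrative usage (an assumption, not part of the upstream module): parse a snippet
# into a blib2to3 tree and round-trip it with lib2to3_unparse (defined below). Python 2
# syntax should fail every grammar and raise InvalidInput with PY2_HINT appended.
#
#     node = lib2to3_parse("x = 1\n")
#     assert lib2to3_unparse(node) == "x = 1\n"
#     lib2to3_parse("print 'hello'\n")  # raises InvalidInput, message ends with PY2_HINT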


def matches_grammar(src_txt: str, grammar: Grammar) -> bool:
    drv = driver.Driver(grammar)
    try:
        drv.parse_string(src_txt, True)
    except (ParseError, TokenError, IndentationError):
        return False
    else:
        return True
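
# Sketch of why this helper exists (illustrative, based on the caller above): Python 2
# only syntax fails the Python 3 grammars but still matches the legacy grammars that
# keep the print statement, which is how lib2to3_parse decides to append PY2_HINT.
#
#     matches_grammar("print 'x'\n", pygram.python_grammar)                     # True
#     matches_grammar("print 'x'\n", pygram.python_grammar_no_print_statement)  # False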


def lib2to3_unparse(node: Node) -> str:
    """Given a lib2to3 node, return its string representation."""
    code = str(node)
    return code


def parse_single_version(
    src: str, version: Tuple[int, int], *, type_comments: bool
) -> ast.AST:
    filename = "<unknown>"
    return ast.parse(
        src, filename, feature_version=version, type_comments=type_comments
    )


def parse_ast(src: str) -> ast.AST:
    # TODO: support Python 4+ ;)
    versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)]

    first_error = ""
    for version in sorted(versions, reverse=True):
        try:
            return parse_single_version(src, version, type_comments=True)
        except SyntaxError as e:
            if not first_error:
                first_error = str(e)

    # Try to parse without type comments
    for version in sorted(versions, reverse=True):
        try:
            return parse_single_version(src, version, type_comments=False)
        except SyntaxError:
            pass

    raise SyntaxError(first_error)
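
# Illustrative behaviour (an assumption about typical inputs): the newest feature
# version is preferred, type comments are kept when they parse, and the error from the
# first (newest) attempt is the one re-raised if nothing parses.
#
#     tree = parse_ast("x = []  # type: List[int]\n")  # parsed with type_comments=True
#     parse_ast("def f(:\n")                           # raises SyntaxError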


def _normalize(lineend: str, value: str) -> str:
    # To normalize, we strip any leading and trailing space from
    # each line...
    stripped: List[str] = [i.strip() for i in value.splitlines()]
    normalized = lineend.join(stripped)
    # ...and remove any blank lines at the beginning and end of
    # the whole string
    return normalized.strip()
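
# Worked example (illustrative): per-line leading/trailing whitespace and surrounding
# blank lines are folded, so re-indented docstrings compare equal in stringify_ast.
#
#     _normalize("\n", "  first line  \n      second line\n\n")
#     # -> "first line\nsecond line"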


def stringify_ast(node: ast.AST, depth: int = 0) -> Iterator[str]:
    """Simple visitor generating strings to compare ASTs by content."""

    if (
        isinstance(node, ast.Constant)
        and isinstance(node.value, str)
        and node.kind == "u"
    ):
        # It's a quirk of history that we strip the u prefix over here. We used to
        # rewrite the AST nodes for Python version compatibility and we never copied
        # over the kind
        node.kind = None

    yield f"{' ' * depth}{node.__class__.__name__}("

    for field in sorted(node._fields):  # noqa: F402
        # TypeIgnore has only one field 'lineno' which breaks this comparison
        if isinstance(node, ast.TypeIgnore):
            break

        try:
            value: object = getattr(node, field)
        except AttributeError:
            continue

        yield f"{' ' * (depth+1)}{field}="

        if isinstance(value, list):
            for item in value:
                # Ignore nested tuples within del statements, because we may insert
                # parentheses and they change the AST.
                if (
                    field == "targets"
                    and isinstance(node, ast.Delete)
                    and isinstance(item, ast.Tuple)
                ):
                    for elt in item.elts:
                        yield from stringify_ast(elt, depth + 2)
                elif isinstance(item, ast.AST):
                    yield from stringify_ast(item, depth + 2)

        elif isinstance(value, ast.AST):
            yield from stringify_ast(value, depth + 2)

        else:
            normalized: object
            if (
                isinstance(node, ast.Constant)
                and field == "value"
                and isinstance(value, str)
            ):
                # Constant strings may be indented across newlines, if they are
                # docstrings; fold spaces after newlines when comparing. Similarly,
                # trailing and leading space may be removed.
                normalized = _normalize("\n", value)
            elif field == "type_comment" and isinstance(value, str):
                # Trailing whitespace in type comments is removed.
                normalized = value.rstrip()
            else:
                normalized = value

            yield f"{' ' * (depth+2)}{normalized!r}, # {value.__class__.__name__}"

    yield f"{' ' * depth}) # /{node.__class__.__name__}"
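
# Illustrative usage (an assumption about the caller, not part of this module): Black's
# AST safety check compares the stringified trees of the input and of the formatted
# output; two snippets that differ only in layout yield identical streams.
#
#     before = "\n".join(stringify_ast(parse_ast("x=(1,)\n")))
#     after = "\n".join(stringify_ast(parse_ast("x = (1,)\n")))
#     assert before == after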