X-Git-Url: https://git.madduck.net/etc/vim.git/blobdiff_plain/28ab82aab013978b7ed91bda816de3d41385f260..068f6fb8fa2b52f647aec8696033e43f6b0db70b:/src/blib2to3/pgen2/parse.py

diff --git a/src/blib2to3/pgen2/parse.py b/src/blib2to3/pgen2/parse.py
index e5dad3a..299cc24 100644
--- a/src/blib2to3/pgen2/parse.py
+++ b/src/blib2to3/pgen2/parse.py
@@ -9,33 +9,32 @@ See Parser/parser.c in the Python distribution for additional info on
 how this parsing engine works.
 
 """
-import copy
 from contextlib import contextmanager
-
-# Local imports
-from . import grammar, token, tokenize
 from typing import (
-    cast,
+    TYPE_CHECKING,
     Any,
-    Optional,
-    Text,
-    Union,
-    Tuple,
+    Callable,
     Dict,
-    List,
     Iterator,
-    Callable,
+    List,
+    Optional,
     Set,
-    TYPE_CHECKING,
+    Tuple,
+    Union,
+    cast,
 )
+
 from blib2to3.pgen2.grammar import Grammar
-from blib2to3.pytree import convert, NL, Context, RawNode, Leaf, Node
+from blib2to3.pytree import NL, Context, Leaf, Node, RawNode, convert
+
+# Local imports
+from . import grammar, token, tokenize
 
 if TYPE_CHECKING:
-    from blib2to3.driver import TokenProxy
+    from blib2to3.pgen2.driver import TokenProxy
 
-Results = Dict[Text, NL]
+Results = Dict[str, NL]
 Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
 DFA = List[List[Tuple[int, int]]]
 DFAS = Tuple[DFA, Dict[int, int]]
 
@@ -46,6 +45,17 @@ def lam_sub(grammar: Grammar, node: RawNode) -> NL:
     return Node(type=node[0], children=node[3], context=node[2])
 
 
+# A placeholder node, used when the parser is backtracking.
+DUMMY_NODE = (-1, None, None, None)
+
+
+def stack_copy(
+    stack: List[Tuple[DFAS, int, RawNode]]
+) -> List[Tuple[DFAS, int, RawNode]]:
+    """Nodeless stack copy."""
+    return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]
+
+
 class Recorder:
     def __init__(self, parser: "Parser", ilabels: List[int], context: Context) -> None:
         self.parser = parser
@@ -54,7 +64,7 @@ class Recorder:
         self._dead_ilabels: Set[int] = set()
 
         self._start_point = self.parser.stack
-        self._points = {ilabel: copy.deepcopy(self._start_point) for ilabel in ilabels}
+        self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels}
 
     @property
     def ilabels(self) -> Set[int]:
@@ -62,15 +72,34 @@ class Recorder:
 
     @contextmanager
     def switch_to(self, ilabel: int) -> Iterator[None]:
-        self.parser.stack = self._points[ilabel]
+        with self.backtrack():
+            self.parser.stack = self._points[ilabel]
+            try:
+                yield
+            except ParseError:
+                self._dead_ilabels.add(ilabel)
+            finally:
+                self.parser.stack = self._start_point
+
+    @contextmanager
+    def backtrack(self) -> Iterator[None]:
+        """
+        Use the node-level invariant variants of the basic parsing operations
+        (push/pop/shift). These will still operate on the stack, but they won't
+        create any new nodes or modify the contents of any existing nodes.
+
+        This saves us a ton of time when we are backtracking, since we want
+        to restore the initial state as quickly as possible, which can only
+        be done by making as few mutations as possible.
+        """
+        is_backtracking = self.parser.is_backtracking
         try:
+            self.parser.is_backtracking = True
             yield
-        except ParseError:
-            self._dead_ilabels.add(ilabel)
         finally:
-            self.parser.stack = self._start_point
+            self.parser.is_backtracking = is_backtracking
 
-    def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
+    def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
         func: Callable[..., Any]
         if raw:
             func = self.parser._addtoken
@@ -84,7 +113,9 @@ class Recorder:
                     args.insert(0, ilabel)
                 func(*args)
 
-    def determine_route(self, value: Text = None, force: bool = False) -> Optional[int]:
+    def determine_route(
+        self, value: Optional[str] = None, force: bool = False
+    ) -> Optional[int]:
         alive_ilabels = self.ilabels
         if len(alive_ilabels) == 0:
             *_, most_successful_ilabel = self._dead_ilabels
@@ -101,10 +132,10 @@ class ParseError(Exception):
     """Exception to signal the parser is stuck."""
 
     def __init__(
-        self, msg: Text, type: Optional[int], value: Optional[Text], context: Context
+        self, msg: str, type: Optional[int], value: Optional[str], context: Context
     ) -> None:
         Exception.__init__(
-            self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context)
+            self, f"{msg}: type={type!r}, value={value!r}, context={context!r}"
         )
         self.msg = msg
         self.type = type
@@ -112,7 +143,7 @@ class ParseError(Exception):
         self.context = context
 
 
-class Parser(object):
+class Parser:
     """Parser engine.
 
     The proper usage sequence is:
@@ -179,6 +210,7 @@ class Parser(object):
         self.grammar = grammar
         # See note in docstring above. TL;DR this is ignored.
         self.convert = convert or lam_sub
+        self.is_backtracking = False
 
     def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
         """Prepare for parsing.
@@ -205,7 +237,7 @@ class Parser(object):
         self.used_names: Set[str] = set()
         self.proxy = proxy
 
-    def addtoken(self, type: int, value: Text, context: Context) -> bool:
+    def addtoken(self, type: int, value: str, context: Context) -> bool:
         """Add a token; return True iff this is the end of the program."""
         # Map from token to label
         ilabels = self.classify(type, value, context)
@@ -238,6 +270,10 @@ class Parser(object):
                     break
 
                 next_token_type, next_token_value, *_ = proxy.eat(counter)
+                if next_token_type in (tokenize.COMMENT, tokenize.NL):
+                    counter += 1
+                    continue
+
                 if next_token_type == tokenize.OP:
                     next_token_type = grammar.opmap[next_token_value]
 
@@ -249,7 +285,7 @@ class Parser(object):
 
         return self._addtoken(ilabel, type, value, context)
 
-    def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool:
+    def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool:
         # Loop until the token is shifted; may raise exceptions
         while True:
             dfa, state, node = self.stack[-1]
@@ -294,7 +330,7 @@ class Parser(object):
                     # No success finding a transition
                     raise ParseError("bad input", type, value, context)
 
-    def classify(self, type: int, value: Text, context: Context) -> List[int]:
+    def classify(self, type: int, value: str, context: Context) -> List[int]:
         """Turn a token into a label. (Internal)
 
         Depending on whether the value is a soft-keyword or not,
@@ -317,30 +353,42 @@ class Parser(object):
             raise ParseError("bad token", type, value, context)
         return [ilabel]
 
-    def shift(self, type: int, value: Text, newstate: int, context: Context) -> None:
+    def shift(self, type: int, value: str, newstate: int, context: Context) -> None:
         """Shift a token. (Internal)"""
-        dfa, state, node = self.stack[-1]
-        rawnode: RawNode = (type, value, context, None)
-        newnode = convert(self.grammar, rawnode)
-        assert node[-1] is not None
-        node[-1].append(newnode)
-        self.stack[-1] = (dfa, newstate, node)
+        if self.is_backtracking:
+            dfa, state, _ = self.stack[-1]
+            self.stack[-1] = (dfa, newstate, DUMMY_NODE)
+        else:
+            dfa, state, node = self.stack[-1]
+            rawnode: RawNode = (type, value, context, None)
+            newnode = convert(self.grammar, rawnode)
+            assert node[-1] is not None
+            node[-1].append(newnode)
+            self.stack[-1] = (dfa, newstate, node)
 
     def push(self, type: int, newdfa: DFAS, newstate: int, context: Context) -> None:
         """Push a nonterminal. (Internal)"""
-        dfa, state, node = self.stack[-1]
-        newnode: RawNode = (type, None, context, [])
-        self.stack[-1] = (dfa, newstate, node)
-        self.stack.append((newdfa, 0, newnode))
+        if self.is_backtracking:
+            dfa, state, _ = self.stack[-1]
+            self.stack[-1] = (dfa, newstate, DUMMY_NODE)
+            self.stack.append((newdfa, 0, DUMMY_NODE))
+        else:
+            dfa, state, node = self.stack[-1]
+            newnode: RawNode = (type, None, context, [])
+            self.stack[-1] = (dfa, newstate, node)
+            self.stack.append((newdfa, 0, newnode))
 
     def pop(self) -> None:
        """Pop a nonterminal. (Internal)"""
-        popdfa, popstate, popnode = self.stack.pop()
-        newnode = convert(self.grammar, popnode)
-        if self.stack:
-            dfa, state, node = self.stack[-1]
-            assert node[-1] is not None
-            node[-1].append(newnode)
+        if self.is_backtracking:
+            self.stack.pop()
         else:
-            self.rootnode = newnode
-            self.rootnode.used_names = self.used_names
+            popdfa, popstate, popnode = self.stack.pop()
+            newnode = convert(self.grammar, popnode)
+            if self.stack:
+                dfa, state, node = self.stack[-1]
+                assert node[-1] is not None
+                node[-1].append(newnode)
+            else:
+                self.rootnode = newnode
+                self.rootnode.used_names = self.used_names
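
The change above swaps copy.deepcopy of the parser stack for a nodeless copy plus an is_backtracking flag that makes shift/push/pop skip node construction while alternatives are explored. The following is a minimal standalone sketch of that pattern under stated assumptions: MiniParser, nodeless_copy, and DUMMY are hypothetical names used only for illustration and are not part of blib2to3.

# Illustrative sketch only -- not blib2to3 code.
from contextlib import contextmanager
from typing import Any, Iterator, List, Tuple

DUMMY: Any = object()  # stands in for real parse-tree nodes during speculation


def nodeless_copy(stack: List[Tuple[int, Any]]) -> List[Tuple[int, Any]]:
    """Cheap alternative to copy.deepcopy: keep the states, drop the nodes."""
    return [(state, DUMMY) for state, _ in stack]


class MiniParser:
    def __init__(self) -> None:
        # Each stack entry is (dfa_state, partially built node).
        self.stack: List[Tuple[int, Any]] = [(0, [])]
        self.is_backtracking = False

    @contextmanager
    def backtrack(self) -> Iterator[None]:
        # Save and restore the flag so nested speculation behaves correctly.
        saved = self.is_backtracking
        try:
            self.is_backtracking = True
            yield
        finally:
            self.is_backtracking = saved

    def shift(self, newstate: int, token: str) -> None:
        if self.is_backtracking:
            # Only advance the DFA state; never build or mutate nodes.
            self.stack[-1] = (newstate, DUMMY)
        else:
            _, node = self.stack[-1]
            node.append(token)
            self.stack[-1] = (newstate, node)


parser = MiniParser()
real_stack = parser.stack
with parser.backtrack():
    parser.stack = nodeless_copy(real_stack)  # speculative, node-free stack
    parser.shift(1, "if")                     # explore without touching real nodes
    parser.stack = real_stack                 # restore once the trial is done
print(parser.stack)  # [(0, [])] -- the real parse tree was never mutated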