TYPE_CHECKING,
)
from blib2to3.pgen2.grammar import Grammar
-from blib2to3.pytree import NL, Context, RawNode, Leaf, Node
+from blib2to3.pytree import convert, NL, Context, RawNode, Leaf, Node
if TYPE_CHECKING:
from blib2to3.driver import TokenProxy
return Node(type=node[0], children=node[3], context=node[2])
+# A placeholder node, used when parser is backtracking.
+DUMMY_NODE = (-1, None, None, None)
+
+
+def stack_copy(
+ stack: List[Tuple[DFAS, int, RawNode]]
+) -> List[Tuple[DFAS, int, RawNode]]:
+ """Nodeless stack copy."""
+ return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]
+
+
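# Illustrative sketch (not part of the patch): stack_copy() keeps the exact
# (dfa, state) bookkeeping but swaps every node for DUMMY_NODE, so a copy is
# O(stack depth) instead of a deep copy of each partially built subtree. The
# dfa values below are stand-ins for real grammar DFAS entries:
#
#     stack = [(dfa_a, 3, (256, None, None, [])), (dfa_b, 0, (257, None, None, []))]
#     stack_copy(stack)  # -> [(dfa_a, 3, DUMMY_NODE), (dfa_b, 0, DUMMY_NODE)]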
class Recorder:
def __init__(self, parser: "Parser", ilabels: List[int], context: Context) -> None:
self.parser = parser
self.context = context # does not really matter
self._dead_ilabels: Set[int] = set()
- self._start_point = copy.deepcopy(self.parser.stack)
- self._points = {ilabel: copy.deepcopy(self._start_point) for ilabel in ilabels}
+ self._start_point = self.parser.stack
+ self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels}
@property
def ilabels(self) -> Set[int]:
@contextmanager
def switch_to(self, ilabel: int) -> Iterator[None]:
- self.parser.stack = self._points[ilabel]
+ with self.backtrack():
+ self.parser.stack = self._points[ilabel]
+ try:
+ yield
+ except ParseError:
+ self._dead_ilabels.add(ilabel)
+ finally:
+ self.parser.stack = self._start_point
+
+ @contextmanager
+ def backtrack(self) -> Iterator[None]:
+ """
+ Use the node-invariant variants of the basic parsing operations
+ (push/pop/shift). These still operate on the stack, but they won't
+ create any new nodes or modify the contents of any existing nodes.
+
+ This saves us a ton of time when we are backtracking, since we
+ want to restore the initial state as quickly as possible, which
+ can only be done by making as few mutations as possible.
+ """
+ is_backtracking = self.parser.is_backtracking
try:
+ self.parser.is_backtracking = True
yield
- except ParseError:
- self._dead_ilabels.add(ilabel)
finally:
- self.parser.stack = self._start_point
+ self.parser.is_backtracking = is_backtracking
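# Sketch of the intended nesting behaviour (usage assumed, not shown here):
# backtrack() saves and restores the previous flag rather than resetting it
# to False, so nested backtracking contexts compose safely:
#
#     with recorder.backtrack():         # is_backtracking: False -> True
#         with recorder.backtrack():     # stays True
#             ...
#         # inner exit restores True, not False
#     # outer exit restores False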
- def add_token(
- self, tok_type: int, tok_val: Optional[Text], raw: bool = False
- ) -> None:
+ def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
func: Callable[..., Any]
if raw:
func = self.parser._addtoken
args.insert(0, ilabel)
func(*args)
- def determine_route(
- self, value: Optional[Text] = None, force: bool = False
- ) -> Optional[int]:
+ def determine_route(self, value: Optional[Text] = None, force: bool = False) -> Optional[int]:
alive_ilabels = self.ilabels
if len(alive_ilabels) == 0:
*_, most_successful_ilabel = self._dead_ilabels
to be converted. The syntax tree is converted from the bottom
up.
+ **post-note: the convert argument is ignored since for Black's
+ usage, convert will always be blib2to3.pytree.convert. Allowing
+ this to be dynamic hurts mypyc's ability to use early binding.
+ These docs are left for historical and informational value.
+
A concrete syntax tree node is a (type, value, context, nodes)
tuple, where type is the node type (a token or symbol number),
value is None for symbols and a string for tokens, context is
"""
self.grammar = grammar
+ # See note in docstring above. TL;DR this is ignored.
self.convert = convert or lam_sub
+ self.is_backtracking = False
def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
"""Prepare for parsing.
self.used_names: Set[str] = set()
self.proxy = proxy
- def addtoken(self, type: int, value: Optional[Text], context: Context) -> bool:
+ def addtoken(self, type: int, value: Text, context: Context) -> bool:
"""Add a token; return True iff this is the end of the program."""
# Map from token to label
ilabels = self.classify(type, value, context)
break
next_token_type, next_token_value, *_ = proxy.eat(counter)
+ if next_token_type in (tokenize.COMMENT, tokenize.NL):
+ counter += 1
+ continue
+
if next_token_type == tokenize.OP:
- next_token_type = grammar.opmap[cast(str, next_token_value)]
+ next_token_type = grammar.opmap[next_token_value]
recorder.add_token(next_token_type, next_token_value)
counter += 1
return self._addtoken(ilabel, type, value, context)
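# Condensed shape of the disambiguation above (names from this file, control
# flow abridged): Recorder.add_token() replays each significant lookahead
# token against every still-alive candidate via switch_to(), and
# determine_route() reports the winner once a single candidate survives (or
# when force=True because the lookahead ran out):
#
#     recorder = Recorder(self, ilabels, context)
#     recorder.add_token(type, value, raw=True)
#     while recorder.determine_route(next_token_value) is None:
#         ...  # feed the next non-COMMENT/NL lookahead token, as in the loop above
#     ilabel = recorder.determine_route(next_token_value, force=force)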
- def _addtoken(
- self, ilabel: int, type: int, value: Optional[Text], context: Context
- ) -> bool:
+ def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool:
# Loop until the token is shifted; may raise exceptions
while True:
dfa, state, node = self.stack[-1]
arcs = states[state]
# Look for a state with this label
for i, newstate in arcs:
- t, v = self.grammar.labels[i]
- if ilabel == i:
+ t = self.grammar.labels[i][0]
+ if t >= 256:
+ # See if it's a symbol and if we're in its first set
+ itsdfa = self.grammar.dfas[t]
+ itsstates, itsfirst = itsdfa
+ if ilabel in itsfirst:
+ # Push a symbol
+ self.push(t, itsdfa, newstate, context)
+ break # To continue the outer while loop
+
+ elif ilabel == i:
# Look it up in the list of labels
- assert t < 256
# Shift a token; we're done with it
self.shift(type, value, newstate, context)
# Pop while we are in an accept-only state
states, first = dfa
# Done with this token
return False
- elif t >= 256:
- # See if it's a symbol and if we're in its first set
- itsdfa = self.grammar.dfas[t]
- itsstates, itsfirst = itsdfa
- if ilabel in itsfirst:
- # Push a symbol
- self.push(t, self.grammar.dfas[t], newstate, context)
- break # To continue the outer while loop
+
else:
if (0, state) in arcs:
# An accepting state, pop it and try something else
# No success finding a transition
raise ParseError("bad input", type, value, context)
- def classify(self, type: int, value: Optional[Text], context: Context) -> List[int]:
+ def classify(self, type: int, value: Text, context: Context) -> List[int]:
"""Turn a token into a label. (Internal)
Depending on whether the value is a soft-keyword or not,
this function may return multiple labels to choose from."""
if type == token.NAME:
# Keep a listing of all used names
- assert value is not None
self.used_names.add(value)
# Check for reserved words
if value in self.grammar.keywords:
raise ParseError("bad token", type, value, context)
return [ilabel]
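# Illustrative result (label numbers invented): a soft keyword classifies to
# both the plain NAME label and its keyword label, which is what triggers the
# Recorder-based lookahead in addtoken(); ordinary names get a single label:
#
#     parser.classify(token.NAME, "match", ctx)  # -> [9, 101]
#     parser.classify(token.NAME, "spam", ctx)   # -> [9]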
- def shift(
- self, type: int, value: Optional[Text], newstate: int, context: Context
- ) -> None:
+ def shift(self, type: int, value: Text, newstate: int, context: Context) -> None:
"""Shift a token. (Internal)"""
- dfa, state, node = self.stack[-1]
- assert value is not None
- assert context is not None
- rawnode: RawNode = (type, value, context, None)
- newnode = self.convert(self.grammar, rawnode)
- if newnode is not None:
+ if self.is_backtracking:
+ dfa, state, _ = self.stack[-1]
+ self.stack[-1] = (dfa, newstate, DUMMY_NODE)
+ else:
+ dfa, state, node = self.stack[-1]
+ rawnode: RawNode = (type, value, context, None)
+ newnode = convert(self.grammar, rawnode)
assert node[-1] is not None
node[-1].append(newnode)
- self.stack[-1] = (dfa, newstate, node)
+ self.stack[-1] = (dfa, newstate, node)
def push(self, type: int, newdfa: DFAS, newstate: int, context: Context) -> None:
"""Push a nonterminal. (Internal)"""
- dfa, state, node = self.stack[-1]
- newnode: RawNode = (type, None, context, [])
- self.stack[-1] = (dfa, newstate, node)
- self.stack.append((newdfa, 0, newnode))
+ if self.is_backtracking:
+ dfa, state, _ = self.stack[-1]
+ self.stack[-1] = (dfa, newstate, DUMMY_NODE)
+ self.stack.append((newdfa, 0, DUMMY_NODE))
+ else:
+ dfa, state, node = self.stack[-1]
+ newnode: RawNode = (type, None, context, [])
+ self.stack[-1] = (dfa, newstate, node)
+ self.stack.append((newdfa, 0, newnode))
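# Net effect while backtracking (sketch): shift()/push()/pop() keep the
# (dfa, state) transitions exact but only record DUMMY_NODE frames, so a
# speculative parse never calls convert(), allocates Leaf/Node objects, or
# mutates the tree that the saved _start_point still references:
#
#     with recorder.backtrack():
#         parser.shift(type, value, newstate, context)  # no convert(), no append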
def pop(self) -> None:
"""Pop a nonterminal. (Internal)"""
- popdfa, popstate, popnode = self.stack.pop()
- newnode = self.convert(self.grammar, popnode)
- if newnode is not None:
+ if self.is_backtracking:
+ self.stack.pop()
+ else:
+ popdfa, popstate, popnode = self.stack.pop()
+ newnode = convert(self.grammar, popnode)
if self.stack:
dfa, state, node = self.stack[-1]
assert node[-1] is not None