diff --git a/src/blib2to3/pgen2/driver.py b/src/blib2to3/pgen2/driver.py
index af1dc6b..5edd75b 100644
--- a/src/blib2to3/pgen2/driver.py
+++ b/src/blib2to3/pgen2/driver.py
@@ -28,19 +28,92 @@ from typing import (
     List,
     Optional,
     Text,
+    Iterator,
     Tuple,
+    TypeVar,
+    Generic,
     Union,
 )
+from dataclasses import dataclass, field
 
 # Pgen imports
 from . import grammar, parse, token, tokenize, pgen
 from logging import Logger
 from blib2to3.pytree import _Convert, NL
 from blib2to3.pgen2.grammar import Grammar
+from contextlib import contextmanager
 
 Path = Union[str, "os.PathLike[str]"]
 
 
+@dataclass
+class ReleaseRange:
+    start: int
+    end: Optional[int] = None
+    tokens: List[Any] = field(default_factory=list)
+
+    def lock(self) -> None:
+        total_eaten = len(self.tokens)
+        self.end = self.start + total_eaten
+
+
+class TokenProxy:
+    def __init__(self, generator: Any) -> None:
+        self._tokens = generator
+        self._counter = 0
+        self._release_ranges: List[ReleaseRange] = []
+
+    @contextmanager
+    def release(self) -> Iterator["TokenProxy"]:
+        release_range = ReleaseRange(self._counter)
+        self._release_ranges.append(release_range)
+        try:
+            yield self
+        finally:
+            # Lock the last release range to the final position that
+            # has been eaten.
+            release_range.lock()
+
+    def eat(self, point: int) -> Any:
+        eaten_tokens = self._release_ranges[-1].tokens
+        if point < len(eaten_tokens):
+            return eaten_tokens[point]
+        else:
+            while point >= len(eaten_tokens):
+                token = next(self._tokens)
+                eaten_tokens.append(token)
+            return token
+
+    def __iter__(self) -> "TokenProxy":
+        return self
+
+    def __next__(self) -> Any:
+        # If the current position is already inside a locked release
+        # range (i.e. it was looked ahead), replay the cached token;
+        # otherwise pull the next token from the underlying producer.
+        for release_range in self._release_ranges:
+            assert release_range.end is not None
+
+            start, end = release_range.start, release_range.end
+            if start <= self._counter < end:
+                token = release_range.tokens[self._counter - start]
+                break
+        else:
+            token = next(self._tokens)
+        self._counter += 1
+        return token
+
+    def can_advance(self, to: int) -> bool:
+        # Try to eat; if that fails, we can't advance. The eat operation
+        # is cached, so there won't be any additional cost of eating here.
+        try:
+            self.eat(to)
+        except StopIteration:
+            return False
+        else:
+            return True
+
+
 class Driver(object):
     def __init__(
         self,
@@ -57,14 +130,18 @@ class Driver(object):
 
     def parse_tokens(self, tokens: Iterable[Any], debug: bool = False) -> NL:
         """Parse a series of tokens and return the syntax tree."""
         # XXX Move the prefix computation into a wrapper around tokenize.
+        proxy = TokenProxy(tokens)
+
         p = parse.Parser(self.grammar, self.convert)
-        p.setup()
+        p.setup(proxy=proxy)
+
         lineno = 1
         column = 0
         indent_columns = []
         type = value = start = end = line_text = None
         prefix = ""
-        for quintuple in tokens:
+
+        for quintuple in proxy:
             type, value, start, end, line_text = quintuple
             if start != (lineno, column):
                 assert (lineno, column) <= start, ((lineno, column), start)
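
A minimal sketch, not part of the diff, of how the TokenProxy checkpointing above behaves, assuming the class is importable from Black's vendored blib2to3 as shown; plain strings stand in for the tokenizer's (type, value, start, end, line_text) quintuples, since the proxy never inspects the tokens it buffers:

    from blib2to3.pgen2.driver import TokenProxy

    proxy = TokenProxy(iter(["def", "f", "(", ")", ":"]))

    # Inside release(), eat(n) peeks n tokens past the checkpoint
    # without advancing the proxy's own counter.
    with proxy.release() as peek:
        assert peek.eat(0) == "def"
        assert peek.eat(1) == "f"
        assert peek.can_advance(4)       # a fifth token exists
        assert not peek.can_advance(5)   # the stream ends there

    # Once the range is locked, normal iteration replays the buffered
    # tokens first, then falls through to the underlying generator.
    assert next(proxy) == "def"
    assert next(proxy) == "f"
    assert list(proxy) == ["(", ")", ":"]

This is presumably what p.setup(proxy=proxy) buys the parser in the second hunk: it can speculate about upcoming tokens and back off without consuming the stream, while straight-line parsing pays only the cost of replaying the cached tokens.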