--- a/src/blib2to3/pgen2/driver.py
+++ b/src/blib2to3/pgen2/driver.py
import sys
from typing import (
    Any,
    IO,
    Iterable,
    List,
    Optional,
+from contextlib import contextmanager
from dataclasses import dataclass, field
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
from logging import Logger
-from blib2to3.pytree import _Convert, NL
+from blib2to3.pytree import NL
from blib2to3.pgen2.grammar import Grammar
-from contextlib import contextmanager
+from blib2to3.pgen2.tokenize import GoodTokenInfo
Path = Union[str, "os.PathLike[str]"]
    def can_advance(self, to: int) -> bool:
        # Try to eat, fail if it can't. The eat operation is cached
-        # so there wont be any additional cost of eating here
+        # so there won't be any additional cost of eating here
        try:
            self.eat(to)
        except StopIteration:
-class Driver(object):
-    def __init__(
-        self,
-        grammar: Grammar,
-        convert: Optional[_Convert] = None,
-        logger: Optional[Logger] = None,
-    ) -> None:
+class Driver:
+    def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None:
        self.grammar = grammar
        if logger is None:
            logger = logging.getLogger(__name__)
        self.logger = logger
-    def parse_tokens(self, tokens: Iterable[Any], debug: bool = False) -> NL:
+    def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) -> NL:
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
proxy = TokenProxy(tokens)
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
proxy = TokenProxy(tokens)
-        p = parse.Parser(self.grammar, self.convert)
+        p = parse.Parser(self.grammar)
        p.setup(proxy=proxy)
        lineno = 1
        column = 0
+        indent_columns: List[int] = []
        type = value = start = end = line_text = None
        prefix = ""
            if type == token.OP:
                type = grammar.opmap[value]
            if debug:
+                assert type is not None
                self.logger.debug(
                    "%s %r (prefix=%r)", token.tok_name[type], value, prefix
                )
            elif type == token.DEDENT:
                _indent_col = indent_columns.pop()
                prefix, _prefix = self._partially_consume_prefix(prefix, _indent_col)
-            if p.addtoken(type, value, (prefix, start)):
+            if p.addtoken(cast(int, type), value, (prefix, start)):
                if debug:
                    self.logger.debug("Stop.")
                break
        assert p.rootnode is not None
        return p.rootnode
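A minimal usage sketch for the API after this hunk (not part of the patch; the
grammar path and source text are assumptions): the driver is now built without
a convert callback, and parse_tokens consumes blib2to3's GoodTokenInfo tuples.

    import io
    from blib2to3.pgen2 import driver, tokenize

    g = driver.load_grammar("Grammar.txt")  # assumed path to a pgen grammar file
    d = driver.Driver(g)                    # no convert argument any more
    toks = tokenize.generate_tokens(io.StringIO("x = 1\n").readline, grammar=g)
    tree = d.parse_tokens(toks)             # root NL node of the syntax tree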
-    def parse_stream_raw(self, stream: IO[Text], debug: bool = False) -> NL:
+    def parse_stream_raw(self, stream: IO[str], debug: bool = False) -> NL:
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
return self.parse_tokens(tokens, debug)
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
return self.parse_tokens(tokens, debug)
-    def parse_stream(self, stream: IO[Text], debug: bool = False) -> NL:
+    def parse_stream(self, stream: IO[str], debug: bool = False) -> NL:
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(
-        self, filename: Path, encoding: Optional[Text] = None, debug: bool = False
+        self, filename: Path, encoding: Optional[str] = None, debug: bool = False
    ) -> NL:
        """Parse a file and return the syntax tree."""
-        with io.open(filename, "r", encoding=encoding) as stream:
+        with open(filename, encoding=encoding) as stream:
            return self.parse_stream(stream, debug)
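Since parse_file now uses the builtin open, it still accepts anything matching
the Path alias above (str or os.PathLike[str]). A sketch, reusing the
hypothetical d from the previous example with an assumed source file:

    import pathlib

    tree = d.parse_file("example.py")                # plain str path
    tree = d.parse_file(pathlib.Path("example.py"))  # os.PathLike[str] works too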
-    def parse_string(self, text: Text, debug: bool = False) -> NL:
+    def parse_string(self, text: str, debug: bool = False) -> NL:
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(
io.StringIO(text).readline, grammar=self.grammar
)
return self.parse_tokens(tokens, debug)
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(
io.StringIO(text).readline, grammar=self.grammar
)
return self.parse_tokens(tokens, debug)
-    def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Text]:
+    def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]:
        lines: List[str] = []
        current_line = ""
        current_column = 0
        return "".join(lines), current_line
-def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:
+def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> str:
    head, tail = os.path.splitext(gt)
    if tail == ".txt":
        tail = ""
-    gt: Text = "Grammar.txt",
-    gp: Optional[Text] = None,
+    gt: str = "Grammar.txt",
+    gp: Optional[str] = None,
    save: bool = True,
    force: bool = False,
    logger: Optional[Logger] = None,
        logger = logging.getLogger(__name__)
    gp = _generate_pickle_name(gt) if gp is None else gp
    if force or not _newer(gp, gt):
- logger.info("Generating grammar tables from %s", gt)
        g: grammar.Grammar = pgen.generate_grammar(gt)
        if save:
- logger.info("Writing grammar tables to %s", gp)
- except OSError as e:
- logger.info("Writing failed: %s", e)
+ except OSError:
+ # Ignore error, caching is not vital.
+ pass
    else:
        g = grammar.Grammar()
        g.load(gp)
    return g
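With the rewritten error handling, a failed pickle write is silently ignored
instead of logged: the cache is an optimization, not a requirement. A sketch
of the caching contract (the grammar path is an assumption):

    from blib2to3.pgen2 import driver

    # First call: generates tables from the text grammar and tries to cache
    # them in the pickle file named by _generate_pickle_name(gt).
    g = driver.load_grammar("Grammar.txt")

    # Reuses the pickle while it is newer than Grammar.txt (see _newer);
    # force=True always regenerates, save=False skips writing the cache.
    g = driver.load_grammar("Grammar.txt", force=True, save=False)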
-def _newer(a: Text, b: Text) -> bool:
+def _newer(a: str, b: str) -> bool:
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
def load_packaged_grammar(
-    package: str, grammar_source: Text, cache_dir: Optional[Path] = None
+    package: str, grammar_source: str, cache_dir: Optional[Path] = None
) -> grammar.Grammar:
    """Normally, loads a pickled grammar by doing
        pkgutil.get_data(package, pickled_grammar)
-def main(*args: Text) -> bool:
+def main(*args: str) -> bool:
"""Main program, when run as a script: produce grammar pickle files.
Calls load_grammar for each argument, a path to a grammar text file.
"""Main program, when run as a script: produce grammar pickle files.
Calls load_grammar for each argument, a path to a grammar text file.