Standardise newlines after module-level docstrings (#3932)
[etc/vim.git] / src / blib2to3 / pgen2 / driver.py
index 1741b33c5103d0719b499309ecd0e6049c11cfa4..e629843f8b986ea792d59a150ff64018d2108bcb 100644 (file)
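The commit title refers to Black's formatting behaviour; the hunks below are housekeeping in the vendored parser (import sorting, dropping the redundant object base, and replacing the deprecated typing.Text alias with plain str). As a rough illustration of the titled change, assuming the standardised style is exactly one blank line between a module-level docstring and the first statement:

    """Module docstring."""

    import os  # exactly one blank line above, however many the author typed
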
@@ -17,33 +17,21 @@ __all__ = ["Driver", "load_grammar"]
 
 # Python imports
 import io
-import os
 import logging
+import os
 import pkgutil
 import sys
-from typing import (
-    Any,
-    cast,
-    IO,
-    Iterable,
-    List,
-    Optional,
-    Text,
-    Iterator,
-    Tuple,
-    TypeVar,
-    Generic,
-    Union,
-)
 from contextlib import contextmanager
 from dataclasses import dataclass, field
-
-# Pgen imports
-from . import grammar, parse, token, tokenize, pgen
 from logging import Logger
-from blib2to3.pytree import NL
+from typing import IO, Any, Iterable, Iterator, List, Optional, Tuple, Union, cast
+
 from blib2to3.pgen2.grammar import Grammar
 from blib2to3.pgen2.tokenize import GoodTokenInfo
+from blib2to3.pytree import NL
+
+# Pgen imports
+from . import grammar, parse, pgen, token, tokenize
 
 Path = Union[str, "os.PathLike[str]"]
 
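One retained context line deserves a note: the Path alias quotes the subscripted half of the union. A minimal sketch of why (the comment is my gloss, not upstream's):

    import os
    from typing import Union

    # The quoted half stays a string annotation, so os.PathLike[str] is never
    # subscripted at runtime -- os.PathLike only became subscriptable in 3.9.
    Path = Union[str, "os.PathLike[str]"]
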
@@ -116,7 +104,7 @@ class TokenProxy:
             return True
 
 
-class Driver(object):
+class Driver:
     def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None:
         self.grammar = grammar
         if logger is None:
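Dropping the explicit object base is a no-op on Python 3, where every class is new-style; both spellings yield the same MRO. A quick check:

    class Old(object): ...
    class New: ...

    # Identical inheritance either way; the explicit base adds nothing.
    assert Old.__mro__ == (Old, object)
    assert New.__mro__ == (New, object)
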
@@ -189,30 +177,30 @@ class Driver(object):
         assert p.rootnode is not None
         return p.rootnode
 
-    def parse_stream_raw(self, stream: IO[Text], debug: bool = False) -> NL:
+    def parse_stream_raw(self, stream: IO[str], debug: bool = False) -> NL:
         """Parse a stream and return the syntax tree."""
         tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
         return self.parse_tokens(tokens, debug)
 
-    def parse_stream(self, stream: IO[Text], debug: bool = False) -> NL:
+    def parse_stream(self, stream: IO[str], debug: bool = False) -> NL:
         """Parse a stream and return the syntax tree."""
         return self.parse_stream_raw(stream, debug)
 
     def parse_file(
-        self, filename: Path, encoding: Optional[Text] = None, debug: bool = False
+        self, filename: Path, encoding: Optional[str] = None, debug: bool = False
     ) -> NL:
         """Parse a file and return the syntax tree."""
-        with io.open(filename, "r", encoding=encoding) as stream:
+        with open(filename, encoding=encoding) as stream:
             return self.parse_stream(stream, debug)
 
-    def parse_string(self, text: Text, debug: bool = False) -> NL:
+    def parse_string(self, text: str, debug: bool = False) -> NL:
         """Parse a string and return the syntax tree."""
         tokens = tokenize.generate_tokens(
             io.StringIO(text).readline, grammar=self.grammar
         )
         return self.parse_tokens(tokens, debug)
 
-    def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Text]:
+    def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]:
         lines: List[str] = []
         current_line = ""
         current_column = 0
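The Text -> str and io.open -> open substitutions in this hunk are pure renames on Python 3, where both old names are plain aliases of the new ones:

    import io
    import typing

    assert typing.Text is str  # Text has only ever aliased str on Python 3
    assert io.open is open     # the builtin open is literally io.open
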
@@ -240,7 +228,7 @@ class Driver(object):
         return "".join(lines), current_line
 
 
-def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:
+def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> str:
     head, tail = os.path.splitext(gt)
     if tail == ".txt":
         tail = ""
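The hunk cuts off before the pickle name is assembled. In lib2to3, from which this module descends, the name embeds the running interpreter version; assuming blib2to3 kept that scheme:

    import sys

    # For gt="Grammar.txt" this yields e.g. "Grammar3.12.1.final.0.pickle".
    head = "Grammar"  # splitext() result with the ".txt" suffix dropped
    name = head + ".".join(map(str, sys.version_info)) + ".pickle"
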
@@ -252,8 +240,8 @@ def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:
 
 
 def load_grammar(
-    gt: Text = "Grammar.txt",
-    gp: Optional[Text] = None,
+    gt: str = "Grammar.txt",
+    gp: Optional[str] = None,
     save: bool = True,
     force: bool = False,
     logger: Optional[Logger] = None,
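Together with Driver, load_grammar is the module's public surface per __all__ in the first hunk. A hedged usage sketch, relying only on the signatures visible above:

    from blib2to3.pgen2 import driver

    # Load Grammar.txt, regenerating (and, with save=True, re-pickling) if stale.
    g = driver.load_grammar("Grammar.txt")
    d = driver.Driver(g)
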
@@ -276,7 +264,7 @@ def load_grammar(
     return g
 
 
-def _newer(a: Text, b: Text) -> bool:
+def _newer(a: str, b: str) -> bool:
     """Inquire whether file a was written since file b."""
     if not os.path.exists(a):
         return False
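Only the first guard of _newer survives the hunk; the remainder presumably mirrors lib2to3, which falls through to a modification-time comparison:

    import os

    def newer(a: str, b: str) -> bool:  # sketch, assuming the lib2to3 body
        if not os.path.exists(a):
            return False  # a missing file was never written since b
        if not os.path.exists(b):
            return True   # anything is newer than a file that is not there
        return os.path.getmtime(a) >= os.path.getmtime(b)
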
@@ -286,7 +274,7 @@ def _newer(a: Text, b: Text) -> bool:
 
 
 def load_packaged_grammar(
-    package: str, grammar_source: Text, cache_dir: Optional[Path] = None
+    package: str, grammar_source: str, cache_dir: Optional[Path] = None
 ) -> grammar.Grammar:
     """Normally, loads a pickled grammar by doing
         pkgutil.get_data(package, pickled_grammar)
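pkgutil.get_data is a stdlib call: it returns the named resource's bytes, or None if it cannot be located. A minimal standalone use (the resource name here is an assumption):

    import pkgutil

    data = pkgutil.get_data("blib2to3", "Grammar.txt")  # bytes or None
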
@@ -309,7 +297,7 @@ def load_packaged_grammar(
     return g
 
 
-def main(*args: Text) -> bool:
+def main(*args: str) -> bool:
     """Main program, when run as a script: produce grammar pickle files.
 
     Calls load_grammar for each argument, a path to a grammar text file.
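Per its docstring, main() pickles one grammar per argument. Assuming blib2to3 kept lib2to3's script hook, the equivalent by hand would be:

    import sys
    from blib2to3.pgen2 import driver

    # One load_grammar call per path given; force=True is lib2to3's choice
    # for the script entry point and is an assumption here.
    for gt in sys.argv[1:]:
        driver.load_grammar(gt, save=True, force=True)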