Maintainers += Shantanu Jain (hauntsaninja) (#3792)
[etc/vim.git] / src / blib2to3 / pgen2 / tokenize.py
index a5e89188d873c2cfdeafd804833a3079d07a59cb..1dea89d7bb89c97914bbe0d21b82ffc113418296 100644 (file)
@@ -35,7 +35,6 @@ from typing import (
     List,
     Optional,
     Set,
-    Text,
     Tuple,
     Pattern,
     Union,
@@ -77,7 +76,7 @@ def maybe(*choices: str) -> str:
 
 
 def _combinations(*l: str) -> Set[str]:
-    return set(x + y for x in l for y in l + ("",) if x.casefold() != y.casefold())
+    return {x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()}
 
 
 Whitespace = r"[ \f\t]*"
@@ -189,7 +188,7 @@ Coord = Tuple[int, int]
 
 
 def printtoken(
-    type: int, token: Text, srow_col: Coord, erow_col: Coord, line: Text
+    type: int, token: str, srow_col: Coord, erow_col: Coord, line: str
 ) -> None:  # for testing
     (srow, scol) = srow_col
     (erow, ecol) = erow_col
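A rough usage sketch of the narrowed printtoken() signature; the NAME constant is assumed to come from blib2to3.pgen2.token and the token values are made up for illustration:

    from blib2to3.pgen2.token import NAME
    # prints the token's start/end coordinates, its type name, and the repr of its text
    printtoken(NAME, "x", (1, 0), (1, 1), "x = 1\n")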
@@ -198,10 +197,10 @@ def printtoken(
     )
 
 
-TokenEater = Callable[[int, Text, Coord, Coord, Text], None]
+TokenEater = Callable[[int, str, Coord, Coord, str], None]
 
 
-def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken) -> None:
+def tokenize(readline: Callable[[], str], tokeneater: TokenEater = printtoken) -> None:
     """
     The tokenize() function accepts two parameters: one representing the
     input stream, and one providing an output mechanism for tokenize().
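A minimal sketch of driving tokenize() with a string-backed readline (io.StringIO is assumed here purely for illustration); tokeneater defaults to printtoken above:

    import io
    source = io.StringIO("x = 1\n")
    # every token is handed to the eater as (type, string, start, end, line)
    tokenize(source.readline)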
@@ -221,17 +220,17 @@ def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken)
 
 
 # backwards compatible interface
-def tokenize_loop(readline: Callable[[], Text], tokeneater: TokenEater) -> None:
+def tokenize_loop(readline: Callable[[], str], tokeneater: TokenEater) -> None:
     for token_info in generate_tokens(readline):
         tokeneater(*token_info)
 
 
-GoodTokenInfo = Tuple[int, Text, Coord, Coord, Text]
+GoodTokenInfo = Tuple[int, str, Coord, Coord, str]
 TokenInfo = Union[Tuple[int, str], GoodTokenInfo]
 
 
 class Untokenizer:
-    tokens: List[Text]
+    tokens: List[str]
     prev_row: int
     prev_col: int
 
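For reference, a GoodTokenInfo is the plain 5-tuple used throughout this module; a hypothetical NAME token taken from the source line "x = 1\n" would look like this (NAME again assumed from blib2to3.pgen2.token):

    from blib2to3.pgen2.token import NAME
    info: GoodTokenInfo = (NAME, "x", (1, 0), (1, 1), "x = 1\n")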
@@ -247,13 +246,13 @@ class Untokenizer:
         if col_offset:
             self.tokens.append(" " * col_offset)
 
-    def untokenize(self, iterable: Iterable[TokenInfo]) -> Text:
+    def untokenize(self, iterable: Iterable[TokenInfo]) -> str:
         for t in iterable:
             if len(t) == 2:
                 self.compat(cast(Tuple[int, str], t), iterable)
                 break
             tok_type, token, start, end, line = cast(
-                Tuple[int, Text, Coord, Coord, Text], t
+                Tuple[int, str, Coord, Coord, str], t
             )
             self.add_whitespace(start)
             self.tokens.append(token)
@@ -263,7 +262,7 @@ class Untokenizer:
                 self.prev_col = 0
         return "".join(self.tokens)
 
-    def compat(self, token: Tuple[int, Text], iterable: Iterable[TokenInfo]) -> None:
+    def compat(self, token: Tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
         startline = False
         indents = []
         toks_append = self.tokens.append
@@ -335,7 +334,7 @@ def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
         try:
             return readline()
         except StopIteration:
-            return bytes()
+            return b''
 
     def find_cookie(line: bytes) -> Optional[str]:
         try:
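A hedged sketch of calling detect_encoding() on a bytes stream (io.BytesIO is assumed here for illustration); the wrapper shown above makes an exhausted readline yield empty bytes instead of raising StopIteration:

    import io
    buf = io.BytesIO(b"# -*- coding: utf-8 -*-\nx = 1\n")
    encoding, lines = detect_encoding(buf.readline)
    # encoding == "utf-8"; lines holds the source line(s) read while looking for the cookie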
@@ -384,7 +383,7 @@ def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
     return default, [first, second]
 
 
-def untokenize(iterable: Iterable[TokenInfo]) -> Text:
+def untokenize(iterable: Iterable[TokenInfo]) -> str:
     """Transform tokens back into Python source code.
 
     Each element returned by the iterable must be a token sequence
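As a round-trip sketch, feeding full 5-tuples from generate_tokens() (defined further down in this module) back through untokenize() should reproduce simple input exactly; io.StringIO is assumed for illustration:

    import io
    source = "x = 1\n"
    tokens = generate_tokens(io.StringIO(source).readline)
    assert untokenize(tokens) == source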
@@ -407,7 +406,7 @@ def untokenize(iterable: Iterable[TokenInfo]) -> Text:
 
 
 def generate_tokens(
-    readline: Callable[[], Text], grammar: Optional[Grammar] = None
+    readline: Callable[[], str], grammar: Optional[Grammar] = None
 ) -> Iterator[GoodTokenInfo]:
     """
     The generate_tokens() generator requires one argument, readline, which
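A brief usage sketch of the generator, with io.StringIO again assumed as the readline source for illustration:

    import io
    for tok_type, tok_str, start, end, line in generate_tokens(io.StringIO("x = 1\n").readline):
        print(tok_type, repr(tok_str), start, end)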