X-Git-Url: https://git.madduck.net/etc/vim.git/blobdiff_plain/eb32729ab562c010d9f644a3bbb2305196b2deb7..b8e2ec728cc09d0f00829a9cffcb54da3efa5760:/src/blib2to3/pgen2/tokenize.py?ds=sidebyside

diff --git a/src/blib2to3/pgen2/tokenize.py b/src/blib2to3/pgen2/tokenize.py
index 2d0cc43..1dea89d 100644
--- a/src/blib2to3/pgen2/tokenize.py
+++ b/src/blib2to3/pgen2/tokenize.py
@@ -35,17 +35,13 @@ from typing import (
     List,
     Optional,
     Set,
-    Text,
     Tuple,
     Pattern,
     Union,
     cast,
 )
 
-if sys.version_info >= (3, 8):
-    from typing import Final
-else:
-    from typing_extensions import Final
+from typing import Final
 
 from blib2to3.pgen2.token import *
 from blib2to3.pgen2.grammar import Grammar
@@ -80,7 +76,7 @@ def maybe(*choices: str) -> str:
 
 
 def _combinations(*l: str) -> Set[str]:
-    return set(x + y for x in l for y in l + ("",) if x.casefold() != y.casefold())
+    return {x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()}
 
 
 Whitespace = r"[ \f\t]*"
@@ -192,7 +188,7 @@ Coord = Tuple[int, int]
 
 
 def printtoken(
-    type: int, token: Text, srow_col: Coord, erow_col: Coord, line: Text
+    type: int, token: str, srow_col: Coord, erow_col: Coord, line: str
 ) -> None:  # for testing
     (srow, scol) = srow_col
     (erow, ecol) = erow_col
@@ -201,10 +197,10 @@ def printtoken(
     )
 
 
-TokenEater = Callable[[int, Text, Coord, Coord, Text], None]
+TokenEater = Callable[[int, str, Coord, Coord, str], None]
 
 
-def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken) -> None:
+def tokenize(readline: Callable[[], str], tokeneater: TokenEater = printtoken) -> None:
     """
     The tokenize() function accepts two parameters: one representing the input
     stream, and one providing an output mechanism for tokenize().
@@ -224,17 +220,17 @@ def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken)
 
 
 # backwards compatible interface
-def tokenize_loop(readline: Callable[[], Text], tokeneater: TokenEater) -> None:
+def tokenize_loop(readline: Callable[[], str], tokeneater: TokenEater) -> None:
     for token_info in generate_tokens(readline):
         tokeneater(*token_info)
 
 
-GoodTokenInfo = Tuple[int, Text, Coord, Coord, Text]
+GoodTokenInfo = Tuple[int, str, Coord, Coord, str]
 TokenInfo = Union[Tuple[int, str], GoodTokenInfo]
 
 
 class Untokenizer:
-    tokens: List[Text]
+    tokens: List[str]
     prev_row: int
     prev_col: int
 
@@ -250,13 +246,13 @@ class Untokenizer:
         if col_offset:
             self.tokens.append(" " * col_offset)
 
-    def untokenize(self, iterable: Iterable[TokenInfo]) -> Text:
+    def untokenize(self, iterable: Iterable[TokenInfo]) -> str:
         for t in iterable:
             if len(t) == 2:
                 self.compat(cast(Tuple[int, str], t), iterable)
                 break
             tok_type, token, start, end, line = cast(
-                Tuple[int, Text, Coord, Coord, Text], t
+                Tuple[int, str, Coord, Coord, str], t
             )
             self.add_whitespace(start)
             self.tokens.append(token)
@@ -266,7 +262,7 @@ class Untokenizer:
                 self.prev_col = 0
         return "".join(self.tokens)
 
-    def compat(self, token: Tuple[int, Text], iterable: Iterable[TokenInfo]) -> None:
+    def compat(self, token: Tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
         startline = False
         indents = []
         toks_append = self.tokens.append
@@ -338,7 +334,7 @@ def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
         try:
             return readline()
         except StopIteration:
-            return bytes()
+            return b''
 
     def find_cookie(line: bytes) -> Optional[str]:
         try:
@@ -387,7 +383,7 @@ def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
     return default, [first, second]
 
 
-def untokenize(iterable: Iterable[TokenInfo]) -> Text:
+def untokenize(iterable: Iterable[TokenInfo]) -> str:
     """Transform tokens back into Python source code.
 
     Each element returned by the iterable must be a token sequence
@@ -410,7 +406,7 @@ def untokenize(iterable: Iterable[TokenInfo]) -> Text:
 
 
 def generate_tokens(
-    readline: Callable[[], Text], grammar: Optional[Grammar] = None
+    readline: Callable[[], str], grammar: Optional[Grammar] = None
 ) -> Iterator[GoodTokenInfo]:
     """
     The generate_tokens() generator requires one argument, readline, which
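
The hunks above touch only annotations (Text to str) and small idioms, so the runtime behaviour of generate_tokens() and untokenize() is unchanged. As an illustration of the API the docstrings describe, not part of the patch, the following sketch round-trips a small source string; it assumes black's vendored blib2to3 package is importable, and the sample source and variable names are invented for the example.

import io

from blib2to3.pgen2.tokenize import generate_tokens, untokenize

# Hypothetical sample input, invented for this sketch.
source = "x = 1\nprint(x)\n"

# generate_tokens() yields GoodTokenInfo 5-tuples:
# (token type, token string, (start row, col), (end row, col), physical line).
tokens = list(generate_tokens(io.StringIO(source).readline))

# With full 5-tuples, untokenize() is expected to rebuild the source exactly
# (the round-trip behaviour its docstring describes); plain (type, string)
# 2-tuples fall back to the lossier compat() path shown in the Untokenizer hunks.
print(untokenize(tokens) == source)  # expected: True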
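The tokenize() entry point changed in the same annotation-only way. A minimal sketch of its callback interface, again illustrative rather than part of the patch: the tokeneater callable receives the same five values as each GoodTokenInfo tuple, and the eater function below is a hypothetical stand-in for the default printtoken.

import io
from typing import Tuple

from blib2to3.pgen2.token import tok_name
from blib2to3.pgen2.tokenize import tokenize

Coord = Tuple[int, int]


def eater(type: int, token: str, start: Coord, end: Coord, line: str) -> None:
    # Matches TokenEater = Callable[[int, str, Coord, Coord, str], None];
    # a hypothetical replacement for the default printtoken callback.
    print(tok_name[type], repr(token), start, end)


# readline must behave like a file object's readline(): one line per call.
tokenize(io.StringIO("a + b\n").readline, eater)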