X-Git-Url: https://git.madduck.net/etc/vim.git/blobdiff_plain/a07871b9cd1b5a1469271be6aaac5766f5a0e0fc..193ee766ca496871f93621d6b58d57a6564ff81b:/src/blib2to3/pgen2/tokenize.py

diff --git a/src/blib2to3/pgen2/tokenize.py b/src/blib2to3/pgen2/tokenize.py
index 82ac513..1dea89d 100644
--- a/src/blib2to3/pgen2/tokenize.py
+++ b/src/blib2to3/pgen2/tokenize.py
@@ -34,17 +34,14 @@ from typing import (
     Iterator,
     List,
     Optional,
-    Text,
+    Set,
     Tuple,
     Pattern,
     Union,
     cast,
 )
 
-if sys.version_info >= (3, 8):
-    from typing import Final
-else:
-    from typing_extensions import Final
+from typing import Final
 
 from blib2to3.pgen2.token import *
 from blib2to3.pgen2.grammar import Grammar
@@ -66,20 +63,20 @@ __all__ = [x for x in dir(token) if x[0] != "_"] + [
 del token
 
 
-def group(*choices):
+def group(*choices: str) -> str:
     return "(" + "|".join(choices) + ")"
 
 
-def any(*choices):
+def any(*choices: str) -> str:
     return group(*choices) + "*"
 
 
-def maybe(*choices):
+def maybe(*choices: str) -> str:
     return group(*choices) + "?"
 
 
-def _combinations(*l):
-    return set(x + y for x in l for y in l + ("",) if x.casefold() != y.casefold())
+def _combinations(*l: str) -> Set[str]:
+    return {x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()}
 
 
 Whitespace = r"[ \f\t]*"
@@ -187,19 +184,23 @@ class StopTokenizing(Exception):
     pass
 
 
-def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line):  # for testing
-    (srow, scol) = xxx_todo_changeme
-    (erow, ecol) = xxx_todo_changeme1
+Coord = Tuple[int, int]
+
+
+def printtoken(
+    type: int, token: str, srow_col: Coord, erow_col: Coord, line: str
+) -> None:  # for testing
+    (srow, scol) = srow_col
+    (erow, ecol) = erow_col
     print(
         "%d,%d-%d,%d:\t%s\t%s" % (srow, scol, erow, ecol, tok_name[type], repr(token))
     )
 
 
-Coord = Tuple[int, int]
-TokenEater = Callable[[int, Text, Coord, Coord, Text], None]
+TokenEater = Callable[[int, str, Coord, Coord, str], None]
 
 
-def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken) -> None:
+def tokenize(readline: Callable[[], str], tokeneater: TokenEater = printtoken) -> None:
     """
     The tokenize() function accepts two parameters: one representing the
     input stream, and one providing an output mechanism for tokenize().
@@ -219,18 +220,17 @@ def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken)
 
 
 # backwards compatible interface
-def tokenize_loop(readline, tokeneater):
+def tokenize_loop(readline: Callable[[], str], tokeneater: TokenEater) -> None:
     for token_info in generate_tokens(readline):
         tokeneater(*token_info)
 
 
-GoodTokenInfo = Tuple[int, Text, Coord, Coord, Text]
+GoodTokenInfo = Tuple[int, str, Coord, Coord, str]
 TokenInfo = Union[Tuple[int, str], GoodTokenInfo]
 
 
 class Untokenizer:
-
-    tokens: List[Text]
+    tokens: List[str]
 
     prev_row: int
     prev_col: int
@@ -246,13 +246,13 @@ class Untokenizer:
         if col_offset:
             self.tokens.append(" " * col_offset)
 
-    def untokenize(self, iterable: Iterable[TokenInfo]) -> Text:
+    def untokenize(self, iterable: Iterable[TokenInfo]) -> str:
         for t in iterable:
             if len(t) == 2:
                 self.compat(cast(Tuple[int, str], t), iterable)
                 break
             tok_type, token, start, end, line = cast(
-                Tuple[int, Text, Coord, Coord, Text], t
+                Tuple[int, str, Coord, Coord, str], t
             )
             self.add_whitespace(start)
             self.tokens.append(token)
@@ -262,7 +262,7 @@ class Untokenizer:
                 self.prev_col = 0
         return "".join(self.tokens)
 
-    def compat(self, token: Tuple[int, Text], iterable: Iterable[TokenInfo]) -> None:
+    def compat(self, token: Tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
         startline = False
         indents = []
         toks_append = self.tokens.append
@@ -334,7 +334,7 @@ def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
         try:
             return readline()
         except StopIteration:
-            return bytes()
+            return b''
 
     def find_cookie(line: bytes) -> Optional[str]:
         try:
@@ -383,7 +383,7 @@ def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
     return default, [first, second]
 
 
-def untokenize(iterable: Iterable[TokenInfo]) -> Text:
+def untokenize(iterable: Iterable[TokenInfo]) -> str:
     """Transform tokens back into Python source code.
 
     Each element returned by the iterable must be a token sequence
@@ -406,7 +406,7 @@ def untokenize(iterable: Iterable[TokenInfo]) -> Text:
 
 
 def generate_tokens(
-    readline: Callable[[], Text], grammar: Optional[Grammar] = None
+    readline: Callable[[], str], grammar: Optional[Grammar] = None
 ) -> Iterator[GoodTokenInfo]:
     """
     The generate_tokens() generator requires one argument, readline, which
@@ -603,7 +603,9 @@ def generate_tokens(
                             or endprogs.get(token[1])
                             or endprogs.get(token[2])
                         )
-                        assert maybe_endprog is not None, f"endprog not found for {token}"
+                        assert (
+                            maybe_endprog is not None
+                        ), f"endprog not found for {token}"
                         endprog = maybe_endprog
                         contstr, needcont = line[start:], 1
                         contline = line
@@ -632,7 +634,6 @@ def generate_tokens(
 
                 if token in ("def", "for"):
                     if stashed and stashed[0] == NAME and stashed[1] == "async":
-
                         if token == "def":
                             async_def = True
                             async_def_indent = indents[-1]
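
Note (an illustrative sketch, not part of the patch above): the rewritten _combinations
helper now builds its result with a set comprehension instead of wrapping a generator in
set(). Using only the definition shown in the hunk, a minimal, runnable example of what it
computes looks like this; the sample arguments are hypothetical and chosen only to show
the case-insensitive de-duplication:

    from typing import Set

    def _combinations(*l: str) -> Set[str]:
        # Concatenate every argument with every other argument and with the empty
        # string, skipping case-insensitive duplicates such as "r" + "R".
        return {x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()}

    # For example:
    assert _combinations("r", "b") == {"r", "b", "rb", "br"}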