X-Git-Url: https://git.madduck.net/etc/vim.git/blobdiff_plain/2082a325fdd14f0aabd88f7f12a20f9fb085c538..cd02c2809b193e17aa7c43f8dd0fae4695898184:/src/blib2to3/pgen2/tokenize.py?ds=sidebyside

diff --git a/src/blib2to3/pgen2/tokenize.py b/src/blib2to3/pgen2/tokenize.py
index bad79b2..2d0cc43 100644
--- a/src/blib2to3/pgen2/tokenize.py
+++ b/src/blib2to3/pgen2/tokenize.py
@@ -27,25 +27,33 @@ are the same, except instead of generating tokens, tokeneater is a callback
 function to which the 5 fields described above are passed as 5 arguments, each
 time a new token is found."""
 
+import sys
 from typing import (
     Callable,
     Iterable,
     Iterator,
     List,
     Optional,
+    Set,
     Text,
     Tuple,
     Pattern,
     Union,
     cast,
 )
+
+if sys.version_info >= (3, 8):
+    from typing import Final
+else:
+    from typing_extensions import Final
+
 from blib2to3.pgen2.token import *
 from blib2to3.pgen2.grammar import Grammar
 
 __author__ = "Ka-Ping Yee <ping@lfw.org>"
 __credits__ = "GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro"
 
-import regex as re
+import re
 from codecs import BOM_UTF8, lookup
 from blib2to3.pgen2.token import *
 
@@ -59,19 +67,19 @@ __all__ = [x for x in dir(token) if x[0] != "_"] + [
 del token
 
 
-def group(*choices):
+def group(*choices: str) -> str:
     return "(" + "|".join(choices) + ")"
 
 
-def any(*choices):
+def any(*choices: str) -> str:
     return group(*choices) + "*"
 
 
-def maybe(*choices):
+def maybe(*choices: str) -> str:
     return group(*choices) + "?"
 
 
-def _combinations(*l):
+def _combinations(*l: str) -> Set[str]:
     return set(x + y for x in l for y in l + ("",) if x.casefold() != y.casefold())
 
 
@@ -79,7 +87,7 @@ Whitespace = r"[ \f\t]*"
 Comment = r"#[^\r\n]*"
 Ignore = Whitespace + any(r"\\\r?\n" + Whitespace) + maybe(Comment)
 Name = (  # this is invalid but it's fine because Name comes after Number in all groups
-    r"\w+"
+    r"[^\s#\(\)\[\]\{\}+\-*/!@$%^&=|;:'\",\.<>/?`~\\]+"
 )
 
 Binnumber = r"0[bB]_?[01]+(?:_[01]+)*"
@@ -139,7 +147,7 @@ ContStr = group(
 PseudoExtras = group(r"\\\r?\n", Comment, Triple)
 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
 
-pseudoprog = re.compile(PseudoToken, re.UNICODE)
+pseudoprog: Final = re.compile(PseudoToken, re.UNICODE)
 
 single3prog = re.compile(Single3)
 double3prog = re.compile(Double3)
@@ -149,22 +157,21 @@ _strprefixes = (
     | {"u", "U", "ur", "uR", "Ur", "UR"}
 )
 
-endprogs = {
+endprogs: Final = {
     "'": re.compile(Single),
     '"': re.compile(Double),
     "'''": single3prog,
     '"""': double3prog,
     **{f"{prefix}'''": single3prog for prefix in _strprefixes},
     **{f'{prefix}"""': double3prog for prefix in _strprefixes},
-    **{prefix: None for prefix in _strprefixes},
 }
 
-triple_quoted = (
+triple_quoted: Final = (
     {"'''", '"""'}
     | {f"{prefix}'''" for prefix in _strprefixes}
     | {f'{prefix}"""' for prefix in _strprefixes}
 )
-single_quoted = (
+single_quoted: Final = (
     {"'", '"'}
     | {f"{prefix}'" for prefix in _strprefixes}
     | {f'{prefix}"' for prefix in _strprefixes}
@@ -181,15 +188,19 @@ class StopTokenizing(Exception):
     pass
 
 
-def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line):  # for testing
-    (srow, scol) = xxx_todo_changeme
-    (erow, ecol) = xxx_todo_changeme1
+Coord = Tuple[int, int]
+
+
+def printtoken(
+    type: int, token: Text, srow_col: Coord, erow_col: Coord, line: Text
+) -> None:  # for testing
+    (srow, scol) = srow_col
+    (erow, ecol) = erow_col
     print(
         "%d,%d-%d,%d:\t%s\t%s" % (srow, scol, erow, ecol, tok_name[type], repr(token))
     )
 
 
-Coord = Tuple[int, int]
 TokenEater = Callable[[int, Text, Coord, Coord, Text], None]
 
 
@@ -213,7 +224,7 @@ def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken)
 
 
 # backwards compatible interface
-def tokenize_loop(readline, tokeneater):
+def tokenize_loop(readline: Callable[[], Text], tokeneater: TokenEater) -> None:
     for token_info in generate_tokens(readline):
         tokeneater(*token_info)
 
@@ -223,7 +234,6 @@ TokenInfo = Union[Tuple[int, str], GoodTokenInfo]
 
 
 class Untokenizer:
-
     tokens: List[Text]
     prev_row: int
     prev_col: int
@@ -286,7 +296,7 @@ class Untokenizer:
 
 
 cookie_re = re.compile(r"^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)", re.ASCII)
-blank_re = re.compile(br"^[ \t\f]*(?:[#\r\n]|$)", re.ASCII)
+blank_re = re.compile(rb"^[ \t\f]*(?:[#\r\n]|$)", re.ASCII)
 
 
 def _get_normal_name(orig_enc: str) -> str:
@@ -418,7 +428,7 @@ def generate_tokens(
     logical line; continuation lines are included.
     """
     lnum = parenlev = continued = 0
-    numchars = "0123456789"
+    numchars: Final[str] = "0123456789"
    contstr, needcont = "", 0
     contline: Optional[str] = None
     indents = [0]
@@ -427,7 +437,7 @@ def generate_tokens(
     # `await` as keywords.
     async_keywords = False if grammar is None else grammar.async_keywords
     # 'stashed' and 'async_*' are used for async/await parsing
-    stashed = None
+    stashed: Optional[GoodTokenInfo] = None
     async_def = False
     async_def_indent = 0
     async_def_nl = False
@@ -440,7 +450,7 @@ def generate_tokens(
             line = readline()
         except StopIteration:
             line = ""
-        lnum = lnum + 1
+        lnum += 1
         pos, max = 0, len(line)
 
         if contstr:  # continued string
@@ -481,14 +491,14 @@ def generate_tokens(
             column = 0
             while pos < max:  # measure leading whitespace
                 if line[pos] == " ":
-                    column = column + 1
+                    column += 1
                 elif line[pos] == "\t":
                     column = (column // tabsize + 1) * tabsize
                 elif line[pos] == "\f":
                     column = 0
                 else:
                     break
-                pos = pos + 1
+                pos += 1
             if pos == max:
                 break
 
@@ -507,7 +517,7 @@ def generate_tokens(
                         COMMENT,
                         comment_token,
                         (lnum, pos),
-                        (lnum, pos + len(comment_token)),
+                        (lnum, nl_pos),
                         line,
                     )
                     yield (NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line)
@@ -592,11 +602,15 @@ def generate_tokens(
                 ):
                     if token[-1] == "\n":  # continued string
                         strstart = (lnum, start)
-                        endprog = (
-                            endprogs[initial]
-                            or endprogs[token[1]]
-                            or endprogs[token[2]]
+                        maybe_endprog = (
+                            endprogs.get(initial)
+                            or endprogs.get(token[1])
+                            or endprogs.get(token[2])
                         )
+                        assert (
+                            maybe_endprog is not None
+                        ), f"endprog not found for {token}"
+                        endprog = maybe_endprog
                         contstr, needcont = line[start:], 1
                         contline = line
                         break
@@ -624,7 +638,6 @@ def generate_tokens(
 
                     if token in ("def", "for"):
                         if stashed and stashed[0] == NAME and stashed[1] == "async":
-
                             if token == "def":
                                 async_def = True
                                 async_def_indent = indents[-1]
@@ -652,16 +665,16 @@ def generate_tokens(
                         continued = 1
                     else:
                         if initial in "([{":
-                            parenlev = parenlev + 1
+                            parenlev += 1
                         elif initial in ")]}":
-                            parenlev = parenlev - 1
+                            parenlev -= 1
                         if stashed:
                             yield stashed
                             stashed = None
                         yield (OP, token, spos, epos, line)
                 else:
                     yield (ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos + 1), line)
-                    pos = pos + 1
+                    pos += 1
 
     if stashed:
         yield stashed
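
For reference, a minimal usage sketch of the two tokenizer entry points touched by the diff above. This is not part of the change itself; it assumes blib2to3 is importable (it is vendored inside Black), and the source string is made up for illustration.

    import io

    from blib2to3.pgen2 import tokenize
    from blib2to3.pgen2.token import tok_name

    source = "x = 1  # a comment\n"

    # Generator interface: generate_tokens() yields 5-tuples of
    # (type, string, (start row, start col), (end row, end col), logical line).
    for tok_type, tok_str, start, end, line in tokenize.generate_tokens(
        io.StringIO(source).readline
    ):
        print(tok_name[tok_type], repr(tok_str), start, end)

    # Callback interface: tokenize() passes the same 5 fields to the tokeneater
    # callback (printtoken by default) each time a new token is found.
    tokenize.tokenize(io.StringIO(source).readline, tokenize.printtoken)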