From: Shantanu <12621235+hauntsaninja@users.noreply.github.com>
Date: Sun, 9 Jul 2023 22:05:01 +0000 (-0700)
Subject: Run pyupgrade on blib2to3 and src (#3771)
X-Git-Url: https://git.madduck.net/etc/vim.git/commitdiff_plain/0b4d7d55f78913be9e0a3738681ef3aafd5d9a5a?ds=sidebyside

Run pyupgrade on blib2to3 and src (#3771)
---
diff --git a/src/black/files.py b/src/black/files.py
index 65b2d0a..4e2209e 100644
--- a/src/black/files.py
+++ b/src/black/files.py
@@ -42,7 +42,7 @@ if TYPE_CHECKING:
     import colorama  # noqa: F401


-@lru_cache()
+@lru_cache
 def find_project_root(
     srcs: Sequence[str], stdin_filename: Optional[str] = None
 ) -> Tuple[Path, str]:
@@ -212,7 +212,7 @@ def strip_specifier_set(specifier_set: SpecifierSet) -> SpecifierSet:
     return SpecifierSet(",".join(str(s) for s in specifiers))


-@lru_cache()
+@lru_cache
 def find_user_pyproject_toml() -> Path:
     r"""Return the path to the top-level user configuration for black.

@@ -232,7 +232,7 @@ def find_user_pyproject_toml() -> Path:
     return user_config_path.resolve()


-@lru_cache()
+@lru_cache
 def get_gitignore(root: Path) -> PathSpec:
     """Return a PathSpec matching gitignore content if present."""
     gitignore = root / ".gitignore"
diff --git a/src/black/handle_ipynb_magics.py b/src/black/handle_ipynb_magics.py
index 4324819..2a2d622 100644
--- a/src/black/handle_ipynb_magics.py
+++ b/src/black/handle_ipynb_magics.py
@@ -55,7 +55,7 @@ class Replacement:
     src: str


-@lru_cache()
+@lru_cache
 def jupyter_dependencies_are_installed(*, verbose: bool, quiet: bool) -> bool:
     try:
         # isort: off
diff --git a/src/blib2to3/pgen2/conv.py b/src/blib2to3/pgen2/conv.py
index fa9825e..04eccfa 100644
--- a/src/blib2to3/pgen2/conv.py
+++ b/src/blib2to3/pgen2/conv.py
@@ -63,7 +63,7 @@ class Converter(grammar.Grammar):
         try:
             f = open(filename)
         except OSError as err:
-            print("Can't open %s: %s" % (filename, err))
+            print(f"Can't open {filename}: {err}")
             return False
         self.symbol2number = {}
         self.number2symbol = {}
@@ -72,7 +72,7 @@ class Converter(grammar.Grammar):
             lineno += 1
             mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
             if not mo and line.strip():
-                print("%s(%s): can't parse %s" % (filename, lineno, line.strip()))
+                print(f"{filename}({lineno}): can't parse {line.strip()}")
             else:
                 symbol, number = mo.groups()
                 number = int(number)
@@ -113,7 +113,7 @@ class Converter(grammar.Grammar):
         try:
             f = open(filename)
         except OSError as err:
-            print("Can't open %s: %s" % (filename, err))
+            print(f"Can't open {filename}: {err}")
             return False
         # The code below essentially uses f's iterator-ness!
         lineno = 0
diff --git a/src/blib2to3/pgen2/driver.py b/src/blib2to3/pgen2/driver.py
index 1741b33..bb73016 100644
--- a/src/blib2to3/pgen2/driver.py
+++ b/src/blib2to3/pgen2/driver.py
@@ -28,11 +28,8 @@ from typing import (
     Iterable,
     List,
     Optional,
-    Text,
     Iterator,
     Tuple,
-    TypeVar,
-    Generic,
     Union,
 )
 from contextlib import contextmanager
@@ -116,7 +113,7 @@ class TokenProxy:
             return True


-class Driver(object):
+class Driver:
     def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None:
         self.grammar = grammar
         if logger is None:
@@ -189,30 +186,30 @@ class Driver(object):
         assert p.rootnode is not None
         return p.rootnode

-    def parse_stream_raw(self, stream: IO[Text], debug: bool = False) -> NL:
+    def parse_stream_raw(self, stream: IO[str], debug: bool = False) -> NL:
         """Parse a stream and return the syntax tree."""
         tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
         return self.parse_tokens(tokens, debug)

-    def parse_stream(self, stream: IO[Text], debug: bool = False) -> NL:
+    def parse_stream(self, stream: IO[str], debug: bool = False) -> NL:
         """Parse a stream and return the syntax tree."""
         return self.parse_stream_raw(stream, debug)

     def parse_file(
-        self, filename: Path, encoding: Optional[Text] = None, debug: bool = False
+        self, filename: Path, encoding: Optional[str] = None, debug: bool = False
     ) -> NL:
         """Parse a file and return the syntax tree."""
-        with io.open(filename, "r", encoding=encoding) as stream:
+        with open(filename, encoding=encoding) as stream:
             return self.parse_stream(stream, debug)

-    def parse_string(self, text: Text, debug: bool = False) -> NL:
+    def parse_string(self, text: str, debug: bool = False) -> NL:
         """Parse a string and return the syntax tree."""
         tokens = tokenize.generate_tokens(
             io.StringIO(text).readline, grammar=self.grammar
         )
         return self.parse_tokens(tokens, debug)

-    def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Text]:
+    def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]:
         lines: List[str] = []
         current_line = ""
         current_column = 0
@@ -240,7 +237,7 @@ class Driver(object):
         return "".join(lines), current_line


-def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:
+def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> str:
     head, tail = os.path.splitext(gt)
     if tail == ".txt":
         tail = ""
@@ -252,8 +249,8 @@ def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:


 def load_grammar(
-    gt: Text = "Grammar.txt",
-    gp: Optional[Text] = None,
+    gt: str = "Grammar.txt",
+    gp: Optional[str] = None,
     save: bool = True,
     force: bool = False,
     logger: Optional[Logger] = None,
@@ -276,7 +273,7 @@ def load_grammar(
     return g


-def _newer(a: Text, b: Text) -> bool:
+def _newer(a: str, b: str) -> bool:
     """Inquire whether file a was written since file b."""
     if not os.path.exists(a):
         return False
@@ -286,7 +283,7 @@ def _newer(a: Text, b: Text) -> bool:


 def load_packaged_grammar(
-    package: str, grammar_source: Text, cache_dir: Optional[Path] = None
+    package: str, grammar_source: str, cache_dir: Optional[Path] = None
 ) -> grammar.Grammar:
     """Normally, loads a pickled grammar by doing
         pkgutil.get_data(package, pickled_grammar)
@@ -309,7 +306,7 @@ def load_packaged_grammar(
     return g


-def main(*args: Text) -> bool:
+def main(*args: str) -> bool:
     """Main program, when run as a script: produce grammar pickle files.

     Calls load_grammar for each argument, a path to a grammar text file.
diff --git a/src/blib2to3/pgen2/grammar.py b/src/blib2to3/pgen2/grammar.py
index 337a64f..1f3fdc5 100644
--- a/src/blib2to3/pgen2/grammar.py
+++ b/src/blib2to3/pgen2/grammar.py
@@ -16,19 +16,19 @@ fallback token code OP, but the parser needs the actual token code.
 import os
 import pickle
 import tempfile
-from typing import Any, Dict, List, Optional, Text, Tuple, TypeVar, Union
+from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union

 # Local imports
 from . import token

 _P = TypeVar("_P", bound="Grammar")
-Label = Tuple[int, Optional[Text]]
+Label = Tuple[int, Optional[str]]
 DFA = List[List[Tuple[int, int]]]
 DFAS = Tuple[DFA, Dict[int, int]]
 Path = Union[str, "os.PathLike[str]"]


-class Grammar(object):
+class Grammar:
     """Pgen parsing tables conversion class.

     Once initialized, this class supplies the grammar tables for the
diff --git a/src/blib2to3/pgen2/literals.py b/src/blib2to3/pgen2/literals.py
index b5fe428..c67b91d 100644
--- a/src/blib2to3/pgen2/literals.py
+++ b/src/blib2to3/pgen2/literals.py
@@ -5,10 +5,10 @@

 import re

-from typing import Dict, Match, Text
+from typing import Dict, Match


-simple_escapes: Dict[Text, Text] = {
+simple_escapes: Dict[str, str] = {
     "a": "\a",
     "b": "\b",
     "f": "\f",
@@ -22,7 +22,7 @@ simple_escapes: Dict[Text, Text] = {
 }


-def escape(m: Match[Text]) -> Text:
+def escape(m: Match[str]) -> str:
     all, tail = m.group(0, 1)
     assert all.startswith("\\")
     esc = simple_escapes.get(tail)
@@ -44,7 +44,7 @@ def escape(m: Match[Text]) -> Text:
     return chr(i)


-def evalString(s: Text) -> Text:
+def evalString(s: str) -> str:
     assert s.startswith("'") or s.startswith('"'), repr(s[:1])
     q = s[0]
     if s[:3] == q * 3:
diff --git a/src/blib2to3/pgen2/parse.py b/src/blib2to3/pgen2/parse.py
index c462f63..17bf118 100644
--- a/src/blib2to3/pgen2/parse.py
+++ b/src/blib2to3/pgen2/parse.py
@@ -9,7 +9,6 @@ See Parser/parser.c in the Python distribution for additional info on
 how this parsing engine works.
""" -import copy from contextlib import contextmanager # Local imports @@ -18,7 +17,6 @@ from typing import ( cast, Any, Optional, - Text, Union, Tuple, Dict, @@ -35,7 +33,7 @@ if TYPE_CHECKING: from blib2to3.pgen2.driver import TokenProxy -Results = Dict[Text, NL] +Results = Dict[str, NL] Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]] DFA = List[List[Tuple[int, int]]] DFAS = Tuple[DFA, Dict[int, int]] @@ -100,7 +98,7 @@ class Recorder: finally: self.parser.is_backtracking = is_backtracking - def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None: + def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None: func: Callable[..., Any] if raw: func = self.parser._addtoken @@ -114,7 +112,7 @@ class Recorder: args.insert(0, ilabel) func(*args) - def determine_route(self, value: Optional[Text] = None, force: bool = False) -> Optional[int]: + def determine_route(self, value: Optional[str] = None, force: bool = False) -> Optional[int]: alive_ilabels = self.ilabels if len(alive_ilabels) == 0: *_, most_successful_ilabel = self._dead_ilabels @@ -131,10 +129,10 @@ class ParseError(Exception): """Exception to signal the parser is stuck.""" def __init__( - self, msg: Text, type: Optional[int], value: Optional[Text], context: Context + self, msg: str, type: Optional[int], value: Optional[str], context: Context ) -> None: Exception.__init__( - self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context) + self, f"{msg}: type={type!r}, value={value!r}, context={context!r}" ) self.msg = msg self.type = type @@ -142,7 +140,7 @@ class ParseError(Exception): self.context = context -class Parser(object): +class Parser: """Parser engine. The proper usage sequence is: @@ -236,7 +234,7 @@ class Parser(object): self.used_names: Set[str] = set() self.proxy = proxy - def addtoken(self, type: int, value: Text, context: Context) -> bool: + def addtoken(self, type: int, value: str, context: Context) -> bool: """Add a token; return True iff this is the end of the program.""" # Map from token to label ilabels = self.classify(type, value, context) @@ -284,7 +282,7 @@ class Parser(object): return self._addtoken(ilabel, type, value, context) - def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool: + def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool: # Loop until the token is shifted; may raise exceptions while True: dfa, state, node = self.stack[-1] @@ -329,7 +327,7 @@ class Parser(object): # No success finding a transition raise ParseError("bad input", type, value, context) - def classify(self, type: int, value: Text, context: Context) -> List[int]: + def classify(self, type: int, value: str, context: Context) -> List[int]: """Turn a token into a label. (Internal) Depending on whether the value is a soft-keyword or not, @@ -352,7 +350,7 @@ class Parser(object): raise ParseError("bad token", type, value, context) return [ilabel] - def shift(self, type: int, value: Text, newstate: int, context: Context) -> None: + def shift(self, type: int, value: str, newstate: int, context: Context) -> None: """Shift a token. 
(Internal)""" if self.is_backtracking: dfa, state, _ = self.stack[-1] diff --git a/src/blib2to3/pgen2/pgen.py b/src/blib2to3/pgen2/pgen.py index b5ebc7b..046efd0 100644 --- a/src/blib2to3/pgen2/pgen.py +++ b/src/blib2to3/pgen2/pgen.py @@ -11,7 +11,6 @@ from typing import ( Iterator, List, Optional, - Text, Tuple, Union, Sequence, @@ -29,13 +28,13 @@ class PgenGrammar(grammar.Grammar): pass -class ParserGenerator(object): +class ParserGenerator: filename: Path - stream: IO[Text] + stream: IO[str] generator: Iterator[GoodTokenInfo] - first: Dict[Text, Optional[Dict[Text, int]]] + first: Dict[str, Optional[Dict[str, int]]] - def __init__(self, filename: Path, stream: Optional[IO[Text]] = None) -> None: + def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None: close_stream = None if stream is None: stream = open(filename, encoding="utf-8") @@ -75,7 +74,7 @@ class ParserGenerator(object): c.start = c.symbol2number[self.startsymbol] return c - def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]: + def make_first(self, c: PgenGrammar, name: str) -> Dict[int, int]: rawfirst = self.first[name] assert rawfirst is not None first = {} @@ -85,7 +84,7 @@ class ParserGenerator(object): first[ilabel] = 1 return first - def make_label(self, c: PgenGrammar, label: Text) -> int: + def make_label(self, c: PgenGrammar, label: str) -> int: # XXX Maybe this should be a method on a subclass of converter? ilabel = len(c.labels) if label[0].isalpha(): @@ -144,7 +143,7 @@ class ParserGenerator(object): self.calcfirst(name) # print name, self.first[name].keys() - def calcfirst(self, name: Text) -> None: + def calcfirst(self, name: str) -> None: dfa = self.dfas[name] self.first[name] = None # dummy to detect left recursion state = dfa[0] @@ -176,7 +175,7 @@ class ParserGenerator(object): inverse[symbol] = label self.first[name] = totalset - def parse(self) -> Tuple[Dict[Text, List["DFAState"]], Text]: + def parse(self) -> Tuple[Dict[str, List["DFAState"]], str]: dfas = {} startsymbol: Optional[str] = None # MSTART: (NEWLINE | RULE)* ENDMARKER @@ -240,7 +239,7 @@ class ParserGenerator(object): state.addarc(st, label) return states # List of DFAState instances; first one is start - def dump_nfa(self, name: Text, start: "NFAState", finish: "NFAState") -> None: + def dump_nfa(self, name: str, start: "NFAState", finish: "NFAState") -> None: print("Dump of NFA for", name) todo = [start] for i, state in enumerate(todo): @@ -256,7 +255,7 @@ class ParserGenerator(object): else: print(" %s -> %d" % (label, j)) - def dump_dfa(self, name: Text, dfa: Sequence["DFAState"]) -> None: + def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None: print("Dump of DFA for", name) for i, state in enumerate(dfa): print(" State", i, state.isfinal and "(final)" or "") @@ -349,7 +348,7 @@ class ParserGenerator(object): ) assert False - def expect(self, type: int, value: Optional[Any] = None) -> Text: + def expect(self, type: int, value: Optional[Any] = None) -> str: if self.type != type or (value is not None and self.value != value): self.raise_error( "expected %s/%s, got %s/%s", type, value, self.type, self.value @@ -374,22 +373,22 @@ class ParserGenerator(object): raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line)) -class NFAState(object): - arcs: List[Tuple[Optional[Text], "NFAState"]] +class NFAState: + arcs: List[Tuple[Optional[str], "NFAState"]] def __init__(self) -> None: self.arcs = [] # list of (label, NFAState) pairs - def addarc(self, next: "NFAState", label: 
+    def addarc(self, next: "NFAState", label: Optional[str] = None) -> None:
         assert label is None or isinstance(label, str)
         assert isinstance(next, NFAState)
         self.arcs.append((label, next))


-class DFAState(object):
+class DFAState:
     nfaset: Dict[NFAState, Any]
     isfinal: bool
-    arcs: Dict[Text, "DFAState"]
+    arcs: Dict[str, "DFAState"]

     def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
         assert isinstance(nfaset, dict)
@@ -399,7 +398,7 @@ class DFAState(object):
         self.isfinal = final in nfaset
         self.arcs = {}  # map from label to DFAState

-    def addarc(self, next: "DFAState", label: Text) -> None:
+    def addarc(self, next: "DFAState", label: str) -> None:
         assert isinstance(label, str)
         assert label not in self.arcs
         assert isinstance(next, DFAState)
diff --git a/src/blib2to3/pgen2/token.py b/src/blib2to3/pgen2/token.py
index c939531..117cc09 100644
--- a/src/blib2to3/pgen2/token.py
+++ b/src/blib2to3/pgen2/token.py
@@ -1,6 +1,5 @@
 """Token constants (from "token.h")."""

-import sys
 from typing import Dict

 from typing import Final
@@ -75,7 +74,7 @@ NT_OFFSET: Final = 256

 tok_name: Final[Dict[int, str]] = {}
 for _name, _value in list(globals().items()):
-    if type(_value) is type(0):
+    if type(_value) is int:
         tok_name[_value] = _name


diff --git a/src/blib2to3/pgen2/tokenize.py b/src/blib2to3/pgen2/tokenize.py
index a5e8918..1dea89d 100644
--- a/src/blib2to3/pgen2/tokenize.py
+++ b/src/blib2to3/pgen2/tokenize.py
@@ -35,7 +35,6 @@ from typing import (
     List,
     Optional,
     Set,
-    Text,
     Tuple,
     Pattern,
     Union,
@@ -77,7 +76,7 @@ def maybe(*choices: str) -> str:


 def _combinations(*l: str) -> Set[str]:
-    return set(x + y for x in l for y in l + ("",) if x.casefold() != y.casefold())
+    return {x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()}


 Whitespace = r"[ \f\t]*"
@@ -189,7 +188,7 @@ Coord = Tuple[int, int]


 def printtoken(
-    type: int, token: Text, srow_col: Coord, erow_col: Coord, line: Text
+    type: int, token: str, srow_col: Coord, erow_col: Coord, line: str
 ) -> None:  # for testing
     (srow, scol) = srow_col
     (erow, ecol) = erow_col
@@ -198,10 +197,10 @@ def printtoken(
     )


-TokenEater = Callable[[int, Text, Coord, Coord, Text], None]
+TokenEater = Callable[[int, str, Coord, Coord, str], None]


-def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken) -> None:
+def tokenize(readline: Callable[[], str], tokeneater: TokenEater = printtoken) -> None:
     """
     The tokenize() function accepts two parameters: one representing the input
     stream, and one providing an output mechanism for tokenize().
@@ -221,17 +220,17 @@ def tokenize(readline: Callable[[], Text], tokeneater: TokenEater = printtoken)


 # backwards compatible interface
-def tokenize_loop(readline: Callable[[], Text], tokeneater: TokenEater) -> None:
+def tokenize_loop(readline: Callable[[], str], tokeneater: TokenEater) -> None:
     for token_info in generate_tokens(readline):
         tokeneater(*token_info)


-GoodTokenInfo = Tuple[int, Text, Coord, Coord, Text]
+GoodTokenInfo = Tuple[int, str, Coord, Coord, str]
 TokenInfo = Union[Tuple[int, str], GoodTokenInfo]


 class Untokenizer:
-    tokens: List[Text]
+    tokens: List[str]
     prev_row: int
     prev_col: int

@@ -247,13 +246,13 @@ class Untokenizer:
         if col_offset:
             self.tokens.append(" " * col_offset)

-    def untokenize(self, iterable: Iterable[TokenInfo]) -> Text:
+    def untokenize(self, iterable: Iterable[TokenInfo]) -> str:
         for t in iterable:
             if len(t) == 2:
                 self.compat(cast(Tuple[int, str], t), iterable)
                 break
             tok_type, token, start, end, line = cast(
-                Tuple[int, Text, Coord, Coord, Text], t
+                Tuple[int, str, Coord, Coord, str], t
             )
             self.add_whitespace(start)
             self.tokens.append(token)
@@ -263,7 +262,7 @@ class Untokenizer:
                 self.prev_col = 0
         return "".join(self.tokens)

-    def compat(self, token: Tuple[int, Text], iterable: Iterable[TokenInfo]) -> None:
+    def compat(self, token: Tuple[int, str], iterable: Iterable[TokenInfo]) -> None:
         startline = False
         indents = []
         toks_append = self.tokens.append
@@ -335,7 +334,7 @@ def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
         try:
             return readline()
         except StopIteration:
-            return bytes()
+            return b''

     def find_cookie(line: bytes) -> Optional[str]:
         try:
@@ -384,7 +383,7 @@ def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
     return default, [first, second]


-def untokenize(iterable: Iterable[TokenInfo]) -> Text:
+def untokenize(iterable: Iterable[TokenInfo]) -> str:
     """Transform tokens back into Python source code.

     Each element returned by the iterable must be a token sequence
@@ -407,7 +406,7 @@ def untokenize(iterable: Iterable[TokenInfo]) -> Text:


 def generate_tokens(
-    readline: Callable[[], Text], grammar: Optional[Grammar] = None
+    readline: Callable[[], str], grammar: Optional[Grammar] = None
 ) -> Iterator[GoodTokenInfo]:
     """
     The generate_tokens() generator requires one argument, readline, which
diff --git a/src/blib2to3/pygram.py b/src/blib2to3/pygram.py
index 15702e4..1b48323 100644
--- a/src/blib2to3/pygram.py
+++ b/src/blib2to3/pygram.py
@@ -9,7 +9,6 @@ import os
 from typing import Union

 # Local imports
-from .pgen2 import token
 from .pgen2 import driver

 from .pgen2.grammar import Grammar
@@ -21,7 +20,7 @@ from .pgen2.grammar import Grammar
 #   "PatternGrammar.txt")


-class Symbols(object):
+class Symbols:
     def __init__(self, grammar: Grammar) -> None:
         """Initializer.

diff --git a/src/blib2to3/pytree.py b/src/blib2to3/pytree.py
index ea60c89..156322c 100644
--- a/src/blib2to3/pytree.py
+++ b/src/blib2to3/pytree.py
@@ -18,7 +18,6 @@ from typing import (
     Iterator,
     List,
     Optional,
-    Text,
     Tuple,
     TypeVar,
     Union,
@@ -34,10 +33,10 @@ from io import StringIO

 HUGE: int = 0x7FFFFFFF  # maximum repeat count, default max

-_type_reprs: Dict[int, Union[Text, int]] = {}
+_type_reprs: Dict[int, Union[str, int]] = {}


-def type_repr(type_num: int) -> Union[Text, int]:
+def type_repr(type_num: int) -> Union[str, int]:
     global _type_reprs
     if not _type_reprs:
         from .pygram import python_symbols
@@ -54,11 +53,11 @@ def type_repr(type_num: int) -> Union[Text, int]:
 _P = TypeVar("_P", bound="Base")

 NL = Union["Node", "Leaf"]
-Context = Tuple[Text, Tuple[int, int]]
-RawNode = Tuple[int, Optional[Text], Optional[Context], Optional[List[NL]]]
+Context = Tuple[str, Tuple[int, int]]
+RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]]


-class Base(object):
+class Base:
     """
     Abstract base class for Node and Leaf.

@@ -92,7 +91,7 @@ class Base(object):
         return self._eq(other)

     @property
-    def prefix(self) -> Text:
+    def prefix(self) -> str:
         raise NotImplementedError

     def _eq(self: _P, other: _P) -> bool:
@@ -225,7 +224,7 @@ class Base(object):
             return 0
         return 1 + self.parent.depth()

-    def get_suffix(self) -> Text:
+    def get_suffix(self) -> str:
         """
         Return the string immediately following the invocant node. This is
         effectively equivalent to node.next_sibling.prefix
@@ -242,14 +241,14 @@ class Node(Base):
     """Concrete implementation for interior nodes."""

     fixers_applied: Optional[List[Any]]
-    used_names: Optional[Set[Text]]
+    used_names: Optional[Set[str]]

     def __init__(
         self,
         type: int,
         children: List[NL],
         context: Optional[Any] = None,
-        prefix: Optional[Text] = None,
+        prefix: Optional[str] = None,
         fixers_applied: Optional[List[Any]] = None,
     ) -> None:
         """
@@ -274,16 +273,16 @@ class Node(Base):
         else:
             self.fixers_applied = None

-    def __repr__(self) -> Text:
+    def __repr__(self) -> str:
         """Return a canonical string representation."""
         assert self.type is not None
-        return "%s(%s, %r)" % (
+        return "{}({}, {!r})".format(
             self.__class__.__name__,
             type_repr(self.type),
             self.children,
         )

-    def __str__(self) -> Text:
+    def __str__(self) -> str:
         """
         Return a pretty string representation.

@@ -317,7 +316,7 @@ class Node(Base):
             yield from child.pre_order()

     @property
-    def prefix(self) -> Text:
+    def prefix(self) -> str:
         """
         The whitespace and comments preceding this node in the input.
""" @@ -326,7 +325,7 @@ class Node(Base): return self.children[0].prefix @prefix.setter - def prefix(self, prefix: Text) -> None: + def prefix(self, prefix: str) -> None: if self.children: self.children[0].prefix = prefix @@ -383,12 +382,12 @@ class Leaf(Base): """Concrete implementation for leaf nodes.""" # Default values for instance variables - value: Text + value: str fixers_applied: List[Any] bracket_depth: int # Changed later in brackets.py opening_bracket: Optional["Leaf"] = None - used_names: Optional[Set[Text]] + used_names: Optional[Set[str]] _prefix = "" # Whitespace and comments preceding this token in the input lineno: int = 0 # Line where this token starts in the input column: int = 0 # Column where this token starts in the input @@ -400,9 +399,9 @@ class Leaf(Base): def __init__( self, type: int, - value: Text, + value: str, context: Optional[Context] = None, - prefix: Optional[Text] = None, + prefix: Optional[str] = None, fixers_applied: List[Any] = [], opening_bracket: Optional["Leaf"] = None, fmt_pass_converted_first_leaf: Optional["Leaf"] = None, @@ -431,13 +430,13 @@ class Leaf(Base): from .pgen2.token import tok_name assert self.type is not None - return "%s(%s, %r)" % ( + return "{}({}, {!r})".format( self.__class__.__name__, tok_name.get(self.type, self.type), self.value, ) - def __str__(self) -> Text: + def __str__(self) -> str: """ Return a pretty string representation. @@ -471,14 +470,14 @@ class Leaf(Base): yield self @property - def prefix(self) -> Text: + def prefix(self) -> str: """ The whitespace and comments preceding this token in the input. """ return self._prefix @prefix.setter - def prefix(self, prefix: Text) -> None: + def prefix(self, prefix: str) -> None: self.changed() self._prefix = prefix @@ -503,10 +502,10 @@ def convert(gr: Grammar, raw_node: RawNode) -> NL: return Leaf(type, value or "", context=context) -_Results = Dict[Text, NL] +_Results = Dict[str, NL] -class BasePattern(object): +class BasePattern: """ A pattern is a tree matching pattern. @@ -526,19 +525,19 @@ class BasePattern(object): type: Optional[int] type = None # Node type (token if < 256, symbol if >= 256) content: Any = None # Optional content matching pattern - name: Optional[Text] = None # Optional name used to store match in results dict + name: Optional[str] = None # Optional name used to store match in results dict def __new__(cls, *args, **kwds): """Constructor that prevents BasePattern from being instantiated.""" assert cls is not BasePattern, "Cannot instantiate BasePattern" return object.__new__(cls) - def __repr__(self) -> Text: + def __repr__(self) -> str: assert self.type is not None args = [type_repr(self.type), self.content, self.name] while args and args[-1] is None: del args[-1] - return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args))) + return "{}({})".format(self.__class__.__name__, ", ".join(map(repr, args))) def _submatch(self, node, results=None) -> bool: raise NotImplementedError @@ -602,8 +601,8 @@ class LeafPattern(BasePattern): def __init__( self, type: Optional[int] = None, - content: Optional[Text] = None, - name: Optional[Text] = None, + content: Optional[str] = None, + name: Optional[str] = None, ) -> None: """ Initializer. Takes optional type, content, and name. @@ -653,8 +652,8 @@ class NodePattern(BasePattern): def __init__( self, type: Optional[int] = None, - content: Optional[Iterable[Text]] = None, - name: Optional[Text] = None, + content: Optional[Iterable[str]] = None, + name: Optional[str] = None, ) -> None: """ Initializer. 
@@ -734,10 +733,10 @@ class WildcardPattern(BasePattern):

     def __init__(
         self,
-        content: Optional[Text] = None,
+        content: Optional[str] = None,
         min: int = 0,
         max: int = HUGE,
-        name: Optional[Text] = None,
+        name: Optional[str] = None,
     ) -> None:
         """
         Initializer.
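
Editorial note, not part of the patch: every hunk above is one of a handful of mechanical pyupgrade rewrites. The snippet below is a hypothetical, self-contained sketch of the target idioms (bare @lru_cache, which pyupgrade emits because the parenthesis-free decorator form works on Python 3.8+; str in place of the deprecated typing.Text alias; f-strings instead of %-formatting; a set comprehension instead of set(...) over a generator; and class definitions without the redundant object base). The module and function names are invented for illustration only.

# illustrative_pyupgrade_idioms.py -- hypothetical example, not code from this commit
from functools import lru_cache
from typing import Dict, Optional


@lru_cache  # pyupgrade drops the empty "()"; the bare form requires Python 3.8+
def find_config(name: str) -> Optional[str]:
    # `str` replaces the deprecated `typing.Text` alias seen throughout the diff.
    table: Dict[str, str] = {"pyproject.toml": "/repo/pyproject.toml"}
    return table.get(name)


def describe_error(filename: str, err: OSError) -> str:
    # f-string instead of '"%s: %s" % (...)' interpolation.
    return f"Can't open {filename}: {err}"


class Demo:  # `class Demo(object):` loses the redundant explicit base class
    def combinations(self, items: tuple) -> set:
        # Set comprehension instead of set(generator expression).
        return {x + y for x in items for y in items if x != y}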