From: Shantanu <12621235+hauntsaninja@users.noreply.github.com>
Date: Mon, 17 Jul 2023 04:33:58 +0000 (-0700)
Subject: Fix most blib2to3 lint (#3794)
X-Git-Url: https://git.madduck.net/etc/vim.git/commitdiff_plain/c1e30d97fe39e4c1b1967571b7e3854547239bf6?ds=sidebyside

Fix most blib2to3 lint (#3794)
---

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 89c0de3..0d68b81 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 # Note: don't use this config for your own repositories. Instead, see
 # "Version control integration" in docs/integrations/source_version_control.md
-exclude: ^(src/blib2to3/|profiling/|tests/data/)
+exclude: ^(profiling/|tests/data/)
 repos:
   - repo: local
     hooks:
@@ -36,6 +36,7 @@ repos:
           - flake8-bugbear
           - flake8-comprehensions
           - flake8-simplify
+        exclude: ^src/blib2to3/
 
   - repo: https://github.com/pre-commit/mirrors-mypy
     rev: v1.4.1
diff --git a/pyproject.toml b/pyproject.toml
index aaac42b..d29b768 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,8 +12,7 @@ include = '\.pyi?$'
 extend-exclude = '''
 /(
   # The following are specific to Black, you probably don't want those.
-  | blib2to3
-  | tests/data
+  tests/data
   | profiling
 )/
 '''
@@ -183,7 +182,7 @@ atomic = true
 profile = "black"
 line_length = 88
 skip_gitignore = true
-skip_glob = ["src/blib2to3", "tests/data", "profiling"]
+skip_glob = ["tests/data", "profiling"]
 known_first_party = ["black", "blib2to3", "blackd", "_black_version"]
 
 [tool.pytest.ini_options]
diff --git a/src/blib2to3/README b/src/blib2to3/README
index 0d3c607..38b0415 100644
--- a/src/blib2to3/README
+++ b/src/blib2to3/README
@@ -1,18 +1,19 @@
-A subset of lib2to3 taken from Python 3.7.0b2.
-Commit hash: 9c17e3a1987004b8bcfbe423953aad84493a7984
+A subset of lib2to3 taken from Python 3.7.0b2. Commit hash:
+9c17e3a1987004b8bcfbe423953aad84493a7984
 
 Reasons for forking:
+
 - consistent handling of f-strings for users of Python < 3.6.2
-- backport of BPO-33064 that fixes parsing files with trailing commas after
-  *args and **kwargs
-- backport of GH-6143 that restores the ability to reformat legacy usage of
-  `async`
+- backport of BPO-33064 that fixes parsing files with trailing commas after \*args and
+  \*\*kwargs
+- backport of GH-6143 that restores the ability to reformat legacy usage of `async`
 - support all types of string literals
 - better ability to debug (better reprs)
 - INDENT and DEDENT don't hold whitespace and comment prefixes
 - ability to Cythonize
 
 Change Log:
+
 - Changes default logger used by Driver
 - Backported the following upstream parser changes:
   - "bpo-42381: Allow walrus in set literals and set comprehensions (GH-23332)"
diff --git a/src/blib2to3/pgen2/driver.py b/src/blib2to3/pgen2/driver.py
index bb73016..e629843 100644
--- a/src/blib2to3/pgen2/driver.py
+++ b/src/blib2to3/pgen2/driver.py
@@ -17,30 +17,21 @@ __all__ = ["Driver", "load_grammar"]
 
 # Python imports
 import io
-import os
 import logging
+import os
 import pkgutil
 import sys
-from typing import (
-    Any,
-    cast,
-    IO,
-    Iterable,
-    List,
-    Optional,
-    Iterator,
-    Tuple,
-    Union,
-)
 from contextlib import contextmanager
 from dataclasses import dataclass, field
-
-# Pgen imports
-from . import grammar, parse, token, tokenize, pgen
 from logging import Logger
-from blib2to3.pytree import NL
+from typing import IO, Any, Iterable, Iterator, List, Optional, Tuple, Union, cast
+
 from blib2to3.pgen2.grammar import Grammar
 from blib2to3.pgen2.tokenize import GoodTokenInfo
+from blib2to3.pytree import NL
+
+# Pgen imports
+from . import grammar, parse, pgen, token, tokenize
 
 Path = Union[str, "os.PathLike[str]"]
 
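The driver.py hunk above is representative of most of this patch: imports are
regrouped into isort's standard sections (standard library, then first-party
packages, then local relative imports) and alphabetized, with the multi-line
typing import collapsed onto a single line. As an illustrative sketch (not part
of the commit, and assuming isort >= 5 is installed), the same ordering can be
reproduced through isort's Python API:

    # Illustrative sketch only; the sample source string is invented.
    import isort

    messy = (
        "import sys\n"
        "import os\n"
        "from typing import (\n"
        "    Optional,\n"
        "    Any,\n"
        ")\n"
    )

    # isort.code() returns the source with imports sorted; profile="black"
    # mirrors the profile = "black" setting in the pyproject.toml hunk above.
    print(isort.code(messy, profile="black"))
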
diff --git a/src/blib2to3/pgen2/literals.py b/src/blib2to3/pgen2/literals.py
index c67b91d..53c0b8a 100644
--- a/src/blib2to3/pgen2/literals.py
+++ b/src/blib2to3/pgen2/literals.py
@@ -4,10 +4,8 @@
 """Safely evaluate Python string literals without using eval()."""
 
 import re
-
 from typing import Dict, Match
 
-
 simple_escapes: Dict[str, str] = {
     "a": "\a",
     "b": "\b",
diff --git a/src/blib2to3/pgen2/parse.py b/src/blib2to3/pgen2/parse.py
index 17bf118..299cc24 100644
--- a/src/blib2to3/pgen2/parse.py
+++ b/src/blib2to3/pgen2/parse.py
@@ -10,24 +10,25 @@ how this parsing engine works.
 
 """
 from contextlib import contextmanager
-
-# Local imports
-from . import grammar, token, tokenize
 from typing import (
-    cast,
+    TYPE_CHECKING,
     Any,
-    Optional,
-    Union,
-    Tuple,
+    Callable,
     Dict,
-    List,
     Iterator,
-    Callable,
+    List,
+    Optional,
     Set,
-    TYPE_CHECKING,
+    Tuple,
+    Union,
+    cast,
 )
+
 from blib2to3.pgen2.grammar import Grammar
-from blib2to3.pytree import convert, NL, Context, RawNode, Leaf, Node
+from blib2to3.pytree import NL, Context, Leaf, Node, RawNode, convert
+
+# Local imports
+from . import grammar, token, tokenize
 
 if TYPE_CHECKING:
     from blib2to3.pgen2.driver import TokenProxy
@@ -112,7 +113,9 @@ class Recorder:
             args.insert(0, ilabel)
         func(*args)
 
-    def determine_route(self, value: Optional[str] = None, force: bool = False) -> Optional[int]:
+    def determine_route(
+        self, value: Optional[str] = None, force: bool = False
+    ) -> Optional[int]:
         alive_ilabels = self.ilabels
         if len(alive_ilabels) == 0:
             *_, most_successful_ilabel = self._dead_ilabels
diff --git a/src/blib2to3/pgen2/pgen.py b/src/blib2to3/pgen2/pgen.py
index 046efd0..3ece9bb 100644
--- a/src/blib2to3/pgen2/pgen.py
+++ b/src/blib2to3/pgen2/pgen.py
@@ -1,25 +1,22 @@
 # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
 # Licensed to PSF under a Contributor Agreement.
 
-# Pgen imports
-from . import grammar, token, tokenize
-
+import os
 from typing import (
+    IO,
     Any,
     Dict,
-    IO,
     Iterator,
     List,
+    NoReturn,
     Optional,
+    Sequence,
     Tuple,
     Union,
-    Sequence,
-    NoReturn,
 )
 
-from blib2to3.pgen2 import grammar
-from blib2to3.pgen2.tokenize import GoodTokenInfo
-import os
+from blib2to3.pgen2 import grammar, token, tokenize
+from blib2to3.pgen2.tokenize import GoodTokenInfo
 
 Path = Union[str, "os.PathLike[str]"]
 
@@ -149,7 +146,7 @@ class ParserGenerator:
         state = dfa[0]
         totalset: Dict[str, int] = {}
         overlapcheck = {}
-        for label, next in state.arcs.items():
+        for label in state.arcs:
             if label in self.dfas:
                 if label in self.first:
                     fset = self.first[label]
@@ -190,9 +187,9 @@ class ParserGenerator:
             # self.dump_nfa(name, a, z)
             dfa = self.make_dfa(a, z)
             # self.dump_dfa(name, dfa)
-            oldlen = len(dfa)
+            # oldlen = len(dfa)
             self.simplify_dfa(dfa)
-            newlen = len(dfa)
+            # newlen = len(dfa)
             dfas[name] = dfa
             # print name, oldlen, newlen
         if startsymbol is None:
@@ -346,7 +343,7 @@ class ParserGenerator:
             self.raise_error(
                 "expected (...) or NAME or STRING, got %s/%s", self.type, self.value
             )
-            assert False
+            raise AssertionError
 
     def expect(self, type: int, value: Optional[Any] = None) -> str:
         if self.type != type or (value is not None and self.value != value):
@@ -368,7 +365,7 @@ class ParserGenerator:
         if args:
             try:
                 msg = msg % args
-            except:
+            except Exception:
                 msg = " ".join([msg] + list(map(str, args)))
         raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line))
 
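Two of the pgen.py changes above alter behavior rather than layout: a bare
"except:" also traps BaseException subclasses such as KeyboardInterrupt and
SystemExit (flake8's E722), and "assert False" is stripped when Python runs
with -O, while "raise AssertionError" fires unconditionally. A small
self-contained illustration (not part of the commit; function names invented):

    # Illustrative sketch only.

    def swallow_everything() -> None:
        # A bare "except:" catches BaseException, so even SystemExit and
        # KeyboardInterrupt never escape this block.
        try:
            raise SystemExit(1)
        except:  # noqa: E722 -- the pattern flake8 flags
            print("SystemExit was silently swallowed")

    def let_exits_through() -> None:
        # "except Exception:" still catches ordinary errors but lets
        # SystemExit and KeyboardInterrupt propagate.
        try:
            raise SystemExit(1)
        except Exception:
            print("not reached for SystemExit")

    def unreachable() -> None:
        # Unlike "assert False", this still raises under "python -O".
        raise AssertionError

    if __name__ == "__main__":
        swallow_everything()
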
diff --git a/src/blib2to3/pgen2/token.py b/src/blib2to3/pgen2/token.py
index 117cc09..ed2fc4e 100644
--- a/src/blib2to3/pgen2/token.py
+++ b/src/blib2to3/pgen2/token.py
@@ -1,8 +1,6 @@
 """Token constants (from "token.h")."""
 
-from typing import Dict
-
-from typing import Final
+from typing import Dict, Final
 
 # Taken from Python (r53757) and modified to include some tokens
 #   originally monkeypatched in by pgen2.tokenize
diff --git a/src/blib2to3/pgen2/tokenize.py b/src/blib2to3/pgen2/tokenize.py
index 1dea89d..d0607f4 100644
--- a/src/blib2to3/pgen2/tokenize.py
+++ b/src/blib2to3/pgen2/tokenize.py
@@ -30,28 +30,41 @@ each time a new token is found."""
 
 import sys
 from typing import (
     Callable,
+    Final,
     Iterable,
     Iterator,
     List,
     Optional,
+    Pattern,
     Set,
     Tuple,
-    Pattern,
     Union,
     cast,
 )
-from typing import Final
-
-from blib2to3.pgen2.token import *
 
 from blib2to3.pgen2.grammar import Grammar
+from blib2to3.pgen2.token import (
+    ASYNC,
+    AWAIT,
+    COMMENT,
+    DEDENT,
+    ENDMARKER,
+    ERRORTOKEN,
+    INDENT,
+    NAME,
+    NEWLINE,
+    NL,
+    NUMBER,
+    OP,
+    STRING,
+    tok_name,
+)
 
 __author__ = "Ka-Ping Yee <ping@lfw.org>"
 __credits__ = "GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro"
 
 import re
 from codecs import BOM_UTF8, lookup
-from blib2to3.pgen2.token import *
 
 from . import token
@@ -334,7 +347,7 @@ def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
         try:
             return readline()
         except StopIteration:
-            return b''
+            return b""
 
     def find_cookie(line: bytes) -> Optional[str]:
         try:
@@ -676,14 +689,12 @@ def generate_tokens(
                 yield stashed
                 stashed = None
 
-    for indent in indents[1:]:  # pop remaining indent levels
+    for _indent in indents[1:]:  # pop remaining indent levels
         yield (DEDENT, "", (lnum, 0), (lnum, 0), "")
     yield (ENDMARKER, "", (lnum, 0), (lnum, 0), "")
 
 
 if __name__ == "__main__":  # testing
-    import sys
-
     if len(sys.argv) > 1:
         tokenize(open(sys.argv[1]).readline)
     else:
diff --git a/src/blib2to3/pygram.py b/src/blib2to3/pygram.py
index 1b48323..c30c630 100644
--- a/src/blib2to3/pygram.py
+++ b/src/blib2to3/pygram.py
@@ -5,12 +5,10 @@
 
 # Python imports
 import os
-
 from typing import Union
 
 # Local imports
 from .pgen2 import driver
-
 from .pgen2.grammar import Grammar
 
 # Moved into initialize because mypyc can't handle __file__ (XXX bug)
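The tokenize.py hunk above swaps "from blib2to3.pgen2.token import *" for an
explicit import list and renames an unused loop variable to "_indent". Both are
routine flake8 findings: star imports defeat undefined-name checking (F403 and
F405), and flake8-bugbear's B007 flags loop variables never used in the loop
body. A compact stdlib-only illustration (not part of the commit; the standard
"token" module stands in for blib2to3.pgen2.token):

    # Illustrative sketch only.
    # Explicit imports keep the namespace auditable; a star import here
    # would trigger F403 and make NAME/NUMBER/STRING unverifiable (F405).
    from token import NAME, NUMBER, STRING

    print("token ids:", NAME, NUMBER, STRING)

    # The leading underscore tells readers (and flake8-bugbear's B007)
    # that only the number of iterations matters, not the value itself.
    indents = [0, 4, 8]
    for _indent in indents[1:]:  # pop remaining indent levels
        print("DEDENT")
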
diff --git a/src/blib2to3/pytree.py b/src/blib2to3/pytree.py
index 156322c..2a0cd6d 100644
--- a/src/blib2to3/pytree.py
+++ b/src/blib2to3/pytree.py
@@ -15,15 +15,16 @@ There's also a pattern matching implementation here.
 from typing import (
     Any,
     Dict,
+    Iterable,
     Iterator,
     List,
     Optional,
+    Set,
     Tuple,
     TypeVar,
     Union,
-    Set,
-    Iterable,
 )
+
 from blib2to3.pgen2.grammar import Grammar
 
 __author__ = "Guido van Rossum <guido@python.org>"
@@ -58,7 +59,6 @@ RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]]
 
 
 class Base:
-
     """
     Abstract base class for Node and Leaf.
 
@@ -237,7 +237,6 @@ class Base:
 
 
 class Node(Base):
-
     """Concrete implementation for interior nodes."""
 
     fixers_applied: Optional[List[Any]]
@@ -378,7 +377,6 @@ class Node(Base):
 
 
 class Leaf(Base):
-
     """Concrete implementation for leaf nodes."""
 
     # Default values for instance variables
@@ -506,7 +504,6 @@ _Results = Dict[str, NL]
 
 
 class BasePattern:
-
     """
     A pattern is a tree matching pattern.
 
@@ -646,7 +643,6 @@ class LeafPattern(BasePattern):
 
 
 class NodePattern(BasePattern):
-
     wildcards: bool = False
 
     def __init__(
@@ -715,7 +711,6 @@ class NodePattern(BasePattern):
 
 
 class WildcardPattern(BasePattern):
-
     """
     A wildcard pattern can match zero or more nodes.
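Every pytree.py hunk above makes the same edit: the blank line separating a
class statement from its docstring is removed, a cleanup that follows from no
longer excluding blib2to3 from the formatting and lint configuration at the top
of this patch. In miniature (not part of the commit; class names invented):

    # Illustrative sketch only.

    class OldStyle:

        """A docstring separated from its class statement by a blank line."""

    class NewStyle:
        """A docstring directly under the class statement, as in the hunks."""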