git.madduck.net Git - etc/vim.git/commitdiff

madduck's git repository

Every one of the projects in this repository is available at the canonical URL git://git.madduck.net/madduck/pub/<projectpath> — see each project's metadata for the exact URL.

All patches and comments are welcome. Please squash your changes into logical commits before using git-format-patch and git-send-email to send them to patches@git.madduck.net. If you read over the Git project's submission guidelines and adhere to them, I'd be especially grateful.

SSH access, as well as push access, can be individually arranged.

If you use my repositories frequently, consider adding the following snippet to ~/.gitconfig and using the third clone URL listed for each project:

[url "git://git.madduck.net/madduck/"]
  insteadOf = madduck:

Fix most blib2to3 lint (#3794)
author: Shantanu <12621235+hauntsaninja@users.noreply.github.com>
Mon, 17 Jul 2023 04:33:58 +0000 (21:33 -0700)
committer: GitHub <noreply@github.com>
Mon, 17 Jul 2023 04:33:58 +0000 (21:33 -0700)
.pre-commit-config.yaml
pyproject.toml
src/blib2to3/README
src/blib2to3/pgen2/driver.py
src/blib2to3/pgen2/literals.py
src/blib2to3/pgen2/parse.py
src/blib2to3/pgen2/pgen.py
src/blib2to3/pgen2/token.py
src/blib2to3/pgen2/tokenize.py
src/blib2to3/pygram.py
src/blib2to3/pytree.py

index 89c0de39c862b69e3a9c8676cb472c1b5c4e2a0e..0d68b81ccd77c3e32ca4d0898d3103a3bd0f7571 100644 (file)
@@ -1,6 +1,6 @@
 # Note: don't use this config for your own repositories. Instead, see
 # "Version control integration" in docs/integrations/source_version_control.md
-exclude: ^(src/blib2to3/|profiling/|tests/data/)
+exclude: ^(profiling/|tests/data/)
 repos:
   - repo: local
     hooks:
@@ -36,6 +36,7 @@ repos:
           - flake8-bugbear
           - flake8-comprehensions
           - flake8-simplify
+        exclude: ^src/blib2to3/
 
   - repo: https://github.com/pre-commit/mirrors-mypy
     rev: v1.4.1
index aaac42b44b913ec94f4dd9902af17e107c983fc2..d29b768c289bf9dcce5db2363a73fd84a4ef94ee 100644 (file)
@@ -12,8 +12,7 @@ include = '\.pyi?$'
 extend-exclude = '''
 /(
   # The following are specific to Black, you probably don't want those.
-  | blib2to3
-  | tests/data
+  tests/data
   | profiling
 )/
 '''
@@ -183,7 +182,7 @@ atomic = true
 profile = "black"
 line_length = 88
 skip_gitignore = true
-skip_glob = ["src/blib2to3", "tests/data", "profiling"]
+skip_glob = ["tests/data", "profiling"]
 known_first_party = ["black", "blib2to3", "blackd", "_black_version"]
 
 [tool.pytest.ini_options]
index 0d3c607c9c7c1723bee89337652ba51bc8f800f0..38b04158ddb70070110e5047aab88bb9009ea003 100644 (file)
@@ -1,18 +1,19 @@
-A subset of lib2to3 taken from Python 3.7.0b2.
-Commit hash: 9c17e3a1987004b8bcfbe423953aad84493a7984
+A subset of lib2to3 taken from Python 3.7.0b2. Commit hash:
+9c17e3a1987004b8bcfbe423953aad84493a7984
 
 Reasons for forking:
+
 - consistent handling of f-strings for users of Python < 3.6.2
-- backport of BPO-33064 that fixes parsing files with trailing commas after
-  *args and **kwargs
-- backport of GH-6143 that restores the ability to reformat legacy usage of
-  `async`
+- backport of BPO-33064 that fixes parsing files with trailing commas after \*args and
+  \*\*kwargs
+- backport of GH-6143 that restores the ability to reformat legacy usage of `async`
 - support all types of string literals
 - better ability to debug (better reprs)
 - INDENT and DEDENT don't hold whitespace and comment prefixes
 - ability to Cythonize
 
 Change Log:
+
 - Changes default logger used by Driver
 - Backported the following upstream parser changes:
   - "bpo-42381: Allow walrus in set literals and set comprehensions (GH-23332)"
index bb73016a4c12d58caab7f34fdb55fbf2e4937890..e629843f8b986ea792d59a150ff64018d2108bcb 100644 (file)
@@ -17,30 +17,21 @@ __all__ = ["Driver", "load_grammar"]
 
 # Python imports
 import io
-import os
 import logging
+import os
 import pkgutil
 import sys
-from typing import (
-    Any,
-    cast,
-    IO,
-    Iterable,
-    List,
-    Optional,
-    Iterator,
-    Tuple,
-    Union,
-)
 from contextlib import contextmanager
 from dataclasses import dataclass, field
-
-# Pgen imports
-from . import grammar, parse, token, tokenize, pgen
 from logging import Logger
-from blib2to3.pytree import NL
+from typing import IO, Any, Iterable, Iterator, List, Optional, Tuple, Union, cast
+
 from blib2to3.pgen2.grammar import Grammar
 from blib2to3.pgen2.tokenize import GoodTokenInfo
+from blib2to3.pytree import NL
+
+# Pgen imports
+from . import grammar, parse, pgen, token, tokenize
 
 Path = Union[str, "os.PathLike[str]"]
 
index c67b91d046368e91b4adf686fd142a7479483235..53c0b8ac2bbb2984a8b78d03261d8c7ebd0eb5ca 100644 (file)
@@ -4,10 +4,8 @@
 """Safely evaluate Python string literals without using eval()."""
 
 import re
-
 from typing import Dict, Match
 
-
 simple_escapes: Dict[str, str] = {
     "a": "\a",
     "b": "\b",
index 17bf118e9fcd8053b655b4efc42e26f840b40465..299cc24a15f4bd353d155b62ef645d8d6a9ef148 100644 (file)
@@ -10,24 +10,25 @@ how this parsing engine works.
 
 """
 from contextlib import contextmanager
-
-# Local imports
-from . import grammar, token, tokenize
 from typing import (
-    cast,
+    TYPE_CHECKING,
     Any,
-    Optional,
-    Union,
-    Tuple,
+    Callable,
     Dict,
-    List,
     Iterator,
-    Callable,
+    List,
+    Optional,
     Set,
-    TYPE_CHECKING,
+    Tuple,
+    Union,
+    cast,
 )
+
 from blib2to3.pgen2.grammar import Grammar
-from blib2to3.pytree import convert, NL, Context, RawNode, Leaf, Node
+from blib2to3.pytree import NL, Context, Leaf, Node, RawNode, convert
+
+# Local imports
+from . import grammar, token, tokenize
 
 if TYPE_CHECKING:
     from blib2to3.pgen2.driver import TokenProxy
@@ -112,7 +113,9 @@ class Recorder:
                     args.insert(0, ilabel)
                 func(*args)
 
-    def determine_route(self, value: Optional[str] = None, force: bool = False) -> Optional[int]:
+    def determine_route(
+        self, value: Optional[str] = None, force: bool = False
+    ) -> Optional[int]:
         alive_ilabels = self.ilabels
         if len(alive_ilabels) == 0:
             *_, most_successful_ilabel = self._dead_ilabels
index 046efd09338c81866432975cbe108f3e201b420a..3ece9bb41edd51c7d2d82a317cb14dafcea4f1f4 100644 (file)
@@ -1,25 +1,22 @@
 # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
 # Licensed to PSF under a Contributor Agreement.
 
-# Pgen imports
-from . import grammar, token, tokenize
-
+import os
 from typing import (
+    IO,
     Any,
     Dict,
-    IO,
     Iterator,
     List,
+    NoReturn,
     Optional,
+    Sequence,
     Tuple,
     Union,
-    Sequence,
-    NoReturn,
 )
-from blib2to3.pgen2 import grammar
-from blib2to3.pgen2.tokenize import GoodTokenInfo
-import os
 
+from blib2to3.pgen2 import grammar, token, tokenize
+from blib2to3.pgen2.tokenize import GoodTokenInfo
 
 Path = Union[str, "os.PathLike[str]"]
 
@@ -149,7 +146,7 @@ class ParserGenerator:
         state = dfa[0]
         totalset: Dict[str, int] = {}
         overlapcheck = {}
-        for label, next in state.arcs.items():
+        for label in state.arcs:
             if label in self.dfas:
                 if label in self.first:
                     fset = self.first[label]
@@ -190,9 +187,9 @@ class ParserGenerator:
             # self.dump_nfa(name, a, z)
             dfa = self.make_dfa(a, z)
             # self.dump_dfa(name, dfa)
-            oldlen = len(dfa)
+            oldlen = len(dfa)
             self.simplify_dfa(dfa)
-            newlen = len(dfa)
+            newlen = len(dfa)
             dfas[name] = dfa
             # print name, oldlen, newlen
             if startsymbol is None:
@@ -346,7 +343,7 @@ class ParserGenerator:
             self.raise_error(
                 "expected (...) or NAME or STRING, got %s/%s", self.type, self.value
             )
-            assert False
+            raise AssertionError
 
     def expect(self, type: int, value: Optional[Any] = None) -> str:
         if self.type != type or (value is not None and self.value != value):
@@ -368,7 +365,7 @@ class ParserGenerator:
         if args:
             try:
                 msg = msg % args
-            except:
+            except Exception:
                 msg = " ".join([msg] + list(map(str, args)))
         raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line))
 
index 117cc09d4ce3f50b9bd40b6ba4a6563163d420a6..ed2fc4e85fce976b972d7f69b038cb2270100670 100644 (file)
@@ -1,8 +1,6 @@
 """Token constants (from "token.h")."""
 
-from typing import Dict
-
-from typing import Final
+from typing import Dict, Final
 
 #  Taken from Python (r53757) and modified to include some tokens
 #   originally monkeypatched in by pgen2.tokenize
index 1dea89d7bb89c97914bbe0d21b82ffc113418296..d0607f4b1e17b202d4d19603287ed5cfc0a57288 100644 (file)
@@ -30,28 +30,41 @@ each time a new token is found."""
 import sys
 from typing import (
     Callable,
+    Final,
     Iterable,
     Iterator,
     List,
     Optional,
+    Pattern,
     Set,
     Tuple,
-    Pattern,
     Union,
     cast,
 )
 
-from typing import Final
-
-from blib2to3.pgen2.token import *
 from blib2to3.pgen2.grammar import Grammar
+from blib2to3.pgen2.token import (
+    ASYNC,
+    AWAIT,
+    COMMENT,
+    DEDENT,
+    ENDMARKER,
+    ERRORTOKEN,
+    INDENT,
+    NAME,
+    NEWLINE,
+    NL,
+    NUMBER,
+    OP,
+    STRING,
+    tok_name,
+)
 
 __author__ = "Ka-Ping Yee <ping@lfw.org>"
 __credits__ = "GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro"
 
 import re
 from codecs import BOM_UTF8, lookup
-from blib2to3.pgen2.token import *
 
 from . import token
 
@@ -334,7 +347,7 @@ def detect_encoding(readline: Callable[[], bytes]) -> Tuple[str, List[bytes]]:
         try:
             return readline()
         except StopIteration:
-            return b''
+            return b""
 
     def find_cookie(line: bytes) -> Optional[str]:
         try:
@@ -676,14 +689,12 @@ def generate_tokens(
         yield stashed
         stashed = None
 
-    for indent in indents[1:]:  # pop remaining indent levels
+    for _indent in indents[1:]:  # pop remaining indent levels
         yield (DEDENT, "", (lnum, 0), (lnum, 0), "")
     yield (ENDMARKER, "", (lnum, 0), (lnum, 0), "")
 
 
 if __name__ == "__main__":  # testing
-    import sys
-
     if len(sys.argv) > 1:
         tokenize(open(sys.argv[1]).readline)
     else:
index 1b4832362bf9aa65676e317bd22c67bee124943e..c30c630e81685f905e104c3f56460dc00eae6f21 100644 (file)
@@ -5,12 +5,10 @@
 
 # Python imports
 import os
-
 from typing import Union
 
 # Local imports
 from .pgen2 import driver
-
 from .pgen2.grammar import Grammar
 
 # Moved into initialize because mypyc can't handle __file__ (XXX bug)
index 156322cab7e01c6b21d7bb31f1263cc9412a5859..2a0cd6d196a3410536b4e9a07d5ded47d6c90b54 100644 (file)
@@ -15,15 +15,16 @@ There's also a pattern matching implementation here.
 from typing import (
     Any,
     Dict,
+    Iterable,
     Iterator,
     List,
     Optional,
+    Set,
     Tuple,
     TypeVar,
     Union,
-    Set,
-    Iterable,
 )
+
 from blib2to3.pgen2.grammar import Grammar
 
 __author__ = "Guido van Rossum <guido@python.org>"
@@ -58,7 +59,6 @@ RawNode = Tuple[int, Optional[str], Optional[Context], Optional[List[NL]]]
 
 
 class Base:
-
     """
     Abstract base class for Node and Leaf.
 
@@ -237,7 +237,6 @@ class Base:
 
 
 class Node(Base):
-
     """Concrete implementation for interior nodes."""
 
     fixers_applied: Optional[List[Any]]
@@ -378,7 +377,6 @@ class Node(Base):
 
 
 class Leaf(Base):
-
     """Concrete implementation for leaf nodes."""
 
     # Default values for instance variables
@@ -506,7 +504,6 @@ _Results = Dict[str, NL]
 
 
 class BasePattern:
-
     """
     A pattern is a tree matching pattern.
 
@@ -646,7 +643,6 @@ class LeafPattern(BasePattern):
 
 
 class NodePattern(BasePattern):
-
     wildcards: bool = False
 
     def __init__(
@@ -715,7 +711,6 @@ class NodePattern(BasePattern):
 
 
 class WildcardPattern(BasePattern):
-
     """
     A wildcard pattern can match zero or more nodes.