3 from concurrent.futures import Executor, ProcessPoolExecutor
4 from contextlib import contextmanager
5 from datetime import datetime
7 from functools import lru_cache, partial, wraps
11 from multiprocessing import Manager, freeze_support
13 from pathlib import Path
40 from typing_extensions import Final
41 from mypy_extensions import mypyc_attr
43 from appdirs import user_cache_dir
44 from dataclasses import dataclass, field, replace
47 from typed_ast import ast3, ast27
48 from pathspec import PathSpec
51 from blib2to3.pytree import Node, Leaf, type_repr
52 from blib2to3 import pygram, pytree
53 from blib2to3.pgen2 import driver, token
54 from blib2to3.pgen2.grammar import Grammar
55 from blib2to3.pgen2.parse import ParseError
57 from _black_version import version as __version__
59 DEFAULT_LINE_LENGTH = 88
60 DEFAULT_EXCLUDES = r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|\.svn|_build|buck-out|build|dist)/" # noqa: B950
61 DEFAULT_INCLUDES = r"\.pyi?$"
62 CACHE_DIR = Path(user_cache_dir("black", version=__version__))
74 LN = Union[Leaf, Node]
75 SplitFunc = Callable[["Line", Collection["Feature"]], Iterator["Line"]]
78 CacheInfo = Tuple[Timestamp, FileSize]
79 Cache = Dict[Path, CacheInfo]
80 out = partial(click.secho, bold=True, err=True)
81 err = partial(click.secho, fg="red", err=True)
83 pygram.initialize(CACHE_DIR)
84 syms = pygram.python_symbols
87 class NothingChanged(UserWarning):
88 """Raised when reformatted code is the same as source."""
91 class CannotSplit(Exception):
92 """A readable split that fits the allotted line length is impossible."""
95 class InvalidInput(ValueError):
96 """Raised when input source code fails all parse attempts."""
99 class WriteBack(Enum):
106 def from_configuration(cls, *, check: bool, diff: bool) -> "WriteBack":
107 if check and not diff:
110 return cls.DIFF if diff else cls.YES
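# A quick sketch of the mapping above (the check-only branch that returns
# cls.CHECK is elided in this listing): from_configuration(check=False,
# diff=False) gives WriteBack.YES, from_configuration(check=False, diff=True)
# gives WriteBack.DIFF, and --check without --diff gives WriteBack.CHECK.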
119 class TargetVersion(Enum):
128 def is_python2(self) -> bool:
129 return self is TargetVersion.PY27
132 PY36_VERSIONS = {TargetVersion.PY36, TargetVersion.PY37, TargetVersion.PY38}
136 # All string literals are unicode
139 NUMERIC_UNDERSCORES = 3
140 TRAILING_COMMA_IN_CALL = 4
141 TRAILING_COMMA_IN_DEF = 5
142 # The following two feature-flags are mutually exclusive, and exactly one should be
143 # set for every version of python.
144 ASYNC_IDENTIFIERS = 6
146 ASSIGNMENT_EXPRESSIONS = 8
147 POS_ONLY_ARGUMENTS = 9
150 VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
151 TargetVersion.PY27: {Feature.ASYNC_IDENTIFIERS},
152 TargetVersion.PY33: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS},
153 TargetVersion.PY34: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS},
154 TargetVersion.PY35: {
155 Feature.UNICODE_LITERALS,
156 Feature.TRAILING_COMMA_IN_CALL,
157 Feature.ASYNC_IDENTIFIERS,
159 TargetVersion.PY36: {
160 Feature.UNICODE_LITERALS,
162 Feature.NUMERIC_UNDERSCORES,
163 Feature.TRAILING_COMMA_IN_CALL,
164 Feature.TRAILING_COMMA_IN_DEF,
165 Feature.ASYNC_IDENTIFIERS,
167 TargetVersion.PY37: {
168 Feature.UNICODE_LITERALS,
170 Feature.NUMERIC_UNDERSCORES,
171 Feature.TRAILING_COMMA_IN_CALL,
172 Feature.TRAILING_COMMA_IN_DEF,
173 Feature.ASYNC_KEYWORDS,
175 TargetVersion.PY38: {
176 Feature.UNICODE_LITERALS,
178 Feature.NUMERIC_UNDERSCORES,
179 Feature.TRAILING_COMMA_IN_CALL,
180 Feature.TRAILING_COMMA_IN_DEF,
181 Feature.ASYNC_KEYWORDS,
182 Feature.ASSIGNMENT_EXPRESSIONS,
183 Feature.POS_ONLY_ARGUMENTS,
190 target_versions: Set[TargetVersion] = field(default_factory=set)
191 line_length: int = DEFAULT_LINE_LENGTH
192 string_normalization: bool = True
195 def get_cache_key(self) -> str:
196 if self.target_versions:
197 version_str = ",".join(
199 for version in sorted(self.target_versions, key=lambda v: v.value)
205 str(self.line_length),
206 str(int(self.string_normalization)),
207 str(int(self.is_pyi)),
209 return ".".join(parts)
212 def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
213 return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)
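# An illustration of how the table above is consulted; note that an empty set
# of target versions supports every feature, because all() over an empty
# iterable is True (which is why per-file auto-detection exists further down):
#
#     >>> supports_feature({TargetVersion.PY36, TargetVersion.PY37},
#     ...                  Feature.NUMERIC_UNDERSCORES)
#     True
#     >>> supports_feature({TargetVersion.PY35, TargetVersion.PY36},
#     ...                  Feature.NUMERIC_UNDERSCORES)
#     False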
216 def read_pyproject_toml(
217 ctx: click.Context, param: click.Parameter, value: Union[str, int, bool, None]
219 """Inject Black configuration from "pyproject.toml" into defaults in `ctx`.
221 Returns the path to a successfully found and read configuration file, None
224 assert not isinstance(value, (int, bool)), "Invalid parameter type passed"
226 root = find_project_root(ctx.params.get("src", ()))
227 path = root / "pyproject.toml"
234 pyproject_toml = toml.load(value)
235 config = pyproject_toml.get("tool", {}).get("black", {})
236 except (toml.TomlDecodeError, OSError) as e:
237 raise click.FileError(
238 filename=value, hint=f"Error reading configuration file: {e}"
244 if ctx.default_map is None:
246 ctx.default_map.update( # type: ignore # bad types in .pyi
247 {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
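# For reference, a pyproject.toml section like the following is what the
# callback above reads with toml.load(); the key rewriting turns
# "target-version" into "target_version" so the values land in ctx.default_map
# and behave like command-line defaults:
#
#     [tool.black]
#     line-length = 100
#     target-version = ["py36", "py37"]
#     include = '\.pyi?$'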
252 def target_version_option_callback(
253 c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
254 ) -> List[TargetVersion]:
255 """Compute the target versions from a --target-version flag.
257 This is its own function because mypy couldn't infer the type correctly
258 when it was a lambda, causing mypyc trouble.
260 return [TargetVersion[val.upper()] for val in v]
263 @click.command(context_settings=dict(help_option_names=["-h", "--help"]))
264 @click.option("-c", "--code", type=str, help="Format the code passed in as a string.")
269 default=DEFAULT_LINE_LENGTH,
270 help="How many characters per line to allow.",
276 type=click.Choice([v.name.lower() for v in TargetVersion]),
277 callback=target_version_option_callback,
280 "Python versions that should be supported by Black's output. [default: "
281 "per-file auto-detection]"
288 "Allow using Python 3.6-only syntax on all input files. This will put "
289 "trailing commas in function signatures and calls also after *args and "
290 "**kwargs. Deprecated; use --target-version instead. "
291 "[default: per-file auto-detection]"
298 "Format all input files like typing stubs regardless of file extension "
299 "(useful when piping source on standard input)."
304 "--skip-string-normalization",
306 help="Don't normalize string quotes or prefixes.",
312 "Don't write the files back, just return the status. Return code 0 "
313 "means nothing would change. Return code 1 means some files would be "
314 "reformatted. Return code 123 means there was an internal error."
320 help="Don't write the files back, just output a diff for each file on stdout.",
325 help="If --fast given, skip temporary sanity checks. [default: --safe]",
330 default=DEFAULT_INCLUDES,
332 "A regular expression that matches files and directories that should be "
333 "included on recursive searches. An empty value means all files are "
334 "included regardless of the name. Use forward slashes for directories on "
335 "all platforms (Windows, too). Exclusions are calculated first, inclusions "
343 default=DEFAULT_EXCLUDES,
345 "A regular expression that matches files and directories that should be "
346 "excluded on recursive searches. An empty value means no paths are excluded. "
347 "Use forward slashes for directories on all platforms (Windows, too). "
348 "Exclusions are calculated first, inclusions later."
357 "Don't emit non-error messages to stderr. Errors are still emitted; "
358 "silence those with 2>/dev/null."
366 "Also emit messages to stderr about files that were not changed or were "
367 "ignored due to --exclude=."
370 @click.version_option(version=__version__)
375 exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
382 exists=False, file_okay=True, dir_okay=False, readable=True, allow_dash=False
385 callback=read_pyproject_toml,
386 help="Read configuration from PATH.",
393 target_version: List[TargetVersion],
399 skip_string_normalization: bool,
404 src: Tuple[str, ...],
405 config: Optional[str],
407 """The uncompromising code formatter."""
408 write_back = WriteBack.from_configuration(check=check, diff=diff)
411 err(f"Cannot use both --target-version and --py36")
414 versions = set(target_version)
417 "--py36 is deprecated and will be removed in a future version. "
418 "Use --target-version py36 instead."
420 versions = PY36_VERSIONS
422 # We'll autodetect later.
425 target_versions=versions,
426 line_length=line_length,
428 string_normalization=not skip_string_normalization,
430 if config and verbose:
431 out(f"Using configuration from {config}.", bold=False, fg="blue")
433 print(format_str(code, mode=mode))
436 include_regex = re_compile_maybe_verbose(include)
438 err(f"Invalid regular expression for include given: {include!r}")
441 exclude_regex = re_compile_maybe_verbose(exclude)
443 err(f"Invalid regular expression for exclude given: {exclude!r}")
445 report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose)
446 root = find_project_root(src)
447 sources: Set[Path] = set()
448 path_empty(src, quiet, verbose, ctx)
453 gen_python_files_in_dir(
454 p, root, include_regex, exclude_regex, report, get_gitignore(root)
457 elif p.is_file() or s == "-":
458 # if a file was explicitly given, we don't care about its extension
461 err(f"invalid path: {s}")
462 if len(sources) == 0:
463 if verbose or not quiet:
464 out("No Python files are present to be formatted. Nothing to do 😴")
467 if len(sources) == 1:
471 write_back=write_back,
477 sources=sources, fast=fast, write_back=write_back, mode=mode, report=report
480 if verbose or not quiet:
481 out("Oh no! 💥 💔 💥" if report.return_code else "All done! ✨ 🍰 ✨")
482 click.secho(str(report), err=True)
483 ctx.exit(report.return_code)
487 src: Tuple[str, ...], quiet: bool, verbose: bool, ctx: click.Context
490 Exit if there is no `src` provided for formatting
493 if verbose or not quiet:
494 out("No Path provided. Nothing to do 😴")
499 src: Path, fast: bool, write_back: WriteBack, mode: FileMode, report: "Report"
501 """Reformat a single file under `src` without spawning child processes.
503 `fast`, `write_back`, and `mode` options are passed to
504 :func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
508 if not src.is_file() and str(src) == "-":
509 if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode):
510 changed = Changed.YES
513 if write_back != WriteBack.DIFF:
514 cache = read_cache(mode)
515 res_src = src.resolve()
516 if res_src in cache and cache[res_src] == get_cache_info(res_src):
517 changed = Changed.CACHED
518 if changed is not Changed.CACHED and format_file_in_place(
519 src, fast=fast, write_back=write_back, mode=mode
521 changed = Changed.YES
522 if (write_back is WriteBack.YES and changed is not Changed.CACHED) or (
523 write_back is WriteBack.CHECK and changed is Changed.NO
525 write_cache(cache, [src], mode)
526 report.done(src, changed)
527 except Exception as exc:
528 report.failed(src, str(exc))
534 write_back: WriteBack,
538 """Reformat multiple files using a ProcessPoolExecutor."""
539 loop = asyncio.get_event_loop()
540 worker_count = os.cpu_count()
541 if sys.platform == "win32":
542 # Work around https://bugs.python.org/issue26903
543 worker_count = min(worker_count, 61)
544 executor = ProcessPoolExecutor(max_workers=worker_count)
546 loop.run_until_complete(
550 write_back=write_back,
562 async def schedule_formatting(
565 write_back: WriteBack,
568 loop: asyncio.AbstractEventLoop,
571 """Run formatting of `sources` in parallel using the provided `executor`.
573 (Use ProcessPoolExecutors for actual parallelism.)
575 `write_back`, `fast`, and `mode` options are passed to
576 :func:`format_file_in_place`.
579 if write_back != WriteBack.DIFF:
580 cache = read_cache(mode)
581 sources, cached = filter_cached(cache, sources)
582 for src in sorted(cached):
583 report.done(src, Changed.CACHED)
588 sources_to_cache = []
590 if write_back == WriteBack.DIFF:
591 # For diff output, we need locks to ensure we don't interleave output
592 # from different processes.
594 lock = manager.Lock()
596 asyncio.ensure_future(
597 loop.run_in_executor(
598 executor, format_file_in_place, src, fast, mode, write_back, lock
601 for src in sorted(sources)
603 pending: Iterable["asyncio.Future[bool]"] = tasks.keys()
605 loop.add_signal_handler(signal.SIGINT, cancel, pending)
606 loop.add_signal_handler(signal.SIGTERM, cancel, pending)
607 except NotImplementedError:
608 # There are no good alternatives for these on Windows.
611 done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
613 src = tasks.pop(task)
615 cancelled.append(task)
616 elif task.exception():
617 report.failed(src, str(task.exception()))
619 changed = Changed.YES if task.result() else Changed.NO
620 # If the file was written back or was successfully checked as
621 # well-formatted, store this information in the cache.
622 if write_back is WriteBack.YES or (
623 write_back is WriteBack.CHECK and changed is Changed.NO
625 sources_to_cache.append(src)
626 report.done(src, changed)
628 await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
630 write_cache(cache, sources_to_cache, mode)
633 def format_file_in_place(
637 write_back: WriteBack = WriteBack.NO,
638 lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy
640 """Format file under `src` path. Return True if changed.
642 If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted
644 `mode` and `fast` options are passed to :func:`format_file_contents`.
646 if src.suffix == ".pyi":
647 mode = replace(mode, is_pyi=True)
649 then = datetime.utcfromtimestamp(src.stat().st_mtime)
650 with open(src, "rb") as buf:
651 src_contents, encoding, newline = decode_bytes(buf.read())
653 dst_contents = format_file_contents(src_contents, fast=fast, mode=mode)
654 except NothingChanged:
657 if write_back == WriteBack.YES:
658 with open(src, "w", encoding=encoding, newline=newline) as f:
659 f.write(dst_contents)
660 elif write_back == WriteBack.DIFF:
661 now = datetime.utcnow()
662 src_name = f"{src}\t{then} +0000"
663 dst_name = f"{src}\t{now} +0000"
664 diff_contents = diff(src_contents, dst_contents, src_name, dst_name)
666 with lock or nullcontext():
667 f = io.TextIOWrapper(
673 f.write(diff_contents)
679 def format_stdin_to_stdout(
680 fast: bool, *, write_back: WriteBack = WriteBack.NO, mode: FileMode
682 """Format file on stdin. Return True if changed.
684 If `write_back` is YES, write reformatted code back to stdout. If it is DIFF,
685 write a diff to stdout. The `mode` argument is passed to
686 :func:`format_file_contents`.
688 then = datetime.utcnow()
689 src, encoding, newline = decode_bytes(sys.stdin.buffer.read())
692 dst = format_file_contents(src, fast=fast, mode=mode)
695 except NothingChanged:
699 f = io.TextIOWrapper(
700 sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True
702 if write_back == WriteBack.YES:
704 elif write_back == WriteBack.DIFF:
705 now = datetime.utcnow()
706 src_name = f"STDIN\t{then} +0000"
707 dst_name = f"STDOUT\t{now} +0000"
708 f.write(diff(src, dst, src_name, dst_name))
712 def format_file_contents(
713 src_contents: str, *, fast: bool, mode: FileMode
715 """Reformat contents a file and return new contents.
717 If `fast` is False, additionally confirm that the reformatted code is
718 valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
719 `mode` is passed to :func:`format_str`.
721 if src_contents.strip() == "":
724 dst_contents = format_str(src_contents, mode=mode)
725 if src_contents == dst_contents:
729 assert_equivalent(src_contents, dst_contents)
730 assert_stable(src_contents, dst_contents, mode=mode)
734 def format_str(src_contents: str, *, mode: FileMode) -> FileContent:
735 """Reformat a string and return new contents.
737 `mode` determines formatting options, such as how many characters per line are
740 src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
742 future_imports = get_future_imports(src_node)
743 if mode.target_versions:
744 versions = mode.target_versions
746 versions = detect_target_versions(src_node)
747 normalize_fmt_off(src_node)
748 lines = LineGenerator(
749 remove_u_prefix="unicode_literals" in future_imports
750 or supports_feature(versions, Feature.UNICODE_LITERALS),
752 normalize_strings=mode.string_normalization,
754 elt = EmptyLineTracker(is_pyi=mode.is_pyi)
757 split_line_features = {
759 for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF}
760 if supports_feature(versions, feature)
762 for current_line in lines.visit(src_node):
763 dst_contents.append(str(empty_line) * after)
764 before, after = elt.maybe_empty_lines(current_line)
765 dst_contents.append(str(empty_line) * before)
766 for line in split_line(
767 current_line, line_length=mode.line_length, features=split_line_features
769 dst_contents.append(str(line))
770 return "".join(dst_contents)
773 def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
774 """Return a tuple of (decoded_contents, encoding, newline).
776 `newline` is either CRLF or LF but `decoded_contents` is decoded with
777 universal newlines (i.e. only contains LF).
779 srcbuf = io.BytesIO(src)
780 encoding, lines = tokenize.detect_encoding(srcbuf.readline)
782 return "", encoding, "\n"
784 newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n"
786 with io.TextIOWrapper(srcbuf, encoding) as tiow:
787 return tiow.read(), encoding, newline
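# For example, CRLF input keeps its newline style in the returned metadata
# while the decoded text itself only contains "\n":
#
#     >>> decode_bytes(b"x = 1\r\ny = 2\r\n")
#     ('x = 1\ny = 2\n', 'utf-8', '\r\n')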
790 def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
791 if not target_versions:
792 # No target_version specified, so try all grammars.
795 pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,
797 pygram.python_grammar_no_print_statement_no_exec_statement,
798 # Python 2.7 with future print_function import
799 pygram.python_grammar_no_print_statement,
801 pygram.python_grammar,
804 if all(version.is_python2() for version in target_versions):
805 # Python 2-only code, so try Python 2 grammars.
807 # Python 2.7 with future print_function import
808 pygram.python_grammar_no_print_statement,
810 pygram.python_grammar,
813 # Python 3-compatible code, so only try Python 3 grammar.
815 # If we have to parse both, try to parse async as a keyword first
816 if not supports_feature(target_versions, Feature.ASYNC_IDENTIFIERS):
819 pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords
821 if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
823 grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)
824 # At least one of the above branches must have been taken, because every Python
825 # version has exactly one of the two 'ASYNC_*' flags
829 def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
830 """Given a string with source, return the lib2to3 Node."""
831 if src_txt[-1:] != "\n":
834 for grammar in get_grammars(set(target_versions)):
835 drv = driver.Driver(grammar, pytree.convert)
837 result = drv.parse_string(src_txt, True)
840 except ParseError as pe:
841 lineno, column = pe.context[1]
842 lines = src_txt.splitlines()
844 faulty_line = lines[lineno - 1]
846 faulty_line = "<line number missing in source>"
847 exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}")
851 if isinstance(result, Leaf):
852 result = Node(syms.file_input, [result])
856 def lib2to3_unparse(node: Node) -> str:
857 """Given a lib2to3 node, return its string representation."""
865 class Visitor(Generic[T]):
866 """Basic lib2to3 visitor that yields things of type `T` on `visit()`."""
868 def visit(self, node: LN) -> Iterator[T]:
869 """Main method to visit `node` and its children.
871 It tries to find a `visit_*()` method for the given `node.type`, like
872 `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects.
873 If no dedicated `visit_*()` method is found, chooses `visit_default()`
876 Then yields objects of type `T` from the selected visitor.
879 name = token.tok_name[node.type]
881 name = str(type_repr(node.type))
882 # We explicitly branch on whether a visitor exists (instead of
883 # using self.visit_default as the default arg to getattr) in order
884 # to save needing to create a bound method object and so mypyc can
885 # generate a native call to visit_default.
886 visitf = getattr(self, f"visit_{name}", None)
888 yield from visitf(node)
890 yield from self.visit_default(node)
892 def visit_default(self, node: LN) -> Iterator[T]:
893 """Default `visit_*()` implementation. Recurses to children of `node`."""
894 if isinstance(node, Node):
895 for child in node.children:
896 yield from self.visit(child)
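# A hedged sketch of how this visitor is meant to be subclassed; the class
# below is hypothetical and only illustrates the visit_<TYPE> dispatch:
#
#     class NameCollector(Visitor[str]):
#         def visit_NAME(self, node: Leaf) -> Iterator[str]:
#             yield node.value
#
#     >>> list(NameCollector().visit(lib2to3_parse("x = y + 1\n")))
#     ['x', 'y']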
900 class DebugVisitor(Visitor[T]):
903 def visit_default(self, node: LN) -> Iterator[T]:
904 indent = " " * (2 * self.tree_depth)
905 if isinstance(node, Node):
906 _type = type_repr(node.type)
907 out(f"{indent}{_type}", fg="yellow")
909 for child in node.children:
910 yield from self.visit(child)
913 out(f"{indent}/{_type}", fg="yellow", bold=False)
915 _type = token.tok_name.get(node.type, str(node.type))
916 out(f"{indent}{_type}", fg="blue", nl=False)
918 # We don't have to handle prefixes for `Node` objects since
919 # that delegates to the first child anyway.
920 out(f" {node.prefix!r}", fg="green", bold=False, nl=False)
921 out(f" {node.value!r}", fg="blue", bold=False)
924 def show(cls, code: Union[str, Leaf, Node]) -> None:
925 """Pretty-print the lib2to3 AST of a given string of `code`.
927 Convenience method for debugging.
929 v: DebugVisitor[None] = DebugVisitor()
930 if isinstance(code, str):
931 code = lib2to3_parse(code)
935 WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE}
946 STANDALONE_COMMENT: Final = 153
947 token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT"
948 LOGIC_OPERATORS: Final = {"and", "or"}
949 COMPARATORS: Final = {
957 MATH_OPERATORS: Final = {
973 STARS: Final = {token.STAR, token.DOUBLESTAR}
974 VARARGS_SPECIALS: Final = STARS | {token.SLASH}
975 VARARGS_PARENTS: Final = {
977 syms.argument, # double star in arglist
978 syms.trailer, # single argument to call
980 syms.varargslist, # lambdas
982 UNPACKING_PARENTS: Final = {
983 syms.atom, # single element of a list or set literal
987 syms.testlist_star_expr,
989 TEST_DESCENDANTS: Final = {
1006 ASSIGNMENTS: Final = {
1022 COMPREHENSION_PRIORITY: Final = 20
1023 COMMA_PRIORITY: Final = 18
1024 TERNARY_PRIORITY: Final = 16
1025 LOGIC_PRIORITY: Final = 14
1026 STRING_PRIORITY: Final = 12
1027 COMPARATOR_PRIORITY: Final = 10
1028 MATH_PRIORITIES: Final = {
1030 token.CIRCUMFLEX: 8,
1033 token.RIGHTSHIFT: 6,
1038 token.DOUBLESLASH: 4,
1042 token.DOUBLESTAR: 2,
1044 DOT_PRIORITY: Final = 1
1048 class BracketTracker:
1049 """Keeps track of brackets on a line."""
1052 bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
1053 delimiters: Dict[LeafID, Priority] = field(default_factory=dict)
1054 previous: Optional[Leaf] = None
1055 _for_loop_depths: List[int] = field(default_factory=list)
1056 _lambda_argument_depths: List[int] = field(default_factory=list)
1058 def mark(self, leaf: Leaf) -> None:
1059 """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
1061 All leaves receive an int `bracket_depth` field that stores how deep
1062 within brackets a given leaf is. 0 means there are no enclosing brackets
1063 that started on this line.
1065 If a leaf is itself a closing bracket, it receives an `opening_bracket`
1066 field that it forms a pair with. This is a one-directional link to
1067 avoid reference cycles.
1069 If a leaf is a delimiter (a token on which Black can split the line if
1070 needed) and it's on depth 0, its `id()` is stored in the tracker's
1073 if leaf.type == token.COMMENT:
1076 self.maybe_decrement_after_for_loop_variable(leaf)
1077 self.maybe_decrement_after_lambda_arguments(leaf)
1078 if leaf.type in CLOSING_BRACKETS:
1080 opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
1081 leaf.opening_bracket = opening_bracket
1082 leaf.bracket_depth = self.depth
1084 delim = is_split_before_delimiter(leaf, self.previous)
1085 if delim and self.previous is not None:
1086 self.delimiters[id(self.previous)] = delim
1088 delim = is_split_after_delimiter(leaf, self.previous)
1090 self.delimiters[id(leaf)] = delim
1091 if leaf.type in OPENING_BRACKETS:
1092 self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
1094 self.previous = leaf
1095 self.maybe_increment_lambda_arguments(leaf)
1096 self.maybe_increment_for_loop_variable(leaf)
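    # For a line like "func(a, [b, c])" this leaves the name and the outer
    # parentheses at bracket_depth 0, "a" and the first comma at depth 1, and
    # "b"/"c" at depth 2; only depth-0 delimiters are recorded in
    # self.delimiters, which max_delimiter_priority() below consults.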
1098 def any_open_brackets(self) -> bool:
1099 """Return True if there is an yet unmatched open bracket on the line."""
1100 return bool(self.bracket_match)
1102 def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
1103 """Return the highest priority of a delimiter found on the line.
1105 Values are consistent with what `is_split_*_delimiter()` return.
1106 Raises ValueError on no delimiters.
1108 return max(v for k, v in self.delimiters.items() if k not in exclude)
1110 def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
1111 """Return the number of delimiters with the given `priority`.
1113 If no `priority` is passed, defaults to max priority on the line.
1115 if not self.delimiters:
1118 priority = priority or self.max_delimiter_priority()
1119 return sum(1 for p in self.delimiters.values() if p == priority)
1121 def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
1122 """In a for loop, or comprehension, the variables are often unpacks.
1124 To avoid splitting on the comma in this situation, increase the depth of
1125 tokens between `for` and `in`.
1127 if leaf.type == token.NAME and leaf.value == "for":
1129 self._for_loop_depths.append(self.depth)
1134 def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
1135 """See `maybe_increment_for_loop_variable` above for explanation."""
1137 self._for_loop_depths
1138 and self._for_loop_depths[-1] == self.depth
1139 and leaf.type == token.NAME
1140 and leaf.value == "in"
1143 self._for_loop_depths.pop()
1148 def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
1149 """In a lambda expression, there might be more than one argument.
1151 To avoid splitting on the comma in this situation, increase the depth of
1152 tokens between `lambda` and `:`.
1154 if leaf.type == token.NAME and leaf.value == "lambda":
1156 self._lambda_argument_depths.append(self.depth)
1161 def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
1162 """See `maybe_increment_lambda_arguments` above for explanation."""
1164 self._lambda_argument_depths
1165 and self._lambda_argument_depths[-1] == self.depth
1166 and leaf.type == token.COLON
1169 self._lambda_argument_depths.pop()
1174 def get_open_lsqb(self) -> Optional[Leaf]:
1175 """Return the most recent opening square bracket (if any)."""
1176 return self.bracket_match.get((self.depth - 1, token.RSQB))
1181 """Holds leaves and comments. Can be printed with `str(line)`."""
1184 leaves: List[Leaf] = field(default_factory=list)
1185 # keys ordered like `leaves`
1186 comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)
1187 bracket_tracker: BracketTracker = field(default_factory=BracketTracker)
1188 inside_brackets: bool = False
1189 should_explode: bool = False
1191 def append(self, leaf: Leaf, preformatted: bool = False) -> None:
1192 """Add a new `leaf` to the end of the line.
1194 Unless `preformatted` is True, the `leaf` will receive a new consistent
1195 whitespace prefix and metadata applied by :class:`BracketTracker`.
1196 Trailing commas may be removed, and unpacked for-loop variables are
1197 demoted from being delimiters.
1199 Inline comments are put aside.
1201 has_value = leaf.type in BRACKETS or bool(leaf.value.strip())
1205 if token.COLON == leaf.type and self.is_class_paren_empty:
1206 del self.leaves[-2:]
1207 if self.leaves and not preformatted:
1208 # Note: at this point leaf.prefix should be empty except for
1209 # imports, for which we only preserve newlines.
1210 leaf.prefix += whitespace(
1211 leaf, complex_subscript=self.is_complex_subscript(leaf)
1213 if self.inside_brackets or not preformatted:
1214 self.bracket_tracker.mark(leaf)
1215 self.maybe_remove_trailing_comma(leaf)
1216 if not self.append_comment(leaf):
1217 self.leaves.append(leaf)
1219 def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
1220 """Like :func:`append()` but disallow invalid standalone comment structure.
1222 Raises ValueError when any `leaf` is appended after a standalone comment
1223 or when a standalone comment is not the first leaf on the line.
1225 if self.bracket_tracker.depth == 0:
1227 raise ValueError("cannot append to standalone comments")
1229 if self.leaves and leaf.type == STANDALONE_COMMENT:
1231 "cannot append standalone comments to a populated line"
1234 self.append(leaf, preformatted=preformatted)
1237 def is_comment(self) -> bool:
1238 """Is this line a standalone comment?"""
1239 return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
1242 def is_decorator(self) -> bool:
1243 """Is this line a decorator?"""
1244 return bool(self) and self.leaves[0].type == token.AT
1247 def is_import(self) -> bool:
1248 """Is this an import line?"""
1249 return bool(self) and is_import(self.leaves[0])
1252 def is_class(self) -> bool:
1253 """Is this line a class definition?"""
1256 and self.leaves[0].type == token.NAME
1257 and self.leaves[0].value == "class"
1261 def is_stub_class(self) -> bool:
1262 """Is this line a class definition with a body consisting only of "..."?"""
1263 return self.is_class and self.leaves[-3:] == [
1264 Leaf(token.DOT, ".") for _ in range(3)
1268 def is_collection_with_optional_trailing_comma(self) -> bool:
1269 """Is this line a collection literal with a trailing comma that's optional?
1271 Note that the trailing comma in a 1-tuple is not optional.
1273 if not self.leaves or len(self.leaves) < 4:
1276 # Look for and address a trailing colon.
1277 if self.leaves[-1].type == token.COLON:
1278 closer = self.leaves[-2]
1281 closer = self.leaves[-1]
1283 if closer.type not in CLOSING_BRACKETS or self.inside_brackets:
1286 if closer.type == token.RPAR:
1287 # Tuples require an extra check, because if there's only
1288 # one element in the tuple removing the comma unmakes the
1291 # We also check for parens before looking for the trailing
1292 # comma because in some cases (e.g. assigning a dict
1293 # literal) the literal gets wrapped in temporary parens
1294 # during parsing. This case is covered by the
1295 # collections.py test data.
1296 opener = closer.opening_bracket
1297 for _open_index, leaf in enumerate(self.leaves):
1302 # Couldn't find the matching opening paren, play it safe.
1306 comma_depth = self.leaves[close_index - 1].bracket_depth
1307 for leaf in self.leaves[_open_index + 1 : close_index]:
1308 if leaf.bracket_depth == comma_depth and leaf.type == token.COMMA:
1311 # We haven't looked yet for the trailing comma because
1312 # we might also have caught noop parens.
1313 return self.leaves[close_index - 1].type == token.COMMA
1316 return False # it's either a one-tuple or didn't have a trailing comma
1318 if self.leaves[close_index - 1].type in CLOSING_BRACKETS:
1320 closer = self.leaves[close_index]
1321 if closer.type == token.RPAR:
1322 # TODO: this is a gut feeling. Will we ever see this?
1325 if self.leaves[close_index - 1].type != token.COMMA:
1331 def is_def(self) -> bool:
1332 """Is this a function definition? (Also returns True for async defs.)"""
1334 first_leaf = self.leaves[0]
1339 second_leaf: Optional[Leaf] = self.leaves[1]
1342 return (first_leaf.type == token.NAME and first_leaf.value == "def") or (
1343 first_leaf.type == token.ASYNC
1344 and second_leaf is not None
1345 and second_leaf.type == token.NAME
1346 and second_leaf.value == "def"
1350 def is_class_paren_empty(self) -> bool:
1351 """Is this a class with no base classes but using parentheses?
1353 Those are unnecessary and should be removed.
1357 and len(self.leaves) == 4
1359 and self.leaves[2].type == token.LPAR
1360 and self.leaves[2].value == "("
1361 and self.leaves[3].type == token.RPAR
1362 and self.leaves[3].value == ")"
1366 def is_triple_quoted_string(self) -> bool:
1367 """Is the line a triple quoted string?"""
1370 and self.leaves[0].type == token.STRING
1371 and self.leaves[0].value.startswith(('"""', "'''"))
1374 def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
1375 """If so, needs to be split before emitting."""
1376 for leaf in self.leaves:
1377 if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit:
1382 def contains_uncollapsable_type_comments(self) -> bool:
1385 last_leaf = self.leaves[-1]
1386 ignored_ids.add(id(last_leaf))
1387 if last_leaf.type == token.COMMA or (
1388 last_leaf.type == token.RPAR and not last_leaf.value
1390 # When trailing commas or optional parens are inserted by Black for
1391 # consistency, comments after the previous last element are not moved
1392 # (they don't have to, rendering will still be correct). So we ignore
1393 # trailing commas and invisible parens.
1394 last_leaf = self.leaves[-2]
1395 ignored_ids.add(id(last_leaf))
1399 # A type comment is uncollapsable if it is attached to a leaf
1400 # that isn't at the end of the line (since that could cause it
1401 # to get associated to a different argument) or if there are
1402 # comments before it (since that could cause it to get hidden
1404 comment_seen = False
1405 for leaf_id, comments in self.comments.items():
1406 for comment in comments:
1407 if is_type_comment(comment):
1408 if comment_seen or (
1409 not is_type_comment(comment, " ignore")
1410 and leaf_id not in ignored_ids
1418 def contains_unsplittable_type_ignore(self) -> bool:
1422 # If a 'type: ignore' is attached to the end of a line, we
1423 # can't split the line, because we can't know which of the
1424 # subexpressions the ignore was meant to apply to.
1426 # We only want this to apply to actual physical lines from the
1427 # original source, though: we don't want the presence of a
1428 # 'type: ignore' at the end of a multiline expression to
1429 # justify pushing it all onto one line. Thus we
1430 # (unfortunately) need to check the actual source lines and
1431 # only report an unsplittable 'type: ignore' if this line was
1432 # one line in the original code.
1434 # Grab the first and last line numbers, skipping generated leaves
1435 first_line = next((l.lineno for l in self.leaves if l.lineno != 0), 0)
1436 last_line = next((l.lineno for l in reversed(self.leaves) if l.lineno != 0), 0)
1438 if first_line == last_line:
1439 # We look at the last two leaves since a comma or an
1440 # invisible paren could have been added at the end of the
1442 for node in self.leaves[-2:]:
1443 for comment in self.comments.get(id(node), []):
1444 if is_type_comment(comment, " ignore"):
1449 def contains_multiline_strings(self) -> bool:
1450 return any(is_multiline_string(leaf) for leaf in self.leaves)
1452 def maybe_remove_trailing_comma(self, closing: Leaf) -> bool:
1453 """Remove trailing comma if there is one and it's safe."""
1454 if not (self.leaves and self.leaves[-1].type == token.COMMA):
1457 # We remove trailing commas only in the case of importing a
1458 # single name from a module.
1462 and len(self.leaves) > 4
1463 and self.leaves[-1].type == token.COMMA
1464 and closing.type in CLOSING_BRACKETS
1465 and self.leaves[-4].type == token.NAME
1467 # regular `from foo import bar,`
1468 self.leaves[-4].value == "import"
1469 # `from foo import (bar as baz,)`
1471 len(self.leaves) > 6
1472 and self.leaves[-6].value == "import"
1473 and self.leaves[-3].value == "as"
1475 # `from foo import bar as baz,`
1477 len(self.leaves) > 5
1478 and self.leaves[-5].value == "import"
1479 and self.leaves[-3].value == "as"
1482 and closing.type == token.RPAR
1486 self.remove_trailing_comma()
1489 def append_comment(self, comment: Leaf) -> bool:
1490 """Add an inline or standalone comment to the line."""
1492 comment.type == STANDALONE_COMMENT
1493 and self.bracket_tracker.any_open_brackets()
1498 if comment.type != token.COMMENT:
1502 comment.type = STANDALONE_COMMENT
1506 last_leaf = self.leaves[-1]
1508 last_leaf.type == token.RPAR
1509 and not last_leaf.value
1510 and last_leaf.parent
1511 and len(list(last_leaf.parent.leaves())) <= 3
1512 and not is_type_comment(comment)
1514 # Comments on optional parens wrapping a single leaf should belong to
1515 # the wrapped node except if it's a type comment. Pinning the comment like
1516 # this avoids unstable formatting caused by comment migration.
1517 if len(self.leaves) < 2:
1518 comment.type = STANDALONE_COMMENT
1522 last_leaf = self.leaves[-2]
1523 self.comments.setdefault(id(last_leaf), []).append(comment)
1526 def comments_after(self, leaf: Leaf) -> List[Leaf]:
1527 """Generate comments that should appear directly after `leaf`."""
1528 return self.comments.get(id(leaf), [])
1530 def remove_trailing_comma(self) -> None:
1531 """Remove the trailing comma and moves the comments attached to it."""
1532 trailing_comma = self.leaves.pop()
1533 trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
1534 self.comments.setdefault(id(self.leaves[-1]), []).extend(
1535 trailing_comma_comments
1538 def is_complex_subscript(self, leaf: Leaf) -> bool:
1539 """Return True iff `leaf` is part of a slice with non-trivial exprs."""
1540 open_lsqb = self.bracket_tracker.get_open_lsqb()
1541 if open_lsqb is None:
1544 subscript_start = open_lsqb.next_sibling
1546 if isinstance(subscript_start, Node):
1547 if subscript_start.type == syms.listmaker:
1550 if subscript_start.type == syms.subscriptlist:
1551 subscript_start = child_towards(subscript_start, leaf)
1552 return subscript_start is not None and any(
1553 n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
1556 def __str__(self) -> str:
1557 """Render the line."""
1561 indent = " " * self.depth
1562 leaves = iter(self.leaves)
1563 first = next(leaves)
1564 res = f"{first.prefix}{indent}{first.value}"
1567 for comment in itertools.chain.from_iterable(self.comments.values()):
1571 def __bool__(self) -> bool:
1572 """Return True if the line has leaves or comments."""
1573 return bool(self.leaves or self.comments)
1577 class EmptyLineTracker:
1578 """Provides a stateful method that returns the number of potential extra
1579 empty lines needed before and after the currently processed line.
1581 Note: this tracker works on lines that haven't been split yet. It assumes
1582 the prefix of the first leaf consists of optional newlines. Those newlines
1583 are consumed by `maybe_empty_lines()` and included in the computation.
1586 is_pyi: bool = False
1587 previous_line: Optional[Line] = None
1588 previous_after: int = 0
1589 previous_defs: List[int] = field(default_factory=list)
1591 def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
1592 """Return the number of extra empty lines before and after the `current_line`.
1594 This is for separating `def`, `async def` and `class` with extra empty
1595 lines (two on module-level).
1597 before, after = self._maybe_empty_lines(current_line)
1599 # Black should not insert empty lines at the beginning
1602 if self.previous_line is None
1603 else before - self.previous_after
1605 self.previous_after = after
1606 self.previous_line = current_line
1607 return before, after
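    # In practice this is what produces Black's familiar spacing: two blank
    # lines between consecutive top-level def/class blocks, a single blank line
    # at deeper indentation, and at most one even at module level when
    # formatting .pyi stubs (see max_allowed below).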
1609 def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
1611 if current_line.depth == 0:
1612 max_allowed = 1 if self.is_pyi else 2
1613 if current_line.leaves:
1614 # Consume the first leaf's extra newlines.
1615 first_leaf = current_line.leaves[0]
1616 before = first_leaf.prefix.count("\n")
1617 before = min(before, max_allowed)
1618 first_leaf.prefix = ""
1621 depth = current_line.depth
1622 while self.previous_defs and self.previous_defs[-1] >= depth:
1623 self.previous_defs.pop()
1625 before = 0 if depth else 1
1627 before = 1 if depth else 2
1628 if current_line.is_decorator or current_line.is_def or current_line.is_class:
1629 return self._maybe_empty_lines_for_class_or_def(current_line, before)
1633 and self.previous_line.is_import
1634 and not current_line.is_import
1635 and depth == self.previous_line.depth
1637 return (before or 1), 0
1641 and self.previous_line.is_class
1642 and current_line.is_triple_quoted_string
1648 def _maybe_empty_lines_for_class_or_def(
1649 self, current_line: Line, before: int
1650 ) -> Tuple[int, int]:
1651 if not current_line.is_decorator:
1652 self.previous_defs.append(current_line.depth)
1653 if self.previous_line is None:
1654 # Don't insert empty lines before the first line in the file.
1657 if self.previous_line.is_decorator:
1660 if self.previous_line.depth < current_line.depth and (
1661 self.previous_line.is_class or self.previous_line.is_def
1666 self.previous_line.is_comment
1667 and self.previous_line.depth == current_line.depth
1673 if self.previous_line.depth > current_line.depth:
1675 elif current_line.is_class or self.previous_line.is_class:
1676 if current_line.is_stub_class and self.previous_line.is_stub_class:
1677 # No blank line between classes with an empty body
1681 elif current_line.is_def and not self.previous_line.is_def:
1682 # Blank line between a block of functions and a block of non-functions
1688 if current_line.depth and newlines:
1694 class LineGenerator(Visitor[Line]):
1695 """Generates reformatted Line objects. Empty lines are not emitted.
1697 Note: destroys the tree it's visiting by mutating prefixes of its leaves
1698 in ways that will no longer stringify to valid Python code on the tree.
1701 is_pyi: bool = False
1702 normalize_strings: bool = True
1703 current_line: Line = field(default_factory=Line)
1704 remove_u_prefix: bool = False
1706 def line(self, indent: int = 0) -> Iterator[Line]:
1709 If the line is empty, only emit if it makes sense.
1710 If the line is too long, split it first and then generate.
1712 If any lines were generated, set up a new current_line.
1714 if not self.current_line:
1715 self.current_line.depth += indent
1716 return # Line is empty, don't emit. Creating a new one unnecessary.
1718 complete_line = self.current_line
1719 self.current_line = Line(depth=complete_line.depth + indent)
1722 def visit_default(self, node: LN) -> Iterator[Line]:
1723 """Default `visit_*()` implementation. Recurses to children of `node`."""
1724 if isinstance(node, Leaf):
1725 any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
1726 for comment in generate_comments(node):
1727 if any_open_brackets:
1728 # any comment within brackets is subject to splitting
1729 self.current_line.append(comment)
1730 elif comment.type == token.COMMENT:
1731 # regular trailing comment
1732 self.current_line.append(comment)
1733 yield from self.line()
1736 # regular standalone comment
1737 yield from self.line()
1739 self.current_line.append(comment)
1740 yield from self.line()
1742 normalize_prefix(node, inside_brackets=any_open_brackets)
1743 if self.normalize_strings and node.type == token.STRING:
1744 normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix)
1745 normalize_string_quotes(node)
1746 if node.type == token.NUMBER:
1747 normalize_numeric_literal(node)
1748 if node.type not in WHITESPACE:
1749 self.current_line.append(node)
1750 yield from super().visit_default(node)
1752 def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
1753 """Increase indentation level, maybe yield a line."""
1754 # In blib2to3 INDENT never holds comments.
1755 yield from self.line(+1)
1756 yield from self.visit_default(node)
1758 def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
1759 """Decrease indentation level, maybe yield a line."""
1760 # The current line might still wait for trailing comments. At DEDENT time
1761 # there won't be any (they would be prefixes on the preceding NEWLINE).
1762 # Emit the line then.
1763 yield from self.line()
1765 # While DEDENT has no value, its prefix may contain standalone comments
1766 # that belong to the current indentation level. Get 'em.
1767 yield from self.visit_default(node)
1769 # Finally, emit the dedent.
1770 yield from self.line(-1)
1773 self, node: Node, keywords: Set[str], parens: Set[str]
1774 ) -> Iterator[Line]:
1775 """Visit a statement.
1777 This implementation is shared for `if`, `while`, `for`, `try`, `except`,
1778 `def`, `with`, `class`, `assert` and assignments.
1780 The relevant Python language `keywords` for a given statement will be
1781 NAME leaves within it. This method puts those on a separate line.
1783 `parens` holds a set of string leaf values immediately after which
1784 invisible parens should be put.
1786 normalize_invisible_parens(node, parens_after=parens)
1787 for child in node.children:
1788 if child.type == token.NAME and child.value in keywords: # type: ignore
1789 yield from self.line()
1791 yield from self.visit(child)
1793 def visit_suite(self, node: Node) -> Iterator[Line]:
1794 """Visit a suite."""
1795 if self.is_pyi and is_stub_suite(node):
1796 yield from self.visit(node.children[2])
1798 yield from self.visit_default(node)
1800 def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
1801 """Visit a statement without nested statements."""
1802 is_suite_like = node.parent and node.parent.type in STATEMENT
1804 if self.is_pyi and is_stub_body(node):
1805 yield from self.visit_default(node)
1807 yield from self.line(+1)
1808 yield from self.visit_default(node)
1809 yield from self.line(-1)
1812 if not self.is_pyi or not node.parent or not is_stub_suite(node.parent):
1813 yield from self.line()
1814 yield from self.visit_default(node)
1816 def visit_async_stmt(self, node: Node) -> Iterator[Line]:
1817 """Visit `async def`, `async for`, `async with`."""
1818 yield from self.line()
1820 children = iter(node.children)
1821 for child in children:
1822 yield from self.visit(child)
1824 if child.type == token.ASYNC:
1827 internal_stmt = next(children)
1828 for child in internal_stmt.children:
1829 yield from self.visit(child)
1831 def visit_decorators(self, node: Node) -> Iterator[Line]:
1832 """Visit decorators."""
1833 for child in node.children:
1834 yield from self.line()
1835 yield from self.visit(child)
1837 def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
1838 """Remove a semicolon and put the other statement on a separate line."""
1839 yield from self.line()
1841 def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
1842 """End of file. Process outstanding comments and end with a newline."""
1843 yield from self.visit_default(leaf)
1844 yield from self.line()
1846 def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
1847 if not self.current_line.bracket_tracker.any_open_brackets():
1848 yield from self.line()
1849 yield from self.visit_default(leaf)
1851 def visit_factor(self, node: Node) -> Iterator[Line]:
1852 """Force parentheses between a unary op and a binary power:
1854 -2 ** 8 -> -(2 ** 8)
1856 _operator, operand = node.children
1858 operand.type == syms.power
1859 and len(operand.children) == 3
1860 and operand.children[1].type == token.DOUBLESTAR
1862 lpar = Leaf(token.LPAR, "(")
1863 rpar = Leaf(token.RPAR, ")")
1864 index = operand.remove() or 0
1865 node.insert_child(index, Node(syms.atom, [lpar, operand, rpar]))
1866 yield from self.visit_default(node)
1868 def __post_init__(self) -> None:
1869 """You are in a twisty little maze of passages."""
1872 self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
1873 self.visit_if_stmt = partial(
1874 v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
1876 self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
1877 self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
1878 self.visit_try_stmt = partial(
1879 v, keywords={"try", "except", "else", "finally"}, parens=Ø
1881 self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
1882 self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
1883 self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
1884 self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
1885 self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
1886 self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
1887 self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
1888 self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
1889 self.visit_async_funcdef = self.visit_async_stmt
1890 self.visit_decorated = self.visit_decorators
1893 IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist}
1894 BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE}
1895 OPENING_BRACKETS = set(BRACKET.keys())
1896 CLOSING_BRACKETS = set(BRACKET.values())
1897 BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
1898 ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT}
1901 def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901
1902 """Return whitespace prefix if needed for the given `leaf`.
1904 `complex_subscript` signals whether the given leaf is part of a subscription
1905 which has non-trivial arguments, like arithmetic expressions or function calls.
1913 if t in ALWAYS_NO_SPACE:
1916 if t == token.COMMENT:
1919 assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
1920 if t == token.COLON and p.type not in {
1927 prev = leaf.prev_sibling
1929 prevp = preceding_leaf(p)
1930 if not prevp or prevp.type in OPENING_BRACKETS:
1933 if t == token.COLON:
1934 if prevp.type == token.COLON:
1937 elif prevp.type != token.COMMA and not complex_subscript:
1942 if prevp.type == token.EQUAL:
1944 if prevp.parent.type in {
1952 elif prevp.parent.type == syms.typedargslist:
1953 # A bit hacky: if the equal sign has whitespace, it means we
1954 # previously found it's a typed argument. So, we're using
1958 elif prevp.type in VARARGS_SPECIALS:
1959 if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS):
1962 elif prevp.type == token.COLON:
1963 if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
1964 return SPACE if complex_subscript else NO
1968 and prevp.parent.type == syms.factor
1969 and prevp.type in MATH_OPERATORS
1974 prevp.type == token.RIGHTSHIFT
1976 and prevp.parent.type == syms.shift_expr
1977 and prevp.prev_sibling
1978 and prevp.prev_sibling.type == token.NAME
1979 and prevp.prev_sibling.value == "print" # type: ignore
1981 # Python 2 print chevron
1984 elif prev.type in OPENING_BRACKETS:
1987 if p.type in {syms.parameters, syms.arglist}:
1988 # untyped function signatures or calls
1989 if not prev or prev.type != token.COMMA:
1992 elif p.type == syms.varargslist:
1994 if prev and prev.type != token.COMMA:
1997 elif p.type == syms.typedargslist:
1998 # typed function signatures
2002 if t == token.EQUAL:
2003 if prev.type != syms.tname:
2006 elif prev.type == token.EQUAL:
2007 # A bit hacky: if the equal sign has whitespace, it means we
2008 # previously found it's a typed argument. So, we're using that, too.
2011 elif prev.type != token.COMMA:
2014 elif p.type == syms.tname:
2017 prevp = preceding_leaf(p)
2018 if not prevp or prevp.type != token.COMMA:
2021 elif p.type == syms.trailer:
2022 # attributes and calls
2023 if t == token.LPAR or t == token.RPAR:
2028 prevp = preceding_leaf(p)
2029 if not prevp or prevp.type != token.NUMBER:
2032 elif t == token.LSQB:
2035 elif prev.type != token.COMMA:
2038 elif p.type == syms.argument:
2040 if t == token.EQUAL:
2044 prevp = preceding_leaf(p)
2045 if not prevp or prevp.type == token.LPAR:
2048 elif prev.type in {token.EQUAL} | VARARGS_SPECIALS:
2051 elif p.type == syms.decorator:
2055 elif p.type == syms.dotted_name:
2059 prevp = preceding_leaf(p)
2060 if not prevp or prevp.type == token.AT or prevp.type == token.DOT:
2063 elif p.type == syms.classdef:
2067 if prev and prev.type == token.LPAR:
2070 elif p.type in {syms.subscript, syms.sliceop}:
2073 assert p.parent is not None, "subscripts are always parented"
2074 if p.parent.type == syms.subscriptlist:
2079 elif not complex_subscript:
2082 elif p.type == syms.atom:
2083 if prev and t == token.DOT:
2084 # dots, but not the first one.
2087 elif p.type == syms.dictsetmaker:
2089 if prev and prev.type == token.DOUBLESTAR:
2092 elif p.type in {syms.factor, syms.star_expr}:
2095 prevp = preceding_leaf(p)
2096 if not prevp or prevp.type in OPENING_BRACKETS:
2099 prevp_parent = prevp.parent
2100 assert prevp_parent is not None
2101 if prevp.type == token.COLON and prevp_parent.type in {
2107 elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
2110 elif t in {token.NAME, token.NUMBER, token.STRING}:
2113 elif p.type == syms.import_from:
2115 if prev and prev.type == token.DOT:
2118 elif t == token.NAME:
2122 if prev and prev.type == token.DOT:
2125 elif p.type == syms.sliceop:
2131 def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
2132 """Return the first leaf that precedes `node`, if any."""
2134 res = node.prev_sibling
2136 if isinstance(res, Leaf):
2140 return list(res.leaves())[-1]
2149 def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]:
2150 """Return the child of `ancestor` that contains `descendant`."""
2151 node: Optional[LN] = descendant
2152 while node and node.parent != ancestor:
2157 def container_of(leaf: Leaf) -> LN:
2158 """Return `leaf` or one of its ancestors that is the topmost container of it.
2160 By "container" we mean a node where `leaf` is the very first child.
2162 same_prefix = leaf.prefix
2163 container: LN = leaf
2165 parent = container.parent
2169 if parent.children[0].prefix != same_prefix:
2172 if parent.type == syms.file_input:
2175 if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS:
2182 def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
2183 """Return the priority of the `leaf` delimiter, given a line break after it.
2185 The delimiter priorities returned here are from those delimiters that would
2186 cause a line break after themselves.
2188 Higher numbers are higher priority.
2190 if leaf.type == token.COMMA:
2191 return COMMA_PRIORITY
2196 def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
2197 """Return the priority of the `leaf` delimiter, given a line break before it.
2199 The delimiter priorities returned here are from those delimiters that would
2200 cause a line break before themselves.
2202 Higher numbers are higher priority.
2204 if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
2205 # * and ** might also be MATH_OPERATORS but in this case they are not.
2206 # Don't treat them as a delimiter.
2210 leaf.type == token.DOT
2212 and leaf.parent.type not in {syms.import_from, syms.dotted_name}
2213 and (previous is None or previous.type in CLOSING_BRACKETS)
2218 leaf.type in MATH_OPERATORS
2220 and leaf.parent.type not in {syms.factor, syms.star_expr}
2222 return MATH_PRIORITIES[leaf.type]
2224 if leaf.type in COMPARATORS:
2225 return COMPARATOR_PRIORITY
2228 leaf.type == token.STRING
2229 and previous is not None
2230 and previous.type == token.STRING
2232 return STRING_PRIORITY
2234 if leaf.type not in {token.NAME, token.ASYNC}:
2240 and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
2241 or leaf.type == token.ASYNC
2244 not isinstance(leaf.prev_sibling, Leaf)
2245 or leaf.prev_sibling.value != "async"
2247 return COMPREHENSION_PRIORITY
2252 and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
2254 return COMPREHENSION_PRIORITY
2256 if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test:
2257 return TERNARY_PRIORITY
2259 if leaf.value == "is":
2260 return COMPARATOR_PRIORITY
2265 and leaf.parent.type in {syms.comp_op, syms.comparison}
2267 previous is not None
2268 and previous.type == token.NAME
2269 and previous.value == "not"
2272 return COMPARATOR_PRIORITY
2277 and leaf.parent.type == syms.comp_op
2279 previous is not None
2280 and previous.type == token.NAME
2281 and previous.value == "is"
2284 return COMPARATOR_PRIORITY
2286 if leaf.value in LOGIC_OPERATORS and leaf.parent:
2287 return LOGIC_PRIORITY
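# A concrete reading of the priorities above: in "call(x, y) < limit or flag",
# the comma (scored by is_split_after_delimiter) gets COMMA_PRIORITY (18), "or"
# gets LOGIC_PRIORITY (14), and "<" gets COMPARATOR_PRIORITY (10), so, all else
# being equal, a delimiter split breaks at the comma before the boolean
# operator, and at the boolean operator before the comparison.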
2292 FMT_OFF = {"# fmt: off", "# fmt:off", "# yapf: disable"}
2293 FMT_ON = {"# fmt: on", "# fmt:on", "# yapf: enable"}
2296 def generate_comments(leaf: LN) -> Iterator[Leaf]:
2297 """Clean the prefix of the `leaf` and generate comments from it, if any.
2299 Comments in lib2to3 are shoved into the whitespace prefix. This happens
2300 in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation
2301 move because it does away with modifying the grammar to include all the
2302 possible places in which comments can be placed.
2304 The sad consequence for us though is that comments don't "belong" anywhere.
2305 This is why this function generates simple parentless Leaf objects for
2306 comments. We simply don't know what the correct parent should be.
2308 No matter though, we can live without this. We really only need to
2309 differentiate between inline and standalone comments. The latter don't
2310 share the line with any code.
2312 Inline comments are emitted as regular token.COMMENT leaves. Standalone
2313 comments are emitted with a fake STANDALONE_COMMENT token identifier.
2315 for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
2316 yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines)
2321 """Describes a piece of syntax that is a comment.
2323 It's not a :class:`blib2to3.pytree.Leaf` so that:
2325 * it can be cached (`Leaf` objects should not be reused more than once as
2326 they store their lineno, column, prefix, and parent information);
2327 * `newlines` and `consumed` fields are kept separate from the `value`. This
2328 simplifies handling of special marker comments like ``# fmt: off/on``.
2331 type: int # token.COMMENT or STANDALONE_COMMENT
2332 value: str # content of the comment
2333 newlines: int # how many newlines before the comment
2334 consumed: int # how many characters of the original leaf's prefix did we consume
2337 @lru_cache(maxsize=4096)
2338 def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
2339 """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
2340 result: List[ProtoComment] = []
2341 if not prefix or "#" not in prefix:
2347 for index, line in enumerate(prefix.split("\n")):
2348 consumed += len(line) + 1 # adding the length of the split '\n'
2349 line = line.lstrip()
2352 if not line.startswith("#"):
2353 # Escaped newlines outside of a comment are not really newlines at
2354 # all. We treat a single-line comment following an escaped newline
2355 # as a simple trailing comment.
2356 if line.endswith("\\"):
2360 if index == ignored_lines and not is_endmarker:
2361 comment_type = token.COMMENT # simple trailing comment
2363 comment_type = STANDALONE_COMMENT
2364 comment = make_comment(line)
2367 type=comment_type, value=comment, newlines=nlines, consumed=consumed
2374 def make_comment(content: str) -> str:
2375 """Return a consistently formatted comment from the given `content` string.
2377 All comments (except for "##", "#!", "#:", "#'", "#%%") should have a single
2378 space between the hash sign and the content.
2380 If `content` didn't start with a hash sign, one is provided.
2382 content = content.rstrip()
2386 if content[0] == "#":
2387 content = content[1:]
2388 if content and content[0] not in " !:#'%":
2389 content = " " + content
2390 return "#" + content
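# Editor's note: a hedged, doctest-style sketch (not part of the original source)
# of the normalization performed by `make_comment`:
#
#     >>> make_comment("comment")
#     '# comment'
#     >>> make_comment("#comment   ")
#     '# comment'
#     >>> make_comment("#!/usr/bin/env python")   # shebang-style prefixes are kept as-is
#     '#!/usr/bin/env python'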
2396 inner: bool = False,
2397 features: Collection[Feature] = (),
2398 ) -> Iterator[Line]:
2399 """Split a `line` into potentially many lines.
2401 They should fit in the allotted `line_length` but might not be able to.
2402 `inner` signifies that there was a pair of brackets somewhere around the
2403 current `line`, possibly transitively. This means we can fall back to splitting
2404 by delimiters if the LHS/RHS don't yield any results.
2406 `features` are syntactical features that may be used in the output.
2412 line_str = str(line).strip("\n")
2415 not line.contains_uncollapsable_type_comments()
2416 and not line.should_explode
2417 and not line.is_collection_with_optional_trailing_comma
2419 is_line_short_enough(line, line_length=line_length, line_str=line_str)
2420 or line.contains_unsplittable_type_ignore()
2426 split_funcs: List[SplitFunc]
2428 split_funcs = [left_hand_split]
2431 def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]:
2432 for omit in generate_trailers_to_omit(line, line_length):
2433 lines = list(right_hand_split(line, line_length, features, omit=omit))
2434 if is_line_short_enough(lines[0], line_length=line_length):
2438 # All splits failed, best effort split with no omits.
2439 # This mostly happens to multiline strings that are by definition
2440 # reported as not fitting a single line.
2441 # line_length=1 here was historically a bug that somehow became a feature.
2442 # See #762 and #781 for the full story.
2443 yield from right_hand_split(line, line_length=1, features=features)
2445 if line.inside_brackets:
2446 split_funcs = [delimiter_split, standalone_comment_split, rhs]
2449 for split_func in split_funcs:
2450 # We are accumulating lines in `result` because we might want to abort
2451 # mission and return the original line in the end, or attempt a different
2453 result: List[Line] = []
2455 for l in split_func(line, features):
2456 if str(l).strip("\n") == line_str:
2457 raise CannotSplit("Split function returned an unchanged result")
2461 l, line_length=line_length, inner=True, features=features
2475 def left_hand_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
2476 """Split line into many lines, starting with the first matching bracket pair.
2478 Note: this usually looks weird; only use this for function definitions.
2479 Prefer RHS otherwise. This is why this function is not symmetrical with
2480 :func:`right_hand_split` which also handles optional parentheses.
2482 tail_leaves: List[Leaf] = []
2483 body_leaves: List[Leaf] = []
2484 head_leaves: List[Leaf] = []
2485 current_leaves = head_leaves
2486 matching_bracket: Optional[Leaf] = None
2487 for leaf in line.leaves:
2489 current_leaves is body_leaves
2490 and leaf.type in CLOSING_BRACKETS
2491 and leaf.opening_bracket is matching_bracket
2493 current_leaves = tail_leaves if body_leaves else head_leaves
2494 current_leaves.append(leaf)
2495 if current_leaves is head_leaves:
2496 if leaf.type in OPENING_BRACKETS:
2497 matching_bracket = leaf
2498 current_leaves = body_leaves
2499 if not matching_bracket:
2500 raise CannotSplit("No brackets found")
2502 head = bracket_split_build_line(head_leaves, line, matching_bracket)
2503 body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True)
2504 tail = bracket_split_build_line(tail_leaves, line, matching_bracket)
2505 bracket_split_succeeded_or_raise(head, body, tail)
2506 for result in (head, body, tail):
2511 def right_hand_split(
2514 features: Collection[Feature] = (),
2515 omit: Collection[LeafID] = (),
2516 ) -> Iterator[Line]:
2517 """Split line into many lines, starting with the last matching bracket pair.
2519 If the split was by optional parentheses, attempt splitting without them, too.
2520 `omit` is a collection of closing bracket IDs that shouldn't be considered for
2523 Note: running this function modifies `bracket_depth` on the leaves of `line`.
2525 tail_leaves: List[Leaf] = []
2526 body_leaves: List[Leaf] = []
2527 head_leaves: List[Leaf] = []
2528 current_leaves = tail_leaves
2529 opening_bracket: Optional[Leaf] = None
2530 closing_bracket: Optional[Leaf] = None
2531 for leaf in reversed(line.leaves):
2532 if current_leaves is body_leaves:
2533 if leaf is opening_bracket:
2534 current_leaves = head_leaves if body_leaves else tail_leaves
2535 current_leaves.append(leaf)
2536 if current_leaves is tail_leaves:
2537 if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
2538 opening_bracket = leaf.opening_bracket
2539 closing_bracket = leaf
2540 current_leaves = body_leaves
2541 if not (opening_bracket and closing_bracket and head_leaves):
2542 # If there is no opening or closing bracket, that means the split failed and
2543 # all content is in the tail. Otherwise, if `head_leaves` are empty, it means
2544 # the matching `opening_bracket` wasn't available on `line` anymore.
2545 raise CannotSplit("No brackets found")
2547 tail_leaves.reverse()
2548 body_leaves.reverse()
2549 head_leaves.reverse()
2550 head = bracket_split_build_line(head_leaves, line, opening_bracket)
2551 body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True)
2552 tail = bracket_split_build_line(tail_leaves, line, opening_bracket)
2553 bracket_split_succeeded_or_raise(head, body, tail)
2555 # the body shouldn't be exploded
2556 not body.should_explode
2557 # the opening bracket is an optional paren
2558 and opening_bracket.type == token.LPAR
2559 and not opening_bracket.value
2560 # the closing bracket is an optional paren
2561 and closing_bracket.type == token.RPAR
2562 and not closing_bracket.value
2563 # it's not an import (optional parens are the only thing we can split on
2564 # in this case; attempting a split without them is a waste of time)
2565 and not line.is_import
2566 # there are no standalone comments in the body
2567 and not body.contains_standalone_comments(0)
2568 # and we can actually remove the parens
2569 and can_omit_invisible_parens(body, line_length)
2571 omit = {id(closing_bracket), *omit}
2573 yield from right_hand_split(line, line_length, features=features, omit=omit)
2579 or is_line_short_enough(body, line_length=line_length)
2582 "Splitting failed, body is still too long and can't be split."
2585 elif head.contains_multiline_strings() or tail.contains_multiline_strings():
2587 "The current optional pair of parentheses is bound to fail to "
2588 "satisfy the splitting algorithm because the head or the tail "
2589 "contains multiline strings which by definition never fit one "
2593 ensure_visible(opening_bracket)
2594 ensure_visible(closing_bracket)
2595 for result in (head, body, tail):
2600 def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
2601 """Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
2603 Do nothing otherwise.
2605 A left- or right-hand split is based on a pair of brackets. Content before
2606 (and including) the opening bracket is left on one line, content inside the
2607 brackets is put on a separate line, and finally content starting with and
2608 following the closing bracket is put on a separate line.
2610 Those are called `head`, `body`, and `tail`, respectively. If the split
2611 produced the same line (all content in `head`) or ended up with an empty `body`
2612 and the `tail` is just the closing bracket, then it's considered failed.
2614 tail_len = len(str(tail).strip())
2617 raise CannotSplit("Splitting brackets produced the same line")
2621 f"Splitting brackets on an empty body to save "
2622 f"{tail_len} characters is not worth it"
2626 def bracket_split_build_line(
2627 leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False
2629 """Return a new line with given `leaves` and respective comments from `original`.
2631 If `is_body` is True, the result line is one-indented inside brackets and as such
2632 has its first leaf's prefix normalized and a trailing comma added when expected.
2634 result = Line(depth=original.depth)
2636 result.inside_brackets = True
2639 # Since body is a new indent level, remove spurious leading whitespace.
2640 normalize_prefix(leaves[0], inside_brackets=True)
2641 # Ensure a trailing comma for imports and standalone function arguments, but
2642 # be careful not to add one after any comments or within type annotations.
2645 and opening_bracket.value == "("
2646 and not any(l.type == token.COMMA for l in leaves)
2649 if original.is_import or no_commas:
2650 for i in range(len(leaves) - 1, -1, -1):
2651 if leaves[i].type == STANDALONE_COMMENT:
2654 if leaves[i].type != token.COMMA:
2655 leaves.insert(i + 1, Leaf(token.COMMA, ","))
2660 result.append(leaf, preformatted=True)
2661 for comment_after in original.comments_after(leaf):
2662 result.append(comment_after, preformatted=True)
2664 result.should_explode = should_explode(result, opening_bracket)
2668 def dont_increase_indentation(split_func: SplitFunc) -> SplitFunc:
2669 """Normalize prefix of the first leaf in every line returned by `split_func`.
2671 This is a decorator over relevant split functions.
2675 def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
2676 for l in split_func(line, features):
2677 normalize_prefix(l.leaves[0], inside_brackets=True)
2680 return split_wrapper
2683 @dont_increase_indentation
2684 def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
2685 """Split according to delimiters of the highest priority.
2687 If the appropriate Features are given, the split will add trailing commas
2688 also in function signatures and calls that contain `*` and `**`.
2691 last_leaf = line.leaves[-1]
2693 raise CannotSplit("Line empty")
2695 bt = line.bracket_tracker
2697 delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
2699 raise CannotSplit("No delimiters found")
2701 if delimiter_priority == DOT_PRIORITY:
2702 if bt.delimiter_count_with_priority(delimiter_priority) == 1:
2703 raise CannotSplit("Splitting a single attribute from its owner looks wrong")
2705 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2706 lowest_depth = sys.maxsize
2707 trailing_comma_safe = True
2709 def append_to_line(leaf: Leaf) -> Iterator[Line]:
2710 """Append `leaf` to current line or to new line if appending impossible."""
2711 nonlocal current_line
2713 current_line.append_safe(leaf, preformatted=True)
2717 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2718 current_line.append(leaf)
2720 for leaf in line.leaves:
2721 yield from append_to_line(leaf)
2723 for comment_after in line.comments_after(leaf):
2724 yield from append_to_line(comment_after)
2726 lowest_depth = min(lowest_depth, leaf.bracket_depth)
2727 if leaf.bracket_depth == lowest_depth:
2728 if is_vararg(leaf, within={syms.typedargslist}):
2729 trailing_comma_safe = (
2730 trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features
2732 elif is_vararg(leaf, within={syms.arglist, syms.argument}):
2733 trailing_comma_safe = (
2734 trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features
2737 leaf_priority = bt.delimiters.get(id(leaf))
2738 if leaf_priority == delimiter_priority:
2741 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2745 and delimiter_priority == COMMA_PRIORITY
2746 and current_line.leaves[-1].type != token.COMMA
2747 and current_line.leaves[-1].type != STANDALONE_COMMENT
2749 current_line.append(Leaf(token.COMMA, ","))
2753 @dont_increase_indentation
2754 def standalone_comment_split(
2755 line: Line, features: Collection[Feature] = ()
2756 ) -> Iterator[Line]:
2757 """Split standalone comments from the rest of the line."""
2758 if not line.contains_standalone_comments(0):
2759 raise CannotSplit("Line does not have any standalone comments")
2761 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2763 def append_to_line(leaf: Leaf) -> Iterator[Line]:
2764 """Append `leaf` to current line or to new line if appending impossible."""
2765 nonlocal current_line
2767 current_line.append_safe(leaf, preformatted=True)
2771 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2772 current_line.append(leaf)
2774 for leaf in line.leaves:
2775 yield from append_to_line(leaf)
2777 for comment_after in line.comments_after(leaf):
2778 yield from append_to_line(comment_after)
2784 def is_import(leaf: Leaf) -> bool:
2785 """Return True if the given leaf starts an import statement."""
2792 (v == "import" and p and p.type == syms.import_name)
2793 or (v == "from" and p and p.type == syms.import_from)
2798 def is_type_comment(leaf: Leaf, suffix: str = "") -> bool:
2799 """Return True if the given leaf is a special comment.
2800 Only returns True for type comments for now."""
2803 return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix)
2806 def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
2807 """Leave existing extra newlines if not `inside_brackets`. Remove everything
2810 Note: don't use backslashes for formatting or you'll lose your voting rights.
2812 if not inside_brackets:
2813 spl = leaf.prefix.split("#")
2814 if "\\" not in spl[0]:
2815 nl_count = spl[-1].count("\n")
2818 leaf.prefix = "\n" * nl_count
2824 def normalize_string_prefix(leaf: Leaf, remove_u_prefix: bool = False) -> None:
2825 """Make all string prefixes lowercase.
2827 If remove_u_prefix is given, also removes any u prefix from the string.
2829 Note: Mutates its argument.
2831 match = re.match(r"^([furbFURB]*)(.*)$", leaf.value, re.DOTALL)
2832 assert match is not None, f"failed to match string {leaf.value!r}"
2833 orig_prefix = match.group(1)
2834 new_prefix = orig_prefix.lower()
2836 new_prefix = new_prefix.replace("u", "")
2837 leaf.value = f"{new_prefix}{match.group(2)}"
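# Editor's note: an illustrative sketch (not part of the original source) of the
# in-place effect of `normalize_string_prefix` on a string leaf:
#
#     >>> leaf = Leaf(token.STRING, 'F"{x}"')
#     >>> normalize_string_prefix(leaf)
#     >>> leaf.value
#     'f"{x}"'
#     >>> leaf = Leaf(token.STRING, "u'text'")
#     >>> normalize_string_prefix(leaf, remove_u_prefix=True)
#     >>> leaf.value
#     "'text'"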
2840 def normalize_string_quotes(leaf: Leaf) -> None:
2841 """Prefer double quotes but only if it doesn't cause more escaping.
2843 Adds or removes backslashes as appropriate. Doesn't parse and fix
2844 strings nested in f-strings (yet).
2846 Note: Mutates its argument.
2848 value = leaf.value.lstrip("furbFURB")
2849 if value[:3] == '"""':
2852 elif value[:3] == "'''":
2855 elif value[0] == '"':
2861 first_quote_pos = leaf.value.find(orig_quote)
2862 if first_quote_pos == -1:
2863 return # There's an internal error
2865 prefix = leaf.value[:first_quote_pos]
2866 unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
2867 escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
2868 escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}")
2869 body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)]
2870 if "r" in prefix.casefold():
2871 if unescaped_new_quote.search(body):
2872 # There's at least one unescaped new_quote in this raw string
2873 # so converting is impossible
2876 # Do not introduce or remove backslashes in raw strings
2879 # remove unnecessary escapes
2880 new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
2881 if body != new_body:
2882 # Consider the string without unnecessary escapes as the original
2884 leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}"
2885 new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
2886 new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)
2887 if "f" in prefix.casefold():
2888 matches = re.findall(
2890 (?:[^{]|^)\{ # start of the string or a non-{ followed by a single {
2891 ([^{].*?) # contents of the brackets except if begins with {{
2892 \}(?:[^}]|$) # A } followed by end of the string or a non-}
2899 # Do not introduce backslashes in interpolated expressions
2902 if new_quote == '"""' and new_body[-1:] == '"':
2904 new_body = new_body[:-1] + '\\"'
2905 orig_escape_count = body.count("\\")
2906 new_escape_count = new_body.count("\\")
2907 if new_escape_count > orig_escape_count:
2908 return # Do not introduce more escaping
2910 if new_escape_count == orig_escape_count and orig_quote == '"':
2911 return # Prefer double quotes
2913 leaf.value = f"{prefix}{new_quote}{new_body}{new_quote}"
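# Editor's note: an illustrative sketch (not part of the original source) of the
# quote preference applied above, shown as before -> after on the leaf value:
#
#     'no escaping needed'   ->  "no escaping needed"   (double quotes preferred)
#     'he said "hi"'         ->  'he said "hi"'         (unchanged: switching would add escapes)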
2916 def normalize_numeric_literal(leaf: Leaf) -> None:
2917 """Normalizes numeric (float, int, and complex) literals.
2919 All letters used in the representation are normalized to lowercase (except
2920 in Python 2 long literals).
2922 text = leaf.value.lower()
2923 if text.startswith(("0o", "0b")):
2924 # Leave octal and binary literals alone.
2926 elif text.startswith("0x"):
2927 # Change hex literals to upper case.
2928 before, after = text[:2], text[2:]
2929 text = f"{before}{after.upper()}"
2931 before, after = text.split("e")
2933 if after.startswith("-"):
2936 elif after.startswith("+"):
2938 before = format_float_or_int_string(before)
2939 text = f"{before}e{sign}{after}"
2940 elif text.endswith(("j", "l")):
2943 # Capitalize in "2L" because "l" looks too similar to "1".
2946 text = f"{format_float_or_int_string(number)}{suffix}"
2948 text = format_float_or_int_string(text)
2952 def format_float_or_int_string(text: str) -> str:
2953 """Formats a float string like "1.0"."""
2957 before, after = text.split(".")
2958 return f"{before or 0}.{after or 0}"
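# Editor's note: an illustrative sketch (not part of the original source) of the
# numeric-literal normalization above, shown as before -> after on the leaf value:
#
#     0XABC  ->  0xABC        10.   ->  10.0
#     1E5    ->  1e5          .5    ->  0.5
#     1e+5   ->  1e5          5J    ->  5j
#     10l    ->  10L          (Python 2 long: suffix capitalized, "l" reads like "1")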
2961 def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
2962 """Make existing optional parentheses invisible or create new ones.
2964 `parens_after` is a set of string leaf values immediately after which parens
2967 Standardizes on visible parentheses for single-element tuples, and keeps
2968 existing visible parentheses for other tuples and generator expressions.
2970 for pc in list_comments(node.prefix, is_endmarker=False):
2971 if pc.value in FMT_OFF:
2972 # This `node` has a prefix with `# fmt: off`, don't mess with parens.
2975 for index, child in enumerate(list(node.children)):
2976 # Add parentheses around long tuple unpacking in assignments.
2979 and isinstance(child, Node)
2980 and child.type == syms.testlist_star_expr
2985 if is_walrus_assignment(child):
2988 if child.type == syms.atom:
2989 if maybe_make_parens_invisible_in_atom(child, parent=node):
2990 wrap_in_parentheses(node, child, visible=False)
2991 elif is_one_tuple(child):
2992 wrap_in_parentheses(node, child, visible=True)
2993 elif node.type == syms.import_from:
2994 # "import from" nodes store parentheses directly as part of
2996 if child.type == token.LPAR:
2997 # make parentheses invisible
2998 child.value = "" # type: ignore
2999 node.children[-1].value = "" # type: ignore
3000 elif child.type != token.STAR:
3001 # insert invisible parentheses
3002 node.insert_child(index, Leaf(token.LPAR, ""))
3003 node.append_child(Leaf(token.RPAR, ""))
3006 elif not (isinstance(child, Leaf) and is_multiline_string(child)):
3007 wrap_in_parentheses(node, child, visible=False)
3009 check_lpar = isinstance(child, Leaf) and child.value in parens_after
3012 def normalize_fmt_off(node: Node) -> None:
3013 """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
3016 try_again = convert_one_fmt_off_pair(node)
3019 def convert_one_fmt_off_pair(node: Node) -> bool:
3020 """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
3022 Returns True if a pair was converted.
3024 for leaf in node.leaves():
3025 previous_consumed = 0
3026 for comment in list_comments(leaf.prefix, is_endmarker=False):
3027 if comment.value in FMT_OFF:
3028 # We only want standalone comments. If there's no previous leaf or
3029 # the previous leaf is indentation, it's a standalone comment in
3031 if comment.type != STANDALONE_COMMENT:
3032 prev = preceding_leaf(leaf)
3033 if prev and prev.type not in WHITESPACE:
3036 ignored_nodes = list(generate_ignored_nodes(leaf))
3037 if not ignored_nodes:
3040 first = ignored_nodes[0] # Can be a container node with the `leaf`.
3041 parent = first.parent
3042 prefix = first.prefix
3043 first.prefix = prefix[comment.consumed :]
3045 comment.value + "\n" + "".join(str(n) for n in ignored_nodes)
3047 if hidden_value.endswith("\n"):
3048 # That happens when one of the `ignored_nodes` ended with a NEWLINE
3049 # leaf (possibly followed by a DEDENT).
3050 hidden_value = hidden_value[:-1]
3051 first_idx: Optional[int] = None
3052 for ignored in ignored_nodes:
3053 index = ignored.remove()
3054 if first_idx is None:
3056 assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
3057 assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
3058 parent.insert_child(
3063 prefix=prefix[:previous_consumed] + "\n" * comment.newlines,
3068 previous_consumed = comment.consumed
3073 def generate_ignored_nodes(leaf: Leaf) -> Iterator[LN]:
3074 """Starting from the container of `leaf`, generate all leaves until `# fmt: on`.
3076 Stops at the end of the block.
3078 container: Optional[LN] = container_of(leaf)
3079 while container is not None and container.type != token.ENDMARKER:
3081 for comment in list_comments(container.prefix, is_endmarker=False):
3082 if comment.value in FMT_ON:
3084 elif comment.value in FMT_OFF:
3091 container = container.next_sibling
3094 def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool:
3095 """If it's safe, make the parens in the atom `node` invisible, recursively.
3096 Additionally, remove repeated, adjacent invisible parens from the atom `node`
3097 as they are redundant.
3099 Returns whether the node should itself be wrapped in invisible parentheses.
3103 node.type != syms.atom
3104 or is_empty_tuple(node)
3105 or is_one_tuple(node)
3106 or (is_yield(node) and parent.type != syms.expr_stmt)
3107 or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
3111 first = node.children[0]
3112 last = node.children[-1]
3113 if first.type == token.LPAR and last.type == token.RPAR:
3114 middle = node.children[1]
3115 # make parentheses invisible
3116 first.value = "" # type: ignore
3117 last.value = "" # type: ignore
3118 maybe_make_parens_invisible_in_atom(middle, parent=parent)
3120 if is_atom_with_invisible_parens(middle):
3121 # Strip the invisible parens from `middle` by replacing
3122 # it with the child in-between the invisible parens
3123 middle.replace(middle.children[1])
3130 def is_atom_with_invisible_parens(node: LN) -> bool:
3131 """Given a `LN`, determines whether it's an atom `node` with invisible
3132 parens. Useful in de-duplicating and normalizing parens.
3134 if isinstance(node, Leaf) or node.type != syms.atom:
3137 first, last = node.children[0], node.children[-1]
3139 isinstance(first, Leaf)
3140 and first.type == token.LPAR
3141 and first.value == ""
3142 and isinstance(last, Leaf)
3143 and last.type == token.RPAR
3144 and last.value == ""
3148 def is_empty_tuple(node: LN) -> bool:
3149 """Return True if `node` holds an empty tuple."""
3151 node.type == syms.atom
3152 and len(node.children) == 2
3153 and node.children[0].type == token.LPAR
3154 and node.children[1].type == token.RPAR
3158 def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]:
3159 """Returns `wrapped` if `node` is of the shape ( wrapped ).
3161 Parentheses can be optional. Returns None otherwise
3162 if len(node.children) != 3:
3165 lpar, wrapped, rpar = node.children
3166 if not (lpar.type == token.LPAR and rpar.type == token.RPAR):
3172 def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None:
3173 """Wrap `child` in parentheses.
3175 This replaces `child` with an atom holding the parentheses and the old
3176 child. That requires moving the prefix.
3178 If `visible` is False, the leaves will be valueless (and thus invisible).
3180 lpar = Leaf(token.LPAR, "(" if visible else "")
3181 rpar = Leaf(token.RPAR, ")" if visible else "")
3182 prefix = child.prefix
3184 index = child.remove() or 0
3185 new_child = Node(syms.atom, [lpar, child, rpar])
3186 new_child.prefix = prefix
3187 parent.insert_child(index, new_child)
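# Editor's note: a sketch (not part of the original source) of the subtree produced
# by `wrap_in_parentheses(parent, child, visible=False)`:
#
#     Node(syms.atom, [Leaf(token.LPAR, ""), child, Leaf(token.RPAR, "")])
#
# The parens render as empty strings, so the output text is unchanged until
# `ensure_visible` later fills in their values where they are actually needed.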
3190 def is_one_tuple(node: LN) -> bool:
3191 """Return True if `node` holds a tuple with one element, with or without parens."""
3192 if node.type == syms.atom:
3193 gexp = unwrap_singleton_parenthesis(node)
3194 if gexp is None or gexp.type != syms.testlist_gexp:
3197 return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA
3200 node.type in IMPLICIT_TUPLE
3201 and len(node.children) == 2
3202 and node.children[1].type == token.COMMA
3206 def is_walrus_assignment(node: LN) -> bool:
3207 """Return True iff `node` is of the shape ( test := test )"""
3208 inner = unwrap_singleton_parenthesis(node)
3209 return inner is not None and inner.type == syms.namedexpr_test
3212 def is_yield(node: LN) -> bool:
3213 """Return True if `node` holds a `yield` or `yield from` expression."""
3214 if node.type == syms.yield_expr:
3217 if node.type == token.NAME and node.value == "yield": # type: ignore
3220 if node.type != syms.atom:
3223 if len(node.children) != 3:
3226 lpar, expr, rpar = node.children
3227 if lpar.type == token.LPAR and rpar.type == token.RPAR:
3228 return is_yield(expr)
3233 def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
3234 """Return True if `leaf` is a star or double star in a vararg or kwarg.
3236 If `within` includes VARARGS_PARENTS, this applies to function signatures.
3237 If `within` includes UNPACKING_PARENTS, it applies to right-hand side
3238 extended iterable unpacking (PEP 3132) and additional unpacking
3239 generalizations (PEP 448).
3241 if leaf.type not in VARARGS_SPECIALS or not leaf.parent:
3245 if p.type == syms.star_expr:
3246 # Star expressions are also used as assignment targets in extended
3247 # iterable unpacking (PEP 3132). See what its parent is instead.
3253 return p.type in within
3256 def is_multiline_string(leaf: Leaf) -> bool:
3257 """Return True if `leaf` is a multiline string that actually spans many lines."""
3258 value = leaf.value.lstrip("furbFURB")
3259 return value[:3] in {'"""', "'''"} and "\n" in value
3262 def is_stub_suite(node: Node) -> bool:
3263 """Return True if `node` is a suite with a stub body."""
3265 len(node.children) != 4
3266 or node.children[0].type != token.NEWLINE
3267 or node.children[1].type != token.INDENT
3268 or node.children[3].type != token.DEDENT
3272 return is_stub_body(node.children[2])
3275 def is_stub_body(node: LN) -> bool:
3276 """Return True if `node` is a simple statement containing an ellipsis."""
3277 if not isinstance(node, Node) or node.type != syms.simple_stmt:
3280 if len(node.children) != 2:
3283 child = node.children[0]
3285 child.type == syms.atom
3286 and len(child.children) == 3
3287 and all(leaf == Leaf(token.DOT, ".") for leaf in child.children)
3291 def max_delimiter_priority_in_atom(node: LN) -> Priority:
3292 """Return maximum delimiter priority inside `node`.
3294 This is specific to atoms with contents contained in a pair of parentheses.
3295 If `node` isn't an atom or there are no enclosing parentheses, returns 0.
3297 if node.type != syms.atom:
3300 first = node.children[0]
3301 last = node.children[-1]
3302 if not (first.type == token.LPAR and last.type == token.RPAR):
3305 bt = BracketTracker()
3306 for c in node.children[1:-1]:
3307 if isinstance(c, Leaf):
3310 for leaf in c.leaves():
3313 return bt.max_delimiter_priority()
3319 def ensure_visible(leaf: Leaf) -> None:
3320 """Make sure parentheses are visible.
3322 They could be invisible as part of some statements (see
3323 :func:`normalize_invisible_parens` and :func:`visit_import_from`).
3325 if leaf.type == token.LPAR:
3327 elif leaf.type == token.RPAR:
3331 def should_explode(line: Line, opening_bracket: Leaf) -> bool:
3332 """Should `line` immediately be split with `delimiter_split()` after RHS?"""
3335 opening_bracket.parent
3336 and opening_bracket.parent.type in {syms.atom, syms.import_from}
3337 and opening_bracket.value in "[{("
3342 last_leaf = line.leaves[-1]
3343 exclude = {id(last_leaf)} if last_leaf.type == token.COMMA else set()
3344 max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
3345 except (IndexError, ValueError):
3348 return max_priority == COMMA_PRIORITY
3351 def get_features_used(node: Node) -> Set[Feature]:
3352 """Return a set of (relatively) new Python features used in this file.
3354 Currently looking for:
3356 - underscores in numeric literals;
3357 - trailing commas after * or ** in function signatures and calls;
3358 - positional only arguments in function signatures and lambdas;
3360 features: Set[Feature] = set()
3361 for n in node.pre_order():
3362 if n.type == token.STRING:
3363 value_head = n.value[:2] # type: ignore
3364 if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}:
3365 features.add(Feature.F_STRINGS)
3367 elif n.type == token.NUMBER:
3368 if "_" in n.value: # type: ignore
3369 features.add(Feature.NUMERIC_UNDERSCORES)
3371 elif n.type == token.SLASH:
3372 if n.parent and n.parent.type in {syms.typedargslist, syms.arglist}:
3373 features.add(Feature.POS_ONLY_ARGUMENTS)
3375 elif n.type == token.COLONEQUAL:
3376 features.add(Feature.ASSIGNMENT_EXPRESSIONS)
3379 n.type in {syms.typedargslist, syms.arglist}
3381 and n.children[-1].type == token.COMMA
3383 if n.type == syms.typedargslist:
3384 feature = Feature.TRAILING_COMMA_IN_DEF
3386 feature = Feature.TRAILING_COMMA_IN_CALL
3388 for ch in n.children:
3389 if ch.type in STARS:
3390 features.add(feature)
3392 if ch.type == syms.argument:
3393 for argch in ch.children:
3394 if argch.type in STARS:
3395 features.add(feature)
3400 def detect_target_versions(node: Node) -> Set[TargetVersion]:
3401 """Detect the version to target based on the nodes used."""
3402 features = get_features_used(node)
3404 version for version in TargetVersion if features <= VERSION_TO_FEATURES[version]
3408 def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
3409 """Generate sets of closing bracket IDs that should be omitted in a RHS.
3411 Brackets can be omitted if the entire trailer up to and including
3412 a preceding closing bracket fits in one line.
3414 Yielded sets are cumulative (contain results of previous yields, too). First
3418 omit: Set[LeafID] = set()
3421 length = 4 * line.depth
3422 opening_bracket: Optional[Leaf] = None
3423 closing_bracket: Optional[Leaf] = None
3424 inner_brackets: Set[LeafID] = set()
3425 for index, leaf, leaf_length in enumerate_with_length(line, reversed=True):
3426 length += leaf_length
3427 if length > line_length:
3430 has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
3431 if leaf.type == STANDALONE_COMMENT or has_inline_comment:
3435 if leaf is opening_bracket:
3436 opening_bracket = None
3437 elif leaf.type in CLOSING_BRACKETS:
3438 inner_brackets.add(id(leaf))
3439 elif leaf.type in CLOSING_BRACKETS:
3440 if index > 0 and line.leaves[index - 1].type in OPENING_BRACKETS:
3441 # Empty brackets would fail a split so treat them as "inner"
3442 # brackets (e.g. only add them to the `omit` set if another
3443 # pair of brackets was good enough).
3444 inner_brackets.add(id(leaf))
3448 omit.add(id(closing_bracket))
3449 omit.update(inner_brackets)
3450 inner_brackets.clear()
3454 opening_bracket = leaf.opening_bracket
3455 closing_bracket = leaf
3458 def get_future_imports(node: Node) -> Set[str]:
3459 """Return a set of __future__ imports in the file."""
3460 imports: Set[str] = set()
3462 def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
3463 for child in children:
3464 if isinstance(child, Leaf):
3465 if child.type == token.NAME:
3468 elif child.type == syms.import_as_name:
3469 orig_name = child.children[0]
3470 assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports"
3471 assert orig_name.type == token.NAME, "Invalid syntax parsing imports"
3472 yield orig_name.value
3474 elif child.type == syms.import_as_names:
3475 yield from get_imports_from_children(child.children)
3478 raise AssertionError("Invalid syntax parsing imports")
3480 for child in node.children:
3481 if child.type != syms.simple_stmt:
3484 first_child = child.children[0]
3485 if isinstance(first_child, Leaf):
3486 # Continue looking if we see a docstring; otherwise stop.
3488 len(child.children) == 2
3489 and first_child.type == token.STRING
3490 and child.children[1].type == token.NEWLINE
3496 elif first_child.type == syms.import_from:
3497 module_name = first_child.children[1]
3498 if not isinstance(module_name, Leaf) or module_name.value != "__future__":
3501 imports |= set(get_imports_from_children(first_child.children[3:]))
3509 def get_gitignore(root: Path) -> PathSpec:
3510 """ Return a PathSpec matching gitignore content if present."""
3511 gitignore = root / ".gitignore"
3512 lines: List[str] = []
3513 if gitignore.is_file():
3514 with gitignore.open() as gf:
3515 lines = gf.readlines()
3516 return PathSpec.from_lines("gitwildmatch", lines)
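# Editor's note: a minimal usage sketch (not part of the original source) of the
# pathspec API used above; the patterns are hypothetical:
#
#     >>> spec = PathSpec.from_lines("gitwildmatch", ["build/", "*.pyc"])
#     >>> spec.match_file("build/lib/module.py")
#     True
#     >>> spec.match_file("src/module.py")
#     False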
3519 def gen_python_files_in_dir(
3522 include: Pattern[str],
3523 exclude: Pattern[str],
3525 gitignore: PathSpec,
3526 ) -> Iterator[Path]:
3527 """Generate all files under `path` whose paths are not excluded by the
3528 `exclude` regex, but are included by the `include` regex.
3530 Symbolic links pointing outside of the `root` directory are ignored.
3532 `report` is where output about exclusions goes.
3534 assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
3535 for child in path.iterdir():
3536 # First ignore files matching .gitignore
3537 if gitignore.match_file(child.as_posix()):
3538 report.path_ignored(child, "matches the .gitignore file content")
3541 # Then ignore with `exclude` option.
3543 normalized_path = "/" + child.resolve().relative_to(root).as_posix()
3544 except OSError as e:
3545 report.path_ignored(child, f"cannot be read because {e}")
3549 if child.is_symlink():
3550 report.path_ignored(
3551 child, f"is a symbolic link that points outside {root}"
3558 normalized_path += "/"
3560 exclude_match = exclude.search(normalized_path)
3561 if exclude_match and exclude_match.group(0):
3562 report.path_ignored(child, "matches the --exclude regular expression")
3566 yield from gen_python_files_in_dir(
3567 child, root, include, exclude, report, gitignore
3570 elif child.is_file():
3571 include_match = include.search(normalized_path)
3577 def find_project_root(srcs: Iterable[str]) -> Path:
3578 """Return a directory containing .git, .hg, or pyproject.toml.
3580 That directory can be one of the directories passed in `srcs` or their
3583 If no directory in the tree contains a marker that would specify it's the
3584 project root, the root of the file system is returned.
3587 return Path("/").resolve()
3589 common_base = min(Path(src).resolve() for src in srcs)
3590 if common_base.is_dir():
3591 # Append a fake file so `parents` below returns `common_base` itself, too.
3592 common_base /= "fake-file"
3593 for directory in common_base.parents:
3594 if (directory / ".git").exists():
3597 if (directory / ".hg").is_dir():
3600 if (directory / "pyproject.toml").is_file():
3608 """Provides a reformatting counter. Can be rendered with `str(report)`."""
3613 verbose: bool = False
3614 change_count: int = 0
3616 failure_count: int = 0
3618 def done(self, src: Path, changed: Changed) -> None:
3619 """Increment the counter for successful reformatting. Write out a message."""
3620 if changed is Changed.YES:
3621 reformatted = "would reformat" if self.check or self.diff else "reformatted"
3622 if self.verbose or not self.quiet:
3623 out(f"{reformatted} {src}")
3624 self.change_count += 1
3627 if changed is Changed.NO:
3628 msg = f"{src} already well formatted, good job."
3630 msg = f"{src} wasn't modified on disk since last run."
3631 out(msg, bold=False)
3632 self.same_count += 1
3634 def failed(self, src: Path, message: str) -> None:
3635 """Increment the counter for failed reformatting. Write out a message."""
3636 err(f"error: cannot format {src}: {message}")
3637 self.failure_count += 1
3639 def path_ignored(self, path: Path, message: str) -> None:
3641 out(f"{path} ignored: {message}", bold=False)
3644 def return_code(self) -> int:
3645 """Return the exit code that the app should use.
3647 This considers the current state of changed files and failures:
3648 - if there were any failures, return 123;
3649 - if any files were changed and --check is being used, return 1;
3650 - otherwise return 0.
3652 # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with
3653 # 126 we have special return codes reserved by the shell.
3654 if self.failure_count:
3657 elif self.change_count and self.check:
3662 def __str__(self) -> str:
3663 """Render a color report of the current state.
3665 Use `click.unstyle` to remove colors.
3667 if self.check or self.diff:
3668 reformatted = "would be reformatted"
3669 unchanged = "would be left unchanged"
3670 failed = "would fail to reformat"
3672 reformatted = "reformatted"
3673 unchanged = "left unchanged"
3674 failed = "failed to reformat"
3676 if self.change_count:
3677 s = "s" if self.change_count > 1 else ""
3679 click.style(f"{self.change_count} file{s} {reformatted}", bold=True)
3682 s = "s" if self.same_count > 1 else ""
3683 report.append(f"{self.same_count} file{s} {unchanged}")
3684 if self.failure_count:
3685 s = "s" if self.failure_count > 1 else ""
3687 click.style(f"{self.failure_count} file{s} {failed}", fg="red")
3689 return ", ".join(report) + "."
3692 def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]:
3693 filename = "<unknown>"
3694 if sys.version_info >= (3, 8):
3695 # TODO: support Python 4+ ;)
3696 for minor_version in range(sys.version_info[1], 4, -1):
3698 return ast.parse(src, filename, feature_version=(3, minor_version))
3702 for feature_version in (7, 6):
3704 return ast3.parse(src, filename, feature_version=feature_version)
3708 return ast27.parse(src)
3711 def _fixup_ast_constants(
3712 node: Union[ast.AST, ast3.AST, ast27.AST]
3713 ) -> Union[ast.AST, ast3.AST, ast27.AST]:
3714 """Map ast nodes deprecated in 3.8 to Constant."""
3715 if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)):
3716 return ast.Constant(value=node.s)
3718 if isinstance(node, (ast.Num, ast3.Num, ast27.Num)):
3719 return ast.Constant(value=node.n)
3721 if isinstance(node, (ast.NameConstant, ast3.NameConstant)):
3722 return ast.Constant(value=node.value)
3727 def assert_equivalent(src: str, dst: str) -> None:
3728 """Raise AssertionError if `src` and `dst` aren't equivalent."""
3730 def _v(node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0) -> Iterator[str]:
3731 """Simple visitor generating strings to compare ASTs by content."""
3733 node = _fixup_ast_constants(node)
3735 yield f"{' ' * depth}{node.__class__.__name__}("
3737 for field in sorted(node._fields): # noqa: F402
3738 # TypeIgnore has only one field 'lineno' which breaks this comparison
3739 type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore)
3740 if sys.version_info >= (3, 8):
3741 type_ignore_classes += (ast.TypeIgnore,)
3742 if isinstance(node, type_ignore_classes):
3746 value = getattr(node, field)
3747 except AttributeError:
3750 yield f"{' ' * (depth+1)}{field}="
3752 if isinstance(value, list):
3754 # Ignore nested tuples within del statements, because we may insert
3755 # parentheses and they change the AST.
3758 and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete))
3759 and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple))
3761 for item in item.elts:
3762 yield from _v(item, depth + 2)
3764 elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)):
3765 yield from _v(item, depth + 2)
3767 elif isinstance(value, (ast.AST, ast3.AST, ast27.AST)):
3768 yield from _v(value, depth + 2)
3771 yield f"{' ' * (depth+2)}{value!r}, # {value.__class__.__name__}"
3773 yield f"{' ' * depth}) # /{node.__class__.__name__}"
3776 src_ast = parse_ast(src)
3777 except Exception as exc:
3778 raise AssertionError(
3779 f"cannot use --safe with this file; failed to parse source file. "
3780 f"AST error message: {exc}"
3784 dst_ast = parse_ast(dst)
3785 except Exception as exc:
3786 log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
3787 raise AssertionError(
3788 f"INTERNAL ERROR: Black produced invalid code: {exc}. "
3789 f"Please report a bug on https://github.com/psf/black/issues. "
3790 f"This invalid output might be helpful: {log}"
3793 src_ast_str = "\n".join(_v(src_ast))
3794 dst_ast_str = "\n".join(_v(dst_ast))
3795 if src_ast_str != dst_ast_str:
3796 log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
3797 raise AssertionError(
3798 f"INTERNAL ERROR: Black produced code that is not equivalent to "
3800 f"Please report a bug on https://github.com/psf/black/issues. "
3801 f"This diff might be helpful: {log}"
3805 def assert_stable(src: str, dst: str, mode: FileMode) -> None:
3806 """Raise AssertionError if `dst` reformats differently the second time."""
3807 newdst = format_str(dst, mode=mode)
3810 diff(src, dst, "source", "first pass"),
3811 diff(dst, newdst, "first pass", "second pass"),
3813 raise AssertionError(
3814 f"INTERNAL ERROR: Black produced different code on the second pass "
3815 f"of the formatter. "
3816 f"Please report a bug on https://github.com/psf/black/issues. "
3817 f"This diff might be helpful: {log}"
3821 @mypyc_attr(patchable=True)
3822 def dump_to_file(*output: str) -> str:
3823 """Dump `output` to a temporary file. Return path to the file."""
3824 with tempfile.NamedTemporaryFile(
3825 mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
3827 for lines in output:
3829 if lines and lines[-1] != "\n":
3835 def nullcontext() -> Iterator[None]:
3836 """Return an empty context manager.
3838 To be used like `contextlib.nullcontext`, which is only available in Python 3.7+.
3843 def diff(a: str, b: str, a_name: str, b_name: str) -> str:
3844 """Return a unified diff string between strings `a` and `b`."""
3847 a_lines = [line + "\n" for line in a.split("\n")]
3848 b_lines = [line + "\n" for line in b.split("\n")]
3850 difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5)
3854 def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None:
3855 """asyncio signal handler that cancels all `tasks` and reports to stderr."""
3861 def shutdown(loop: asyncio.AbstractEventLoop) -> None:
3862 """Cancel all pending tasks on `loop`, wait for them, and close the loop."""
3864 if sys.version_info[:2] >= (3, 7):
3865 all_tasks = asyncio.all_tasks
3867 all_tasks = asyncio.Task.all_tasks
3868 # This part is borrowed from asyncio/runners.py in Python 3.7b2.
3869 to_cancel = [task for task in all_tasks(loop) if not task.done()]
3873 for task in to_cancel:
3875 loop.run_until_complete(
3876 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)
3879 # `concurrent.futures.Future` objects cannot be cancelled once they
3880 # are already running. There might be some when the `shutdown()` happened.
3881 # Silence their logger's spew about the event loop being closed.
3882 cf_logger = logging.getLogger("concurrent.futures")
3883 cf_logger.setLevel(logging.CRITICAL)
3887 def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
3888 """Replace `regex` with `replacement` twice on `original`.
3890 This is used by string normalization to perform replaces on
3891 overlapping matches.
3893 return regex.sub(replacement, regex.sub(replacement, original))
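# Editor's note: an illustrative sketch (not part of the original source) of why a
# single `re.sub` pass is not enough when matches overlap:
#
#     >>> pat = re.compile(r"x,x")
#     >>> pat.sub("x;x", "x,x,x,x")           # one pass misses the overlapping match
#     'x;x,x;x'
#     >>> sub_twice(pat, "x;x", "x,x,x,x")    # the second pass picks it up
#     'x;x;x;x'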
3896 def re_compile_maybe_verbose(regex: str) -> Pattern[str]:
3897 """Compile a regular expression string in `regex`.
3899 If it contains newlines, use verbose mode.
3902 regex = "(?x)" + regex
3903 compiled: Pattern[str] = re.compile(regex)
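# Editor's note: an illustrative sketch (not part of the original source). A regex
# written across several lines (e.g. a multiline --exclude value in pyproject.toml)
# gets the verbose flag prepended; a single-line regex is compiled unchanged:
#
#     >>> re_compile_maybe_verbose(r"\.pyi?$").pattern
#     '\\.pyi?$'
#     >>> re_compile_maybe_verbose("(\n  \\.pyi?$\n)").pattern.startswith("(?x)")
#     True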
3907 def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
3908 """Like `reversed(enumerate(sequence))` if that were possible."""
3909 index = len(sequence) - 1
3910 for element in reversed(sequence):
3911 yield (index, element)
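# Editor's note: a doctest-style sketch (not part of the original source):
#
#     >>> list(enumerate_reversed("abc"))
#     [(2, 'c'), (1, 'b'), (0, 'a')]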
3915 def enumerate_with_length(
3916 line: Line, reversed: bool = False
3917 ) -> Iterator[Tuple[Index, Leaf, int]]:
3918 """Return an enumeration of leaves with their length.
3920 Stops prematurely on multiline strings and standalone comments.
3923 Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
3924 enumerate_reversed if reversed else enumerate,
3926 for index, leaf in op(line.leaves):
3927 length = len(leaf.prefix) + len(leaf.value)
3928 if "\n" in leaf.value:
3929 return # Multiline strings, we can't continue.
3931 for comment in line.comments_after(leaf):
3932 length += len(comment.value)
3934 yield index, leaf, length
3937 def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool:
3938 """Return True if `line` is no longer than `line_length`.
3940 Uses the provided `line_str` rendering, if any, otherwise computes a new one.
3943 line_str = str(line).strip("\n")
3945 len(line_str) <= line_length
3946 and "\n" not in line_str # multiline strings
3947 and not line.contains_standalone_comments()
3951 def can_be_split(line: Line) -> bool:
3952 """Return False if the line cannot be split *for sure*.
3954 This is not an exhaustive search but a cheap heuristic that we can use to
3955 avoid some unfortunate formattings (mostly around wrapping unsplittable code
3956 in unnecessary parentheses).
3958 leaves = line.leaves
3962 if leaves[0].type == token.STRING and leaves[1].type == token.DOT:
3966 for leaf in leaves[-2::-1]:
3967 if leaf.type in OPENING_BRACKETS:
3968 if next.type not in CLOSING_BRACKETS:
3972 elif leaf.type == token.DOT:
3974 elif leaf.type == token.NAME:
3975 if not (next.type == token.DOT or next.type in OPENING_BRACKETS):
3978 elif leaf.type not in CLOSING_BRACKETS:
3981 if dot_count > 1 and call_count > 1:
3987 def can_omit_invisible_parens(line: Line, line_length: int) -> bool:
3988 """Does `line` have a shape safe to reformat without optional parens around it?
3990 Returns True for only a subset of potentially nice looking formattings but
3991 the point is to not return false positives that end up producing lines that
3994 bt = line.bracket_tracker
3995 if not bt.delimiters:
3996 # Without delimiters the optional parentheses are useless.
3999 max_priority = bt.max_delimiter_priority()
4000 if bt.delimiter_count_with_priority(max_priority) > 1:
4001 # With more than one delimiter of a kind the optional parentheses read better.
4004 if max_priority == DOT_PRIORITY:
4005 # A single stranded method call doesn't require optional parentheses.
4008 assert len(line.leaves) >= 2, "Stranded delimiter"
4010 first = line.leaves[0]
4011 second = line.leaves[1]
4012 penultimate = line.leaves[-2]
4013 last = line.leaves[-1]
4015 # With a single delimiter, omit if the expression starts or ends with
4017 if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:
4019 length = 4 * line.depth
4020 for _index, leaf, leaf_length in enumerate_with_length(line):
4021 if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:
4024 length += leaf_length
4025 if length > line_length:
4028 if leaf.type in OPENING_BRACKETS:
4029 # There are brackets we can further split on.
4033 # checked the entire string and line length wasn't exceeded
4034 if len(line.leaves) == _index + 1:
4037 # Note: we are not returning False here because a line might have *both*
4038 # a leading opening bracket and a trailing closing bracket. If the
4039 # opening bracket doesn't match our rule, maybe the closing will.
4042 last.type == token.RPAR
4043 or last.type == token.RBRACE
4045 # don't use indexing for omitting optional parentheses;
4047 last.type == token.RSQB
4049 and last.parent.type != syms.trailer
4052 if penultimate.type in OPENING_BRACKETS:
4053 # Empty brackets don't help.
4056 if is_multiline_string(first):
4057 # Additional wrapping of a multiline string in this situation is
4061 length = 4 * line.depth
4062 seen_other_brackets = False
4063 for _index, leaf, leaf_length in enumerate_with_length(line):
4064 length += leaf_length
4065 if leaf is last.opening_bracket:
4066 if seen_other_brackets or length <= line_length:
4069 elif leaf.type in OPENING_BRACKETS:
4070 # There are brackets we can further split on.
4071 seen_other_brackets = True
4076 def get_cache_file(mode: FileMode) -> Path:
4077 return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
4080 def read_cache(mode: FileMode) -> Cache:
4081 """Read the cache if it exists and is well formed.
4083 If it is not well formed, the call to write_cache later should resolve the issue.
4085 cache_file = get_cache_file(mode)
4086 if not cache_file.exists():
4089 with cache_file.open("rb") as fobj:
4091 cache: Cache = pickle.load(fobj)
4092 except (pickle.UnpicklingError, ValueError):
4098 def get_cache_info(path: Path) -> CacheInfo:
4099 """Return the information used to check if a file is already formatted or not."""
4101 return stat.st_mtime, stat.st_size
4104 def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
4105 """Split an iterable of paths in `sources` into two sets.
4107 The first contains paths of files that were modified on disk or are not in the
4108 cache. The other contains paths to non-modified files.
4110 todo, done = set(), set()
4113 if cache.get(src) != get_cache_info(src):
4120 def write_cache(cache: Cache, sources: Iterable[Path], mode: FileMode) -> None:
4121 """Update the cache file."""
4122 cache_file = get_cache_file(mode)
4124 CACHE_DIR.mkdir(parents=True, exist_ok=True)
4125 new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}}
4126 with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:
4127 pickle.dump(new_cache, f, protocol=4)
4128 os.replace(f.name, cache_file)
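# Editor's note: a minimal sketch (not part of the original source) of how the cache
# helpers above fit together during a run; `mode` and the paths are hypothetical:
#
#     cache = read_cache(mode)
#     todo, done = filter_cached(cache, {Path("a.py"), Path("b.py")})
#     # ...reformat every path in `todo`, skip everything in `done`...
#     write_cache(cache, todo, mode)  # records (mtime, size) per freshly formatted file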
4133 def patch_click() -> None:
4134 """Make Click not crash.
4136 On certain misconfigured environments, Python 3 selects the ASCII encoding as the
4137 default which restricts paths that it can access during the lifetime of the
4138 application. Click refuses to work in this scenario by raising a RuntimeError.
4140 In the case of Black, the likelihood that non-ASCII characters are going to be used in
4141 file paths is minimal since it's Python source code. Moreover, this crash was
4142 spurious on Python 3.7 thanks to PEP 538 and PEP 540.
4145 from click import core
4146 from click import _unicodefun # type: ignore
4147 except ModuleNotFoundError:
4150 for module in (core, _unicodefun):
4151 if hasattr(module, "_verify_python3_env"):
4152 module._verify_python3_env = lambda: None
4155 def patched_main() -> None:
4161 if __name__ == "__main__":