All patches and comments are welcome. Please squash your changes into logical
commits before using git-format-patch and git-send-email to
patches@git.madduck.net.
I'd be especially grateful if you would read over the Git project's submission
guidelines and adhere to them.
3 from concurrent.futures import Executor, ProcessPoolExecutor
4 from contextlib import contextmanager
5 from datetime import datetime
7 from functools import lru_cache, partial, wraps
11 from multiprocessing import Manager, freeze_support
13 from pathlib import Path
41 from appdirs import user_cache_dir
42 from attr import dataclass, evolve, Factory
45 from typed_ast import ast3, ast27
48 from blib2to3.pytree import Node, Leaf, type_repr
49 from blib2to3 import pygram, pytree
50 from blib2to3.pgen2 import driver, token
51 from blib2to3.pgen2.grammar import Grammar
52 from blib2to3.pgen2.parse import ParseError
54 from _version import version as __version__
56 DEFAULT_LINE_LENGTH = 88
58 r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)/"
60 DEFAULT_INCLUDES = r"\.pyi?$"
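# Illustrative sketch (not part of black itself; assumes the module's standard
# `re` import): the include pattern above matches ".py" and ".pyi" suffixes,
# while the exclude pattern targets directory components such as "/.git/" or
# "/build/", using forward slashes on every platform.
def _example_default_includes() -> None:
    assert re.search(DEFAULT_INCLUDES, "src/module.py")
    assert re.search(DEFAULT_INCLUDES, "stubs/module.pyi")
    assert not re.search(DEFAULT_INCLUDES, "README.md")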
61 CACHE_DIR = Path(user_cache_dir("black", version=__version__))
73 LN = Union[Leaf, Node]
74 SplitFunc = Callable[["Line", Collection["Feature"]], Iterator["Line"]]
77 CacheInfo = Tuple[Timestamp, FileSize]
78 Cache = Dict[Path, CacheInfo]
79 out = partial(click.secho, bold=True, err=True)
80 err = partial(click.secho, fg="red", err=True)
82 pygram.initialize(CACHE_DIR)
83 syms = pygram.python_symbols
86 class NothingChanged(UserWarning):
87 """Raised when reformatted code is the same as source."""
90 class CannotSplit(Exception):
91 """A readable split that fits the allotted line length is impossible."""
94 class InvalidInput(ValueError):
95 """Raised when input source code fails all parse attempts."""
98 class WriteBack(Enum):
105 def from_configuration(cls, *, check: bool, diff: bool) -> "WriteBack":
106 if check and not diff:
109 return cls.DIFF if diff else cls.YES
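# Illustrative sketch (not part of black itself): how the --check/--diff flags
# fold into a single WriteBack member, assuming the enum defines NO, YES, DIFF
# and CHECK as in the surrounding code.
def _example_write_back_selection() -> None:
    assert WriteBack.from_configuration(check=True, diff=False) is WriteBack.CHECK
    assert WriteBack.from_configuration(check=False, diff=True) is WriteBack.DIFF
    assert WriteBack.from_configuration(check=False, diff=False) is WriteBack.YES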
118 class TargetVersion(Enum):
127 def is_python2(self) -> bool:
128 return self is TargetVersion.PY27
131 PY36_VERSIONS = {TargetVersion.PY36, TargetVersion.PY37, TargetVersion.PY38}
135 # All string literals are unicode
138 NUMERIC_UNDERSCORES = 3
139 TRAILING_COMMA_IN_CALL = 4
140 TRAILING_COMMA_IN_DEF = 5
141 # The following two feature-flags are mutually exclusive, and exactly one should be
142 # set for every version of python.
143 ASYNC_IDENTIFIERS = 6
145 ASSIGNMENT_EXPRESSIONS = 8
146 POS_ONLY_ARGUMENTS = 9
149 VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
150 TargetVersion.PY27: {Feature.ASYNC_IDENTIFIERS},
151 TargetVersion.PY33: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS},
152 TargetVersion.PY34: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS},
153 TargetVersion.PY35: {
154 Feature.UNICODE_LITERALS,
155 Feature.TRAILING_COMMA_IN_CALL,
156 Feature.ASYNC_IDENTIFIERS,
158 TargetVersion.PY36: {
159 Feature.UNICODE_LITERALS,
161 Feature.NUMERIC_UNDERSCORES,
162 Feature.TRAILING_COMMA_IN_CALL,
163 Feature.TRAILING_COMMA_IN_DEF,
164 Feature.ASYNC_IDENTIFIERS,
166 TargetVersion.PY37: {
167 Feature.UNICODE_LITERALS,
169 Feature.NUMERIC_UNDERSCORES,
170 Feature.TRAILING_COMMA_IN_CALL,
171 Feature.TRAILING_COMMA_IN_DEF,
172 Feature.ASYNC_KEYWORDS,
174 TargetVersion.PY38: {
175 Feature.UNICODE_LITERALS,
177 Feature.NUMERIC_UNDERSCORES,
178 Feature.TRAILING_COMMA_IN_CALL,
179 Feature.TRAILING_COMMA_IN_DEF,
180 Feature.ASYNC_KEYWORDS,
181 Feature.ASSIGNMENT_EXPRESSIONS,
182 Feature.POS_ONLY_ARGUMENTS,
189 target_versions: Set[TargetVersion] = Factory(set)
190 line_length: int = DEFAULT_LINE_LENGTH
191 string_normalization: bool = True
194 def get_cache_key(self) -> str:
195 if self.target_versions:
196 version_str = ",".join(
198 for version in sorted(self.target_versions, key=lambda v: v.value)
204 str(self.line_length),
205 str(int(self.string_normalization)),
206 str(int(self.is_pyi)),
208 return ".".join(parts)
211 def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
212 return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)
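# Illustrative example (not part of black itself): a feature may only be used
# if every requested target version supports it, per the VERSION_TO_FEATURES
# table above.
def _example_supports_feature() -> None:
    assert supports_feature(
        {TargetVersion.PY36, TargetVersion.PY37}, Feature.NUMERIC_UNDERSCORES
    )
    # PY35 does not list NUMERIC_UNDERSCORES, so adding it makes the check fail.
    assert not supports_feature(
        {TargetVersion.PY35, TargetVersion.PY36}, Feature.NUMERIC_UNDERSCORES
    )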
215 def read_pyproject_toml(
216 ctx: click.Context, param: click.Parameter, value: Union[str, int, bool, None]
218 """Inject Black configuration from "pyproject.toml" into defaults in `ctx`.
220 Returns the path to a successfully found and read configuration file, None
223 assert not isinstance(value, (int, bool)), "Invalid parameter type passed"
225 root = find_project_root(ctx.params.get("src", ()))
226 path = root / "pyproject.toml"
233 pyproject_toml = toml.load(value)
234 config = pyproject_toml.get("tool", {}).get("black", {})
235 except (toml.TomlDecodeError, OSError) as e:
236 raise click.FileError(
237 filename=value, hint=f"Error reading configuration file: {e}"
243 if ctx.default_map is None:
245 ctx.default_map.update( # type: ignore # bad types in .pyi
246 {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
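# Illustrative sketch (not part of black itself) of the key rewrite above. A
# pyproject.toml section such as
#
#     [tool.black]
#     line-length = 100
#     target-version = ["py36", "py37"]
#
# is loaded into `config`, and the dashed keys are rewritten to the underscored
# parameter names that click expects in its default_map.
def _example_config_key_rewrite() -> Dict[str, Any]:
    config = {"line-length": 100, "target-version": ["py36", "py37"]}
    return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
    # -> {"line_length": 100, "target_version": ["py36", "py37"]}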
251 @click.command(context_settings=dict(help_option_names=["-h", "--help"]))
252 @click.option("-c", "--code", type=str, help="Format the code passed in as a string.")
257 default=DEFAULT_LINE_LENGTH,
258 help="How many characters per line to allow.",
264 type=click.Choice([v.name.lower() for v in TargetVersion]),
265 callback=lambda c, p, v: [TargetVersion[val.upper()] for val in v],
268 "Python versions that should be supported by Black's output. [default: "
269 "per-file auto-detection]"
276 "Allow using Python 3.6-only syntax on all input files. This will put "
277 "trailing commas in function signatures and calls also after *args and "
278 "**kwargs. Deprecated; use --target-version instead. "
279 "[default: per-file auto-detection]"
286 "Format all input files like typing stubs regardless of file extension "
287 "(useful when piping source on standard input)."
292 "--skip-string-normalization",
294 help="Don't normalize string quotes or prefixes.",
300 "Don't write the files back, just return the status. Return code 0 "
301 "means nothing would change. Return code 1 means some files would be "
302 "reformatted. Return code 123 means there was an internal error."
308 help="Don't write the files back, just output a diff for each file on stdout.",
313 help="If --fast given, skip temporary sanity checks. [default: --safe]",
318 default=DEFAULT_INCLUDES,
320 "A regular expression that matches files and directories that should be "
321 "included on recursive searches. An empty value means all files are "
322 "included regardless of the name. Use forward slashes for directories on "
323 "all platforms (Windows, too). Exclusions are calculated first, inclusions "
331 default=DEFAULT_EXCLUDES,
333 "A regular expression that matches files and directories that should be "
334 "excluded on recursive searches. An empty value means no paths are excluded. "
335 "Use forward slashes for directories on all platforms (Windows, too). "
336 "Exclusions are calculated first, inclusions later."
345 "Don't emit non-error messages to stderr. Errors are still emitted; "
346 "silence those with 2>/dev/null."
354 "Also emit messages to stderr about files that were not changed or were "
355 "ignored due to --exclude=."
358 @click.version_option(version=__version__)
363 exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
370 exists=False, file_okay=True, dir_okay=False, readable=True, allow_dash=False
373 callback=read_pyproject_toml,
374 help="Read configuration from PATH.",
381 target_version: List[TargetVersion],
387 skip_string_normalization: bool,
393 config: Optional[str],
395 """The uncompromising code formatter."""
396 write_back = WriteBack.from_configuration(check=check, diff=diff)
399 err(f"Cannot use both --target-version and --py36")
402 versions = set(target_version)
405 "--py36 is deprecated and will be removed in a future version. "
406 "Use --target-version py36 instead."
408 versions = PY36_VERSIONS
410 # We'll autodetect later.
413 target_versions=versions,
414 line_length=line_length,
416 string_normalization=not skip_string_normalization,
418 if config and verbose:
419 out(f"Using configuration from {config}.", bold=False, fg="blue")
421 print(format_str(code, mode=mode))
424 include_regex = re_compile_maybe_verbose(include)
426 err(f"Invalid regular expression for include given: {include!r}")
429 exclude_regex = re_compile_maybe_verbose(exclude)
431 err(f"Invalid regular expression for exclude given: {exclude!r}")
433 report = Report(check=check, quiet=quiet, verbose=verbose)
434 root = find_project_root(src)
435 sources: Set[Path] = set()
436 path_empty(src, quiet, verbose, ctx)
441 gen_python_files_in_dir(p, root, include_regex, exclude_regex, report)
443 elif p.is_file() or s == "-":
444 # if a file was explicitly given, we don't care about its extension
447 err(f"invalid path: {s}")
448 if len(sources) == 0:
449 if verbose or not quiet:
450 out("No Python files are present to be formatted. Nothing to do 😴")
453 if len(sources) == 1:
457 write_back=write_back,
463 sources=sources, fast=fast, write_back=write_back, mode=mode, report=report
466 if verbose or not quiet:
467 out("Oh no! 💥 💔 💥" if report.return_code else "All done! ✨ 🍰 ✨")
468 click.secho(str(report), err=True)
469 ctx.exit(report.return_code)
472 def path_empty(src: Tuple[str], quiet: bool, verbose: bool, ctx: click.Context) -> None:
474 Exit if there is no `src` provided for formatting
477 if verbose or not quiet:
478 out("No Path provided. Nothing to do 😴")
483 src: Path, fast: bool, write_back: WriteBack, mode: FileMode, report: "Report"
485 """Reformat a single file under `src` without spawning child processes.
487 `fast`, `write_back`, and `mode` options are passed to
488 :func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
492 if not src.is_file() and str(src) == "-":
493 if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode):
494 changed = Changed.YES
497 if write_back != WriteBack.DIFF:
498 cache = read_cache(mode)
499 res_src = src.resolve()
500 if res_src in cache and cache[res_src] == get_cache_info(res_src):
501 changed = Changed.CACHED
502 if changed is not Changed.CACHED and format_file_in_place(
503 src, fast=fast, write_back=write_back, mode=mode
505 changed = Changed.YES
506 if (write_back is WriteBack.YES and changed is not Changed.CACHED) or (
507 write_back is WriteBack.CHECK and changed is Changed.NO
509 write_cache(cache, [src], mode)
510 report.done(src, changed)
511 except Exception as exc:
512 report.failed(src, str(exc))
518 write_back: WriteBack,
522 """Reformat multiple files using a ProcessPoolExecutor."""
523 loop = asyncio.get_event_loop()
524 worker_count = os.cpu_count()
525 if sys.platform == "win32":
526 # Work around https://bugs.python.org/issue26903
527 worker_count = min(worker_count, 61)
528 executor = ProcessPoolExecutor(max_workers=worker_count)
530 loop.run_until_complete(
534 write_back=write_back,
546 async def schedule_formatting(
549 write_back: WriteBack,
552 loop: asyncio.AbstractEventLoop,
555 """Run formatting of `sources` in parallel using the provided `executor`.
557 (Use ProcessPoolExecutors for actual parallelism.)
559 `write_back`, `fast`, and `mode` options are passed to
560 :func:`format_file_in_place`.
563 if write_back != WriteBack.DIFF:
564 cache = read_cache(mode)
565 sources, cached = filter_cached(cache, sources)
566 for src in sorted(cached):
567 report.done(src, Changed.CACHED)
572 sources_to_cache = []
574 if write_back == WriteBack.DIFF:
575 # For diff output, we need locks to ensure we don't interleave output
576 # from different processes.
578 lock = manager.Lock()
580 asyncio.ensure_future(
581 loop.run_in_executor(
582 executor, format_file_in_place, src, fast, mode, write_back, lock
585 for src in sorted(sources)
587 pending: Iterable[asyncio.Future] = tasks.keys()
589 loop.add_signal_handler(signal.SIGINT, cancel, pending)
590 loop.add_signal_handler(signal.SIGTERM, cancel, pending)
591 except NotImplementedError:
592 # There are no good alternatives for these on Windows.
595 done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
597 src = tasks.pop(task)
599 cancelled.append(task)
600 elif task.exception():
601 report.failed(src, str(task.exception()))
603 changed = Changed.YES if task.result() else Changed.NO
604 # If the file was written back or was successfully checked as
605 # well-formatted, store this information in the cache.
606 if write_back is WriteBack.YES or (
607 write_back is WriteBack.CHECK and changed is Changed.NO
609 sources_to_cache.append(src)
610 report.done(src, changed)
612 await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
614 write_cache(cache, sources_to_cache, mode)
617 def format_file_in_place(
621 write_back: WriteBack = WriteBack.NO,
622 lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy
624 """Format file under `src` path. Return True if changed.
626 If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted
628 `mode` and `fast` options are passed to :func:`format_file_contents`.
630 if src.suffix == ".pyi":
631 mode = evolve(mode, is_pyi=True)
633 then = datetime.utcfromtimestamp(src.stat().st_mtime)
634 with open(src, "rb") as buf:
635 src_contents, encoding, newline = decode_bytes(buf.read())
637 dst_contents = format_file_contents(src_contents, fast=fast, mode=mode)
638 except NothingChanged:
641 if write_back == write_back.YES:
642 with open(src, "w", encoding=encoding, newline=newline) as f:
643 f.write(dst_contents)
644 elif write_back == write_back.DIFF:
645 now = datetime.utcnow()
646 src_name = f"{src}\t{then} +0000"
647 dst_name = f"{src}\t{now} +0000"
648 diff_contents = diff(src_contents, dst_contents, src_name, dst_name)
650 with lock or nullcontext():
651 f = io.TextIOWrapper(
657 f.write(diff_contents)
663 def format_stdin_to_stdout(
664 fast: bool, *, write_back: WriteBack = WriteBack.NO, mode: FileMode
666 """Format file on stdin. Return True if changed.
668 If `write_back` is YES, write reformatted code back to stdout. If it is DIFF,
669 write a diff to stdout. The `mode` argument is passed to
670 :func:`format_file_contents`.
672 then = datetime.utcnow()
673 src, encoding, newline = decode_bytes(sys.stdin.buffer.read())
676 dst = format_file_contents(src, fast=fast, mode=mode)
679 except NothingChanged:
683 f = io.TextIOWrapper(
684 sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True
686 if write_back == WriteBack.YES:
688 elif write_back == WriteBack.DIFF:
689 now = datetime.utcnow()
690 src_name = f"STDIN\t{then} +0000"
691 dst_name = f"STDOUT\t{now} +0000"
692 f.write(diff(src, dst, src_name, dst_name))
696 def format_file_contents(
697 src_contents: str, *, fast: bool, mode: FileMode
699 """Reformat contents a file and return new contents.
701 If `fast` is False, additionally confirm that the reformatted code is
702 valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
703 `mode` is passed to :func:`format_str`.
705 if src_contents.strip() == "":
708 dst_contents = format_str(src_contents, mode=mode)
709 if src_contents == dst_contents:
713 assert_equivalent(src_contents, dst_contents)
714 assert_stable(src_contents, dst_contents, mode=mode)
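# Illustrative sketch (not part of black itself) of the calling convention:
# format_file_contents signals "no changes" by raising NothingChanged rather
# than returning the original source, so callers branch on the exception.
def _example_reformat(src: str) -> str:
    try:
        return format_file_contents(src, fast=False, mode=FileMode())
    except NothingChanged:
        return src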
718 def format_str(src_contents: str, *, mode: FileMode) -> FileContent:
719 """Reformat a string and return new contents.
721 `mode` determines formatting options, such as how many characters per line are
724 src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
726 future_imports = get_future_imports(src_node)
727 if mode.target_versions:
728 versions = mode.target_versions
730 versions = detect_target_versions(src_node)
731 normalize_fmt_off(src_node)
732 lines = LineGenerator(
733 remove_u_prefix="unicode_literals" in future_imports
734 or supports_feature(versions, Feature.UNICODE_LITERALS),
736 normalize_strings=mode.string_normalization,
738 elt = EmptyLineTracker(is_pyi=mode.is_pyi)
741 split_line_features = {
743 for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF}
744 if supports_feature(versions, feature)
746 for current_line in lines.visit(src_node):
747 for _ in range(after):
748 dst_contents.append(str(empty_line))
749 before, after = elt.maybe_empty_lines(current_line)
750 for _ in range(before):
751 dst_contents.append(str(empty_line))
752 for line in split_line(
753 current_line, line_length=mode.line_length, features=split_line_features
755 dst_contents.append(str(line))
756 return "".join(dst_contents)
759 def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
760 """Return a tuple of (decoded_contents, encoding, newline).
762 `newline` is either CRLF or LF but `decoded_contents` is decoded with
763 universal newlines (i.e. only contains LF).
765 srcbuf = io.BytesIO(src)
766 encoding, lines = tokenize.detect_encoding(srcbuf.readline)
768 return "", encoding, "\n"
770 newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n"
772 with io.TextIOWrapper(srcbuf, encoding) as tiow:
773 return tiow.read(), encoding, newline
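# Illustrative example (not part of black itself) of the contract documented
# above: the decoded contents use universal newlines while the original
# newline style is reported separately.
def _example_decode_bytes() -> None:
    contents, encoding, newline = decode_bytes(b"x = 1\r\ny = 2\r\n")
    assert contents == "x = 1\ny = 2\n"
    assert encoding == "utf-8"
    assert newline == "\r\n"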
776 def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
777 if not target_versions:
778 # No target_version specified, so try all grammars.
781 pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,
783 pygram.python_grammar_no_print_statement_no_exec_statement,
784 # Python 2.7 with future print_function import
785 pygram.python_grammar_no_print_statement,
787 pygram.python_grammar,
789 elif all(version.is_python2() for version in target_versions):
790 # Python 2-only code, so try Python 2 grammars.
792 # Python 2.7 with future print_function import
793 pygram.python_grammar_no_print_statement,
795 pygram.python_grammar,
798 # Python 3-compatible code, so only try Python 3 grammar.
800 # If we have to parse both, try to parse async as a keyword first
801 if not supports_feature(target_versions, Feature.ASYNC_IDENTIFIERS):
804 pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords # noqa: B950
806 if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
808 grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)
809 # At least one of the above branches must have been taken, because every Python
810 # version has exactly one of the two 'ASYNC_*' flags
814 def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
815 """Given a string with source, return the lib2to3 Node."""
816 if src_txt[-1:] != "\n":
819 for grammar in get_grammars(set(target_versions)):
820 drv = driver.Driver(grammar, pytree.convert)
822 result = drv.parse_string(src_txt, True)
825 except ParseError as pe:
826 lineno, column = pe.context[1]
827 lines = src_txt.splitlines()
829 faulty_line = lines[lineno - 1]
831 faulty_line = "<line number missing in source>"
832 exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}")
836 if isinstance(result, Leaf):
837 result = Node(syms.file_input, [result])
841 def lib2to3_unparse(node: Node) -> str:
842 """Given a lib2to3 node, return its string representation."""
850 class Visitor(Generic[T]):
851 """Basic lib2to3 visitor that yields things of type `T` on `visit()`."""
853 def visit(self, node: LN) -> Iterator[T]:
854 """Main method to visit `node` and its children.
856 It tries to find a `visit_*()` method for the given `node.type`, like
857 `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects.
858 If no dedicated `visit_*()` method is found, chooses `visit_default()`
861 Then yields objects of type `T` from the selected visitor.
864 name = token.tok_name[node.type]
866 name = type_repr(node.type)
867 yield from getattr(self, f"visit_{name}", self.visit_default)(node)
869 def visit_default(self, node: LN) -> Iterator[T]:
870 """Default `visit_*()` implementation. Recurses to children of `node`."""
871 if isinstance(node, Node):
872 for child in node.children:
873 yield from self.visit(child)
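# Illustrative sketch (not part of black itself) of the dispatch described in
# Visitor.visit: a subclass only defines visit_* methods for the node or token
# types it cares about and inherits the recursive default.
class _ExampleNameCollector(Visitor[str]):
    """Yield the value of every NAME leaf in a parsed tree."""

    def visit_NAME(self, leaf: Leaf) -> Iterator[str]:
        yield leaf.value


# list(_ExampleNameCollector().visit(lib2to3_parse("x = y + 1\n"))) == ["x", "y"]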
877 class DebugVisitor(Visitor[T]):
880 def visit_default(self, node: LN) -> Iterator[T]:
881 indent = " " * (2 * self.tree_depth)
882 if isinstance(node, Node):
883 _type = type_repr(node.type)
884 out(f"{indent}{_type}", fg="yellow")
886 for child in node.children:
887 yield from self.visit(child)
890 out(f"{indent}/{_type}", fg="yellow", bold=False)
892 _type = token.tok_name.get(node.type, str(node.type))
893 out(f"{indent}{_type}", fg="blue", nl=False)
895 # We don't have to handle prefixes for `Node` objects since
896 # that delegates to the first child anyway.
897 out(f" {node.prefix!r}", fg="green", bold=False, nl=False)
898 out(f" {node.value!r}", fg="blue", bold=False)
901 def show(cls, code: Union[str, Leaf, Node]) -> None:
902 """Pretty-print the lib2to3 AST of a given string of `code`.
904 Convenience method for debugging.
906 v: DebugVisitor[None] = DebugVisitor()
907 if isinstance(code, str):
908 code = lib2to3_parse(code)
912 WHITESPACE = {token.DEDENT, token.INDENT, token.NEWLINE}
923 STANDALONE_COMMENT = 153
924 token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT"
925 LOGIC_OPERATORS = {"and", "or"}
950 STARS = {token.STAR, token.DOUBLESTAR}
951 VARARGS_SPECIALS = STARS | {token.SLASH}
954 syms.argument, # double star in arglist
955 syms.trailer, # single argument to call
957 syms.varargslist, # lambdas
959 UNPACKING_PARENTS = {
960 syms.atom, # single element of a list or set literal
964 syms.testlist_star_expr,
999 COMPREHENSION_PRIORITY = 20
1001 TERNARY_PRIORITY = 16
1003 STRING_PRIORITY = 12
1004 COMPARATOR_PRIORITY = 10
1007 token.CIRCUMFLEX: 8,
1010 token.RIGHTSHIFT: 6,
1015 token.DOUBLESLASH: 4,
1019 token.DOUBLESTAR: 2,
1025 class BracketTracker:
1026 """Keeps track of brackets on a line."""
1029 bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = Factory(dict)
1030 delimiters: Dict[LeafID, Priority] = Factory(dict)
1031 previous: Optional[Leaf] = None
1032 _for_loop_depths: List[int] = Factory(list)
1033 _lambda_argument_depths: List[int] = Factory(list)
1035 def mark(self, leaf: Leaf) -> None:
1036 """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
1038 All leaves receive an int `bracket_depth` field that stores how deep
1039 within brackets a given leaf is. 0 means there are no enclosing brackets
1040 that started on this line.
1042 If a leaf is itself a closing bracket, it receives an `opening_bracket`
1043 field that it forms a pair with. This is a one-directional link to
1044 avoid reference cycles.
1046 If a leaf is a delimiter (a token on which Black can split the line if
1047 needed) and it's on depth 0, its `id()` is stored in the tracker's
1050 if leaf.type == token.COMMENT:
1053 self.maybe_decrement_after_for_loop_variable(leaf)
1054 self.maybe_decrement_after_lambda_arguments(leaf)
1055 if leaf.type in CLOSING_BRACKETS:
1057 opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
1058 leaf.opening_bracket = opening_bracket
1059 leaf.bracket_depth = self.depth
1061 delim = is_split_before_delimiter(leaf, self.previous)
1062 if delim and self.previous is not None:
1063 self.delimiters[id(self.previous)] = delim
1065 delim = is_split_after_delimiter(leaf, self.previous)
1067 self.delimiters[id(leaf)] = delim
1068 if leaf.type in OPENING_BRACKETS:
1069 self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
1071 self.previous = leaf
1072 self.maybe_increment_lambda_arguments(leaf)
1073 self.maybe_increment_for_loop_variable(leaf)
1075 def any_open_brackets(self) -> bool:
1076 """Return True if there is an yet unmatched open bracket on the line."""
1077 return bool(self.bracket_match)
1079 def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
1080 """Return the highest priority of a delimiter found on the line.
1082 Values are consistent with what `is_split_*_delimiter()` return.
1083 Raises ValueError on no delimiters.
1085 return max(v for k, v in self.delimiters.items() if k not in exclude)
1087 def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
1088 """Return the number of delimiters with the given `priority`.
1090 If no `priority` is passed, defaults to max priority on the line.
1092 if not self.delimiters:
1095 priority = priority or self.max_delimiter_priority()
1096 return sum(1 for p in self.delimiters.values() if p == priority)
1098 def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
1099 """In a for loop, or comprehension, the variables are often unpacks.
1101 To avoid splitting on the comma in this situation, increase the depth of
1102 tokens between `for` and `in`.
1104 if leaf.type == token.NAME and leaf.value == "for":
1106 self._for_loop_depths.append(self.depth)
1111 def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
1112 """See `maybe_increment_for_loop_variable` above for explanation."""
1114 self._for_loop_depths
1115 and self._for_loop_depths[-1] == self.depth
1116 and leaf.type == token.NAME
1117 and leaf.value == "in"
1120 self._for_loop_depths.pop()
1125 def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
1126 """In a lambda expression, there might be more than one argument.
1128 To avoid splitting on the comma in this situation, increase the depth of
1129 tokens between `lambda` and `:`.
1131 if leaf.type == token.NAME and leaf.value == "lambda":
1133 self._lambda_argument_depths.append(self.depth)
1138 def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
1139 """See `maybe_increment_lambda_arguments` above for explanation."""
1141 self._lambda_argument_depths
1142 and self._lambda_argument_depths[-1] == self.depth
1143 and leaf.type == token.COLON
1146 self._lambda_argument_depths.pop()
1151 def get_open_lsqb(self) -> Optional[Leaf]:
1152 """Return the most recent opening square bracket (if any)."""
1153 return self.bracket_match.get((self.depth - 1, token.RSQB))
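# Illustrative example (not part of black itself) of what BracketTracker.mark()
# records for a simple call, following the docstrings above: every leaf gets a
# bracket_depth, the closing bracket is linked to its opener, and only depth-0
# delimiters are registered, so the comma inside the parentheses below is not.
def _example_bracket_tracker() -> None:
    tracker = BracketTracker()
    leaves = [
        Leaf(token.NAME, "foo"),
        Leaf(token.LPAR, "("),
        Leaf(token.NAME, "a"),
        Leaf(token.COMMA, ","),
        Leaf(token.NAME, "b"),
        Leaf(token.RPAR, ")"),
    ]
    for leaf in leaves:
        tracker.mark(leaf)
    assert [leaf.bracket_depth for leaf in leaves] == [0, 0, 1, 1, 1, 0]
    assert leaves[-1].opening_bracket is leaves[1]
    assert not tracker.delimiters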
1158 """Holds leaves and comments. Can be printed with `str(line)`."""
1161 leaves: List[Leaf] = Factory(list)
1162 comments: Dict[LeafID, List[Leaf]] = Factory(dict) # keys ordered like `leaves`
1163 bracket_tracker: BracketTracker = Factory(BracketTracker)
1164 inside_brackets: bool = False
1165 should_explode: bool = False
1167 def append(self, leaf: Leaf, preformatted: bool = False) -> None:
1168 """Add a new `leaf` to the end of the line.
1170 Unless `preformatted` is True, the `leaf` will receive a new consistent
1171 whitespace prefix and metadata applied by :class:`BracketTracker`.
1172 Trailing commas are maybe removed; unpacked for-loop variables are
1173 demoted from being delimiters.
1175 Inline comments are put aside.
1177 has_value = leaf.type in BRACKETS or bool(leaf.value.strip())
1181 if token.COLON == leaf.type and self.is_class_paren_empty:
1182 del self.leaves[-2:]
1183 if self.leaves and not preformatted:
1184 # Note: at this point leaf.prefix should be empty except for
1185 # imports, for which we only preserve newlines.
1186 leaf.prefix += whitespace(
1187 leaf, complex_subscript=self.is_complex_subscript(leaf)
1189 if self.inside_brackets or not preformatted:
1190 self.bracket_tracker.mark(leaf)
1191 self.maybe_remove_trailing_comma(leaf)
1192 if not self.append_comment(leaf):
1193 self.leaves.append(leaf)
1195 def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
1196 """Like :func:`append()` but disallow invalid standalone comment structure.
1198 Raises ValueError when any `leaf` is appended after a standalone comment
1199 or when a standalone comment is not the first leaf on the line.
1201 if self.bracket_tracker.depth == 0:
1203 raise ValueError("cannot append to standalone comments")
1205 if self.leaves and leaf.type == STANDALONE_COMMENT:
1207 "cannot append standalone comments to a populated line"
1210 self.append(leaf, preformatted=preformatted)
1213 def is_comment(self) -> bool:
1214 """Is this line a standalone comment?"""
1215 return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
1218 def is_decorator(self) -> bool:
1219 """Is this line a decorator?"""
1220 return bool(self) and self.leaves[0].type == token.AT
1223 def is_import(self) -> bool:
1224 """Is this an import line?"""
1225 return bool(self) and is_import(self.leaves[0])
1228 def is_class(self) -> bool:
1229 """Is this line a class definition?"""
1232 and self.leaves[0].type == token.NAME
1233 and self.leaves[0].value == "class"
1237 def is_stub_class(self) -> bool:
1238 """Is this line a class definition with a body consisting only of "..."?"""
1239 return self.is_class and self.leaves[-3:] == [
1240 Leaf(token.DOT, ".") for _ in range(3)
1244 def is_def(self) -> bool:
1245 """Is this a function definition? (Also returns True for async defs.)"""
1247 first_leaf = self.leaves[0]
1252 second_leaf: Optional[Leaf] = self.leaves[1]
1255 return (first_leaf.type == token.NAME and first_leaf.value == "def") or (
1256 first_leaf.type == token.ASYNC
1257 and second_leaf is not None
1258 and second_leaf.type == token.NAME
1259 and second_leaf.value == "def"
1263 def is_class_paren_empty(self) -> bool:
1264 """Is this a class with no base classes but using parentheses?
1266 Those are unnecessary and should be removed.
1270 and len(self.leaves) == 4
1272 and self.leaves[2].type == token.LPAR
1273 and self.leaves[2].value == "("
1274 and self.leaves[3].type == token.RPAR
1275 and self.leaves[3].value == ")"
1279 def is_triple_quoted_string(self) -> bool:
1280 """Is the line a triple quoted string?"""
1283 and self.leaves[0].type == token.STRING
1284 and self.leaves[0].value.startswith(('"""', "'''"))
1287 def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
1288 """If so, needs to be split before emitting."""
1289 for leaf in self.leaves:
1290 if leaf.type == STANDALONE_COMMENT:
1291 if leaf.bracket_depth <= depth_limit:
1295 def contains_uncollapsable_type_comments(self) -> bool:
1298 last_leaf = self.leaves[-1]
1299 ignored_ids.add(id(last_leaf))
1300 if last_leaf.type == token.COMMA or (
1301 last_leaf.type == token.RPAR and not last_leaf.value
1303 # When trailing commas or optional parens are inserted by Black for
1304 # consistency, comments after the previous last element are not moved
1305 # (they don't have to, rendering will still be correct). So we ignore
1306 # trailing commas and invisible parens.
1307 last_leaf = self.leaves[-2]
1308 ignored_ids.add(id(last_leaf))
1312 # A type comment is uncollapsable if it is attached to a leaf
1313 # that isn't at the end of the line (since that could cause it
1314 # to get associated to a different argument) or if there are
1315 # comments before it (since that could cause it to get hidden
1317 comment_seen = False
1318 for leaf_id, comments in self.comments.items():
1319 for comment in comments:
1320 if is_type_comment(comment):
1321 if leaf_id not in ignored_ids or comment_seen:
1328 def contains_unsplittable_type_ignore(self) -> bool:
1332 # If a 'type: ignore' is attached to the end of a line, we
1333 # can't split the line, because we can't know which of the
1334 # subexpressions the ignore was meant to apply to.
1336 # We only want this to apply to actual physical lines from the
1337 # original source, though: we don't want the presence of a
1338 # 'type: ignore' at the end of a multiline expression to
1339 # justify pushing it all onto one line. Thus we
1340 # (unfortunately) need to check the actual source lines and
1341 # only report an unsplittable 'type: ignore' if this line was
1342 # one line in the original code.
1344 # Grab the first and last line numbers, skipping generated leaves
1345 first_line = next((l.lineno for l in self.leaves if l.lineno != 0), 0)
1346 last_line = next((l.lineno for l in reversed(self.leaves) if l.lineno != 0), 0)
1348 if first_line == last_line:
1349 # We look at the last two leaves since a comma or an
1350 # invisible paren could have been added at the end of the
1352 for node in self.leaves[-2:]:
1353 for comment in self.comments.get(id(node), []):
1354 if is_type_comment(comment, " ignore"):
1359 def contains_multiline_strings(self) -> bool:
1360 for leaf in self.leaves:
1361 if is_multiline_string(leaf):
1366 def maybe_remove_trailing_comma(self, closing: Leaf) -> bool:
1367 """Remove trailing comma if there is one and it's safe."""
1370 and self.leaves[-1].type == token.COMMA
1371 and closing.type in CLOSING_BRACKETS
1375 if closing.type == token.RBRACE:
1376 self.remove_trailing_comma()
1379 if closing.type == token.RSQB:
1380 comma = self.leaves[-1]
1381 if comma.parent and comma.parent.type == syms.listmaker:
1382 self.remove_trailing_comma()
1385 # For parens let's check if it's safe to remove the comma.
1386 # Imports are always safe.
1388 self.remove_trailing_comma()
1391 # Otherwise, if the trailing one is the only one, we might mistakenly
1392 # change a tuple into a different type by removing the comma.
1393 depth = closing.bracket_depth + 1
1395 opening = closing.opening_bracket
1396 for _opening_index, leaf in enumerate(self.leaves):
1403 for leaf in self.leaves[_opening_index + 1 :]:
1407 bracket_depth = leaf.bracket_depth
1408 if bracket_depth == depth and leaf.type == token.COMMA:
1410 if leaf.parent and leaf.parent.type in {
1418 self.remove_trailing_comma()
1423 def append_comment(self, comment: Leaf) -> bool:
1424 """Add an inline or standalone comment to the line."""
1426 comment.type == STANDALONE_COMMENT
1427 and self.bracket_tracker.any_open_brackets()
1432 if comment.type != token.COMMENT:
1436 comment.type = STANDALONE_COMMENT
1440 last_leaf = self.leaves[-1]
1442 last_leaf.type == token.RPAR
1443 and not last_leaf.value
1444 and last_leaf.parent
1445 and len(list(last_leaf.parent.leaves())) <= 3
1446 and not is_type_comment(comment)
1448 # Comments on an optional parens wrapping a single leaf should belong to
1449 # the wrapped node except if it's a type comment. Pinning the comment like
1450 # this avoids unstable formatting caused by comment migration.
1451 if len(self.leaves) < 2:
1452 comment.type = STANDALONE_COMMENT
1455 last_leaf = self.leaves[-2]
1456 self.comments.setdefault(id(last_leaf), []).append(comment)
1459 def comments_after(self, leaf: Leaf) -> List[Leaf]:
1460 """Generate comments that should appear directly after `leaf`."""
1461 return self.comments.get(id(leaf), [])
1463 def remove_trailing_comma(self) -> None:
1464 """Remove the trailing comma and moves the comments attached to it."""
1465 trailing_comma = self.leaves.pop()
1466 trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
1467 self.comments.setdefault(id(self.leaves[-1]), []).extend(
1468 trailing_comma_comments
1471 def is_complex_subscript(self, leaf: Leaf) -> bool:
1472 """Return True iff `leaf` is part of a slice with non-trivial exprs."""
1473 open_lsqb = self.bracket_tracker.get_open_lsqb()
1474 if open_lsqb is None:
1477 subscript_start = open_lsqb.next_sibling
1479 if isinstance(subscript_start, Node):
1480 if subscript_start.type == syms.listmaker:
1483 if subscript_start.type == syms.subscriptlist:
1484 subscript_start = child_towards(subscript_start, leaf)
1485 return subscript_start is not None and any(
1486 n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
1489 def __str__(self) -> str:
1490 """Render the line."""
1494 indent = " " * self.depth
1495 leaves = iter(self.leaves)
1496 first = next(leaves)
1497 res = f"{first.prefix}{indent}{first.value}"
1500 for comment in itertools.chain.from_iterable(self.comments.values()):
1504 def __bool__(self) -> bool:
1505 """Return True if the line has leaves or comments."""
1506 return bool(self.leaves or self.comments)
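# Illustrative example (not part of black itself) of the tuple safety rule in
# maybe_remove_trailing_comma above, exercised end to end through format_str:
# the lone trailing comma is what makes (1,) a one-element tuple, so it is
# never removed.
def _example_tuple_comma_is_kept() -> None:
    assert format_str("x = (1,)\n", mode=FileMode()) == "x = (1,)\n"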
1510 class EmptyLineTracker:
1511 """Provides a stateful method that returns the number of potential extra
1512 empty lines needed before and after the currently processed line.
1514 Note: this tracker works on lines that haven't been split yet. It assumes
1515 the prefix of the first leaf consists of optional newlines. Those newlines
1516 are consumed by `maybe_empty_lines()` and included in the computation.
1519 is_pyi: bool = False
1520 previous_line: Optional[Line] = None
1521 previous_after: int = 0
1522 previous_defs: List[int] = Factory(list)
1524 def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
1525 """Return the number of extra empty lines before and after the `current_line`.
1527 This is for separating `def`, `async def` and `class` with extra empty
1528 lines (two on module-level).
1530 before, after = self._maybe_empty_lines(current_line)
1532 # Black should not insert empty lines at the beginning
1535 if self.previous_line is None
1536 else before - self.previous_after
1538 self.previous_after = after
1539 self.previous_line = current_line
1540 return before, after
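# Illustrative example (not part of black itself) of the tracker's effect on
# the final output: module-level definitions end up separated by exactly two
# blank lines, however many the author typed.
def _example_blank_lines_between_defs() -> None:
    src = "def a():\n    pass\ndef b():\n    pass\n"
    expected = "def a():\n    pass\n\n\ndef b():\n    pass\n"
    assert format_str(src, mode=FileMode()) == expected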
1542 def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
1544 if current_line.depth == 0:
1545 max_allowed = 1 if self.is_pyi else 2
1546 if current_line.leaves:
1547 # Consume the first leaf's extra newlines.
1548 first_leaf = current_line.leaves[0]
1549 before = first_leaf.prefix.count("\n")
1550 before = min(before, max_allowed)
1551 first_leaf.prefix = ""
1554 depth = current_line.depth
1555 while self.previous_defs and self.previous_defs[-1] >= depth:
1556 self.previous_defs.pop()
1558 before = 0 if depth else 1
1560 before = 1 if depth else 2
1561 if current_line.is_decorator or current_line.is_def or current_line.is_class:
1562 return self._maybe_empty_lines_for_class_or_def(current_line, before)
1566 and self.previous_line.is_import
1567 and not current_line.is_import
1568 and depth == self.previous_line.depth
1570 return (before or 1), 0
1574 and self.previous_line.is_class
1575 and current_line.is_triple_quoted_string
1581 def _maybe_empty_lines_for_class_or_def(
1582 self, current_line: Line, before: int
1583 ) -> Tuple[int, int]:
1584 if not current_line.is_decorator:
1585 self.previous_defs.append(current_line.depth)
1586 if self.previous_line is None:
1587 # Don't insert empty lines before the first line in the file.
1590 if self.previous_line.is_decorator:
1593 if self.previous_line.depth < current_line.depth and (
1594 self.previous_line.is_class or self.previous_line.is_def
1599 self.previous_line.is_comment
1600 and self.previous_line.depth == current_line.depth
1606 if self.previous_line.depth > current_line.depth:
1608 elif current_line.is_class or self.previous_line.is_class:
1609 if current_line.is_stub_class and self.previous_line.is_stub_class:
1610 # No blank line between classes with an empty body
1614 elif current_line.is_def and not self.previous_line.is_def:
1615 # Blank line between a block of functions and a block of non-functions
1621 if current_line.depth and newlines:
1627 class LineGenerator(Visitor[Line]):
1628 """Generates reformatted Line objects. Empty lines are not emitted.
1630 Note: destroys the tree it's visiting by mutating prefixes of its leaves
1631 in ways that will no longer stringify to valid Python code on the tree.
1634 is_pyi: bool = False
1635 normalize_strings: bool = True
1636 current_line: Line = Factory(Line)
1637 remove_u_prefix: bool = False
1639 def line(self, indent: int = 0) -> Iterator[Line]:
1642 If the line is empty, only emit if it makes sense.
1643 If the line is too long, split it first and then generate.
1645 If any lines were generated, set up a new current_line.
1647 if not self.current_line:
1648 self.current_line.depth += indent
1649 return # Line is empty, don't emit. Creating a new one unnecessary.
1651 complete_line = self.current_line
1652 self.current_line = Line(depth=complete_line.depth + indent)
1655 def visit_default(self, node: LN) -> Iterator[Line]:
1656 """Default `visit_*()` implementation. Recurses to children of `node`."""
1657 if isinstance(node, Leaf):
1658 any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
1659 for comment in generate_comments(node):
1660 if any_open_brackets:
1661 # any comment within brackets is subject to splitting
1662 self.current_line.append(comment)
1663 elif comment.type == token.COMMENT:
1664 # regular trailing comment
1665 self.current_line.append(comment)
1666 yield from self.line()
1669 # regular standalone comment
1670 yield from self.line()
1672 self.current_line.append(comment)
1673 yield from self.line()
1675 normalize_prefix(node, inside_brackets=any_open_brackets)
1676 if self.normalize_strings and node.type == token.STRING:
1677 normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix)
1678 normalize_string_quotes(node)
1679 if node.type == token.NUMBER:
1680 normalize_numeric_literal(node)
1681 if node.type not in WHITESPACE:
1682 self.current_line.append(node)
1683 yield from super().visit_default(node)
1685 def visit_atom(self, node: Node) -> Iterator[Line]:
1686 # Always make parentheses invisible around a single node, because it should
1687 # not be needed (except in the case of yield, where removing the parentheses
1688 # produces a SyntaxError).
1690 len(node.children) == 3
1691 and isinstance(node.children[0], Leaf)
1692 and node.children[0].type == token.LPAR
1693 and isinstance(node.children[2], Leaf)
1694 and node.children[2].type == token.RPAR
1695 and isinstance(node.children[1], Leaf)
1697 node.children[1].type == token.NAME
1698 and node.children[1].value == "yield"
1701 node.children[0].value = ""
1702 node.children[2].value = ""
1703 yield from super().visit_default(node)
1705 def visit_factor(self, node: Node) -> Iterator[Line]:
1706 """Force parentheses between a unary op and a binary power:
1708 -2 ** 8 -> -(2 ** 8)
1710 child = node.children[1]
1711 if child.type == syms.power and len(child.children) == 3:
1712 lpar = Leaf(token.LPAR, "(")
1713 rpar = Leaf(token.RPAR, ")")
1714 index = child.remove() or 0
1715 node.insert_child(index, Node(syms.atom, [lpar, child, rpar]))
1716 yield from self.visit_default(node)
1718 def visit_INDENT(self, node: Node) -> Iterator[Line]:
1719 """Increase indentation level, maybe yield a line."""
1720 # In blib2to3 INDENT never holds comments.
1721 yield from self.line(+1)
1722 yield from self.visit_default(node)
1724 def visit_DEDENT(self, node: Node) -> Iterator[Line]:
1725 """Decrease indentation level, maybe yield a line."""
1726 # The current line might still wait for trailing comments. At DEDENT time
1727 # there won't be any (they would be prefixes on the preceding NEWLINE).
1728 # Emit the line then.
1729 yield from self.line()
1731 # While DEDENT has no value, its prefix may contain standalone comments
1732 # that belong to the current indentation level. Get 'em.
1733 yield from self.visit_default(node)
1735 # Finally, emit the dedent.
1736 yield from self.line(-1)
1739 self, node: Node, keywords: Set[str], parens: Set[str]
1740 ) -> Iterator[Line]:
1741 """Visit a statement.
1743 This implementation is shared for `if`, `while`, `for`, `try`, `except`,
1744 `def`, `with`, `class`, `assert` and assignments.
1746 The relevant Python language `keywords` for a given statement will be
1747 NAME leaves within it. This method puts those on a separate line.
1749 `parens` holds a set of string leaf values immediately after which
1750 invisible parens should be put.
1752 normalize_invisible_parens(node, parens_after=parens)
1753 for child in node.children:
1754 if child.type == token.NAME and child.value in keywords: # type: ignore
1755 yield from self.line()
1757 yield from self.visit(child)
1759 def visit_suite(self, node: Node) -> Iterator[Line]:
1760 """Visit a suite."""
1761 if self.is_pyi and is_stub_suite(node):
1762 yield from self.visit(node.children[2])
1764 yield from self.visit_default(node)
1766 def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
1767 """Visit a statement without nested statements."""
1768 is_suite_like = node.parent and node.parent.type in STATEMENT
1770 if self.is_pyi and is_stub_body(node):
1771 yield from self.visit_default(node)
1773 yield from self.line(+1)
1774 yield from self.visit_default(node)
1775 yield from self.line(-1)
1778 if not self.is_pyi or not node.parent or not is_stub_suite(node.parent):
1779 yield from self.line()
1780 yield from self.visit_default(node)
1782 def visit_async_stmt(self, node: Node) -> Iterator[Line]:
1783 """Visit `async def`, `async for`, `async with`."""
1784 yield from self.line()
1786 children = iter(node.children)
1787 for child in children:
1788 yield from self.visit(child)
1790 if child.type == token.ASYNC:
1793 internal_stmt = next(children)
1794 for child in internal_stmt.children:
1795 yield from self.visit(child)
1797 def visit_decorators(self, node: Node) -> Iterator[Line]:
1798 """Visit decorators."""
1799 for child in node.children:
1800 yield from self.line()
1801 yield from self.visit(child)
1803 def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
1804 """Remove a semicolon and put the other statement on a separate line."""
1805 yield from self.line()
1807 def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
1808 """End of file. Process outstanding comments and end with a newline."""
1809 yield from self.visit_default(leaf)
1810 yield from self.line()
1812 def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
1813 if not self.current_line.bracket_tracker.any_open_brackets():
1814 yield from self.line()
1815 yield from self.visit_default(leaf)
1817 def __attrs_post_init__(self) -> None:
1818 """You are in a twisty little maze of passages."""
1821 self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
1822 self.visit_if_stmt = partial(
1823 v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
1825 self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
1826 self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
1827 self.visit_try_stmt = partial(
1828 v, keywords={"try", "except", "else", "finally"}, parens=Ø
1830 self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
1831 self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
1832 self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
1833 self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
1834 self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
1835 self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
1836 self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
1837 self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
1838 self.visit_async_funcdef = self.visit_async_stmt
1839 self.visit_decorated = self.visit_decorators
1842 IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist}
1843 BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE}
1844 OPENING_BRACKETS = set(BRACKET.keys())
1845 CLOSING_BRACKETS = set(BRACKET.values())
1846 BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
1847 ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT}
1850 def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901
1851 """Return whitespace prefix if needed for the given `leaf`.
1853 `complex_subscript` signals whether the given leaf is part of a subscription
1854 which has non-trivial arguments, like arithmetic expressions or function calls.
1862 if t in ALWAYS_NO_SPACE:
1865 if t == token.COMMENT:
1868 assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
1869 if t == token.COLON and p.type not in {
1876 prev = leaf.prev_sibling
1878 prevp = preceding_leaf(p)
1879 if not prevp or prevp.type in OPENING_BRACKETS:
1882 if t == token.COLON:
1883 if prevp.type == token.COLON:
1886 elif prevp.type != token.COMMA and not complex_subscript:
1891 if prevp.type == token.EQUAL:
1893 if prevp.parent.type in {
1901 elif prevp.parent.type == syms.typedargslist:
1902 # A bit hacky: if the equal sign has whitespace, it means we
1903 # previously found it's a typed argument. So, we're using
1907 elif prevp.type in VARARGS_SPECIALS:
1908 if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS):
1911 elif prevp.type == token.COLON:
1912 if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
1913 return SPACE if complex_subscript else NO
1917 and prevp.parent.type == syms.factor
1918 and prevp.type in MATH_OPERATORS
1923 prevp.type == token.RIGHTSHIFT
1925 and prevp.parent.type == syms.shift_expr
1926 and prevp.prev_sibling
1927 and prevp.prev_sibling.type == token.NAME
1928 and prevp.prev_sibling.value == "print" # type: ignore
1930 # Python 2 print chevron
1933 elif prev.type in OPENING_BRACKETS:
1936 if p.type in {syms.parameters, syms.arglist}:
1937 # untyped function signatures or calls
1938 if not prev or prev.type != token.COMMA:
1941 elif p.type == syms.varargslist:
1943 if prev and prev.type != token.COMMA:
1946 elif p.type == syms.typedargslist:
1947 # typed function signatures
1951 if t == token.EQUAL:
1952 if prev.type != syms.tname:
1955 elif prev.type == token.EQUAL:
1956 # A bit hacky: if the equal sign has whitespace, it means we
1957 # previously found it's a typed argument. So, we're using that, too.
1960 elif prev.type != token.COMMA:
1963 elif p.type == syms.tname:
1966 prevp = preceding_leaf(p)
1967 if not prevp or prevp.type != token.COMMA:
1970 elif p.type == syms.trailer:
1971 # attributes and calls
1972 if t == token.LPAR or t == token.RPAR:
1977 prevp = preceding_leaf(p)
1978 if not prevp or prevp.type != token.NUMBER:
1981 elif t == token.LSQB:
1984 elif prev.type != token.COMMA:
1987 elif p.type == syms.argument:
1989 if t == token.EQUAL:
1993 prevp = preceding_leaf(p)
1994 if not prevp or prevp.type == token.LPAR:
1997 elif prev.type in {token.EQUAL} | VARARGS_SPECIALS:
2000 elif p.type == syms.decorator:
2004 elif p.type == syms.dotted_name:
2008 prevp = preceding_leaf(p)
2009 if not prevp or prevp.type == token.AT or prevp.type == token.DOT:
2012 elif p.type == syms.classdef:
2016 if prev and prev.type == token.LPAR:
2019 elif p.type in {syms.subscript, syms.sliceop}:
2022 assert p.parent is not None, "subscripts are always parented"
2023 if p.parent.type == syms.subscriptlist:
2028 elif not complex_subscript:
2031 elif p.type == syms.atom:
2032 if prev and t == token.DOT:
2033 # dots, but not the first one.
2036 elif p.type == syms.dictsetmaker:
2038 if prev and prev.type == token.DOUBLESTAR:
2041 elif p.type in {syms.factor, syms.star_expr}:
2044 prevp = preceding_leaf(p)
2045 if not prevp or prevp.type in OPENING_BRACKETS:
2048 prevp_parent = prevp.parent
2049 assert prevp_parent is not None
2050 if prevp.type == token.COLON and prevp_parent.type in {
2056 elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
2059 elif t in {token.NAME, token.NUMBER, token.STRING}:
2062 elif p.type == syms.import_from:
2064 if prev and prev.type == token.DOT:
2067 elif t == token.NAME:
2071 if prev and prev.type == token.DOT:
2074 elif p.type == syms.sliceop:
2080 def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
2081 """Return the first leaf that precedes `node`, if any."""
2083 res = node.prev_sibling
2085 if isinstance(res, Leaf):
2089 return list(res.leaves())[-1]
2098 def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]:
2099 """Return the child of `ancestor` that contains `descendant`."""
2100 node: Optional[LN] = descendant
2101 while node and node.parent != ancestor:
2106 def container_of(leaf: Leaf) -> LN:
2107 """Return `leaf` or one of its ancestors that is the topmost container of it.
2109 By "container" we mean a node where `leaf` is the very first child.
2111 same_prefix = leaf.prefix
2112 container: LN = leaf
2114 parent = container.parent
2118 if parent.children[0].prefix != same_prefix:
2121 if parent.type == syms.file_input:
2124 if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS:
2131 def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
2132 """Return the priority of the `leaf` delimiter, given a line break after it.
2134 The delimiter priorities returned here are from those delimiters that would
2135 cause a line break after themselves.
2137 Higher numbers are higher priority.
2139 if leaf.type == token.COMMA:
2140 return COMMA_PRIORITY
2145 def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
2146 """Return the priority of the `leaf` delimiter, given a line break before it.
2148 The delimiter priorities returned here are from those delimiters that would
2149 cause a line break before themselves.
2151 Higher numbers are higher priority.
2153 if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
2154 # * and ** might also be MATH_OPERATORS but in this case they are not.
2155 # Don't treat them as a delimiter.
2159 leaf.type == token.DOT
2161 and leaf.parent.type not in {syms.import_from, syms.dotted_name}
2162 and (previous is None or previous.type in CLOSING_BRACKETS)
2167 leaf.type in MATH_OPERATORS
2169 and leaf.parent.type not in {syms.factor, syms.star_expr}
2171 return MATH_PRIORITIES[leaf.type]
2173 if leaf.type in COMPARATORS:
2174 return COMPARATOR_PRIORITY
2177 leaf.type == token.STRING
2178 and previous is not None
2179 and previous.type == token.STRING
2181 return STRING_PRIORITY
2183 if leaf.type not in {token.NAME, token.ASYNC}:
2189 and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
2190 or leaf.type == token.ASYNC
2193 not isinstance(leaf.prev_sibling, Leaf)
2194 or leaf.prev_sibling.value != "async"
2196 return COMPREHENSION_PRIORITY
2201 and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
2203 return COMPREHENSION_PRIORITY
2205 if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test:
2206 return TERNARY_PRIORITY
2208 if leaf.value == "is":
2209 return COMPARATOR_PRIORITY
2214 and leaf.parent.type in {syms.comp_op, syms.comparison}
2216 previous is not None
2217 and previous.type == token.NAME
2218 and previous.value == "not"
2221 return COMPARATOR_PRIORITY
2226 and leaf.parent.type == syms.comp_op
2228 previous is not None
2229 and previous.type == token.NAME
2230 and previous.value == "is"
2233 return COMPARATOR_PRIORITY
2235 if leaf.value in LOGIC_OPERATORS and leaf.parent:
2236 return LOGIC_PRIORITY
2241 FMT_OFF = {"# fmt: off", "# fmt:off", "# yapf: disable"}
2242 FMT_ON = {"# fmt: on", "# fmt:on", "# yapf: enable"}
2245 def generate_comments(leaf: LN) -> Iterator[Leaf]:
2246 """Clean the prefix of the `leaf` and generate comments from it, if any.
2248 Comments in lib2to3 are shoved into the whitespace prefix. This happens
2249 in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation
2250 move because it does away with modifying the grammar to include all the
2251 possible places in which comments can be placed.
2253 The sad consequence for us though is that comments don't "belong" anywhere.
2254 This is why this function generates simple parentless Leaf objects for
2255 comments. We simply don't know what the correct parent should be.
2257 No matter though, we can live without this. We really only need to
2258 differentiate between inline and standalone comments. The latter don't
2259 share the line with any code.
2261 Inline comments are emitted as regular token.COMMENT leaves. Standalone
2262 are emitted with a fake STANDALONE_COMMENT token identifier.
2264 for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
2265 yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines)
2270 """Describes a piece of syntax that is a comment.
2272 It's not a :class:`blib2to3.pytree.Leaf` so that:
2274 * it can be cached (`Leaf` objects should not be reused more than once as
2275 they store their lineno, column, prefix, and parent information);
2276 * `newlines` and `consumed` fields are kept separate from the `value`. This
2277 simplifies handling of special marker comments like ``# fmt: off/on``.
2280 type: int # token.COMMENT or STANDALONE_COMMENT
2281 value: str # content of the comment
2282 newlines: int # how many newlines before the comment
2283 consumed: int # how many characters of the original leaf's prefix did we consume
2286 @lru_cache(maxsize=4096)
2287 def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
2288 """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
2289 result: List[ProtoComment] = []
2290 if not prefix or "#" not in prefix:
2296 for index, line in enumerate(prefix.split("\n")):
2297 consumed += len(line) + 1 # adding the length of the split '\n'
2298 line = line.lstrip()
2301 if not line.startswith("#"):
2302 # Escaped newlines outside of a comment are not really newlines at
2303 # all. We treat a single-line comment following an escaped newline
2304 # as a simple trailing comment.
2305 if line.endswith("\\"):
2309 if index == ignored_lines and not is_endmarker:
2310 comment_type = token.COMMENT # simple trailing comment
2312 comment_type = STANDALONE_COMMENT
2313 comment = make_comment(line)
2316 type=comment_type, value=comment, newlines=nlines, consumed=consumed
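For illustration, list_comments() can be exercised directly on a raw whitespace prefix; a minimal sketch, assuming this module is importable as `black`:

import black

comments = black.list_comments("  # leading comment\n", is_endmarker=False)
assert len(comments) == 1
assert comments[0].value == "# leading comment"  # normalized via make_comment()
assert comments[0].newlines == 0                 # no blank lines before the comment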
2323 def make_comment(content: str) -> str:
2324 """Return a consistently formatted comment from the given `content` string.
2326 All comments (except for "##", "#!", "#:", "#'", "#%%") should have a single
2327 space between the hash sign and the content.
2329 If `content` didn't start with a hash sign, one is provided.
2331 content = content.rstrip()
2335 if content[0] == "#":
2336 content = content[1:]
2337 if content and content[0] not in " !:#'%":
2338 content = " " + content
2339 return "#" + content
2345 inner: bool = False,
2346 features: Collection[Feature] = (),
2347 ) -> Iterator[Line]:
2348 """Split a `line` into potentially many lines.
2350 They should fit in the allotted `line_length` but might not be able to.
2351 `inner` signifies that there was a pair of brackets somewhere around the
2352 current `line`, possibly transitively. This means we can fall back to splitting
2353 by delimiters if the LHS/RHS don't yield any results.
2355 `features` are syntactical features that may be used in the output.
2361 line_str = str(line).strip("\n")
2364 not line.contains_uncollapsable_type_comments()
2365 and not line.should_explode
2367 is_line_short_enough(line, line_length=line_length, line_str=line_str)
2368 or line.contains_unsplittable_type_ignore()
2374 split_funcs: List[SplitFunc]
2376 split_funcs = [left_hand_split]
2379 def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]:
2380 for omit in generate_trailers_to_omit(line, line_length):
2381 lines = list(right_hand_split(line, line_length, features, omit=omit))
2382 if is_line_short_enough(lines[0], line_length=line_length):
2386 # All splits failed, best effort split with no omits.
2387 # This mostly happens to multiline strings that are by definition
2388 # reported as not fitting on a single line.
2389 yield from right_hand_split(line, line_length, features=features)
2391 if line.inside_brackets:
2392 split_funcs = [delimiter_split, standalone_comment_split, rhs]
2395 for split_func in split_funcs:
2396 # We are accumulating lines in `result` because we might want to abort
2397 # mission and return the original line in the end, or attempt a different
2399 result: List[Line] = []
2401 for l in split_func(line, features):
2402 if str(l).strip("\n") == line_str:
2403 raise CannotSplit("Split function returned an unchanged result")
2407 l, line_length=line_length, inner=True, features=features
2421 def left_hand_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
2422 """Split line into many lines, starting with the first matching bracket pair.
2424 Note: this usually looks weird; use it only for function definitions.
2425 Prefer RHS otherwise. This is why this function is not symmetrical with
2426 :func:`right_hand_split` which also handles optional parentheses.
2428 tail_leaves: List[Leaf] = []
2429 body_leaves: List[Leaf] = []
2430 head_leaves: List[Leaf] = []
2431 current_leaves = head_leaves
2432 matching_bracket = None
2433 for leaf in line.leaves:
2435 current_leaves is body_leaves
2436 and leaf.type in CLOSING_BRACKETS
2437 and leaf.opening_bracket is matching_bracket
2439 current_leaves = tail_leaves if body_leaves else head_leaves
2440 current_leaves.append(leaf)
2441 if current_leaves is head_leaves:
2442 if leaf.type in OPENING_BRACKETS:
2443 matching_bracket = leaf
2444 current_leaves = body_leaves
2445 if not matching_bracket:
2446 raise CannotSplit("No brackets found")
2448 head = bracket_split_build_line(head_leaves, line, matching_bracket)
2449 body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True)
2450 tail = bracket_split_build_line(tail_leaves, line, matching_bracket)
2451 bracket_split_succeeded_or_raise(head, body, tail)
2452 for result in (head, body, tail):
2457 def right_hand_split(
2460 features: Collection[Feature] = (),
2461 omit: Collection[LeafID] = (),
2462 ) -> Iterator[Line]:
2463 """Split line into many lines, starting with the last matching bracket pair.
2465 If the split was by optional parentheses, attempt splitting without them, too.
2466 `omit` is a collection of closing bracket IDs that shouldn't be considered for
2469 Note: running this function modifies `bracket_depth` on the leaves of `line`.
2471 tail_leaves: List[Leaf] = []
2472 body_leaves: List[Leaf] = []
2473 head_leaves: List[Leaf] = []
2474 current_leaves = tail_leaves
2475 opening_bracket = None
2476 closing_bracket = None
2477 for leaf in reversed(line.leaves):
2478 if current_leaves is body_leaves:
2479 if leaf is opening_bracket:
2480 current_leaves = head_leaves if body_leaves else tail_leaves
2481 current_leaves.append(leaf)
2482 if current_leaves is tail_leaves:
2483 if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
2484 opening_bracket = leaf.opening_bracket
2485 closing_bracket = leaf
2486 current_leaves = body_leaves
2487 if not (opening_bracket and closing_bracket and head_leaves):
2488 # If there is no opening or closing bracket, the split failed and
2489 # all content is in the tail. Otherwise, if `head_leaves` are empty, it means
2490 # the matching `opening_bracket` wasn't available on `line` anymore.
2491 raise CannotSplit("No brackets found")
2493 tail_leaves.reverse()
2494 body_leaves.reverse()
2495 head_leaves.reverse()
2496 head = bracket_split_build_line(head_leaves, line, opening_bracket)
2497 body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True)
2498 tail = bracket_split_build_line(tail_leaves, line, opening_bracket)
2499 bracket_split_succeeded_or_raise(head, body, tail)
2501 # the body shouldn't be exploded
2502 not body.should_explode
2503 # the opening bracket is an optional paren
2504 and opening_bracket.type == token.LPAR
2505 and not opening_bracket.value
2506 # the closing bracket is an optional paren
2507 and closing_bracket.type == token.RPAR
2508 and not closing_bracket.value
2509 # it's not an import (optional parens are the only thing we can split on
2510 # in this case; attempting a split without them is a waste of time)
2511 and not line.is_import
2512 # there are no standalone comments in the body
2513 and not body.contains_standalone_comments(0)
2514 # and we can actually remove the parens
2515 and can_omit_invisible_parens(body, line_length)
2517 omit = {id(closing_bracket), *omit}
2519 yield from right_hand_split(line, line_length, features=features, omit=omit)
2525 or is_line_short_enough(body, line_length=line_length)
2528 "Splitting failed, body is still too long and can't be split."
2531 elif head.contains_multiline_strings() or tail.contains_multiline_strings():
2533 "The current optional pair of parentheses is bound to fail to "
2534 "satisfy the splitting algorithm because the head or the tail "
2535 "contains multiline strings which by definition never fit one "
2539 ensure_visible(opening_bracket)
2540 ensure_visible(closing_bracket)
2541 for result in (head, body, tail):
2546 def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
2547 """Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
2549 Do nothing otherwise.
2551 A left- or right-hand split is based on a pair of brackets. Content before
2552 (and including) the opening bracket is left on one line, content inside the
2553 brackets is put on a separate line, and finally content starting with and
2554 following the closing bracket is put on a separate line.
2556 Those are called `head`, `body`, and `tail`, respectively. If the split
2557 produced the same line (all content in `head`) or ended up with an empty `body`
2558 and the `tail` is just the closing bracket, then it's considered failed.
2560 tail_len = len(str(tail).strip())
2563 raise CannotSplit("Splitting brackets produced the same line")
2567 f"Splitting brackets on an empty body to save "
2568 f"{tail_len} characters is not worth it"
2572 def bracket_split_build_line(
2573 leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False
2575 """Return a new line with given `leaves` and respective comments from `original`.
2577 If `is_body` is True, the result line is one-indented inside brackets and as such
2578 has its first leaf's prefix normalized and a trailing comma added when expected.
2580 result = Line(depth=original.depth)
2582 result.inside_brackets = True
2585 # Since body is a new indent level, remove spurious leading whitespace.
2586 normalize_prefix(leaves[0], inside_brackets=True)
2587 # Ensure a trailing comma for imports and standalone function arguments, but
2588 # be careful not to add one after any comments.
2589 no_commas = original.is_def and not any(
2590 l.type == token.COMMA for l in leaves
2593 if original.is_import or no_commas:
2594 for i in range(len(leaves) - 1, -1, -1):
2595 if leaves[i].type == STANDALONE_COMMENT:
2597 elif leaves[i].type == token.COMMA:
2600 leaves.insert(i + 1, Leaf(token.COMMA, ","))
2604 result.append(leaf, preformatted=True)
2605 for comment_after in original.comments_after(leaf):
2606 result.append(comment_after, preformatted=True)
2608 result.should_explode = should_explode(result, opening_bracket)
2612 def dont_increase_indentation(split_func: SplitFunc) -> SplitFunc:
2613 """Normalize prefix of the first leaf in every line returned by `split_func`.
2615 This is a decorator over relevant split functions.
2619 def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
2620 for l in split_func(line, features):
2621 normalize_prefix(l.leaves[0], inside_brackets=True)
2624 return split_wrapper
2627 @dont_increase_indentation
2628 def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
2629 """Split according to delimiters of the highest priority.
2631 If the appropriate Features are given, the split will add trailing commas
2632 also in function signatures and calls that contain `*` and `**`.
2635 last_leaf = line.leaves[-1]
2637 raise CannotSplit("Line empty")
2639 bt = line.bracket_tracker
2641 delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
2643 raise CannotSplit("No delimiters found")
2645 if delimiter_priority == DOT_PRIORITY:
2646 if bt.delimiter_count_with_priority(delimiter_priority) == 1:
2647 raise CannotSplit("Splitting a single attribute from its owner looks wrong")
2649 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2650 lowest_depth = sys.maxsize
2651 trailing_comma_safe = True
2653 def append_to_line(leaf: Leaf) -> Iterator[Line]:
2654 """Append `leaf` to current line or to new line if appending impossible."""
2655 nonlocal current_line
2657 current_line.append_safe(leaf, preformatted=True)
2661 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2662 current_line.append(leaf)
2664 for leaf in line.leaves:
2665 yield from append_to_line(leaf)
2667 for comment_after in line.comments_after(leaf):
2668 yield from append_to_line(comment_after)
2670 lowest_depth = min(lowest_depth, leaf.bracket_depth)
2671 if leaf.bracket_depth == lowest_depth:
2672 if is_vararg(leaf, within={syms.typedargslist}):
2673 trailing_comma_safe = (
2674 trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features
2676 elif is_vararg(leaf, within={syms.arglist, syms.argument}):
2677 trailing_comma_safe = (
2678 trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features
2681 leaf_priority = bt.delimiters.get(id(leaf))
2682 if leaf_priority == delimiter_priority:
2685 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2689 and delimiter_priority == COMMA_PRIORITY
2690 and current_line.leaves[-1].type != token.COMMA
2691 and current_line.leaves[-1].type != STANDALONE_COMMENT
2693 current_line.append(Leaf(token.COMMA, ","))
2697 @dont_increase_indentation
2698 def standalone_comment_split(
2699 line: Line, features: Collection[Feature] = ()
2700 ) -> Iterator[Line]:
2701 """Split standalone comments from the rest of the line."""
2702 if not line.contains_standalone_comments(0):
2703 raise CannotSplit("Line does not have any standalone comments")
2705 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2707 def append_to_line(leaf: Leaf) -> Iterator[Line]:
2708 """Append `leaf` to current line or to new line if appending impossible."""
2709 nonlocal current_line
2711 current_line.append_safe(leaf, preformatted=True)
2715 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2716 current_line.append(leaf)
2718 for leaf in line.leaves:
2719 yield from append_to_line(leaf)
2721 for comment_after in line.comments_after(leaf):
2722 yield from append_to_line(comment_after)
2728 def is_import(leaf: Leaf) -> bool:
2729 """Return True if the given leaf starts an import statement."""
2736 (v == "import" and p and p.type == syms.import_name)
2737 or (v == "from" and p and p.type == syms.import_from)
2742 def is_type_comment(leaf: Leaf, suffix: str = "") -> bool:
2743 """Return True if the given leaf is a special comment.
2744 Only returns True for type comments for now."""
2747 return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith(
2752 def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
2753 """Leave existing extra newlines if not `inside_brackets`. Remove everything
2756 Note: don't use backslashes for formatting or you'll lose your voting rights.
2758 if not inside_brackets:
2759 spl = leaf.prefix.split("#")
2760 if "\\" not in spl[0]:
2761 nl_count = spl[-1].count("\n")
2764 leaf.prefix = "\n" * nl_count
2770 def normalize_string_prefix(leaf: Leaf, remove_u_prefix: bool = False) -> None:
2771 """Make all string prefixes lowercase.
2773 If remove_u_prefix is given, also removes any u prefix from the string.
2775 Note: Mutates its argument.
2777 match = re.match(r"^([furbFURB]*)(.*)$", leaf.value, re.DOTALL)
2778 assert match is not None, f"failed to match string {leaf.value!r}"
2779 orig_prefix = match.group(1)
2780 new_prefix = orig_prefix.lower()
2782 new_prefix = new_prefix.replace("u", "")
2783 leaf.value = f"{new_prefix}{match.group(2)}"
2786 def normalize_string_quotes(leaf: Leaf) -> None:
2787 """Prefer double quotes but only if it doesn't cause more escaping.
2789 Adds or removes backslashes as appropriate. Doesn't parse and fix
2790 strings nested in f-strings (yet).
2792 Note: Mutates its argument.
2794 value = leaf.value.lstrip("furbFURB")
2795 if value[:3] == '"""':
2798 elif value[:3] == "'''":
2801 elif value[0] == '"':
2807 first_quote_pos = leaf.value.find(orig_quote)
2808 if first_quote_pos == -1:
2809 return # There's an internal error
2811 prefix = leaf.value[:first_quote_pos]
2812 unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
2813 escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
2814 escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}")
2815 body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)]
2816 if "r" in prefix.casefold():
2817 if unescaped_new_quote.search(body):
2818 # There's at least one unescaped new_quote in this raw string
2819 # so converting is impossible
2822 # Do not introduce or remove backslashes in raw strings
2825 # remove unnecessary escapes
2826 new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
2827 if body != new_body:
2828 # Consider the string without unnecessary escapes as the original
2830 leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}"
2831 new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
2832 new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)
2833 if "f" in prefix.casefold():
2834 matches = re.findall(
2836 (?:[^{]|^)\{ # start of the string or a non-{ followed by a single {
2837 ([^{].*?) # contents of the brackets except if begins with {{
2838 \}(?:[^}]|$) # A } followed by end of the string or a non-}
2845 # Do not introduce backslashes in interpolated expressions
2847 if new_quote == '"""' and new_body[-1:] == '"':
2849 new_body = new_body[:-1] + '\\"'
2850 orig_escape_count = body.count("\\")
2851 new_escape_count = new_body.count("\\")
2852 if new_escape_count > orig_escape_count:
2853 return # Do not introduce more escaping
2855 if new_escape_count == orig_escape_count and orig_quote == '"':
2856 return # Prefer double quotes
2858 leaf.value = f"{prefix}{new_quote}{new_body}{new_quote}"
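A small sketch of the behaviour described above, assuming `black` and `blib2to3` are importable; quotes are only switched when that does not add escapes:

import black
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf

leaf = Leaf(token.STRING, "'hello'")
black.normalize_string_quotes(leaf)
assert leaf.value == '"hello"'        # plain single quotes become double quotes

leaf = Leaf(token.STRING, "'say \"hi\"'")
black.normalize_string_quotes(leaf)
assert leaf.value == "'say \"hi\"'"   # switching would add backslashes, so it is left alone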
2861 def normalize_numeric_literal(leaf: Leaf) -> None:
2862 """Normalizes numeric (float, int, and complex) literals.
2864 All letters used in the representation are normalized to lowercase (except
2865 in Python 2 long literals).
2867 text = leaf.value.lower()
2868 if text.startswith(("0o", "0b")):
2869 # Leave octal and binary literals alone.
2871 elif text.startswith("0x"):
2872 # Change hex literals to upper case.
2873 before, after = text[:2], text[2:]
2874 text = f"{before}{after.upper()}"
2876 before, after = text.split("e")
2878 if after.startswith("-"):
2881 elif after.startswith("+"):
2883 before = format_float_or_int_string(before)
2884 text = f"{before}e{sign}{after}"
2885 elif text.endswith(("j", "l")):
2888 # Capitalize in "2L" because "l" looks too similar to "1".
2891 text = f"{format_float_or_int_string(number)}{suffix}"
2893 text = format_float_or_int_string(text)
2897 def format_float_or_int_string(text: str) -> str:
2898 """Formats a float string like "1.0"."""
2902 before, after = text.split(".")
2903 return f"{before or 0}.{after or 0}"
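For example (a sketch, assuming the module is importable as `black`):

import black

assert black.format_float_or_int_string("1.") == "1.0"
assert black.format_float_or_int_string(".5") == "0.5"
assert black.format_float_or_int_string("10") == "10"   # integers pass through unchanged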
2906 def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
2907 """Make existing optional parentheses invisible or create new ones.
2909 `parens_after` is a set of string leaf values immediately after which parens
2912 Standardizes on visible parentheses for single-element tuples, and keeps
2913 existing visible parentheses for other tuples and generator expressions.
2915 for pc in list_comments(node.prefix, is_endmarker=False):
2916 if pc.value in FMT_OFF:
2917 # This `node` has a prefix with `# fmt: off`, don't mess with parens.
2921 for index, child in enumerate(list(node.children)):
2922 # Add parentheses around long tuple unpacking in assignments.
2925 and isinstance(child, Node)
2926 and child.type == syms.testlist_star_expr
2931 if is_walrus_assignment(child):
2933 if child.type == syms.atom:
2934 # Determines if the underlying atom should be surrounded with
2935 # invisible parens - also makes parens invisible recursively
2936 # within the atom and removes repeated invisible parens within
2938 should_surround_with_parens = maybe_make_parens_invisible_in_atom(
2942 if should_surround_with_parens:
2943 lpar = Leaf(token.LPAR, "")
2944 rpar = Leaf(token.RPAR, "")
2945 index = child.remove() or 0
2946 node.insert_child(index, Node(syms.atom, [lpar, child, rpar]))
2947 elif is_one_tuple(child):
2948 # wrap child in visible parentheses
2949 lpar = Leaf(token.LPAR, "(")
2950 rpar = Leaf(token.RPAR, ")")
2952 node.insert_child(index, Node(syms.atom, [lpar, child, rpar]))
2953 elif node.type == syms.import_from:
2954 # "import from" nodes store parentheses directly as part of
2956 if child.type == token.LPAR:
2957 # make parentheses invisible
2958 child.value = "" # type: ignore
2959 node.children[-1].value = "" # type: ignore
2960 elif child.type != token.STAR:
2961 # insert invisible parentheses
2962 node.insert_child(index, Leaf(token.LPAR, ""))
2963 node.append_child(Leaf(token.RPAR, ""))
2966 elif not (isinstance(child, Leaf) and is_multiline_string(child)):
2967 # wrap child in invisible parentheses
2968 lpar = Leaf(token.LPAR, "")
2969 rpar = Leaf(token.RPAR, "")
2970 index = child.remove() or 0
2971 prefix = child.prefix
2973 new_child = Node(syms.atom, [lpar, child, rpar])
2974 new_child.prefix = prefix
2975 node.insert_child(index, new_child)
2977 check_lpar = isinstance(child, Leaf) and child.value in parens_after
2980 def normalize_fmt_off(node: Node) -> None:
2981 """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
2984 try_again = convert_one_fmt_off_pair(node)
2987 def convert_one_fmt_off_pair(node: Node) -> bool:
2988 """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
2990 Returns True if a pair was converted.
2992 for leaf in node.leaves():
2993 previous_consumed = 0
2994 for comment in list_comments(leaf.prefix, is_endmarker=False):
2995 if comment.value in FMT_OFF:
2996 # We only want standalone comments. If there's no previous leaf or
2997 # the previous leaf is indentation, it's a standalone comment in
2999 if comment.type != STANDALONE_COMMENT:
3000 prev = preceding_leaf(leaf)
3001 if prev and prev.type not in WHITESPACE:
3004 ignored_nodes = list(generate_ignored_nodes(leaf))
3005 if not ignored_nodes:
3008 first = ignored_nodes[0] # Can be a container node with the `leaf`.
3009 parent = first.parent
3010 prefix = first.prefix
3011 first.prefix = prefix[comment.consumed :]
3013 comment.value + "\n" + "".join(str(n) for n in ignored_nodes)
3015 if hidden_value.endswith("\n"):
3016 # That happens when one of the `ignored_nodes` ended with a NEWLINE
3017 # leaf (possibly followed by a DEDENT).
3018 hidden_value = hidden_value[:-1]
3020 for ignored in ignored_nodes:
3021 index = ignored.remove()
3022 if first_idx is None:
3024 assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
3025 assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
3026 parent.insert_child(
3031 prefix=prefix[:previous_consumed] + "\n" * comment.newlines,
3036 previous_consumed = comment.consumed
3041 def generate_ignored_nodes(leaf: Leaf) -> Iterator[LN]:
3042 """Starting from the container of `leaf`, generate all leaves until `# fmt: on`.
3044 Stops at the end of the block.
3046 container: Optional[LN] = container_of(leaf)
3047 while container is not None and container.type != token.ENDMARKER:
3048 for comment in list_comments(container.prefix, is_endmarker=False):
3049 if comment.value in FMT_ON:
3054 container = container.next_sibling
3057 def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool:
3058 """If it's safe, make the parens in the atom `node` invisible, recursively.
3059 Additionally, remove repeated, adjacent invisible parens from the atom `node`
3060 as they are redundant.
3062 Returns whether the node should itself be wrapped in invisible parentheses.
3066 node.type != syms.atom
3067 or is_empty_tuple(node)
3068 or is_one_tuple(node)
3069 or (is_yield(node) and parent.type != syms.expr_stmt)
3070 or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
3074 first = node.children[0]
3075 last = node.children[-1]
3076 if first.type == token.LPAR and last.type == token.RPAR:
3077 middle = node.children[1]
3078 # make parentheses invisible
3079 first.value = "" # type: ignore
3080 last.value = "" # type: ignore
3081 maybe_make_parens_invisible_in_atom(middle, parent=parent)
3083 if is_atom_with_invisible_parens(middle):
3084 # Strip the invisible parens from `middle` by replacing
3085 # it with the child in-between the invisible parens
3086 middle.replace(middle.children[1])
3093 def is_atom_with_invisible_parens(node: LN) -> bool:
3094 """Given a `LN`, determines whether it's an atom `node` with invisible
3095 parens. Useful in deduplicating and normalizing parens.
3097 if isinstance(node, Leaf) or node.type != syms.atom:
3100 first, last = node.children[0], node.children[-1]
3102 isinstance(first, Leaf)
3103 and first.type == token.LPAR
3104 and first.value == ""
3105 and isinstance(last, Leaf)
3106 and last.type == token.RPAR
3107 and last.value == ""
3111 def is_empty_tuple(node: LN) -> bool:
3112 """Return True if `node` holds an empty tuple."""
3114 node.type == syms.atom
3115 and len(node.children) == 2
3116 and node.children[0].type == token.LPAR
3117 and node.children[1].type == token.RPAR
3121 def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]:
3122 """Returns `wrapped` if `node` is of the shape ( wrapped ).
3124 Parentheses can be optional. Returns None otherwise."""
3125 if len(node.children) != 3:
3127 lpar, wrapped, rpar = node.children
3128 if not (lpar.type == token.LPAR and rpar.type == token.RPAR):
3134 def is_one_tuple(node: LN) -> bool:
3135 """Return True if `node` holds a tuple with one element, with or without parens."""
3136 if node.type == syms.atom:
3137 gexp = unwrap_singleton_parenthesis(node)
3138 if gexp is None or gexp.type != syms.testlist_gexp:
3141 return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA
3144 node.type in IMPLICIT_TUPLE
3145 and len(node.children) == 2
3146 and node.children[1].type == token.COMMA
3150 def is_walrus_assignment(node: LN) -> bool:
3151 """Return True iff `node` is of the shape ( test := test )"""
3152 inner = unwrap_singleton_parenthesis(node)
3153 return inner is not None and inner.type == syms.namedexpr_test
3156 def is_yield(node: LN) -> bool:
3157 """Return True if `node` holds a `yield` or `yield from` expression."""
3158 if node.type == syms.yield_expr:
3161 if node.type == token.NAME and node.value == "yield": # type: ignore
3164 if node.type != syms.atom:
3167 if len(node.children) != 3:
3170 lpar, expr, rpar = node.children
3171 if lpar.type == token.LPAR and rpar.type == token.RPAR:
3172 return is_yield(expr)
3177 def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
3178 """Return True if `leaf` is a star or double star in a vararg or kwarg.
3180 If `within` includes VARARGS_PARENTS, this applies to function signatures.
3181 If `within` includes UNPACKING_PARENTS, it applies to right-hand-side
3182 extended iterable unpacking (PEP 3132) and additional unpacking
3183 generalizations (PEP 448).
3185 if leaf.type not in VARARGS_SPECIALS or not leaf.parent:
3189 if p.type == syms.star_expr:
3190 # Star expressions are also used as assignment targets in extended
3191 # iterable unpacking (PEP 3132). See what its parent is instead.
3197 return p.type in within
3200 def is_multiline_string(leaf: Leaf) -> bool:
3201 """Return True if `leaf` is a multiline string that actually spans many lines."""
3202 value = leaf.value.lstrip("furbFURB")
3203 return value[:3] in {'"""', "'''"} and "\n" in value
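The check only fires when the string literally spans lines; a sketch assuming `black` and `blib2to3` are importable:

import black
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf

assert black.is_multiline_string(Leaf(token.STRING, '"""one\ntwo"""'))
assert not black.is_multiline_string(Leaf(token.STRING, '"""still one line"""'))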
3206 def is_stub_suite(node: Node) -> bool:
3207 """Return True if `node` is a suite with a stub body."""
3209 len(node.children) != 4
3210 or node.children[0].type != token.NEWLINE
3211 or node.children[1].type != token.INDENT
3212 or node.children[3].type != token.DEDENT
3216 return is_stub_body(node.children[2])
3219 def is_stub_body(node: LN) -> bool:
3220 """Return True if `node` is a simple statement containing an ellipsis."""
3221 if not isinstance(node, Node) or node.type != syms.simple_stmt:
3224 if len(node.children) != 2:
3227 child = node.children[0]
3229 child.type == syms.atom
3230 and len(child.children) == 3
3231 and all(leaf == Leaf(token.DOT, ".") for leaf in child.children)
3235 def max_delimiter_priority_in_atom(node: LN) -> Priority:
3236 """Return maximum delimiter priority inside `node`.
3238 This is specific to atoms with contents contained in a pair of parentheses.
3239 If `node` isn't an atom or there are no enclosing parentheses, returns 0.
3241 if node.type != syms.atom:
3244 first = node.children[0]
3245 last = node.children[-1]
3246 if not (first.type == token.LPAR and last.type == token.RPAR):
3249 bt = BracketTracker()
3250 for c in node.children[1:-1]:
3251 if isinstance(c, Leaf):
3254 for leaf in c.leaves():
3257 return bt.max_delimiter_priority()
3263 def ensure_visible(leaf: Leaf) -> None:
3264 """Make sure parentheses are visible.
3266 They could be invisible as part of some statements (see
3267 :func:`normalize_invisible_parens` and :func:`visit_import_from`).
3269 if leaf.type == token.LPAR:
3271 elif leaf.type == token.RPAR:
3275 def should_explode(line: Line, opening_bracket: Leaf) -> bool:
3276 """Should `line` immediately be split with `delimiter_split()` after RHS?"""
3279 opening_bracket.parent
3280 and opening_bracket.parent.type in {syms.atom, syms.import_from}
3281 and opening_bracket.value in "[{("
3286 last_leaf = line.leaves[-1]
3287 exclude = {id(last_leaf)} if last_leaf.type == token.COMMA else set()
3288 max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
3289 except (IndexError, ValueError):
3292 return max_priority == COMMA_PRIORITY
3295 def get_features_used(node: Node) -> Set[Feature]:
3296 """Return a set of (relatively) new Python features used in this file.
3298 Currently looking for:
3300 - underscores in numeric literals;
3301 - trailing commas after * or ** in function signatures and calls;
3302 - positional-only arguments in function signatures and lambdas;
3304 features: Set[Feature] = set()
3305 for n in node.pre_order():
3306 if n.type == token.STRING:
3307 value_head = n.value[:2] # type: ignore
3308 if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}:
3309 features.add(Feature.F_STRINGS)
3311 elif n.type == token.NUMBER:
3312 if "_" in n.value: # type: ignore
3313 features.add(Feature.NUMERIC_UNDERSCORES)
3315 elif n.type == token.SLASH:
3316 if n.parent and n.parent.type in {syms.typedargslist, syms.arglist}:
3317 features.add(Feature.POS_ONLY_ARGUMENTS)
3319 elif n.type == token.COLONEQUAL:
3320 features.add(Feature.ASSIGNMENT_EXPRESSIONS)
3323 n.type in {syms.typedargslist, syms.arglist}
3325 and n.children[-1].type == token.COMMA
3327 if n.type == syms.typedargslist:
3328 feature = Feature.TRAILING_COMMA_IN_DEF
3330 feature = Feature.TRAILING_COMMA_IN_CALL
3332 for ch in n.children:
3333 if ch.type in STARS:
3334 features.add(feature)
3336 if ch.type == syms.argument:
3337 for argch in ch.children:
3338 if argch.type in STARS:
3339 features.add(feature)
3344 def detect_target_versions(node: Node) -> Set[TargetVersion]:
3345 """Detect the version to target based on the nodes used."""
3346 features = get_features_used(node)
3348 version for version in TargetVersion if features <= VERSION_TO_FEATURES[version]
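Feature detection and version inference can be exercised on parsed source; a sketch assuming the module is importable as `black`:

import black

node = black.lib2to3_parse("result = f'{value}'\n")
assert black.Feature.F_STRINGS in black.get_features_used(node)
# Any version whose feature set doesn't include f-strings is ruled out:
assert black.TargetVersion.PY27 not in black.detect_target_versions(node)
assert black.TargetVersion.PY38 in black.detect_target_versions(node)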
3352 def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
3353 """Generate sets of closing bracket IDs that should be omitted in a RHS.
3355 Brackets can be omitted if the entire trailer up to and including
3356 a preceding closing bracket fits in one line.
3358 Yielded sets are cumulative (contain results of previous yields, too). First
3362 omit: Set[LeafID] = set()
3365 length = 4 * line.depth
3366 opening_bracket = None
3367 closing_bracket = None
3368 inner_brackets: Set[LeafID] = set()
3369 for index, leaf, leaf_length in enumerate_with_length(line, reversed=True):
3370 length += leaf_length
3371 if length > line_length:
3374 has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
3375 if leaf.type == STANDALONE_COMMENT or has_inline_comment:
3379 if leaf is opening_bracket:
3380 opening_bracket = None
3381 elif leaf.type in CLOSING_BRACKETS:
3382 inner_brackets.add(id(leaf))
3383 elif leaf.type in CLOSING_BRACKETS:
3384 if index > 0 and line.leaves[index - 1].type in OPENING_BRACKETS:
3385 # Empty brackets would fail a split so treat them as "inner"
3386 # brackets (e.g. only add them to the `omit` set if another
3387 # pair of brackets was good enough).
3388 inner_brackets.add(id(leaf))
3392 omit.add(id(closing_bracket))
3393 omit.update(inner_brackets)
3394 inner_brackets.clear()
3398 opening_bracket = leaf.opening_bracket
3399 closing_bracket = leaf
3402 def get_future_imports(node: Node) -> Set[str]:
3403 """Return a set of __future__ imports in the file."""
3404 imports: Set[str] = set()
3406 def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
3407 for child in children:
3408 if isinstance(child, Leaf):
3409 if child.type == token.NAME:
3411 elif child.type == syms.import_as_name:
3412 orig_name = child.children[0]
3413 assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports"
3414 assert orig_name.type == token.NAME, "Invalid syntax parsing imports"
3415 yield orig_name.value
3416 elif child.type == syms.import_as_names:
3417 yield from get_imports_from_children(child.children)
3419 raise AssertionError("Invalid syntax parsing imports")
3421 for child in node.children:
3422 if child.type != syms.simple_stmt:
3424 first_child = child.children[0]
3425 if isinstance(first_child, Leaf):
3426 # Continue looking if we see a docstring; otherwise stop.
3428 len(child.children) == 2
3429 and first_child.type == token.STRING
3430 and child.children[1].type == token.NEWLINE
3435 elif first_child.type == syms.import_from:
3436 module_name = first_child.children[1]
3437 if not isinstance(module_name, Leaf) or module_name.value != "__future__":
3439 imports |= set(get_imports_from_children(first_child.children[3:]))
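For instance (a sketch, assuming the module is importable as `black`):

import black

node = black.lib2to3_parse("from __future__ import annotations, division\n")
assert black.get_future_imports(node) == {"annotations", "division"}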
3445 def gen_python_files_in_dir(
3448 include: Pattern[str],
3449 exclude: Pattern[str],
3451 ) -> Iterator[Path]:
3452 """Generate all files under `path` whose paths are not excluded by the
3453 `exclude` regex, but are included by the `include` regex.
3455 Symbolic links pointing outside of the `root` directory are ignored.
3457 `report` is where output about exclusions goes.
3459 assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
3460 for child in path.iterdir():
3462 normalized_path = "/" + child.resolve().relative_to(root).as_posix()
3464 if child.is_symlink():
3465 report.path_ignored(
3466 child, f"is a symbolic link that points outside {root}"
3473 normalized_path += "/"
3474 exclude_match = exclude.search(normalized_path)
3475 if exclude_match and exclude_match.group(0):
3476 report.path_ignored(child, "matches the --exclude regular expression")
3480 yield from gen_python_files_in_dir(child, root, include, exclude, report)
3482 elif child.is_file():
3483 include_match = include.search(normalized_path)
3489 def find_project_root(srcs: Iterable[str]) -> Path:
3490 """Return a directory containing .git, .hg, or pyproject.toml.
3492 That directory can be one of the directories passed in `srcs` or their
3495 If no directory in the tree contains a marker that would specify it's the
3496 project root, the root of the file system is returned.
3499 return Path("/").resolve()
3501 common_base = min(Path(src).resolve() for src in srcs)
3502 if common_base.is_dir():
3503 # Append a fake file so `parents` below returns `common_base_dir`, too.
3504 common_base /= "fake-file"
3505 for directory in common_base.parents:
3506 if (directory / ".git").is_dir():
3509 if (directory / ".hg").is_dir():
3512 if (directory / "pyproject.toml").is_file():
3520 """Provides a reformatting counter. Can be rendered with `str(report)`."""
3524 verbose: bool = False
3525 change_count: int = 0
3527 failure_count: int = 0
3529 def done(self, src: Path, changed: Changed) -> None:
3530 """Increment the counter for successful reformatting. Write out a message."""
3531 if changed is Changed.YES:
3532 reformatted = "would reformat" if self.check else "reformatted"
3533 if self.verbose or not self.quiet:
3534 out(f"{reformatted} {src}")
3535 self.change_count += 1
3538 if changed is Changed.NO:
3539 msg = f"{src} already well formatted, good job."
3541 msg = f"{src} wasn't modified on disk since last run."
3542 out(msg, bold=False)
3543 self.same_count += 1
3545 def failed(self, src: Path, message: str) -> None:
3546 """Increment the counter for failed reformatting. Write out a message."""
3547 err(f"error: cannot format {src}: {message}")
3548 self.failure_count += 1
3550 def path_ignored(self, path: Path, message: str) -> None:
3552 out(f"{path} ignored: {message}", bold=False)
3555 def return_code(self) -> int:
3556 """Return the exit code that the app should use.
3558 This considers the current state of changed files and failures:
3559 - if there were any failures, return 123;
3560 - if any files were changed and --check is being used, return 1;
3561 - otherwise return 0.
3563 # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with
3564 # 126 we have special return codes reserved by the shell.
3565 if self.failure_count:
3568 elif self.change_count and self.check:
3573 def __str__(self) -> str:
3574 """Render a color report of the current state.
3576 Use `click.unstyle` to remove colors.
3579 reformatted = "would be reformatted"
3580 unchanged = "would be left unchanged"
3581 failed = "would fail to reformat"
3583 reformatted = "reformatted"
3584 unchanged = "left unchanged"
3585 failed = "failed to reformat"
3587 if self.change_count:
3588 s = "s" if self.change_count > 1 else ""
3590 click.style(f"{self.change_count} file{s} {reformatted}", bold=True)
3593 s = "s" if self.same_count > 1 else ""
3594 report.append(f"{self.same_count} file{s} {unchanged}")
3595 if self.failure_count:
3596 s = "s" if self.failure_count > 1 else ""
3598 click.style(f"{self.failure_count} file{s} {failed}", fg="red")
3600 return ", ".join(report) + "."
3603 def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]:
3604 filename = "<unknown>"
3605 if sys.version_info >= (3, 8):
3606 # TODO: support Python 4+ ;)
3607 for minor_version in range(sys.version_info[1], 4, -1):
3609 return ast.parse(src, filename, feature_version=(3, minor_version))
3613 for feature_version in (7, 6):
3615 return ast3.parse(src, filename, feature_version=feature_version)
3619 return ast27.parse(src)
3622 def _fixup_ast_constants(
3623 node: Union[ast.AST, ast3.AST, ast27.AST]
3624 ) -> Union[ast.AST, ast3.AST, ast27.AST]:
3625 """Map ast nodes deprecated in 3.8 to Constant."""
3626 # casts are required until this is released:
3627 # https://github.com/python/typeshed/pull/3142
3628 if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)):
3629 return cast(ast.AST, ast.Constant(value=node.s))
3630 elif isinstance(node, (ast.Num, ast3.Num, ast27.Num)):
3631 return cast(ast.AST, ast.Constant(value=node.n))
3632 elif isinstance(node, (ast.NameConstant, ast3.NameConstant)):
3633 return cast(ast.AST, ast.Constant(value=node.value))
3637 def assert_equivalent(src: str, dst: str) -> None:
3638 """Raise AssertionError if `src` and `dst` aren't equivalent."""
3640 def _v(node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0) -> Iterator[str]:
3641 """Simple visitor generating strings to compare ASTs by content."""
3643 node = _fixup_ast_constants(node)
3645 yield f"{' ' * depth}{node.__class__.__name__}("
3647 for field in sorted(node._fields):
3648 # TypeIgnore has only one field 'lineno' which breaks this comparison
3649 type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore)
3650 if sys.version_info >= (3, 8):
3651 type_ignore_classes += (ast.TypeIgnore,)
3652 if isinstance(node, type_ignore_classes):
3656 value = getattr(node, field)
3657 except AttributeError:
3660 yield f"{' ' * (depth+1)}{field}="
3662 if isinstance(value, list):
3664 # Ignore nested tuples within del statements, because we may insert
3665 # parentheses and they change the AST.
3668 and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete))
3669 and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple))
3671 for item in item.elts:
3672 yield from _v(item, depth + 2)
3673 elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)):
3674 yield from _v(item, depth + 2)
3676 elif isinstance(value, (ast.AST, ast3.AST, ast27.AST)):
3677 yield from _v(value, depth + 2)
3680 yield f"{' ' * (depth+2)}{value!r}, # {value.__class__.__name__}"
3682 yield f"{' ' * depth}) # /{node.__class__.__name__}"
3685 src_ast = parse_ast(src)
3686 except Exception as exc:
3687 raise AssertionError(
3688 f"cannot use --safe with this file; failed to parse source file. "
3689 f"AST error message: {exc}"
3693 dst_ast = parse_ast(dst)
3694 except Exception as exc:
3695 log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
3696 raise AssertionError(
3697 f"INTERNAL ERROR: Black produced invalid code: {exc}. "
3698 f"Please report a bug on https://github.com/psf/black/issues. "
3699 f"This invalid output might be helpful: {log}"
3702 src_ast_str = "\n".join(_v(src_ast))
3703 dst_ast_str = "\n".join(_v(dst_ast))
3704 if src_ast_str != dst_ast_str:
3705 log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
3706 raise AssertionError(
3707 f"INTERNAL ERROR: Black produced code that is not equivalent to "
3709 f"Please report a bug on https://github.com/psf/black/issues. "
3710 f"This diff might be helpful: {log}"
3714 def assert_stable(src: str, dst: str, mode: FileMode) -> None:
3715 """Raise AssertionError if `dst` reformats differently the second time."""
3716 newdst = format_str(dst, mode=mode)
3719 diff(src, dst, "source", "first pass"),
3720 diff(dst, newdst, "first pass", "second pass"),
3722 raise AssertionError(
3723 f"INTERNAL ERROR: Black produced different code on the second pass "
3724 f"of the formatter. "
3725 f"Please report a bug on https://github.com/psf/black/issues. "
3726 f"This diff might be helpful: {log}"
3730 def dump_to_file(*output: str) -> str:
3731 """Dump `output` to a temporary file. Return path to the file."""
3732 with tempfile.NamedTemporaryFile(
3733 mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
3735 for lines in output:
3737 if lines and lines[-1] != "\n":
3743 def nullcontext() -> Iterator[None]:
3744 """Return context manager that does nothing.
3745 Similar to `nullcontext` from python 3.7"""
3749 def diff(a: str, b: str, a_name: str, b_name: str) -> str:
3750 """Return a unified diff string between strings `a` and `b`."""
3753 a_lines = [line + "\n" for line in a.split("\n")]
3754 b_lines = [line + "\n" for line in b.split("\n")]
3756 difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5)
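A quick usage sketch, assuming the module is importable as `black`:

import black

print(black.diff("x = 1\n", "x = 2\n", "before", "after"))  # standard unified diff with 5 context lines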
3760 def cancel(tasks: Iterable[asyncio.Task]) -> None:
3761 """asyncio signal handler that cancels all `tasks` and reports to stderr."""
3767 def shutdown(loop: asyncio.AbstractEventLoop) -> None:
3768 """Cancel all pending tasks on `loop`, wait for them, and close the loop."""
3770 if sys.version_info[:2] >= (3, 7):
3771 all_tasks = asyncio.all_tasks
3773 all_tasks = asyncio.Task.all_tasks
3774 # This part is borrowed from asyncio/runners.py in Python 3.7b2.
3775 to_cancel = [task for task in all_tasks(loop) if not task.done()]
3779 for task in to_cancel:
3781 loop.run_until_complete(
3782 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)
3785 # `concurrent.futures.Future` objects cannot be cancelled once they
3786 # are already running. There might be some when the `shutdown()` happened.
3787 # Silence their logger's spew about the event loop being closed.
3788 cf_logger = logging.getLogger("concurrent.futures")
3789 cf_logger.setLevel(logging.CRITICAL)
3793 def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
3794 """Replace `regex` with `replacement` twice on `original`.
3796 This is used by string normalization to perform replacements on
3797 overlapping matches.
3799 return regex.sub(replacement, regex.sub(replacement, original))
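Why two passes: `re.sub` does not re-examine text adjacent to a replacement it just made, so overlapping matches need a second pass. A sketch with a deliberately overlapping pattern, assuming the module is importable as `black`:

import re
import black

pattern = re.compile(r"aba")
assert pattern.sub("aca", "ababa") == "acaba"             # single pass misses the overlapping match
assert black.sub_twice(pattern, "aca", "ababa") == "acaca"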
3802 def re_compile_maybe_verbose(regex: str) -> Pattern[str]:
3803 """Compile a regular expression string in `regex`.
3805 If it contains newlines, use verbose mode.
3808 regex = "(?x)" + regex
3809 compiled: Pattern[str] = re.compile(regex)
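This lets multi-line --include/--exclude values carry comments and whitespace; a sketch assuming the module is importable as `black`:

import black

plain = black.re_compile_maybe_verbose(r"\.pyi?$")
assert plain.search("module.py")

verbose = black.re_compile_maybe_verbose("\\.pyi?$  # .py and .pyi files\n")
assert verbose.search("module.pyi")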
3813 def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
3814 """Like `reversed(enumerate(sequence))` if that were possible."""
3815 index = len(sequence) - 1
3816 for element in reversed(sequence):
3817 yield (index, element)
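For example (a sketch, assuming the module is importable as `black`):

import black

assert list(black.enumerate_reversed("abc")) == [(2, "c"), (1, "b"), (0, "a")]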
3821 def enumerate_with_length(
3822 line: Line, reversed: bool = False
3823 ) -> Iterator[Tuple[Index, Leaf, int]]:
3824 """Return an enumeration of leaves with their length.
3826 Stops prematurely on multiline strings and standalone comments.
3829 Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
3830 enumerate_reversed if reversed else enumerate,
3832 for index, leaf in op(line.leaves):
3833 length = len(leaf.prefix) + len(leaf.value)
3834 if "\n" in leaf.value:
3835 return # Multiline strings, we can't continue.
3837 for comment in line.comments_after(leaf):
3838 length += len(comment.value)
3840 yield index, leaf, length
3843 def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool:
3844 """Return True if `line` is no longer than `line_length`.
3846 Uses the provided `line_str` rendering, if any, otherwise computes a new one.
3849 line_str = str(line).strip("\n")
3851 len(line_str) <= line_length
3852 and "\n" not in line_str # multiline strings
3853 and not line.contains_standalone_comments()
3857 def can_be_split(line: Line) -> bool:
3858 """Return False if the line cannot be split *for sure*.
3860 This is not an exhaustive search but a cheap heuristic that we can use to
3861 avoid some unfortunate formattings (mostly around wrapping unsplittable code
3862 in unnecessary parentheses).
3864 leaves = line.leaves
3868 if leaves[0].type == token.STRING and leaves[1].type == token.DOT:
3872 for leaf in leaves[-2::-1]:
3873 if leaf.type in OPENING_BRACKETS:
3874 if next.type not in CLOSING_BRACKETS:
3878 elif leaf.type == token.DOT:
3880 elif leaf.type == token.NAME:
3881 if not (next.type == token.DOT or next.type in OPENING_BRACKETS):
3884 elif leaf.type not in CLOSING_BRACKETS:
3887 if dot_count > 1 and call_count > 1:
3893 def can_omit_invisible_parens(line: Line, line_length: int) -> bool:
3894 """Does `line` have a shape safe to reformat without optional parens around it?
3896 Returns True for only a subset of potentially nice looking formattings but
3897 the point is to not return false positives that end up producing lines that
3900 bt = line.bracket_tracker
3901 if not bt.delimiters:
3902 # Without delimiters the optional parentheses are useless.
3905 max_priority = bt.max_delimiter_priority()
3906 if bt.delimiter_count_with_priority(max_priority) > 1:
3907 # With more than one delimiter of a kind the optional parentheses read better.
3910 if max_priority == DOT_PRIORITY:
3911 # A single stranded method call doesn't require optional parentheses.
3914 assert len(line.leaves) >= 2, "Stranded delimiter"
3916 first = line.leaves[0]
3917 second = line.leaves[1]
3918 penultimate = line.leaves[-2]
3919 last = line.leaves[-1]
3921 # With a single delimiter, omit if the expression starts or ends with
3923 if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:
3925 length = 4 * line.depth
3926 for _index, leaf, leaf_length in enumerate_with_length(line):
3927 if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:
3930 length += leaf_length
3931 if length > line_length:
3934 if leaf.type in OPENING_BRACKETS:
3935 # There are brackets we can further split on.
3939 # checked the entire string and line length wasn't exceeded
3940 if len(line.leaves) == _index + 1:
3943 # Note: we are not returning False here because a line might have *both*
3944 # a leading opening bracket and a trailing closing bracket. If the
3945 # opening bracket doesn't match our rule, maybe the closing will.
3948 last.type == token.RPAR
3949 or last.type == token.RBRACE
3951 # don't use indexing for omitting optional parentheses;
3953 last.type == token.RSQB
3955 and last.parent.type != syms.trailer
3958 if penultimate.type in OPENING_BRACKETS:
3959 # Empty brackets don't help.
3962 if is_multiline_string(first):
3963 # Additional wrapping of a multiline string in this situation is
3967 length = 4 * line.depth
3968 seen_other_brackets = False
3969 for _index, leaf, leaf_length in enumerate_with_length(line):
3970 length += leaf_length
3971 if leaf is last.opening_bracket:
3972 if seen_other_brackets or length <= line_length:
3975 elif leaf.type in OPENING_BRACKETS:
3976 # There are brackets we can further split on.
3977 seen_other_brackets = True
3982 def get_cache_file(mode: FileMode) -> Path:
3983 return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
3986 def read_cache(mode: FileMode) -> Cache:
3987 """Read the cache if it exists and is well formed.
3989 If it is not well formed, the call to write_cache later should resolve the issue.
3991 cache_file = get_cache_file(mode)
3992 if not cache_file.exists():
3995 with cache_file.open("rb") as fobj:
3997 cache: Cache = pickle.load(fobj)
3998 except pickle.UnpicklingError:
4004 def get_cache_info(path: Path) -> CacheInfo:
4005 """Return the information used to check if a file is already formatted or not."""
4007 return stat.st_mtime, stat.st_size
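The cache key is simply the (mtime, size) pair, so an untouched file is recognized on the next run; a sketch using a temporary file, assuming the module is importable as `black`:

import tempfile
from pathlib import Path

import black

with tempfile.NamedTemporaryFile(suffix=".py", delete=False) as tmp:
    src = Path(tmp.name)

cache = {src: black.get_cache_info(src)}
assert cache[src] == black.get_cache_info(src)   # unchanged file: nothing to do on the next run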
4010 def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
4011 """Split an iterable of paths in `sources` into two sets.
4013 The first contains paths of files that were modified on disk or are not in the
4014 cache. The other contains paths to non-modified files.
4016 todo, done = set(), set()
4019 if cache.get(src) != get_cache_info(src):
4026 def write_cache(cache: Cache, sources: Iterable[Path], mode: FileMode) -> None:
4027 """Update the cache file."""
4028 cache_file = get_cache_file(mode)
4030 CACHE_DIR.mkdir(parents=True, exist_ok=True)
4031 new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}}
4032 with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:
4033 pickle.dump(new_cache, f, protocol=pickle.HIGHEST_PROTOCOL)
4034 os.replace(f.name, cache_file)
4039 def patch_click() -> None:
4040 """Make Click not crash.
4042 In certain misconfigured environments, Python 3 selects the ASCII encoding as the
4043 default which restricts paths that it can access during the lifetime of the
4044 application. Click refuses to work in this scenario by raising a RuntimeError.
4046 In case of Black the likelihood that non-ASCII characters are going to be used in
4047 file paths is minimal since it's Python source code. Moreover, this crash was
4048 spurious on Python 3.7 thanks to PEP 538 and PEP 540.
4051 from click import core
4052 from click import _unicodefun # type: ignore
4053 except ModuleNotFoundError:
4056 for module in (core, _unicodefun):
4057 if hasattr(module, "_verify_python3_env"):
4058 module._verify_python3_env = lambda: None
4061 def patched_main() -> None:
4067 if __name__ == "__main__":