2 from asyncio.base_events import BaseEventLoop
3 from concurrent.futures import Executor, ProcessPoolExecutor
4 from datetime import datetime
6 from functools import lru_cache, partial, wraps
10 from multiprocessing import Manager, freeze_support
12 from pathlib import Path
39 from appdirs import user_cache_dir
40 from attr import dataclass, evolve, Factory
45 from blib2to3.pytree import Node, Leaf, type_repr
46 from blib2to3 import pygram, pytree
47 from blib2to3.pgen2 import driver, token
48 from blib2to3.pgen2.grammar import Grammar
49 from blib2to3.pgen2.parse import ParseError
52 __version__ = "19.3b0"
53 DEFAULT_LINE_LENGTH = 88
55 r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)/"
57 DEFAULT_INCLUDES = r"\.pyi?$"
58 CACHE_DIR = Path(user_cache_dir("black", version=__version__))
70 LN = Union[Leaf, Node]
71 SplitFunc = Callable[["Line", Collection["Feature"]], Iterator["Line"]]
74 CacheInfo = Tuple[Timestamp, FileSize]
75 Cache = Dict[Path, CacheInfo]
76 out = partial(click.secho, bold=True, err=True)
77 err = partial(click.secho, fg="red", err=True)
79 pygram.initialize(CACHE_DIR)
80 syms = pygram.python_symbols
83 class NothingChanged(UserWarning):
84 """Raised when reformatted code is the same as source."""
87 class CannotSplit(Exception):
88 """A readable split that fits the allotted line length is impossible."""
91 class InvalidInput(ValueError):
92 """Raised when input source code fails all parse attempts."""
95 class WriteBack(Enum):
102 def from_configuration(cls, *, check: bool, diff: bool) -> "WriteBack":
103 if check and not diff:
106 return cls.DIFF if diff else cls.YES
115 class TargetVersion(Enum):
124 def is_python2(self) -> bool:
125 return self is TargetVersion.PY27
128 PY36_VERSIONS = {TargetVersion.PY36, TargetVersion.PY37, TargetVersion.PY38}
132 # All string literals are unicode
135 NUMERIC_UNDERSCORES = 3
136 TRAILING_COMMA_IN_CALL = 4
137 TRAILING_COMMA_IN_DEF = 5
140 VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
141 TargetVersion.PY27: set(),
142 TargetVersion.PY33: {Feature.UNICODE_LITERALS},
143 TargetVersion.PY34: {Feature.UNICODE_LITERALS},
144 TargetVersion.PY35: {Feature.UNICODE_LITERALS, Feature.TRAILING_COMMA_IN_CALL},
145 TargetVersion.PY36: {
146 Feature.UNICODE_LITERALS,
148 Feature.NUMERIC_UNDERSCORES,
149 Feature.TRAILING_COMMA_IN_CALL,
150 Feature.TRAILING_COMMA_IN_DEF,
152 TargetVersion.PY37: {
153 Feature.UNICODE_LITERALS,
155 Feature.NUMERIC_UNDERSCORES,
156 Feature.TRAILING_COMMA_IN_CALL,
157 Feature.TRAILING_COMMA_IN_DEF,
159 TargetVersion.PY38: {
160 Feature.UNICODE_LITERALS,
162 Feature.NUMERIC_UNDERSCORES,
163 Feature.TRAILING_COMMA_IN_CALL,
164 Feature.TRAILING_COMMA_IN_DEF,
171 target_versions: Set[TargetVersion] = Factory(set)
172 line_length: int = DEFAULT_LINE_LENGTH
173 string_normalization: bool = True
176 def get_cache_key(self) -> str:
177 if self.target_versions:
178 version_str = ",".join(
180 for version in sorted(self.target_versions, key=lambda v: v.value)
186 str(self.line_length),
187 str(int(self.string_normalization)),
188 str(int(self.is_pyi)),
190 return ".".join(parts)
193 def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
194 return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)
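# Editorial sketch, not part of black.py: supports_feature() only reports a
# feature as usable when every requested target version lists it in
# VERSION_TO_FEATURES above.
def _example_supports_feature() -> None:
    assert supports_feature(
        {TargetVersion.PY36, TargetVersion.PY37}, Feature.TRAILING_COMMA_IN_CALL
    )
    # PY27 maps to an empty feature set, so mixing it in disables the feature.
    assert not supports_feature(
        {TargetVersion.PY27, TargetVersion.PY36}, Feature.TRAILING_COMMA_IN_CALL
    )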
197 def read_pyproject_toml(
198 ctx: click.Context, param: click.Parameter, value: Union[str, int, bool, None]
200 """Inject Black configuration from "pyproject.toml" into defaults in `ctx`.
202 Returns the path to a successfully found and read configuration file, None
203 otherwise.
205 assert not isinstance(value, (int, bool)), "Invalid parameter type passed"
207 root = find_project_root(ctx.params.get("src", ()))
208 path = root / "pyproject.toml"
215 pyproject_toml = toml.load(value)
216 config = pyproject_toml.get("tool", {}).get("black", {})
217 except (toml.TomlDecodeError, OSError) as e:
218 raise click.FileError(
219 filename=value, hint=f"Error reading configuration file: {e}"
225 if ctx.default_map is None:
227 ctx.default_map.update( # type: ignore # bad types in .pyi
228 {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
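# Editorial sketch, not part of black.py: the k.replace() call above is what
# turns `[tool.black]` keys such as "line-length" into click parameter names.
# The sample config dict is hypothetical.
def _example_pyproject_key_mapping() -> None:
    config = {"line-length": 100, "target-version": ["py36", "py37"]}
    defaults = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
    assert defaults == {"line_length": 100, "target_version": ["py36", "py37"]}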
233 @click.command(context_settings=dict(help_option_names=["-h", "--help"]))
238 default=DEFAULT_LINE_LENGTH,
239 help="How many characters per line to allow.",
245 type=click.Choice([v.name.lower() for v in TargetVersion]),
246 callback=lambda c, p, v: [TargetVersion[val.upper()] for val in v],
249 "Python versions that should be supported by Black's output. [default: "
250 "per-file auto-detection]"
257 "Allow using Python 3.6-only syntax on all input files. This will put "
258 "trailing commas in function signatures and calls also after *args and "
259 "**kwargs. Deprecated; use --target-version instead. "
260 "[default: per-file auto-detection]"
267 "Format all input files like typing stubs regardless of file extension "
268 "(useful when piping source on standard input)."
273 "--skip-string-normalization",
275 help="Don't normalize string quotes or prefixes.",
281 "Don't write the files back, just return the status. Return code 0 "
282 "means nothing would change. Return code 1 means some files would be "
283 "reformatted. Return code 123 means there was an internal error."
289 help="Don't write the files back, just output a diff for each file on stdout.",
294 help="If --fast given, skip temporary sanity checks. [default: --safe]",
299 default=DEFAULT_INCLUDES,
301 "A regular expression that matches files and directories that should be "
302 "included on recursive searches. An empty value means all files are "
303 "included regardless of the name. Use forward slashes for directories on "
304 "all platforms (Windows, too). Exclusions are calculated first, inclusions "
312 default=DEFAULT_EXCLUDES,
314 "A regular expression that matches files and directories that should be "
315 "excluded on recursive searches. An empty value means no paths are excluded. "
316 "Use forward slashes for directories on all platforms (Windows, too). "
317 "Exclusions are calculated first, inclusions later."
326 "Don't emit non-error messages to stderr. Errors are still emitted, "
327 "silence those with 2>/dev/null."
335 "Also emit messages to stderr about files that were not changed or were "
336 "ignored due to --exclude=."
339 @click.version_option(version=__version__)
344 exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
351 exists=False, file_okay=True, dir_okay=False, readable=True, allow_dash=False
354 callback=read_pyproject_toml,
355 help="Read configuration from PATH.",
361 target_version: List[TargetVersion],
367 skip_string_normalization: bool,
373 config: Optional[str],
375 """The uncompromising code formatter."""
376 write_back = WriteBack.from_configuration(check=check, diff=diff)
379 err(f"Cannot use both --target-version and --py36")
382 versions = set(target_version)
385 "--py36 is deprecated and will be removed in a future version. "
386 "Use --target-version py36 instead."
388 versions = PY36_VERSIONS
390 # We'll autodetect later.
393 target_versions=versions,
394 line_length=line_length,
396 string_normalization=not skip_string_normalization,
398 if config and verbose:
399 out(f"Using configuration from {config}.", bold=False, fg="blue")
401 include_regex = re_compile_maybe_verbose(include)
403 err(f"Invalid regular expression for include given: {include!r}")
406 exclude_regex = re_compile_maybe_verbose(exclude)
408 err(f"Invalid regular expression for exclude given: {exclude!r}")
410 report = Report(check=check, quiet=quiet, verbose=verbose)
411 root = find_project_root(src)
412 sources: Set[Path] = set()
417 gen_python_files_in_dir(p, root, include_regex, exclude_regex, report)
419 elif p.is_file() or s == "-":
420 # if a file was explicitly given, we don't care about its extension
423 err(f"invalid path: {s}")
424 if len(sources) == 0:
425 if verbose or not quiet:
426 out("No paths given. Nothing to do 😴")
429 if len(sources) == 1:
433 write_back=write_back,
438 loop = asyncio.get_event_loop()
439 executor = ProcessPoolExecutor(max_workers=os.cpu_count())
441 loop.run_until_complete(
445 write_back=write_back,
454 if verbose or not quiet:
455 bang = "💥 💔 💥" if report.return_code else "✨ 🍰 ✨"
456 out(f"All done! {bang}")
457 click.secho(str(report), err=True)
458 ctx.exit(report.return_code)
462 src: Path, fast: bool, write_back: WriteBack, mode: FileMode, report: "Report"
464 """Reformat a single file under `src` without spawning child processes.
466 Results are recorded in `report`. The `fast`, `write_back`, and `mode`
467 options are passed to
468 :func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
472 if not src.is_file() and str(src) == "-":
473 if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode):
474 changed = Changed.YES
477 if write_back != WriteBack.DIFF:
478 cache = read_cache(mode)
479 res_src = src.resolve()
480 if res_src in cache and cache[res_src] == get_cache_info(res_src):
481 changed = Changed.CACHED
482 if changed is not Changed.CACHED and format_file_in_place(
483 src, fast=fast, write_back=write_back, mode=mode
485 changed = Changed.YES
486 if (write_back is WriteBack.YES and changed is not Changed.CACHED) or (
487 write_back is WriteBack.CHECK and changed is Changed.NO
489 write_cache(cache, [src], mode)
490 report.done(src, changed)
491 except Exception as exc:
492 report.failed(src, str(exc))
495 async def schedule_formatting(
498 write_back: WriteBack,
504 """Run formatting of `sources` in parallel using the provided `executor`.
506 (Use ProcessPoolExecutors for actual parallelism.)
508 The `write_back`, `fast`, and `mode` options are passed to
509 :func:`format_file_in_place`.
512 if write_back != WriteBack.DIFF:
513 cache = read_cache(mode)
514 sources, cached = filter_cached(cache, sources)
515 for src in sorted(cached):
516 report.done(src, Changed.CACHED)
521 sources_to_cache = []
523 if write_back == WriteBack.DIFF:
524 # For diff output, we need locks to ensure we don't interleave output
525 # from different processes.
527 lock = manager.Lock()
529 asyncio.ensure_future(
530 loop.run_in_executor(
531 executor, format_file_in_place, src, fast, mode, write_back, lock
534 for src in sorted(sources)
536 pending: Iterable[asyncio.Future] = tasks.keys()
538 loop.add_signal_handler(signal.SIGINT, cancel, pending)
539 loop.add_signal_handler(signal.SIGTERM, cancel, pending)
540 except NotImplementedError:
541 # There are no good alternatives for these on Windows.
544 done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
546 src = tasks.pop(task)
548 cancelled.append(task)
549 elif task.exception():
550 report.failed(src, str(task.exception()))
552 changed = Changed.YES if task.result() else Changed.NO
553 # If the file was written back or was successfully checked as
554 # well-formatted, store this information in the cache.
555 if write_back is WriteBack.YES or (
556 write_back is WriteBack.CHECK and changed is Changed.NO
558 sources_to_cache.append(src)
559 report.done(src, changed)
561 await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
563 write_cache(cache, sources_to_cache, mode)
566 def format_file_in_place(
570 write_back: WriteBack = WriteBack.NO,
571 lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy
573 """Format file under `src` path. Return True if changed.
575 If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted
576 code to the file.
577 The `mode` and `fast` options are passed to :func:`format_file_contents`.
579 if src.suffix == ".pyi":
580 mode = evolve(mode, is_pyi=True)
582 then = datetime.utcfromtimestamp(src.stat().st_mtime)
583 with open(src, "rb") as buf:
584 src_contents, encoding, newline = decode_bytes(buf.read())
586 dst_contents = format_file_contents(src_contents, fast=fast, mode=mode)
587 except NothingChanged:
590 if write_back == WriteBack.YES:
591 with open(src, "w", encoding=encoding, newline=newline) as f:
592 f.write(dst_contents)
593 elif write_back == WriteBack.DIFF:
594 now = datetime.utcnow()
595 src_name = f"{src}\t{then} +0000"
596 dst_name = f"{src}\t{now} +0000"
597 diff_contents = diff(src_contents, dst_contents, src_name, dst_name)
601 f = io.TextIOWrapper(
607 f.write(diff_contents)
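# Editorial sketch, not part of black.py: reformatting a single file in place.
# The path is hypothetical; format_file_in_place() returns True when the file
# was changed.
def _example_format_file_in_place() -> None:
    changed = format_file_in_place(
        Path("example.py"),  # hypothetical file
        fast=False,
        mode=FileMode(),
        write_back=WriteBack.YES,
    )
    print("reformatted" if changed else "already formatted")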
615 def format_stdin_to_stdout(
616 fast: bool, *, write_back: WriteBack = WriteBack.NO, mode: FileMode
618 """Format file on stdin. Return True if changed.
620 If `write_back` is YES, write reformatted code back to stdout. If it is DIFF,
621 write a diff to stdout. The `mode` argument is passed to
622 :func:`format_file_contents`.
624 then = datetime.utcnow()
625 src, encoding, newline = decode_bytes(sys.stdin.buffer.read())
628 dst = format_file_contents(src, fast=fast, mode=mode)
631 except NothingChanged:
635 f = io.TextIOWrapper(
636 sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True
638 if write_back == WriteBack.YES:
640 elif write_back == WriteBack.DIFF:
641 now = datetime.utcnow()
642 src_name = f"STDIN\t{then} +0000"
643 dst_name = f"STDOUT\t{now} +0000"
644 f.write(diff(src, dst, src_name, dst_name))
648 def format_file_contents(
649 src_contents: str, *, fast: bool, mode: FileMode
651 """Reformat contents a file and return new contents.
653 If `fast` is False, additionally confirm that the reformatted code is
654 valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
655 `mode` is passed to :func:`format_str`.
657 if src_contents.strip() == "":
660 dst_contents = format_str(src_contents, mode=mode)
661 if src_contents == dst_contents:
665 assert_equivalent(src_contents, dst_contents)
666 assert_stable(src_contents, dst_contents, mode=mode)
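# Editorial sketch, not part of black.py: format_file_contents() raises
# NothingChanged for already-formatted (or empty) input, so callers wrap it in
# try/except, as format_file_in_place() does above.
def _example_format_file_contents() -> None:
    src = "x = {  'a':37}\n"
    try:
        dst = format_file_contents(src, fast=False, mode=FileMode())
    except NothingChanged:
        dst = src
    print(dst)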
670 def format_str(src_contents: str, *, mode: FileMode) -> FileContent:
671 """Reformat a string and return new contents.
673 `mode` determines formatting options, such as how many characters per line are allowed.
675 src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
677 future_imports = get_future_imports(src_node)
678 if mode.target_versions:
679 versions = mode.target_versions
681 versions = detect_target_versions(src_node)
682 normalize_fmt_off(src_node)
683 lines = LineGenerator(
684 remove_u_prefix="unicode_literals" in future_imports
685 or supports_feature(versions, Feature.UNICODE_LITERALS),
687 normalize_strings=mode.string_normalization,
689 elt = EmptyLineTracker(is_pyi=mode.is_pyi)
692 split_line_features = {
694 for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF}
695 if supports_feature(versions, feature)
697 for current_line in lines.visit(src_node):
698 for _ in range(after):
699 dst_contents += str(empty_line)
700 before, after = elt.maybe_empty_lines(current_line)
701 for _ in range(before):
702 dst_contents += str(empty_line)
703 for line in split_line(
704 current_line, line_length=mode.line_length, features=split_line_features
706 dst_contents += str(line)
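# Editorial sketch, not part of black.py: format_str() is the core API; the
# FileMode controls line length and string normalization.
def _example_format_str() -> None:
    mode = FileMode(line_length=79, string_normalization=True)
    print(format_str("def f(a,b):\n    return a+b\n", mode=mode))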
710 def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
711 """Return a tuple of (decoded_contents, encoding, newline).
713 `newline` is either CRLF or LF but `decoded_contents` is decoded with
714 universal newlines (i.e. only contains LF).
716 srcbuf = io.BytesIO(src)
717 encoding, lines = tokenize.detect_encoding(srcbuf.readline)
719 return "", encoding, "\n"
721 newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n"
723 with io.TextIOWrapper(srcbuf, encoding) as tiow:
724 return tiow.read(), encoding, newline
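# Editorial sketch, not part of black.py: decode_bytes() reports the original
# newline style while returning text with universal newlines.
def _example_decode_bytes() -> None:
    contents, encoding, newline = decode_bytes(b"x = 1\r\ny = 2\r\n")
    assert contents == "x = 1\ny = 2\n"
    assert newline == "\r\n"
    assert encoding == "utf-8"  # no BOM or coding cookie in the sample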
727 def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
728 if not target_versions:
729 # No target_version specified, so try all grammars.
731 pygram.python_grammar_no_print_statement_no_exec_statement,
732 pygram.python_grammar_no_print_statement,
733 pygram.python_grammar,
735 elif all(version.is_python2() for version in target_versions):
736 # Python 2-only code, so try Python 2 grammars.
737 return [pygram.python_grammar_no_print_statement, pygram.python_grammar]
739 # Python 3-compatible code, so only try Python 3 grammar.
740 return [pygram.python_grammar_no_print_statement_no_exec_statement]
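# Editorial sketch, not part of black.py: how many grammars are tried depends
# on the requested target versions (see the branches above).
def _example_get_grammars() -> None:
    assert len(get_grammars(set())) == 3  # unknown target: try all grammars
    assert len(get_grammars({TargetVersion.PY27})) == 2  # Python 2 grammars
    assert len(get_grammars({TargetVersion.PY37})) == 1  # Python 3 grammar only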
743 def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
744 """Given a string with source, return the lib2to3 Node."""
745 if src_txt[-1:] != "\n":
748 for grammar in get_grammars(set(target_versions)):
749 drv = driver.Driver(grammar, pytree.convert)
751 result = drv.parse_string(src_txt, True)
754 except ParseError as pe:
755 lineno, column = pe.context[1]
756 lines = src_txt.splitlines()
758 faulty_line = lines[lineno - 1]
760 faulty_line = "<line number missing in source>"
761 exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}")
765 if isinstance(result, Leaf):
766 result = Node(syms.file_input, [result])
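# Editorial sketch, not part of black.py: parsing a snippet round-trips through
# str(), and unparseable input surfaces as InvalidInput.
def _example_lib2to3_parse() -> None:
    node = lib2to3_parse("x = 1\n")
    assert str(node) == "x = 1\n"
    try:
        lib2to3_parse("def :\n")
    except InvalidInput as exc:
        print(exc)  # e.g. "Cannot parse: 1:4: def :"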
770 def lib2to3_unparse(node: Node) -> str:
771 """Given a lib2to3 node, return its string representation."""
779 class Visitor(Generic[T]):
780 """Basic lib2to3 visitor that yields things of type `T` on `visit()`."""
782 def visit(self, node: LN) -> Iterator[T]:
783 """Main method to visit `node` and its children.
785 It tries to find a `visit_*()` method for the given `node.type`, like
786 `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects.
787 If no dedicated `visit_*()` method is found, it falls back to `visit_default()`.
790 Then yields objects of type `T` from the selected visitor.
793 name = token.tok_name[node.type]
795 name = type_repr(node.type)
796 yield from getattr(self, f"visit_{name}", self.visit_default)(node)
798 def visit_default(self, node: LN) -> Iterator[T]:
799 """Default `visit_*()` implementation. Recurses to children of `node`."""
800 if isinstance(node, Node):
801 for child in node.children:
802 yield from self.visit(child)
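# Editorial sketch, not part of black.py: a minimal Visitor subclass. The
# getattr() dispatch above finds visit_NAME for NAME leaves and falls back to
# visit_default() (which recurses) for everything else.
class _NameCollector(Visitor[Leaf]):
    def visit_NAME(self, leaf: Leaf) -> Iterator[Leaf]:
        yield leaf


def _example_visitor() -> None:
    names = _NameCollector().visit(lib2to3_parse("spam = eggs\n"))
    assert [leaf.value for leaf in names] == ["spam", "eggs"]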
806 class DebugVisitor(Visitor[T]):
809 def visit_default(self, node: LN) -> Iterator[T]:
810 indent = " " * (2 * self.tree_depth)
811 if isinstance(node, Node):
812 _type = type_repr(node.type)
813 out(f"{indent}{_type}", fg="yellow")
815 for child in node.children:
816 yield from self.visit(child)
819 out(f"{indent}/{_type}", fg="yellow", bold=False)
821 _type = token.tok_name.get(node.type, str(node.type))
822 out(f"{indent}{_type}", fg="blue", nl=False)
824 # We don't have to handle prefixes for `Node` objects since
825 # that delegates to the first child anyway.
826 out(f" {node.prefix!r}", fg="green", bold=False, nl=False)
827 out(f" {node.value!r}", fg="blue", bold=False)
830 def show(cls, code: Union[str, Leaf, Node]) -> None:
831 """Pretty-print the lib2to3 AST of a given string of `code`.
833 Convenience method for debugging.
835 v: DebugVisitor[None] = DebugVisitor()
836 if isinstance(code, str):
837 code = lib2to3_parse(code)
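# Editorial sketch, not part of black.py: DebugVisitor.show() pretty-prints the
# blib2to3 tree of a snippet (via `out`, i.e. to stderr).
def _example_debug_visitor() -> None:
    DebugVisitor.show("x = 1\n")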
841 WHITESPACE = {token.DEDENT, token.INDENT, token.NEWLINE}
852 STANDALONE_COMMENT = 153
853 token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT"
854 LOGIC_OPERATORS = {"and", "or"}
879 STARS = {token.STAR, token.DOUBLESTAR}
882 syms.argument, # double star in arglist
883 syms.trailer, # single argument to call
885 syms.varargslist, # lambdas
887 UNPACKING_PARENTS = {
888 syms.atom, # single element of a list or set literal
892 syms.testlist_star_expr,
927 COMPREHENSION_PRIORITY = 20
929 TERNARY_PRIORITY = 16
932 COMPARATOR_PRIORITY = 10
943 token.DOUBLESLASH: 4,
953 class BracketTracker:
954 """Keeps track of brackets on a line."""
957 bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = Factory(dict)
958 delimiters: Dict[LeafID, Priority] = Factory(dict)
959 previous: Optional[Leaf] = None
960 _for_loop_depths: List[int] = Factory(list)
961 _lambda_argument_depths: List[int] = Factory(list)
963 def mark(self, leaf: Leaf) -> None:
964 """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
966 All leaves receive an int `bracket_depth` field that stores how deep
967 within brackets a given leaf is. 0 means there are no enclosing brackets
968 that started on this line.
970 If a leaf is itself a closing bracket, it receives an `opening_bracket`
971 field that it forms a pair with. This is a one-directional link to
972 avoid reference cycles.
974 If a leaf is a delimiter (a token on which Black can split the line if
975 needed) and it's on depth 0, its `id()` is stored in the tracker's
976 `delimiters` field.
978 if leaf.type == token.COMMENT:
981 self.maybe_decrement_after_for_loop_variable(leaf)
982 self.maybe_decrement_after_lambda_arguments(leaf)
983 if leaf.type in CLOSING_BRACKETS:
985 opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
986 leaf.opening_bracket = opening_bracket
987 leaf.bracket_depth = self.depth
989 delim = is_split_before_delimiter(leaf, self.previous)
990 if delim and self.previous is not None:
991 self.delimiters[id(self.previous)] = delim
993 delim = is_split_after_delimiter(leaf, self.previous)
995 self.delimiters[id(leaf)] = delim
996 if leaf.type in OPENING_BRACKETS:
997 self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
1000 self.maybe_increment_lambda_arguments(leaf)
1001 self.maybe_increment_for_loop_variable(leaf)
1003 def any_open_brackets(self) -> bool:
1004 """Return True if there is an yet unmatched open bracket on the line."""
1005 return bool(self.bracket_match)
1007 def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> int:
1008 """Return the highest priority of a delimiter found on the line.
1010 Values are consistent with what `is_split_*_delimiter()` return.
1011 Raises ValueError on no delimiters.
1013 return max(v for k, v in self.delimiters.items() if k not in exclude)
1015 def delimiter_count_with_priority(self, priority: int = 0) -> int:
1016 """Return the number of delimiters with the given `priority`.
1018 If no `priority` is passed, defaults to max priority on the line.
1020 if not self.delimiters:
1023 priority = priority or self.max_delimiter_priority()
1024 return sum(1 for p in self.delimiters.values() if p == priority)
1026 def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
1027 """In a for loop, or comprehension, the variables are often unpacks.
1029 To avoid splitting on the comma in this situation, increase the depth of
1030 tokens between `for` and `in`.
1032 if leaf.type == token.NAME and leaf.value == "for":
1034 self._for_loop_depths.append(self.depth)
1039 def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
1040 """See `maybe_increment_for_loop_variable` above for explanation."""
1042 self._for_loop_depths
1043 and self._for_loop_depths[-1] == self.depth
1044 and leaf.type == token.NAME
1045 and leaf.value == "in"
1048 self._for_loop_depths.pop()
1053 def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
1054 """In a lambda expression, there might be more than one argument.
1056 To avoid splitting on the comma in this situation, increase the depth of
1057 tokens between `lambda` and `:`.
1059 if leaf.type == token.NAME and leaf.value == "lambda":
1061 self._lambda_argument_depths.append(self.depth)
1066 def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
1067 """See `maybe_increment_lambda_arguments` above for explanation."""
1069 self._lambda_argument_depths
1070 and self._lambda_argument_depths[-1] == self.depth
1071 and leaf.type == token.COLON
1074 self._lambda_argument_depths.pop()
1079 def get_open_lsqb(self) -> Optional[Leaf]:
1080 """Return the most recent opening square bracket (if any)."""
1081 return self.bracket_match.get((self.depth - 1, token.RSQB))
1086 """Holds leaves and comments. Can be printed with `str(line)`."""
1089 leaves: List[Leaf] = Factory(list)
1090 comments: Dict[LeafID, List[Leaf]] = Factory(dict) # keys ordered like `leaves`
1091 bracket_tracker: BracketTracker = Factory(BracketTracker)
1092 inside_brackets: bool = False
1093 should_explode: bool = False
1095 def append(self, leaf: Leaf, preformatted: bool = False) -> None:
1096 """Add a new `leaf` to the end of the line.
1098 Unless `preformatted` is True, the `leaf` will receive a new consistent
1099 whitespace prefix and metadata applied by :class:`BracketTracker`.
1100 Trailing commas may be removed, and unpacked for-loop variables are
1101 demoted from being delimiters.
1103 Inline comments are put aside.
1105 has_value = leaf.type in BRACKETS or bool(leaf.value.strip())
1109 if token.COLON == leaf.type and self.is_class_paren_empty:
1110 del self.leaves[-2:]
1111 if self.leaves and not preformatted:
1112 # Note: at this point leaf.prefix should be empty except for
1113 # imports, for which we only preserve newlines.
1114 leaf.prefix += whitespace(
1115 leaf, complex_subscript=self.is_complex_subscript(leaf)
1117 if self.inside_brackets or not preformatted:
1118 self.bracket_tracker.mark(leaf)
1119 self.maybe_remove_trailing_comma(leaf)
1120 if not self.append_comment(leaf):
1121 self.leaves.append(leaf)
1123 def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
1124 """Like :func:`append()` but disallow invalid standalone comment structure.
1126 Raises ValueError when any `leaf` is appended after a standalone comment
1127 or when a standalone comment is not the first leaf on the line.
1129 if self.bracket_tracker.depth == 0:
1131 raise ValueError("cannot append to standalone comments")
1133 if self.leaves and leaf.type == STANDALONE_COMMENT:
1135 "cannot append standalone comments to a populated line"
1138 self.append(leaf, preformatted=preformatted)
1141 def is_comment(self) -> bool:
1142 """Is this line a standalone comment?"""
1143 return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
1146 def is_decorator(self) -> bool:
1147 """Is this line a decorator?"""
1148 return bool(self) and self.leaves[0].type == token.AT
1151 def is_import(self) -> bool:
1152 """Is this an import line?"""
1153 return bool(self) and is_import(self.leaves[0])
1156 def is_class(self) -> bool:
1157 """Is this line a class definition?"""
1160 and self.leaves[0].type == token.NAME
1161 and self.leaves[0].value == "class"
1165 def is_stub_class(self) -> bool:
1166 """Is this line a class definition with a body consisting only of "..."?"""
1167 return self.is_class and self.leaves[-3:] == [
1168 Leaf(token.DOT, ".") for _ in range(3)
1172 def is_def(self) -> bool:
1173 """Is this a function definition? (Also returns True for async defs.)"""
1175 first_leaf = self.leaves[0]
1180 second_leaf: Optional[Leaf] = self.leaves[1]
1183 return (first_leaf.type == token.NAME and first_leaf.value == "def") or (
1184 first_leaf.type == token.ASYNC
1185 and second_leaf is not None
1186 and second_leaf.type == token.NAME
1187 and second_leaf.value == "def"
1191 def is_class_paren_empty(self) -> bool:
1192 """Is this a class with no base classes but using parentheses?
1194 Those are unnecessary and should be removed.
1198 and len(self.leaves) == 4
1200 and self.leaves[2].type == token.LPAR
1201 and self.leaves[2].value == "("
1202 and self.leaves[3].type == token.RPAR
1203 and self.leaves[3].value == ")"
1207 def is_triple_quoted_string(self) -> bool:
1208 """Is the line a triple quoted string?"""
1211 and self.leaves[0].type == token.STRING
1212 and self.leaves[0].value.startswith(('"""', "'''"))
1215 def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
1216 """If so, needs to be split before emitting."""
1217 for leaf in self.leaves:
1218 if leaf.type == STANDALONE_COMMENT:
1219 if leaf.bracket_depth <= depth_limit:
1223 def contains_inner_type_comments(self) -> bool:
1226 last_leaf = self.leaves[-1]
1227 ignored_ids.add(id(last_leaf))
1228 if last_leaf.type == token.COMMA:
1229 # When trailing commas are inserted by Black for consistency, comments
1230 # after the previous last element are not moved (they don't have to be;
1231 # rendering will still be correct). So we ignore trailing commas.
1232 last_leaf = self.leaves[-2]
1233 ignored_ids.add(id(last_leaf))
1237 for leaf_id, comments in self.comments.items():
1238 if leaf_id in ignored_ids:
1241 for comment in comments:
1242 if is_type_comment(comment):
1247 def contains_multiline_strings(self) -> bool:
1248 for leaf in self.leaves:
1249 if is_multiline_string(leaf):
1254 def maybe_remove_trailing_comma(self, closing: Leaf) -> bool:
1255 """Remove trailing comma if there is one and it's safe."""
1258 and self.leaves[-1].type == token.COMMA
1259 and closing.type in CLOSING_BRACKETS
1263 if closing.type == token.RBRACE:
1264 self.remove_trailing_comma()
1267 if closing.type == token.RSQB:
1268 comma = self.leaves[-1]
1269 if comma.parent and comma.parent.type == syms.listmaker:
1270 self.remove_trailing_comma()
1273 # For parens let's check if it's safe to remove the comma.
1274 # Imports are always safe.
1276 self.remove_trailing_comma()
1279 # Otherwise, if the trailing one is the only one, we might mistakenly
1280 # change a tuple into a different type by removing the comma.
1281 depth = closing.bracket_depth + 1
1283 opening = closing.opening_bracket
1284 for _opening_index, leaf in enumerate(self.leaves):
1291 for leaf in self.leaves[_opening_index + 1 :]:
1295 bracket_depth = leaf.bracket_depth
1296 if bracket_depth == depth and leaf.type == token.COMMA:
1298 if leaf.parent and leaf.parent.type == syms.arglist:
1303 self.remove_trailing_comma()
1308 def append_comment(self, comment: Leaf) -> bool:
1309 """Add an inline or standalone comment to the line."""
1311 comment.type == STANDALONE_COMMENT
1312 and self.bracket_tracker.any_open_brackets()
1317 if comment.type != token.COMMENT:
1321 comment.type = STANDALONE_COMMENT
1325 self.comments.setdefault(id(self.leaves[-1]), []).append(comment)
1328 def comments_after(self, leaf: Leaf) -> List[Leaf]:
1329 """Generate comments that should appear directly after `leaf`."""
1330 return self.comments.get(id(leaf), [])
1332 def remove_trailing_comma(self) -> None:
1333 """Remove the trailing comma and moves the comments attached to it."""
1334 trailing_comma = self.leaves.pop()
1335 trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
1336 self.comments.setdefault(id(self.leaves[-1]), []).extend(
1337 trailing_comma_comments
1340 def is_complex_subscript(self, leaf: Leaf) -> bool:
1341 """Return True iff `leaf` is part of a slice with non-trivial exprs."""
1342 open_lsqb = self.bracket_tracker.get_open_lsqb()
1343 if open_lsqb is None:
1346 subscript_start = open_lsqb.next_sibling
1348 if isinstance(subscript_start, Node):
1349 if subscript_start.type == syms.listmaker:
1352 if subscript_start.type == syms.subscriptlist:
1353 subscript_start = child_towards(subscript_start, leaf)
1354 return subscript_start is not None and any(
1355 n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
1358 def __str__(self) -> str:
1359 """Render the line."""
1363 indent = " " * self.depth
1364 leaves = iter(self.leaves)
1365 first = next(leaves)
1366 res = f"{first.prefix}{indent}{first.value}"
1369 for comment in itertools.chain.from_iterable(self.comments.values()):
1373 def __bool__(self) -> bool:
1374 """Return True if the line has leaves or comments."""
1375 return bool(self.leaves or self.comments)
1379 class EmptyLineTracker:
1380 """Provides a stateful method that returns the number of potential extra
1381 empty lines needed before and after the currently processed line.
1383 Note: this tracker works on lines that haven't been split yet. It assumes
1384 the prefix of the first leaf consists of optional newlines. Those newlines
1385 are consumed by `maybe_empty_lines()` and included in the computation.
1388 is_pyi: bool = False
1389 previous_line: Optional[Line] = None
1390 previous_after: int = 0
1391 previous_defs: List[int] = Factory(list)
1393 def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
1394 """Return the number of extra empty lines before and after the `current_line`.
1396 This is for separating `def`, `async def` and `class` with extra empty
1397 lines (two on module-level).
1399 before, after = self._maybe_empty_lines(current_line)
1400 before -= self.previous_after
1401 self.previous_after = after
1402 self.previous_line = current_line
1403 return before, after
1405 def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
1407 if current_line.depth == 0:
1408 max_allowed = 1 if self.is_pyi else 2
1409 if current_line.leaves:
1410 # Consume the first leaf's extra newlines.
1411 first_leaf = current_line.leaves[0]
1412 before = first_leaf.prefix.count("\n")
1413 before = min(before, max_allowed)
1414 first_leaf.prefix = ""
1417 depth = current_line.depth
1418 while self.previous_defs and self.previous_defs[-1] >= depth:
1419 self.previous_defs.pop()
1421 before = 0 if depth else 1
1423 before = 1 if depth else 2
1424 if current_line.is_decorator or current_line.is_def or current_line.is_class:
1425 return self._maybe_empty_lines_for_class_or_def(current_line, before)
1429 and self.previous_line.is_import
1430 and not current_line.is_import
1431 and depth == self.previous_line.depth
1433 return (before or 1), 0
1437 and self.previous_line.is_class
1438 and current_line.is_triple_quoted_string
1444 def _maybe_empty_lines_for_class_or_def(
1445 self, current_line: Line, before: int
1446 ) -> Tuple[int, int]:
1447 if not current_line.is_decorator:
1448 self.previous_defs.append(current_line.depth)
1449 if self.previous_line is None:
1450 # Don't insert empty lines before the first line in the file.
1453 if self.previous_line.is_decorator:
1456 if self.previous_line.depth < current_line.depth and (
1457 self.previous_line.is_class or self.previous_line.is_def
1462 self.previous_line.is_comment
1463 and self.previous_line.depth == current_line.depth
1469 if self.previous_line.depth > current_line.depth:
1471 elif current_line.is_class or self.previous_line.is_class:
1472 if current_line.is_stub_class and self.previous_line.is_stub_class:
1473 # No blank line between classes with an empty body
1477 elif current_line.is_def and not self.previous_line.is_def:
1478 # Blank line between a block of functions and a block of non-functions
1484 if current_line.depth and newlines:
1490 class LineGenerator(Visitor[Line]):
1491 """Generates reformatted Line objects. Empty lines are not emitted.
1493 Note: destroys the tree it's visiting by mutating prefixes of its leaves
1494 in ways that will no longer stringify to valid Python code on the tree.
1497 is_pyi: bool = False
1498 normalize_strings: bool = True
1499 current_line: Line = Factory(Line)
1500 remove_u_prefix: bool = False
1502 def line(self, indent: int = 0) -> Iterator[Line]:
1505 If the line is empty, only emit if it makes sense.
1506 If the line is too long, split it first and then generate.
1508 If any lines were generated, set up a new current_line.
1510 if not self.current_line:
1511 self.current_line.depth += indent
1512 return # Line is empty, don't emit. Creating a new one unnecessary.
1514 complete_line = self.current_line
1515 self.current_line = Line(depth=complete_line.depth + indent)
1518 def visit_default(self, node: LN) -> Iterator[Line]:
1519 """Default `visit_*()` implementation. Recurses to children of `node`."""
1520 if isinstance(node, Leaf):
1521 any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
1522 for comment in generate_comments(node):
1523 if any_open_brackets:
1524 # any comment within brackets is subject to splitting
1525 self.current_line.append(comment)
1526 elif comment.type == token.COMMENT:
1527 # regular trailing comment
1528 self.current_line.append(comment)
1529 yield from self.line()
1532 # regular standalone comment
1533 yield from self.line()
1535 self.current_line.append(comment)
1536 yield from self.line()
1538 normalize_prefix(node, inside_brackets=any_open_brackets)
1539 if self.normalize_strings and node.type == token.STRING:
1540 normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix)
1541 normalize_string_quotes(node)
1542 if node.type == token.NUMBER:
1543 normalize_numeric_literal(node)
1544 if node.type not in WHITESPACE:
1545 self.current_line.append(node)
1546 yield from super().visit_default(node)
1548 def visit_INDENT(self, node: Node) -> Iterator[Line]:
1549 """Increase indentation level, maybe yield a line."""
1550 # In blib2to3 INDENT never holds comments.
1551 yield from self.line(+1)
1552 yield from self.visit_default(node)
1554 def visit_DEDENT(self, node: Node) -> Iterator[Line]:
1555 """Decrease indentation level, maybe yield a line."""
1556 # The current line might still wait for trailing comments. At DEDENT time
1557 # there won't be any (they would be prefixes on the preceding NEWLINE).
1558 # Emit the line then.
1559 yield from self.line()
1561 # While DEDENT has no value, its prefix may contain standalone comments
1562 # that belong to the current indentation level. Get 'em.
1563 yield from self.visit_default(node)
1565 # Finally, emit the dedent.
1566 yield from self.line(-1)
1569 self, node: Node, keywords: Set[str], parens: Set[str]
1570 ) -> Iterator[Line]:
1571 """Visit a statement.
1573 This implementation is shared for `if`, `while`, `for`, `try`, `except`,
1574 `def`, `with`, `class`, `assert` and assignments.
1576 The relevant Python language `keywords` for a given statement will be
1577 NAME leaves within it. This method puts those on a separate line.
1579 `parens` holds a set of string leaf values immediately after which
1580 invisible parens should be put.
1582 normalize_invisible_parens(node, parens_after=parens)
1583 for child in node.children:
1584 if child.type == token.NAME and child.value in keywords: # type: ignore
1585 yield from self.line()
1587 yield from self.visit(child)
1589 def visit_suite(self, node: Node) -> Iterator[Line]:
1590 """Visit a suite."""
1591 if self.is_pyi and is_stub_suite(node):
1592 yield from self.visit(node.children[2])
1594 yield from self.visit_default(node)
1596 def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
1597 """Visit a statement without nested statements."""
1598 is_suite_like = node.parent and node.parent.type in STATEMENT
1600 if self.is_pyi and is_stub_body(node):
1601 yield from self.visit_default(node)
1603 yield from self.line(+1)
1604 yield from self.visit_default(node)
1605 yield from self.line(-1)
1608 if not self.is_pyi or not node.parent or not is_stub_suite(node.parent):
1609 yield from self.line()
1610 yield from self.visit_default(node)
1612 def visit_async_stmt(self, node: Node) -> Iterator[Line]:
1613 """Visit `async def`, `async for`, `async with`."""
1614 yield from self.line()
1616 children = iter(node.children)
1617 for child in children:
1618 yield from self.visit(child)
1620 if child.type == token.ASYNC:
1623 internal_stmt = next(children)
1624 for child in internal_stmt.children:
1625 yield from self.visit(child)
1627 def visit_decorators(self, node: Node) -> Iterator[Line]:
1628 """Visit decorators."""
1629 for child in node.children:
1630 yield from self.line()
1631 yield from self.visit(child)
1633 def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
1634 """Remove a semicolon and put the other statement on a separate line."""
1635 yield from self.line()
1637 def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
1638 """End of file. Process outstanding comments and end with a newline."""
1639 yield from self.visit_default(leaf)
1640 yield from self.line()
1642 def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
1643 if not self.current_line.bracket_tracker.any_open_brackets():
1644 yield from self.line()
1645 yield from self.visit_default(leaf)
1647 def __attrs_post_init__(self) -> None:
1648 """You are in a twisty little maze of passages."""
1651 self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
1652 self.visit_if_stmt = partial(
1653 v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
1655 self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
1656 self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
1657 self.visit_try_stmt = partial(
1658 v, keywords={"try", "except", "else", "finally"}, parens=Ø
1660 self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
1661 self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
1662 self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
1663 self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
1664 self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
1665 self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
1666 self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
1667 self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
1668 self.visit_async_funcdef = self.visit_async_stmt
1669 self.visit_decorated = self.visit_decorators
1672 IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist}
1673 BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE}
1674 OPENING_BRACKETS = set(BRACKET.keys())
1675 CLOSING_BRACKETS = set(BRACKET.values())
1676 BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
1677 ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT}
1680 def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901
1681 """Return whitespace prefix if needed for the given `leaf`.
1683 `complex_subscript` signals whether the given leaf is part of a subscription
1684 which has non-trivial arguments, like arithmetic expressions or function calls.
1692 if t in ALWAYS_NO_SPACE:
1695 if t == token.COMMENT:
1698 assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
1699 if t == token.COLON and p.type not in {
1706 prev = leaf.prev_sibling
1708 prevp = preceding_leaf(p)
1709 if not prevp or prevp.type in OPENING_BRACKETS:
1712 if t == token.COLON:
1713 if prevp.type == token.COLON:
1716 elif prevp.type != token.COMMA and not complex_subscript:
1721 if prevp.type == token.EQUAL:
1723 if prevp.parent.type in {
1731 elif prevp.parent.type == syms.typedargslist:
1732 # A bit hacky: if the equal sign has whitespace, it means we
1733 # previously found it's a typed argument. So, we're using
1734 # that, too.
1737 elif prevp.type in STARS:
1738 if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS):
1741 elif prevp.type == token.COLON:
1742 if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
1743 return SPACE if complex_subscript else NO
1747 and prevp.parent.type == syms.factor
1748 and prevp.type in MATH_OPERATORS
1753 prevp.type == token.RIGHTSHIFT
1755 and prevp.parent.type == syms.shift_expr
1756 and prevp.prev_sibling
1757 and prevp.prev_sibling.type == token.NAME
1758 and prevp.prev_sibling.value == "print" # type: ignore
1760 # Python 2 print chevron
1763 elif prev.type in OPENING_BRACKETS:
1766 if p.type in {syms.parameters, syms.arglist}:
1767 # untyped function signatures or calls
1768 if not prev or prev.type != token.COMMA:
1771 elif p.type == syms.varargslist:
1773 if prev and prev.type != token.COMMA:
1776 elif p.type == syms.typedargslist:
1777 # typed function signatures
1781 if t == token.EQUAL:
1782 if prev.type != syms.tname:
1785 elif prev.type == token.EQUAL:
1786 # A bit hacky: if the equal sign has whitespace, it means we
1787 # previously found it's a typed argument. So, we're using that, too.
1790 elif prev.type != token.COMMA:
1793 elif p.type == syms.tname:
1796 prevp = preceding_leaf(p)
1797 if not prevp or prevp.type != token.COMMA:
1800 elif p.type == syms.trailer:
1801 # attributes and calls
1802 if t == token.LPAR or t == token.RPAR:
1807 prevp = preceding_leaf(p)
1808 if not prevp or prevp.type != token.NUMBER:
1811 elif t == token.LSQB:
1814 elif prev.type != token.COMMA:
1817 elif p.type == syms.argument:
1819 if t == token.EQUAL:
1823 prevp = preceding_leaf(p)
1824 if not prevp or prevp.type == token.LPAR:
1827 elif prev.type in {token.EQUAL} | STARS:
1830 elif p.type == syms.decorator:
1834 elif p.type == syms.dotted_name:
1838 prevp = preceding_leaf(p)
1839 if not prevp or prevp.type == token.AT or prevp.type == token.DOT:
1842 elif p.type == syms.classdef:
1846 if prev and prev.type == token.LPAR:
1849 elif p.type in {syms.subscript, syms.sliceop}:
1852 assert p.parent is not None, "subscripts are always parented"
1853 if p.parent.type == syms.subscriptlist:
1858 elif not complex_subscript:
1861 elif p.type == syms.atom:
1862 if prev and t == token.DOT:
1863 # dots, but not the first one.
1866 elif p.type == syms.dictsetmaker:
1868 if prev and prev.type == token.DOUBLESTAR:
1871 elif p.type in {syms.factor, syms.star_expr}:
1874 prevp = preceding_leaf(p)
1875 if not prevp or prevp.type in OPENING_BRACKETS:
1878 prevp_parent = prevp.parent
1879 assert prevp_parent is not None
1880 if prevp.type == token.COLON and prevp_parent.type in {
1886 elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
1889 elif t in {token.NAME, token.NUMBER, token.STRING}:
1892 elif p.type == syms.import_from:
1894 if prev and prev.type == token.DOT:
1897 elif t == token.NAME:
1901 if prev and prev.type == token.DOT:
1904 elif p.type == syms.sliceop:
1910 def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
1911 """Return the first leaf that precedes `node`, if any."""
1913 res = node.prev_sibling
1915 if isinstance(res, Leaf):
1919 return list(res.leaves())[-1]
1928 def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]:
1929 """Return the child of `ancestor` that contains `descendant`."""
1930 node: Optional[LN] = descendant
1931 while node and node.parent != ancestor:
1936 def container_of(leaf: Leaf) -> LN:
1937 """Return `leaf` or one of its ancestors that is the topmost container of it.
1939 By "container" we mean a node where `leaf` is the very first child.
1941 same_prefix = leaf.prefix
1942 container: LN = leaf
1944 parent = container.parent
1948 if parent.children[0].prefix != same_prefix:
1951 if parent.type == syms.file_input:
1954 if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS:
1961 def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> int:
1962 """Return the priority of the `leaf` delimiter, given a line break after it.
1964 The delimiter priorities returned here are from those delimiters that would
1965 cause a line break after themselves.
1967 Higher numbers are higher priority.
1969 if leaf.type == token.COMMA:
1970 return COMMA_PRIORITY
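# Editorial sketch, not part of black.py: only a comma carries an after-split
# priority; other delimiters are handled by is_split_before_delimiter() below.
def _example_split_after() -> None:
    assert is_split_after_delimiter(Leaf(token.COMMA, ",")) == COMMA_PRIORITY
    assert is_split_after_delimiter(Leaf(token.NAME, "and")) == 0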
1975 def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> int:
1976 """Return the priority of the `leaf` delimiter, given a line break before it.
1978 The delimiter priorities returned here are from those delimiters that would
1979 cause a line break before themselves.
1981 Higher numbers are higher priority.
1983 if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
1984 # * and ** might also be MATH_OPERATORS but in this case they are not.
1985 # Don't treat them as a delimiter.
1989 leaf.type == token.DOT
1991 and leaf.parent.type not in {syms.import_from, syms.dotted_name}
1992 and (previous is None or previous.type in CLOSING_BRACKETS)
1997 leaf.type in MATH_OPERATORS
1999 and leaf.parent.type not in {syms.factor, syms.star_expr}
2001 return MATH_PRIORITIES[leaf.type]
2003 if leaf.type in COMPARATORS:
2004 return COMPARATOR_PRIORITY
2007 leaf.type == token.STRING
2008 and previous is not None
2009 and previous.type == token.STRING
2011 return STRING_PRIORITY
2013 if leaf.type not in {token.NAME, token.ASYNC}:
2019 and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
2020 or leaf.type == token.ASYNC
2023 not isinstance(leaf.prev_sibling, Leaf)
2024 or leaf.prev_sibling.value != "async"
2026 return COMPREHENSION_PRIORITY
2031 and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
2033 return COMPREHENSION_PRIORITY
2035 if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test:
2036 return TERNARY_PRIORITY
2038 if leaf.value == "is":
2039 return COMPARATOR_PRIORITY
2044 and leaf.parent.type in {syms.comp_op, syms.comparison}
2046 previous is not None
2047 and previous.type == token.NAME
2048 and previous.value == "not"
2051 return COMPARATOR_PRIORITY
2056 and leaf.parent.type == syms.comp_op
2058 previous is not None
2059 and previous.type == token.NAME
2060 and previous.value == "is"
2063 return COMPARATOR_PRIORITY
2065 if leaf.value in LOGIC_OPERATORS and leaf.parent:
2066 return LOGIC_PRIORITY
2071 FMT_OFF = {"# fmt: off", "# fmt:off", "# yapf: disable"}
2072 FMT_ON = {"# fmt: on", "# fmt:on", "# yapf: enable"}
2075 def generate_comments(leaf: LN) -> Iterator[Leaf]:
2076 """Clean the prefix of the `leaf` and generate comments from it, if any.
2078 Comments in lib2to3 are shoved into the whitespace prefix. This happens
2079 in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation
2080 move because it does away with modifying the grammar to include all the
2081 possible places in which comments can be placed.
2083 The sad consequence for us though is that comments don't "belong" anywhere.
2084 This is why this function generates simple parentless Leaf objects for
2085 comments. We simply don't know what the correct parent should be.
2087 No matter though, we can live without this. We really only need to
2088 differentiate between inline and standalone comments. The latter don't
2089 share the line with any code.
2091 Inline comments are emitted as regular token.COMMENT leaves. Standalone
2092 are emitted with a fake STANDALONE_COMMENT token identifier.
2094 for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
2095 yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines)
2100 """Describes a piece of syntax that is a comment.
2102 It's not a :class:`blib2to3.pytree.Leaf` so that:
2104 * it can be cached (`Leaf` objects should not be reused more than once as
2105 they store their lineno, column, prefix, and parent information);
2106 * `newlines` and `consumed` fields are kept separate from the `value`. This
2107 simplifies handling of special marker comments like ``# fmt: off/on``.
2110 type: int # token.COMMENT or STANDALONE_COMMENT
2111 value: str # content of the comment
2112 newlines: int # how many newlines before the comment
2113 consumed: int # how many characters of the original leaf's prefix did we consume
2116 @lru_cache(maxsize=4096)
2117 def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
2118 """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
2119 result: List[ProtoComment] = []
2120 if not prefix or "#" not in prefix:
2125 for index, line in enumerate(prefix.split("\n")):
2126 consumed += len(line) + 1 # adding the length of the split '\n'
2127 line = line.lstrip()
2130 if not line.startswith("#"):
2133 if index == 0 and not is_endmarker:
2134 comment_type = token.COMMENT # simple trailing comment
2136 comment_type = STANDALONE_COMMENT
2137 comment = make_comment(line)
2140 type=comment_type, value=comment, newlines=nlines, consumed=consumed
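# Editorial sketch, not part of black.py: the first comment in a prefix becomes
# a regular trailing COMMENT, later ones become STANDALONE_COMMENTs.
def _example_list_comments() -> None:
    comments = list_comments("  # inline\n# own line\n", is_endmarker=False)
    assert [c.value for c in comments] == ["# inline", "# own line"]
    assert comments[0].type == token.COMMENT
    assert comments[1].type == STANDALONE_COMMENT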
2147 def make_comment(content: str) -> str:
2148 """Return a consistently formatted comment from the given `content` string.
2150 All comments (except for "##", "#!", "#:", "#'", "#%%") should have a single
2151 space between the hash sign and the content.
2153 If `content` didn't start with a hash sign, one is provided.
2155 content = content.rstrip()
2159 if content[0] == "#":
2160 content = content[1:]
2161 if content and content[0] not in " !:#'%":
2162 content = " " + content
2163 return "#" + content
2169 inner: bool = False,
2170 features: Collection[Feature] = (),
2171 ) -> Iterator[Line]:
2172 """Split a `line` into potentially many lines.
2174 They should fit in the allotted `line_length` but might not be able to.
2175 `inner` signifies that there was a pair of brackets somewhere around the
2176 current `line`, possibly transitively. This means we can fall back to
2177 splitting by delimiters if the LHS/RHS don't yield any results.
2179 `features` are syntactical features that may be used in the output.
2185 line_str = str(line).strip("\n")
2188 not line.contains_inner_type_comments()
2189 and not line.should_explode
2190 and is_line_short_enough(line, line_length=line_length, line_str=line_str)
2195 split_funcs: List[SplitFunc]
2197 split_funcs = [left_hand_split]
2200 def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]:
2201 for omit in generate_trailers_to_omit(line, line_length):
2202 lines = list(right_hand_split(line, line_length, features, omit=omit))
2203 if is_line_short_enough(lines[0], line_length=line_length):
2207 # All splits failed, best effort split with no omits.
2208 # This mostly happens to multiline strings that are by definition
2209 # reported as not fitting a single line.
2210 yield from right_hand_split(line, line_length, features=features)
2212 if line.inside_brackets:
2213 split_funcs = [delimiter_split, standalone_comment_split, rhs]
2216 for split_func in split_funcs:
2217 # We are accumulating lines in `result` because we might want to abort
2218 # mission and return the original line in the end, or attempt a different
2219 # split altogether.
2220 result: List[Line] = []
2222 for l in split_func(line, features):
2223 if str(l).strip("\n") == line_str:
2224 raise CannotSplit("Split function returned an unchanged result")
2228 l, line_length=line_length, inner=True, features=features
2242 def left_hand_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
2243 """Split line into many lines, starting with the first matching bracket pair.
2245 Note: this usually looks weird, only use this for function definitions.
2246 Prefer RHS otherwise. This is why this function is not symmetrical with
2247 :func:`right_hand_split` which also handles optional parentheses.
2249 tail_leaves: List[Leaf] = []
2250 body_leaves: List[Leaf] = []
2251 head_leaves: List[Leaf] = []
2252 current_leaves = head_leaves
2253 matching_bracket = None
2254 for leaf in line.leaves:
2256 current_leaves is body_leaves
2257 and leaf.type in CLOSING_BRACKETS
2258 and leaf.opening_bracket is matching_bracket
2260 current_leaves = tail_leaves if body_leaves else head_leaves
2261 current_leaves.append(leaf)
2262 if current_leaves is head_leaves:
2263 if leaf.type in OPENING_BRACKETS:
2264 matching_bracket = leaf
2265 current_leaves = body_leaves
2266 if not matching_bracket:
2267 raise CannotSplit("No brackets found")
2269 head = bracket_split_build_line(head_leaves, line, matching_bracket)
2270 body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True)
2271 tail = bracket_split_build_line(tail_leaves, line, matching_bracket)
2272 bracket_split_succeeded_or_raise(head, body, tail)
2273 for result in (head, body, tail):
2278 def right_hand_split(
2281 features: Collection[Feature] = (),
2282 omit: Collection[LeafID] = (),
2283 ) -> Iterator[Line]:
2284 """Split line into many lines, starting with the last matching bracket pair.
2286 If the split was by optional parentheses, attempt splitting without them, too.
2287 `omit` is a collection of closing bracket IDs that shouldn't be considered for
2288 this split.
2290 Note: running this function modifies `bracket_depth` on the leaves of `line`.
2292 tail_leaves: List[Leaf] = []
2293 body_leaves: List[Leaf] = []
2294 head_leaves: List[Leaf] = []
2295 current_leaves = tail_leaves
2296 opening_bracket = None
2297 closing_bracket = None
2298 for leaf in reversed(line.leaves):
2299 if current_leaves is body_leaves:
2300 if leaf is opening_bracket:
2301 current_leaves = head_leaves if body_leaves else tail_leaves
2302 current_leaves.append(leaf)
2303 if current_leaves is tail_leaves:
2304 if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
2305 opening_bracket = leaf.opening_bracket
2306 closing_bracket = leaf
2307 current_leaves = body_leaves
2308 if not (opening_bracket and closing_bracket and head_leaves):
2309 # If there is no opening or closing_bracket that means the split failed and
2310 # all content is in the tail. Otherwise, if `head_leaves` are empty, it means
2311 # the matching `opening_bracket` wasn't available on `line` anymore.
2312 raise CannotSplit("No brackets found")
2314 tail_leaves.reverse()
2315 body_leaves.reverse()
2316 head_leaves.reverse()
2317 head = bracket_split_build_line(head_leaves, line, opening_bracket)
2318 body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True)
2319 tail = bracket_split_build_line(tail_leaves, line, opening_bracket)
2320 bracket_split_succeeded_or_raise(head, body, tail)
2322 # the body shouldn't be exploded
2323 not body.should_explode
2324 # the opening bracket is an optional paren
2325 and opening_bracket.type == token.LPAR
2326 and not opening_bracket.value
2327 # the closing bracket is an optional paren
2328 and closing_bracket.type == token.RPAR
2329 and not closing_bracket.value
2330 # it's not an import (optional parens are the only thing we can split on
2331 # in this case; attempting a split without them is a waste of time)
2332 and not line.is_import
2333 # there are no standalone comments in the body
2334 and not body.contains_standalone_comments(0)
2335 # and we can actually remove the parens
2336 and can_omit_invisible_parens(body, line_length)
2338 omit = {id(closing_bracket), *omit}
2340 yield from right_hand_split(line, line_length, features=features, omit=omit)
2346 or is_line_short_enough(body, line_length=line_length)
2349 "Splitting failed, body is still too long and can't be split."
2352 elif head.contains_multiline_strings() or tail.contains_multiline_strings():
2354 "The current optional pair of parentheses is bound to fail to "
2355 "satisfy the splitting algorithm because the head or the tail "
2356 "contains multiline strings which by definition never fit one "
2360 ensure_visible(opening_bracket)
2361 ensure_visible(closing_bracket)
2362 for result in (head, body, tail):
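# Illustrative sketch (not part of the original source): right_hand_split works
# backwards from the last bracket pair, so an over-long assignment such as
#     result = some_function(argument_one, argument_two, argument_three)
# would split roughly into
#     result = some_function(
#         argument_one, argument_two, argument_three
#     )
# When the matched pair turns out to be optional parentheses that can be
# omitted, the recursive call above retries with that closing bracket added to
# `omit` so an earlier bracket pair can be tried instead.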
2367 def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
2368 """Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
2370 Do nothing otherwise.
2372 A left- or right-hand split is based on a pair of brackets. Content before
2373 (and including) the opening bracket is left on one line, content inside the
2374 brackets is put on a separate line, and finally content starting with and
2375 following the closing bracket is put on a separate line.
2377 Those are called `head`, `body`, and `tail`, respectively. If the split
2378 produced the same line (all content in `head`) or ended up with an empty `body`
2379 and the `tail` is just the closing bracket, then it's considered failed.
2381 tail_len = len(str(tail).strip())
2384 raise CannotSplit("Splitting brackets produced the same line")
2388 f"Splitting brackets on an empty body to save "
2389 f"{tail_len} characters is not worth it"
2393 def bracket_split_build_line(
2394 leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False
2396 """Return a new line with given `leaves` and respective comments from `original`.
2398 If `is_body` is True, the result line is one-indented inside brackets and as such
2399 has its first leaf's prefix normalized and a trailing comma added when expected.
2401 result = Line(depth=original.depth)
2403 result.inside_brackets = True
2406 # Since body is a new indent level, remove spurious leading whitespace.
2407 normalize_prefix(leaves[0], inside_brackets=True)
2408 # Ensure a trailing comma for imports, but be careful not to add one after
2410 if original.is_import:
2411 for i in range(len(leaves) - 1, -1, -1):
2412 if leaves[i].type == STANDALONE_COMMENT:
2414 elif leaves[i].type == token.COMMA:
2417 leaves.insert(i + 1, Leaf(token.COMMA, ","))
2421 result.append(leaf, preformatted=True)
2422 for comment_after in original.comments_after(leaf):
2423 result.append(comment_after, preformatted=True)
2425 result.should_explode = should_explode(result, opening_bracket)
2429 def dont_increase_indentation(split_func: SplitFunc) -> SplitFunc:
2430 """Normalize prefix of the first leaf in every line returned by `split_func`.
2432 This is a decorator over relevant split functions.
2436 def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
2437 for l in split_func(line, features):
2438 normalize_prefix(l.leaves[0], inside_brackets=True)
2441 return split_wrapper
2444 @dont_increase_indentation
2445 def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
2446 """Split according to delimiters of the highest priority.
2448 If the appropriate Features are given, the split will also add trailing
2449 commas to function signatures and calls that contain `*` and `**`.
2452 last_leaf = line.leaves[-1]
2454 raise CannotSplit("Line empty")
2456 bt = line.bracket_tracker
2458 delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
2460 raise CannotSplit("No delimiters found")
2462 if delimiter_priority == DOT_PRIORITY:
2463 if bt.delimiter_count_with_priority(delimiter_priority) == 1:
2464 raise CannotSplit("Splitting a single attribute from its owner looks wrong")
2466 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2467 lowest_depth = sys.maxsize
2468 trailing_comma_safe = True
2470 def append_to_line(leaf: Leaf) -> Iterator[Line]:
2471 """Append `leaf` to current line or to new line if appending impossible."""
2472 nonlocal current_line
2474 current_line.append_safe(leaf, preformatted=True)
2478 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2479 current_line.append(leaf)
2481 for leaf in line.leaves:
2482 yield from append_to_line(leaf)
2484 for comment_after in line.comments_after(leaf):
2485 yield from append_to_line(comment_after)
2487 lowest_depth = min(lowest_depth, leaf.bracket_depth)
2488 if leaf.bracket_depth == lowest_depth:
2489 if is_vararg(leaf, within={syms.typedargslist}):
2490 trailing_comma_safe = (
2491 trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features
2493 elif is_vararg(leaf, within={syms.arglist, syms.argument}):
2494 trailing_comma_safe = (
2495 trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features
2498 leaf_priority = bt.delimiters.get(id(leaf))
2499 if leaf_priority == delimiter_priority:
2502 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2506 and delimiter_priority == COMMA_PRIORITY
2507 and current_line.leaves[-1].type != token.COMMA
2508 and current_line.leaves[-1].type != STANDALONE_COMMENT
2510 current_line.append(Leaf(token.COMMA, ","))
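# Illustrative sketch (not part of the original source): delimiter_split breaks
# the body on its highest-priority delimiter (commas here), one element per
# line, and appends a trailing comma when the features allow it. Combined with
# a preceding bracket split, an over-long call such as
#     call(arg_one, arg_two, arg_three)
# ends up roughly as
#     call(
#         arg_one,
#         arg_two,
#         arg_three,
#     )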
2514 @dont_increase_indentation
2515 def standalone_comment_split(
2516 line: Line, features: Collection[Feature] = ()
2517 ) -> Iterator[Line]:
2518 """Split standalone comments from the rest of the line."""
2519 if not line.contains_standalone_comments(0):
2520 raise CannotSplit("Line does not have any standalone comments")
2522 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2524 def append_to_line(leaf: Leaf) -> Iterator[Line]:
2525 """Append `leaf` to current line or to new line if appending impossible."""
2526 nonlocal current_line
2528 current_line.append_safe(leaf, preformatted=True)
2532 current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
2533 current_line.append(leaf)
2535 for leaf in line.leaves:
2536 yield from append_to_line(leaf)
2538 for comment_after in line.comments_after(leaf):
2539 yield from append_to_line(comment_after)
2545 def is_import(leaf: Leaf) -> bool:
2546 """Return True if the given leaf starts an import statement."""
2553 (v == "import" and p and p.type == syms.import_name)
2554 or (v == "from" and p and p.type == syms.import_from)
2559 def is_type_comment(leaf: Leaf) -> bool:
2560 """Return True if the given leaf is a special comment.
2561 Only returns true for type comments for now."""
2564 return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:")
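# Illustrative sketch (not part of the original source), showing the intent of
# is_type_comment on hand-built leaves:
#     is_type_comment(Leaf(token.COMMENT, "# type: List[int]"))   ->  True
#     is_type_comment(Leaf(token.COMMENT, "# a regular comment"))  ->  False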
2567 def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
2568 """Leave existing extra newlines if not `inside_brackets`. Remove everything
2571 Note: don't use backslashes for formatting or you'll lose your voting rights.
2573 if not inside_brackets:
2574 spl = leaf.prefix.split("#")
2575 if "\\" not in spl[0]:
2576 nl_count = spl[-1].count("\n")
2579 leaf.prefix = "\n" * nl_count
2585 def normalize_string_prefix(leaf: Leaf, remove_u_prefix: bool = False) -> None:
2586 """Make all string prefixes lowercase.
2588 If remove_u_prefix is given, also removes any u prefix from the string.
2590 Note: Mutates its argument.
2592 match = re.match(r"^([furbFURB]*)(.*)$", leaf.value, re.DOTALL)
2593 assert match is not None, f"failed to match string {leaf.value!r}"
2594 orig_prefix = match.group(1)
2595 new_prefix = orig_prefix.lower()
2597 new_prefix = new_prefix.replace("u", "")
2598 leaf.value = f"{new_prefix}{match.group(2)}"
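# Illustrative sketch (not part of the original source) of the prefix rewrites
# performed on a string leaf's value:
#     F"hi"   ->  f"hi"
#     RB"hi"  ->  rb"hi"
#     u"hi"   ->  "hi"     (only with remove_u_prefix=True)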
2601 def normalize_string_quotes(leaf: Leaf) -> None:
2602 """Prefer double quotes but only if it doesn't cause more escaping.
2604 Adds or removes backslashes as appropriate. Doesn't parse and fix
2605 strings nested in f-strings (yet).
2607 Note: Mutates its argument.
2609 value = leaf.value.lstrip("furbFURB")
2610 if value[:3] == '"""':
2613 elif value[:3] == "'''":
2616 elif value[0] == '"':
2622 first_quote_pos = leaf.value.find(orig_quote)
2623 if first_quote_pos == -1:
2624 return # There's an internal error
2626 prefix = leaf.value[:first_quote_pos]
2627 unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
2628 escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
2629 escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}")
2630 body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)]
2631 if "r" in prefix.casefold():
2632 if unescaped_new_quote.search(body):
2633 # There's at least one unescaped new_quote in this raw string
2634 # so converting is impossible
2637 # Do not introduce or remove backslashes in raw strings
2640 # remove unnecessary escapes
2641 new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
2642 if body != new_body:
2643 # Consider the string without unnecessary escapes as the original
2645 leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}"
2646 new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
2647 new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)
2648 if "f" in prefix.casefold():
2649 matches = re.findall(r"[^{]\{(.*?)\}[^}]", new_body)
2652 # Do not introduce backslashes in interpolated expressions
2654 if new_quote == '"""' and new_body[-1:] == '"':
2656 new_body = new_body[:-1] + '\\"'
2657 orig_escape_count = body.count("\\")
2658 new_escape_count = new_body.count("\\")
2659 if new_escape_count > orig_escape_count:
2660 return # Do not introduce more escaping
2662 if new_escape_count == orig_escape_count and orig_quote == '"':
2663 return # Prefer double quotes
2665 leaf.value = f"{prefix}{new_quote}{new_body}{new_quote}"
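# Illustrative sketch (not part of the original source): double quotes are
# preferred unless they would add escaping, e.g.
#     'hello'        ->  "hello"
#     'it\'s'        ->  "it's"        (escape dropped, so double quotes win)
#     'say "hi"'     ->  'say "hi"'    (left alone: double quotes would need escapes)
#     r'raw "text"'  ->  r'raw "text"' (raw strings are never re-escaped)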
2668 def normalize_numeric_literal(leaf: Leaf) -> None:
2669 """Normalizes numeric (float, int, and complex) literals.
2671 All letters used in the representation are normalized to lowercase (except
2672 in Python 2 long literals).
2674 text = leaf.value.lower()
2675 if text.startswith(("0o", "0b")):
2676 # Leave octal and binary literals alone.
2678 elif text.startswith("0x"):
2679 # Change hex literals to upper case.
2680 before, after = text[:2], text[2:]
2681 text = f"{before}{after.upper()}"
2683 before, after = text.split("e")
2685 if after.startswith("-"):
2688 elif after.startswith("+"):
2690 before = format_float_or_int_string(before)
2691 text = f"{before}e{sign}{after}"
2692 elif text.endswith(("j", "l")):
2695 # Capitalize in "2L" because "l" looks too similar to "1".
2698 text = f"{format_float_or_int_string(number)}{suffix}"
2700 text = format_float_or_int_string(text)
2704 def format_float_or_int_string(text: str) -> str:
2705 """Formats a float string like "1.0"."""
2709 before, after = text.split(".")
2710 return f"{before or 0}.{after or 0}"
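# Illustrative sketch (not part of the original source) of the combined effect
# of normalize_numeric_literal and format_float_or_int_string:
#     0XABCDEF  ->  0xABCDEF    (prefix lowered, hex digits upper-cased)
#     1E+5      ->  1e5         (exponent lowered, redundant plus dropped)
#     10.       ->  10.0        (missing integer/fractional part filled in)
#     .5J       ->  0.5j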
2713 def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
2714 """Make existing optional parentheses invisible or create new ones.
2716 `parens_after` is a set of string leaf values immediately after which parens
2719 Standardizes on visible parentheses for single-element tuples, and keeps
2720 existing visible parentheses for other tuples and generator expressions.
2722 for pc in list_comments(node.prefix, is_endmarker=False):
2723 if pc.value in FMT_OFF:
2724 # This `node` has a prefix with `# fmt: off`; don't mess with parens.
2728 for index, child in enumerate(list(node.children)):
2729 # Add parentheses around long tuple unpacking in assignments.
2732 and isinstance(child, Node)
2733 and child.type == syms.testlist_star_expr
2738 if child.type == syms.atom:
2739 if maybe_make_parens_invisible_in_atom(child, parent=node):
2740 lpar = Leaf(token.LPAR, "")
2741 rpar = Leaf(token.RPAR, "")
2742 index = child.remove() or 0
2743 node.insert_child(index, Node(syms.atom, [lpar, child, rpar]))
2744 elif is_one_tuple(child):
2745 # wrap child in visible parentheses
2746 lpar = Leaf(token.LPAR, "(")
2747 rpar = Leaf(token.RPAR, ")")
2749 node.insert_child(index, Node(syms.atom, [lpar, child, rpar]))
2750 elif node.type == syms.import_from:
2751 # "import from" nodes store parentheses directly as part of
2753 if child.type == token.LPAR:
2754 # make parentheses invisible
2755 child.value = "" # type: ignore
2756 node.children[-1].value = "" # type: ignore
2757 elif child.type != token.STAR:
2758 # insert invisible parentheses
2759 node.insert_child(index, Leaf(token.LPAR, ""))
2760 node.append_child(Leaf(token.RPAR, ""))
2763 elif not (isinstance(child, Leaf) and is_multiline_string(child)):
2764 # wrap child in invisible parentheses
2765 lpar = Leaf(token.LPAR, "")
2766 rpar = Leaf(token.RPAR, "")
2767 index = child.remove() or 0
2768 prefix = child.prefix
2770 new_child = Node(syms.atom, [lpar, child, rpar])
2771 new_child.prefix = prefix
2772 node.insert_child(index, new_child)
2774 check_lpar = isinstance(child, Leaf) and child.value in parens_after
2777 def normalize_fmt_off(node: Node) -> None:
2778 """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
2781 try_again = convert_one_fmt_off_pair(node)
2784 def convert_one_fmt_off_pair(node: Node) -> bool:
2785 """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
2787 Returns True if a pair was converted.
2789 for leaf in node.leaves():
2790 previous_consumed = 0
2791 for comment in list_comments(leaf.prefix, is_endmarker=False):
2792 if comment.value in FMT_OFF:
2793 # We only want standalone comments. If there's no previous leaf or
2794 # the previous leaf is indentation, it's a standalone comment in
2796 if comment.type != STANDALONE_COMMENT:
2797 prev = preceding_leaf(leaf)
2798 if prev and prev.type not in WHITESPACE:
2801 ignored_nodes = list(generate_ignored_nodes(leaf))
2802 if not ignored_nodes:
2805 first = ignored_nodes[0] # Can be a container node with the `leaf`.
2806 parent = first.parent
2807 prefix = first.prefix
2808 first.prefix = prefix[comment.consumed :]
2810 comment.value + "\n" + "".join(str(n) for n in ignored_nodes)
2812 if hidden_value.endswith("\n"):
2813 # That happens when one of the `ignored_nodes` ended with a NEWLINE
2814 # leaf (possibly followed by a DEDENT).
2815 hidden_value = hidden_value[:-1]
2817 for ignored in ignored_nodes:
2818 index = ignored.remove()
2819 if first_idx is None:
2821 assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
2822 assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
2823 parent.insert_child(
2828 prefix=prefix[:previous_consumed] + "\n" * comment.newlines,
2833 previous_consumed = comment.consumed
2838 def generate_ignored_nodes(leaf: Leaf) -> Iterator[LN]:
2839 """Starting from the container of `leaf`, generate all leaves until `# fmt: on`.
2841 Stops at the end of the block.
2843 container: Optional[LN] = container_of(leaf)
2844 while container is not None and container.type != token.ENDMARKER:
2845 for comment in list_comments(container.prefix, is_endmarker=False):
2846 if comment.value in FMT_ON:
2851 container = container.next_sibling
2854 def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool:
2855 """If it's safe, make the parens in the atom `node` invisible, recursively.
2857 Returns whether the node should itself be wrapped in invisible parentheses.
2861 node.type != syms.atom
2862 or is_empty_tuple(node)
2863 or is_one_tuple(node)
2864 or (is_yield(node) and parent.type != syms.expr_stmt)
2865 or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
2869 first = node.children[0]
2870 last = node.children[-1]
2871 if first.type == token.LPAR and last.type == token.RPAR:
2872 # make parentheses invisible
2873 first.value = "" # type: ignore
2874 last.value = "" # type: ignore
2875 if len(node.children) > 1:
2876 maybe_make_parens_invisible_in_atom(node.children[1], parent=parent)
2882 def is_empty_tuple(node: LN) -> bool:
2883 """Return True if `node` holds an empty tuple."""
2885 node.type == syms.atom
2886 and len(node.children) == 2
2887 and node.children[0].type == token.LPAR
2888 and node.children[1].type == token.RPAR
2892 def is_one_tuple(node: LN) -> bool:
2893 """Return True if `node` holds a tuple with one element, with or without parens."""
2894 if node.type == syms.atom:
2895 if len(node.children) != 3:
2898 lpar, gexp, rpar = node.children
2900 lpar.type == token.LPAR
2901 and gexp.type == syms.testlist_gexp
2902 and rpar.type == token.RPAR
2906 return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA
2909 node.type in IMPLICIT_TUPLE
2910 and len(node.children) == 2
2911 and node.children[1].type == token.COMMA
2915 def is_yield(node: LN) -> bool:
2916 """Return True if `node` holds a `yield` or `yield from` expression."""
2917 if node.type == syms.yield_expr:
2920 if node.type == token.NAME and node.value == "yield": # type: ignore
2923 if node.type != syms.atom:
2926 if len(node.children) != 3:
2929 lpar, expr, rpar = node.children
2930 if lpar.type == token.LPAR and rpar.type == token.RPAR:
2931 return is_yield(expr)
2936 def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
2937 """Return True if `leaf` is a star or double star in a vararg or kwarg.
2939 If `within` includes VARARGS_PARENTS, this applies to function signatures.
2940 If `within` includes UNPACKING_PARENTS, it applies to right-hand side
2941 extended iterable unpacking (PEP 3132) and additional unpacking
2942 generalizations (PEP 448).
2944 if leaf.type not in STARS or not leaf.parent:
2948 if p.type == syms.star_expr:
2949 # Star expressions are also used as assignment targets in extended
2950 # iterable unpacking (PEP 3132). See what its parent is instead.
2956 return p.type in within
2959 def is_multiline_string(leaf: Leaf) -> bool:
2960 """Return True if `leaf` is a multiline string that actually spans many lines."""
2961 value = leaf.value.lstrip("furbFURB")
2962 return value[:3] in {'"""', "'''"} and "\n" in value
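# Illustrative sketch (not part of the original source): only triple-quoted
# strings that really contain a newline qualify, e.g.
#     is_multiline_string(Leaf(token.STRING, '"""one\ntwo"""'))   ->  True
#     is_multiline_string(Leaf(token.STRING, '"""one line"""'))   ->  False
#     is_multiline_string(Leaf(token.STRING, 'r"plain"'))         ->  False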
2965 def is_stub_suite(node: Node) -> bool:
2966 """Return True if `node` is a suite with a stub body."""
2968 len(node.children) != 4
2969 or node.children[0].type != token.NEWLINE
2970 or node.children[1].type != token.INDENT
2971 or node.children[3].type != token.DEDENT
2975 return is_stub_body(node.children[2])
2978 def is_stub_body(node: LN) -> bool:
2979 """Return True if `node` is a simple statement containing an ellipsis."""
2980 if not isinstance(node, Node) or node.type != syms.simple_stmt:
2983 if len(node.children) != 2:
2986 child = node.children[0]
2988 child.type == syms.atom
2989 and len(child.children) == 3
2990 and all(leaf == Leaf(token.DOT, ".") for leaf in child.children)
2994 def max_delimiter_priority_in_atom(node: LN) -> int:
2995 """Return maximum delimiter priority inside `node`.
2997 This is specific to atoms with contents contained in a pair of parentheses.
2998 If `node` isn't an atom or there are no enclosing parentheses, returns 0.
3000 if node.type != syms.atom:
3003 first = node.children[0]
3004 last = node.children[-1]
3005 if not (first.type == token.LPAR and last.type == token.RPAR):
3008 bt = BracketTracker()
3009 for c in node.children[1:-1]:
3010 if isinstance(c, Leaf):
3013 for leaf in c.leaves():
3016 return bt.max_delimiter_priority()
3022 def ensure_visible(leaf: Leaf) -> None:
3023 """Make sure parentheses are visible.
3025 They could be invisible as part of some statements (see
3026 :func:`normalize_invisible_parens` and :func:`visit_import_from`).
3028 if leaf.type == token.LPAR:
3030 elif leaf.type == token.RPAR:
3034 def should_explode(line: Line, opening_bracket: Leaf) -> bool:
3035 """Should `line` immediately be split with `delimiter_split()` after RHS?"""
3038 opening_bracket.parent
3039 and opening_bracket.parent.type in {syms.atom, syms.import_from}
3040 and opening_bracket.value in "[{("
3045 last_leaf = line.leaves[-1]
3046 exclude = {id(last_leaf)} if last_leaf.type == token.COMMA else set()
3047 max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
3048 except (IndexError, ValueError):
3051 return max_priority == COMMA_PRIORITY
3054 def get_features_used(node: Node) -> Set[Feature]:
3055 """Return a set of (relatively) new Python features used in this file.
3057 Currently looking for:
3059 - underscores in numeric literals; and
3060 - trailing commas after * or ** in function signatures and calls.
3062 features: Set[Feature] = set()
3063 for n in node.pre_order():
3064 if n.type == token.STRING:
3065 value_head = n.value[:2] # type: ignore
3066 if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}:
3067 features.add(Feature.F_STRINGS)
3069 elif n.type == token.NUMBER:
3070 if "_" in n.value: # type: ignore
3071 features.add(Feature.NUMERIC_UNDERSCORES)
3074 n.type in {syms.typedargslist, syms.arglist}
3076 and n.children[-1].type == token.COMMA
3078 if n.type == syms.typedargslist:
3079 feature = Feature.TRAILING_COMMA_IN_DEF
3081 feature = Feature.TRAILING_COMMA_IN_CALL
3083 for ch in n.children:
3084 if ch.type in STARS:
3085 features.add(feature)
3087 if ch.type == syms.argument:
3088 for argch in ch.children:
3089 if argch.type in STARS:
3090 features.add(feature)
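# Illustrative sketch (not part of the original source): given a parse tree for
#     x = 1_000_000
#     y = f"{x}"
# this function is expected to report {Feature.NUMERIC_UNDERSCORES,
# Feature.F_STRINGS}, which detect_target_versions below then narrows to the
# Python versions supporting both features.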
3095 def detect_target_versions(node: Node) -> Set[TargetVersion]:
3096 """Detect the version to target based on the nodes used."""
3097 features = get_features_used(node)
3099 version for version in TargetVersion if features <= VERSION_TO_FEATURES[version]
3103 def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
3104 """Generate sets of closing bracket IDs that should be omitted in a RHS.
3106 Brackets can be omitted if the entire trailer up to and including
3107 a preceding closing bracket fits in one line.
3109 Yielded sets are cumulative (contain results of previous yields, too). First
3113 omit: Set[LeafID] = set()
3116 length = 4 * line.depth
3117 opening_bracket = None
3118 closing_bracket = None
3119 inner_brackets: Set[LeafID] = set()
3120 for index, leaf, leaf_length in enumerate_with_length(line, reversed=True):
3121 length += leaf_length
3122 if length > line_length:
3125 has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
3126 if leaf.type == STANDALONE_COMMENT or has_inline_comment:
3130 if leaf is opening_bracket:
3131 opening_bracket = None
3132 elif leaf.type in CLOSING_BRACKETS:
3133 inner_brackets.add(id(leaf))
3134 elif leaf.type in CLOSING_BRACKETS:
3135 if index > 0 and line.leaves[index - 1].type in OPENING_BRACKETS:
3136 # Empty brackets would fail a split so treat them as "inner"
3137 # brackets (i.e. only add them to the `omit` set if another
3138 # pair of brackets was good enough).
3139 inner_brackets.add(id(leaf))
3143 omit.add(id(closing_bracket))
3144 omit.update(inner_brackets)
3145 inner_brackets.clear()
3149 opening_bracket = leaf.opening_bracket
3150 closing_bracket = leaf
3153 def get_future_imports(node: Node) -> Set[str]:
3154 """Return a set of __future__ imports in the file."""
3155 imports: Set[str] = set()
3157 def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
3158 for child in children:
3159 if isinstance(child, Leaf):
3160 if child.type == token.NAME:
3162 elif child.type == syms.import_as_name:
3163 orig_name = child.children[0]
3164 assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports"
3165 assert orig_name.type == token.NAME, "Invalid syntax parsing imports"
3166 yield orig_name.value
3167 elif child.type == syms.import_as_names:
3168 yield from get_imports_from_children(child.children)
3170 raise AssertionError("Invalid syntax parsing imports")
3172 for child in node.children:
3173 if child.type != syms.simple_stmt:
3175 first_child = child.children[0]
3176 if isinstance(first_child, Leaf):
3177 # Continue looking if we see a docstring; otherwise stop.
3179 len(child.children) == 2
3180 and first_child.type == token.STRING
3181 and child.children[1].type == token.NEWLINE
3186 elif first_child.type == syms.import_from:
3187 module_name = first_child.children[1]
3188 if not isinstance(module_name, Leaf) or module_name.value != "__future__":
3190 imports |= set(get_imports_from_children(first_child.children[3:]))
3196 def gen_python_files_in_dir(
3199 include: Pattern[str],
3200 exclude: Pattern[str],
3202 ) -> Iterator[Path]:
3203 """Generate all files under `path` whose paths are not excluded by the
3204 `exclude` regex, but are included by the `include` regex.
3206 Symbolic links pointing outside of the `root` directory are ignored.
3208 `report` is where output about exclusions goes.
3210 assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
3211 for child in path.iterdir():
3213 normalized_path = "/" + child.resolve().relative_to(root).as_posix()
3215 if child.is_symlink():
3216 report.path_ignored(
3217 child, f"is a symbolic link that points outside {root}"
3224 normalized_path += "/"
3225 exclude_match = exclude.search(normalized_path)
3226 if exclude_match and exclude_match.group(0):
3227 report.path_ignored(child, "matches the --exclude regular expression")
3231 yield from gen_python_files_in_dir(child, root, include, exclude, report)
3233 elif child.is_file():
3234 include_match = include.search(normalized_path)
3240 def find_project_root(srcs: Iterable[str]) -> Path:
3241 """Return a directory containing .git, .hg, or pyproject.toml.
3243 That directory can be one of the directories passed in `srcs` or their
3246 If no directory in the tree contains a marker that would specify it's the
3247 project root, the root of the file system is returned.
3250 return Path("/").resolve()
3252 common_base = min(Path(src).resolve() for src in srcs)
3253 if common_base.is_dir():
3254 # Append a fake file so `parents` below returns `common_base_dir`, too.
3255 common_base /= "fake-file"
3256 for directory in common_base.parents:
3257 if (directory / ".git").is_dir():
3260 if (directory / ".hg").is_dir():
3263 if (directory / "pyproject.toml").is_file():
3271 """Provides a reformatting counter. Can be rendered with `str(report)`."""
3275 verbose: bool = False
3276 change_count: int = 0
3278 failure_count: int = 0
3280 def done(self, src: Path, changed: Changed) -> None:
3281 """Increment the counter for successful reformatting. Write out a message."""
3282 if changed is Changed.YES:
3283 reformatted = "would reformat" if self.check else "reformatted"
3284 if self.verbose or not self.quiet:
3285 out(f"{reformatted} {src}")
3286 self.change_count += 1
3289 if changed is Changed.NO:
3290 msg = f"{src} already well formatted, good job."
3292 msg = f"{src} wasn't modified on disk since last run."
3293 out(msg, bold=False)
3294 self.same_count += 1
3296 def failed(self, src: Path, message: str) -> None:
3297 """Increment the counter for failed reformatting. Write out a message."""
3298 err(f"error: cannot format {src}: {message}")
3299 self.failure_count += 1
3301 def path_ignored(self, path: Path, message: str) -> None:
3303 out(f"{path} ignored: {message}", bold=False)
3306 def return_code(self) -> int:
3307 """Return the exit code that the app should use.
3309 This considers the current state of changed files and failures:
3310 - if there were any failures, return 123;
3311 - if any files were changed and --check is being used, return 1;
3312 - otherwise return 0.
3314 # According to http://tldp.org/LDP/abs/html/exitcodes.html, starting with
3315 # code 126 we have special return codes reserved by the shell.
3316 if self.failure_count:
3319 elif self.change_count and self.check:
3324 def __str__(self) -> str:
3325 """Render a color report of the current state.
3327 Use `click.unstyle` to remove colors.
3330 reformatted = "would be reformatted"
3331 unchanged = "would be left unchanged"
3332 failed = "would fail to reformat"
3334 reformatted = "reformatted"
3335 unchanged = "left unchanged"
3336 failed = "failed to reformat"
3338 if self.change_count:
3339 s = "s" if self.change_count > 1 else ""
3341 click.style(f"{self.change_count} file{s} {reformatted}", bold=True)
3344 s = "s" if self.same_count > 1 else ""
3345 report.append(f"{self.same_count} file{s} {unchanged}")
3346 if self.failure_count:
3347 s = "s" if self.failure_count > 1 else ""
3349 click.style(f"{self.failure_count} file{s} {failed}", fg="red")
3351 return ", ".join(report) + "."
3354 def assert_equivalent(src: str, dst: str) -> None:
3355 """Raise AssertionError if `src` and `dst` aren't equivalent."""
3360 def _v(node: ast.AST, depth: int = 0) -> Iterator[str]:
3361 """Simple visitor generating strings to compare ASTs by content."""
3362 yield f"{' ' * depth}{node.__class__.__name__}("
3364 for field in sorted(node._fields):
3366 value = getattr(node, field)
3367 except AttributeError:
3370 yield f"{' ' * (depth+1)}{field}="
3372 if isinstance(value, list):
3374 # Ignore nested tuples within del statements, because we may insert
3375 # parentheses and they change the AST.
3378 and isinstance(node, ast.Delete)
3379 and isinstance(item, ast.Tuple)
3381 for item in item.elts:
3382 yield from _v(item, depth + 2)
3383 elif isinstance(item, ast.AST):
3384 yield from _v(item, depth + 2)
3386 elif isinstance(value, ast.AST):
3387 yield from _v(value, depth + 2)
3390 yield f"{' ' * (depth+2)}{value!r}, # {value.__class__.__name__}"
3392 yield f"{' ' * depth}) # /{node.__class__.__name__}"
3395 src_ast = ast.parse(src)
3396 except Exception as exc:
3397 major, minor = sys.version_info[:2]
3398 raise AssertionError(
3399 f"cannot use --safe with this file; failed to parse source file "
3400 f"with Python {major}.{minor}'s builtin AST. Re-run with --fast "
3401 f"or stop using deprecated Python 2 syntax. AST error message: {exc}"
3405 dst_ast = ast.parse(dst)
3406 except Exception as exc:
3407 log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
3408 raise AssertionError(
3409 f"INTERNAL ERROR: Black produced invalid code: {exc}. "
3410 f"Please report a bug on https://github.com/python/black/issues. "
3411 f"This invalid output might be helpful: {log}"
3414 src_ast_str = "\n".join(_v(src_ast))
3415 dst_ast_str = "\n".join(_v(dst_ast))
3416 if src_ast_str != dst_ast_str:
3417 log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
3418 raise AssertionError(
3419 f"INTERNAL ERROR: Black produced code that is not equivalent to "
3421 f"Please report a bug on https://github.com/python/black/issues. "
3422 f"This diff might be helpful: {log}"
3426 def assert_stable(src: str, dst: str, mode: FileMode) -> None:
3427 """Raise AssertionError if `dst` reformats differently the second time."""
3428 newdst = format_str(dst, mode=mode)
3431 diff(src, dst, "source", "first pass"),
3432 diff(dst, newdst, "first pass", "second pass"),
3434 raise AssertionError(
3435 f"INTERNAL ERROR: Black produced different code on the second pass "
3436 f"of the formatter. "
3437 f"Please report a bug on https://github.com/python/black/issues. "
3438 f"This diff might be helpful: {log}"
3442 def dump_to_file(*output: str) -> str:
3443 """Dump `output` to a temporary file. Return path to the file."""
3446 with tempfile.NamedTemporaryFile(
3447 mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
3449 for lines in output:
3451 if lines and lines[-1] != "\n":
3456 def diff(a: str, b: str, a_name: str, b_name: str) -> str:
3457 """Return a unified diff string between strings `a` and `b`."""
3460 a_lines = [line + "\n" for line in a.split("\n")]
3461 b_lines = [line + "\n" for line in b.split("\n")]
3463 difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5)
3467 def cancel(tasks: Iterable[asyncio.Task]) -> None:
3468 """asyncio signal handler that cancels all `tasks` and reports to stderr."""
3474 def shutdown(loop: BaseEventLoop) -> None:
3475 """Cancel all pending tasks on `loop`, wait for them, and close the loop."""
3477 if sys.version_info[:2] >= (3, 7):
3478 all_tasks = asyncio.all_tasks
3480 all_tasks = asyncio.Task.all_tasks
3481 # This part is borrowed from asyncio/runners.py in Python 3.7b2.
3482 to_cancel = [task for task in all_tasks(loop) if not task.done()]
3486 for task in to_cancel:
3488 loop.run_until_complete(
3489 asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)
3492 # `concurrent.futures.Future` objects cannot be cancelled once they
3493 # are already running. There might be some when the `shutdown()` happened.
3494 # Silence their logger's spew about the event loop being closed.
3495 cf_logger = logging.getLogger("concurrent.futures")
3496 cf_logger.setLevel(logging.CRITICAL)
3500 def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
3501 """Replace `regex` with `replacement` twice on `original`.
3503 This is used by string normalization to perform replaces on
3504 overlapping matches.
3506 return regex.sub(replacement, regex.sub(replacement, original))
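# Illustrative sketch (not part of the original source): a single re.sub pass
# misses matches that overlap a replacement; the second pass picks them up:
#     pattern = re.compile(r"aba")
#     pattern.sub("aca", "ababa")         ->  "acaba"
#     sub_twice(pattern, "aca", "ababa")  ->  "acaca"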
3509 def re_compile_maybe_verbose(regex: str) -> Pattern[str]:
3510 """Compile a regular expression string in `regex`.
3512 If it contains newlines, use verbose mode.
3515 regex = "(?x)" + regex
3516 return re.compile(regex)
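# Illustrative sketch (not part of the original source): a newline in the
# pattern (e.g. from a multi-line --exclude value in pyproject.toml) switches
# on verbose mode so the embedded whitespace is ignored:
#     re_compile_maybe_verbose(r"\.pyi?$")          -> plain re.compile
#     re_compile_maybe_verbose("build/\n|dist/\n")  -> compiled with (?x) prepended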
3519 def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
3520 """Like `reversed(enumerate(sequence))` if that were possible."""
3521 index = len(sequence) - 1
3522 for element in reversed(sequence):
3523 yield (index, element)
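# Illustrative sketch (not part of the original source):
#     list(enumerate_reversed("abc"))  ->  [(2, 'c'), (1, 'b'), (0, 'a')]
# i.e. the same pairs enumerate() would produce, walked from the end.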
3527 def enumerate_with_length(
3528 line: Line, reversed: bool = False
3529 ) -> Iterator[Tuple[Index, Leaf, int]]:
3530 """Return an enumeration of leaves with their length.
3532 Stops prematurely on multiline strings and standalone comments.
3535 Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
3536 enumerate_reversed if reversed else enumerate,
3538 for index, leaf in op(line.leaves):
3539 length = len(leaf.prefix) + len(leaf.value)
3540 if "\n" in leaf.value:
3541 return # Multiline strings, we can't continue.
3543 comment: Optional[Leaf]
3544 for comment in line.comments_after(leaf):
3545 length += len(comment.value)
3547 yield index, leaf, length
3550 def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool:
3551 """Return True if `line` is no longer than `line_length`.
3553 Uses the provided `line_str` rendering, if any; otherwise computes a new one.
3556 line_str = str(line).strip("\n")
3558 len(line_str) <= line_length
3559 and "\n" not in line_str # multiline strings
3560 and not line.contains_standalone_comments()
3564 def can_be_split(line: Line) -> bool:
3565 """Return False if the line cannot be split *for sure*.
3567 This is not an exhaustive search but a cheap heuristic that we can use to
3568 avoid some unfortunate formattings (mostly around wrapping unsplittable code
3569 in unnecessary parentheses).
3571 leaves = line.leaves
3575 if leaves[0].type == token.STRING and leaves[1].type == token.DOT:
3579 for leaf in leaves[-2::-1]:
3580 if leaf.type in OPENING_BRACKETS:
3581 if next.type not in CLOSING_BRACKETS:
3585 elif leaf.type == token.DOT:
3587 elif leaf.type == token.NAME:
3588 if not (next.type == token.DOT or next.type in OPENING_BRACKETS):
3591 elif leaf.type not in CLOSING_BRACKETS:
3594 if dot_count > 1 and call_count > 1:
3600 def can_omit_invisible_parens(line: Line, line_length: int) -> bool:
3601 """Does `line` have a shape safe to reformat without optional parens around it?
3603 Returns True for only a subset of potentially nice looking formattings but
3604 the point is to not return false positives that end up producing lines that
3607 bt = line.bracket_tracker
3608 if not bt.delimiters:
3609 # Without delimiters the optional parentheses are useless.
3612 max_priority = bt.max_delimiter_priority()
3613 if bt.delimiter_count_with_priority(max_priority) > 1:
3614 # With more than one delimiter of a kind the optional parentheses read better.
3617 if max_priority == DOT_PRIORITY:
3618 # A single stranded method call doesn't require optional parentheses.
3621 assert len(line.leaves) >= 2, "Stranded delimiter"
3623 first = line.leaves[0]
3624 second = line.leaves[1]
3625 penultimate = line.leaves[-2]
3626 last = line.leaves[-1]
3628 # With a single delimiter, omit if the expression starts or ends with
3630 if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:
3632 length = 4 * line.depth
3633 for _index, leaf, leaf_length in enumerate_with_length(line):
3634 if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:
3637 length += leaf_length
3638 if length > line_length:
3641 if leaf.type in OPENING_BRACKETS:
3642 # There are brackets we can further split on.
3646 # checked the entire string and line length wasn't exceeded
3647 if len(line.leaves) == _index + 1:
3650 # Note: we are not returning False here because a line might have *both*
3651 # a leading opening bracket and a trailing closing bracket. If the
3652 # opening bracket doesn't match our rule, maybe the closing will.
3655 last.type == token.RPAR
3656 or last.type == token.RBRACE
3658 # don't use indexing for omitting optional parentheses;
3660 last.type == token.RSQB
3662 and last.parent.type != syms.trailer
3665 if penultimate.type in OPENING_BRACKETS:
3666 # Empty brackets don't help.
3669 if is_multiline_string(first):
3670 # Additional wrapping of a multiline string in this situation is
3674 length = 4 * line.depth
3675 seen_other_brackets = False
3676 for _index, leaf, leaf_length in enumerate_with_length(line):
3677 length += leaf_length
3678 if leaf is last.opening_bracket:
3679 if seen_other_brackets or length <= line_length:
3682 elif leaf.type in OPENING_BRACKETS:
3683 # There are brackets we can further split on.
3684 seen_other_brackets = True
3689 def get_cache_file(mode: FileMode) -> Path:
3690 return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
3693 def read_cache(mode: FileMode) -> Cache:
3694 """Read the cache if it exists and is well formed.
3696 If it is not well formed, the call to write_cache later should resolve the issue.
3698 cache_file = get_cache_file(mode)
3699 if not cache_file.exists():
3702 with cache_file.open("rb") as fobj:
3704 cache: Cache = pickle.load(fobj)
3705 except pickle.UnpicklingError:
3711 def get_cache_info(path: Path) -> CacheInfo:
3712 """Return the information used to check if a file is already formatted or not."""
3714 return stat.st_mtime, stat.st_size
3717 def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
3718 """Split an iterable of paths in `sources` into two sets.
3720 The first contains paths of files that were modified on disk or are not in the
3721 cache. The other contains paths to non-modified files.
3723 todo, done = set(), set()
3726 if cache.get(src) != get_cache_info(src):
3733 def write_cache(cache: Cache, sources: Iterable[Path], mode: FileMode) -> None:
3734 """Update the cache file."""
3735 cache_file = get_cache_file(mode)
3737 CACHE_DIR.mkdir(parents=True, exist_ok=True)
3738 new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}}
3739 with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:
3740 pickle.dump(new_cache, f, protocol=pickle.HIGHEST_PROTOCOL)
3741 os.replace(f.name, cache_file)
3746 def patch_click() -> None:
3747 """Make Click not crash.
3749 On certain misconfigured environments, Python 3 selects the ASCII encoding as the
3750 default which restricts paths that it can access during the lifetime of the
3751 application. Click refuses to work in this scenario by raising a RuntimeError.
3753 In the case of Black, the likelihood that non-ASCII characters are going to be used in
3754 file paths is minimal since it's Python source code. Moreover, this crash was
3755 spurious on Python 3.7 thanks to PEP 538 and PEP 540.
3758 from click import core
3759 from click import _unicodefun # type: ignore
3760 except ModuleNotFoundError:
3763 for module in (core, _unicodefun):
3764 if hasattr(module, "_verify_python3_env"):
3765 module._verify_python3_env = lambda: None
3768 def patched_main() -> None:
3774 if __name__ == "__main__":