All patches and comments are welcome. Please squash your changes to logical
commits before using git-format-patch and git-send-email to
patches@git.madduck.net.
If you'd read over the Git project's submission guidelines and adhered to them,
I'd be especially grateful.
3 from abc import ABC, abstractmethod
4 from collections import defaultdict
5 from concurrent.futures import Executor, ThreadPoolExecutor, ProcessPoolExecutor
6 from contextlib import contextmanager
7 from datetime import datetime
9 from functools import lru_cache, partial, wraps
13 from multiprocessing import Manager, freeze_support
15 from pathlib import Path
45 from mypy_extensions import mypyc_attr
47 from appdirs import user_cache_dir
48 from dataclasses import dataclass, field, replace
53 from typed_ast import ast3, ast27
55 if sys.version_info < (3, 8):
57 "The typed_ast package is not installed.\n"
58 "You can install it with `python3 -m pip install typed-ast`.",
65 from pathspec import PathSpec
68 from blib2to3.pytree import Node, Leaf, type_repr
69 from blib2to3 import pygram, pytree
70 from blib2to3.pgen2 import driver, token
71 from blib2to3.pgen2.grammar import Grammar
72 from blib2to3.pgen2.parse import ParseError
74 from _black_version import version as __version__
76 if sys.version_info < (3, 8):
77 from typing_extensions import Final
79 from typing import Final
82 import colorama # noqa: F401
84 DEFAULT_LINE_LENGTH = 88
85 DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|venv|\.svn|_build|buck-out|build|dist)/" # noqa: B950
86 DEFAULT_INCLUDES = r"\.pyi?$"
87 CACHE_DIR = Path(user_cache_dir("black", version=__version__))
88 STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__"
90 STRING_PREFIX_CHARS: Final = "furbFURB" # All possible string prefix characters.
104 LN = Union[Leaf, Node]
105 Transformer = Callable[["Line", Collection["Feature"]], Iterator["Line"]]
108 CacheInfo = Tuple[Timestamp, FileSize]
109 Cache = Dict[str, CacheInfo]
110 out = partial(click.secho, bold=True, err=True)
111 err = partial(click.secho, fg="red", err=True)
113 pygram.initialize(CACHE_DIR)
114 syms = pygram.python_symbols
117 class NothingChanged(UserWarning):
118 """Raised when reformatted code is the same as source."""
121 class CannotTransform(Exception):
122 """Base class for errors raised by Transformers."""
125 class CannotSplit(CannotTransform):
126 """A readable split that fits the allotted line length is impossible."""
129 class InvalidInput(ValueError):
130 """Raised when input source code fails all parse attempts."""
133 class BracketMatchError(KeyError):
134 """Raised when an opening bracket is unable to be matched to a closing bracket."""
138 E = TypeVar("E", bound=Exception)
141 class Ok(Generic[T]):
142 def __init__(self, value: T) -> None:
149 class Err(Generic[E]):
150 def __init__(self, e: E) -> None:
157 # The 'Result' return type is used to implement an error-handling model heavily
158 # influenced by that used by the Rust programming language
159 # (see https://doc.rust-lang.org/book/ch09-00-error-handling.html).
160 Result = Union[Ok[T], Err[E]]
161 TResult = Result[T, CannotTransform] # (T)ransform Result
162 TMatchResult = TResult[Index]
165 class WriteBack(Enum):
173 def from_configuration(
174 cls, *, check: bool, diff: bool, color: bool = False
176 if check and not diff:
180 return cls.COLOR_DIFF
182 return cls.DIFF if diff else cls.YES
191 class TargetVersion(Enum):
201 def is_python2(self) -> bool:
202 return self is TargetVersion.PY27
206 # All string literals are unicode
209 NUMERIC_UNDERSCORES = 3
210 TRAILING_COMMA_IN_CALL = 4
211 TRAILING_COMMA_IN_DEF = 5
212 # The following two feature-flags are mutually exclusive, and exactly one should be
213 # set for every version of python.
214 ASYNC_IDENTIFIERS = 6
216 ASSIGNMENT_EXPRESSIONS = 8
217 POS_ONLY_ARGUMENTS = 9
218 RELAXED_DECORATORS = 10
219 FORCE_OPTIONAL_PARENTHESES = 50
222 VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
223 TargetVersion.PY27: {Feature.ASYNC_IDENTIFIERS},
224 TargetVersion.PY33: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS},
225 TargetVersion.PY34: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS},
226 TargetVersion.PY35: {
227 Feature.UNICODE_LITERALS,
228 Feature.TRAILING_COMMA_IN_CALL,
229 Feature.ASYNC_IDENTIFIERS,
231 TargetVersion.PY36: {
232 Feature.UNICODE_LITERALS,
234 Feature.NUMERIC_UNDERSCORES,
235 Feature.TRAILING_COMMA_IN_CALL,
236 Feature.TRAILING_COMMA_IN_DEF,
237 Feature.ASYNC_IDENTIFIERS,
239 TargetVersion.PY37: {
240 Feature.UNICODE_LITERALS,
242 Feature.NUMERIC_UNDERSCORES,
243 Feature.TRAILING_COMMA_IN_CALL,
244 Feature.TRAILING_COMMA_IN_DEF,
245 Feature.ASYNC_KEYWORDS,
247 TargetVersion.PY38: {
248 Feature.UNICODE_LITERALS,
250 Feature.NUMERIC_UNDERSCORES,
251 Feature.TRAILING_COMMA_IN_CALL,
252 Feature.TRAILING_COMMA_IN_DEF,
253 Feature.ASYNC_KEYWORDS,
254 Feature.ASSIGNMENT_EXPRESSIONS,
255 Feature.POS_ONLY_ARGUMENTS,
257 TargetVersion.PY39: {
258 Feature.UNICODE_LITERALS,
260 Feature.NUMERIC_UNDERSCORES,
261 Feature.TRAILING_COMMA_IN_CALL,
262 Feature.TRAILING_COMMA_IN_DEF,
263 Feature.ASYNC_KEYWORDS,
264 Feature.ASSIGNMENT_EXPRESSIONS,
265 Feature.RELAXED_DECORATORS,
266 Feature.POS_ONLY_ARGUMENTS,
273 target_versions: Set[TargetVersion] = field(default_factory=set)
274 line_length: int = DEFAULT_LINE_LENGTH
275 string_normalization: bool = True
276 magic_trailing_comma: bool = True
277 experimental_string_processing: bool = False
280 def get_cache_key(self) -> str:
281 if self.target_versions:
282 version_str = ",".join(
284 for version in sorted(self.target_versions, key=lambda v: v.value)
290 str(self.line_length),
291 str(int(self.string_normalization)),
292 str(int(self.is_pyi)),
294 return ".".join(parts)
297 # Legacy name, left for integrations.
301 def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
302 return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)
305 def find_pyproject_toml(path_search_start: Tuple[str, ...]) -> Optional[str]:
306 """Find the absolute filepath to a pyproject.toml if it exists"""
307 path_project_root = find_project_root(path_search_start)
308 path_pyproject_toml = path_project_root / "pyproject.toml"
309 if path_pyproject_toml.is_file():
310 return str(path_pyproject_toml)
312 path_user_pyproject_toml = find_user_pyproject_toml()
313 return str(path_user_pyproject_toml) if path_user_pyproject_toml.is_file() else None
316 def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
317 """Parse a pyproject toml file, pulling out relevant parts for Black
319 If parsing fails, will raise a toml.TomlDecodeError
321 pyproject_toml = toml.load(path_config)
322 config = pyproject_toml.get("tool", {}).get("black", {})
323 return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
326 def read_pyproject_toml(
327 ctx: click.Context, param: click.Parameter, value: Optional[str]
329 """Inject Black configuration from "pyproject.toml" into defaults in `ctx`.
331 Returns the path to a successfully found and read configuration file, None
335 value = find_pyproject_toml(ctx.params.get("src", ()))
340 config = parse_pyproject_toml(value)
341 except (toml.TomlDecodeError, OSError) as e:
342 raise click.FileError(
343 filename=value, hint=f"Error reading configuration file: {e}"
349 # Sanitize the values to be Click friendly. For more information please see:
350 # https://github.com/psf/black/issues/1458
351 # https://github.com/pallets/click/issues/1567
353 k: str(v) if not isinstance(v, (list, dict)) else v
354 for k, v in config.items()
357 target_version = config.get("target_version")
358 if target_version is not None and not isinstance(target_version, list):
359 raise click.BadOptionUsage(
360 "target-version", "Config key target-version must be a list"
363 default_map: Dict[str, Any] = {}
365 default_map.update(ctx.default_map)
366 default_map.update(config)
368 ctx.default_map = default_map
372 def target_version_option_callback(
373 c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
374 ) -> List[TargetVersion]:
375 """Compute the target versions from a --target-version flag.
377 This is its own function because mypy couldn't infer the type correctly
378 when it was a lambda, causing mypyc trouble.
380 return [TargetVersion[val.upper()] for val in v]
385 param: click.Parameter,
386 value: Optional[str],
387 ) -> Optional[Pattern]:
389 return re_compile_maybe_verbose(value) if value is not None else None
391 raise click.BadParameter("Not a valid regular expression")
394 @click.command(context_settings=dict(help_option_names=["-h", "--help"]))
395 @click.option("-c", "--code", type=str, help="Format the code passed in as a string.")
400 default=DEFAULT_LINE_LENGTH,
401 help="How many characters per line to allow.",
407 type=click.Choice([v.name.lower() for v in TargetVersion]),
408 callback=target_version_option_callback,
411 "Python versions that should be supported by Black's output. [default: per-file"
419 "Format all input files like typing stubs regardless of file extension (useful"
420 " when piping source on standard input)."
425 "--skip-string-normalization",
427 help="Don't normalize string quotes or prefixes.",
431 "--skip-magic-trailing-comma",
433 help="Don't use trailing commas as a reason to split lines.",
436 "--experimental-string-processing",
440 "Experimental option that performs more normalization on string literals."
441 " Currently disabled because it leads to some crashes."
448 "Don't write the files back, just return the status. Return code 0 means"
449 " nothing would change. Return code 1 means some files would be reformatted."
450 " Return code 123 means there was an internal error."
456 help="Don't write the files back, just output a diff for each file on stdout.",
459 "--color/--no-color",
461 help="Show colored diff. Only applies when `--diff` is given.",
466 help="If --fast given, skip temporary sanity checks. [default: --safe]",
471 default=DEFAULT_INCLUDES,
472 callback=validate_regex,
474 "A regular expression that matches files and directories that should be"
475 " included on recursive searches. An empty value means all files are included"
476 " regardless of the name. Use forward slashes for directories on all platforms"
477 " (Windows, too). Exclusions are calculated first, inclusions later."
484 default=DEFAULT_EXCLUDES,
485 callback=validate_regex,
487 "A regular expression that matches files and directories that should be"
488 " excluded on recursive searches. An empty value means no paths are excluded."
489 " Use forward slashes for directories on all platforms (Windows, too)."
490 " Exclusions are calculated first, inclusions later."
497 callback=validate_regex,
499 "Like --exclude, but adds additional files and directories on top of the"
500 " excluded ones. (Useful if you simply want to add to the default)"
506 callback=validate_regex,
508 "Like --exclude, but files and directories matching this regex will be "
509 "excluded even when they are passed explicitly as arguments."
516 "The name of the file when passing it through stdin. Useful to make "
517 "sure Black will respect --force-exclude option on some "
518 "editors that rely on using stdin."
526 "Don't emit non-error messages to stderr. Errors are still emitted; silence"
527 " those with 2>/dev/null."
535 "Also emit messages to stderr about files that were not changed or were ignored"
536 " due to exclusion patterns."
539 @click.version_option(version=__version__)
544 exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
559 callback=read_pyproject_toml,
560 help="Read configuration from FILE path.",
567 target_version: List[TargetVersion],
573 skip_string_normalization: bool,
574 skip_magic_trailing_comma: bool,
575 experimental_string_processing: bool,
580 extend_exclude: Optional[Pattern],
581 force_exclude: Optional[Pattern],
582 stdin_filename: Optional[str],
583 src: Tuple[str, ...],
584 config: Optional[str],
586 """The uncompromising code formatter."""
587 write_back = WriteBack.from_configuration(check=check, diff=diff, color=color)
589 versions = set(target_version)
591 # We'll autodetect later.
594 target_versions=versions,
595 line_length=line_length,
597 string_normalization=not skip_string_normalization,
598 magic_trailing_comma=not skip_magic_trailing_comma,
599 experimental_string_processing=experimental_string_processing,
601 if config and verbose:
602 out(f"Using configuration from {config}.", bold=False, fg="blue")
604 print(format_str(code, mode=mode))
606 report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose)
607 sources = get_sources(
614 extend_exclude=extend_exclude,
615 force_exclude=force_exclude,
617 stdin_filename=stdin_filename,
622 "No Python files are present to be formatted. Nothing to do 😴",
628 if len(sources) == 1:
632 write_back=write_back,
638 sources=sources, fast=fast, write_back=write_back, mode=mode, report=report
641 if verbose or not quiet:
642 out("Oh no! 💥 💔 💥" if report.return_code else "All done! ✨ 🍰 ✨")
643 click.secho(str(report), err=True)
644 ctx.exit(report.return_code)
650 src: Tuple[str, ...],
653 include: Pattern[str],
654 exclude: Pattern[str],
655 extend_exclude: Optional[Pattern[str]],
656 force_exclude: Optional[Pattern[str]],
658 stdin_filename: Optional[str],
660 """Compute the set of files to be formatted."""
662 root = find_project_root(src)
663 sources: Set[Path] = set()
664 path_empty(src, "No Path provided. Nothing to do 😴", quiet, verbose, ctx)
665 gitignore = get_gitignore(root)
668 if s == "-" and stdin_filename:
669 p = Path(stdin_filename)
675 if is_stdin or p.is_file():
676 normalized_path = normalize_path_maybe_ignore(p, root, report)
677 if normalized_path is None:
680 normalized_path = "/" + normalized_path
681 # Hard-exclude any files that matches the `--force-exclude` regex.
683 force_exclude_match = force_exclude.search(normalized_path)
685 force_exclude_match = None
686 if force_exclude_match and force_exclude_match.group(0):
687 report.path_ignored(p, "matches the --force-exclude regular expression")
691 p = Path(f"{STDIN_PLACEHOLDER}{str(p)}")
710 err(f"invalid path: {s}")
715 src: Sized, msg: str, quiet: bool, verbose: bool, ctx: click.Context
718 Exit if there is no `src` provided for formatting
720 if not src and (verbose or not quiet):
726 src: Path, fast: bool, write_back: WriteBack, mode: Mode, report: "Report"
728 """Reformat a single file under `src` without spawning child processes.
730 `fast`, `write_back`, and `mode` options are passed to
731 :func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
738 elif str(src).startswith(STDIN_PLACEHOLDER):
740 # Use the original name again in case we want to print something
742 src = Path(str(src)[len(STDIN_PLACEHOLDER) :])
747 if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode):
748 changed = Changed.YES
751 if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
752 cache = read_cache(mode)
753 res_src = src.resolve()
754 res_src_s = str(res_src)
755 if res_src_s in cache and cache[res_src_s] == get_cache_info(res_src):
756 changed = Changed.CACHED
757 if changed is not Changed.CACHED and format_file_in_place(
758 src, fast=fast, write_back=write_back, mode=mode
760 changed = Changed.YES
761 if (write_back is WriteBack.YES and changed is not Changed.CACHED) or (
762 write_back is WriteBack.CHECK and changed is Changed.NO
764 write_cache(cache, [src], mode)
765 report.done(src, changed)
766 except Exception as exc:
768 traceback.print_exc()
769 report.failed(src, str(exc))
773 sources: Set[Path], fast: bool, write_back: WriteBack, mode: Mode, report: "Report"
775 """Reformat multiple files using a ProcessPoolExecutor."""
777 loop = asyncio.get_event_loop()
778 worker_count = os.cpu_count()
779 if sys.platform == "win32":
780 # Work around https://bugs.python.org/issue26903
781 worker_count = min(worker_count, 60)
783 executor = ProcessPoolExecutor(max_workers=worker_count)
784 except (ImportError, OSError):
785 # we arrive here if the underlying system does not support multi-processing
786 # like in AWS Lambda or Termux, in which case we gracefully fallback to
787 # a ThreadPoolExecutor with just a single worker (more workers would not do us
788 # any good due to the Global Interpreter Lock)
789 executor = ThreadPoolExecutor(max_workers=1)
792 loop.run_until_complete(
796 write_back=write_back,
805 if executor is not None:
809 async def schedule_formatting(
812 write_back: WriteBack,
815 loop: asyncio.AbstractEventLoop,
818 """Run formatting of `sources` in parallel using the provided `executor`.
820 (Use ProcessPoolExecutors for actual parallelism.)
822 `write_back`, `fast`, and `mode` options are passed to
823 :func:`format_file_in_place`.
826 if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
827 cache = read_cache(mode)
828 sources, cached = filter_cached(cache, sources)
829 for src in sorted(cached):
830 report.done(src, Changed.CACHED)
835 sources_to_cache = []
837 if write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
838 # For diff output, we need locks to ensure we don't interleave output
839 # from different processes.
841 lock = manager.Lock()
843 asyncio.ensure_future(
844 loop.run_in_executor(
845 executor, format_file_in_place, src, fast, mode, write_back, lock
848 for src in sorted(sources)
850 pending = tasks.keys()
852 loop.add_signal_handler(signal.SIGINT, cancel, pending)
853 loop.add_signal_handler(signal.SIGTERM, cancel, pending)
854 except NotImplementedError:
855 # There are no good alternatives for these on Windows.
858 done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
860 src = tasks.pop(task)
862 cancelled.append(task)
863 elif task.exception():
864 report.failed(src, str(task.exception()))
866 changed = Changed.YES if task.result() else Changed.NO
867 # If the file was written back or was successfully checked as
868 # well-formatted, store this information in the cache.
869 if write_back is WriteBack.YES or (
870 write_back is WriteBack.CHECK and changed is Changed.NO
872 sources_to_cache.append(src)
873 report.done(src, changed)
875 await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
877 write_cache(cache, sources_to_cache, mode)
880 def format_file_in_place(
884 write_back: WriteBack = WriteBack.NO,
885 lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy
887 """Format file under `src` path. Return True if changed.
889 If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted
891 `mode` and `fast` options are passed to :func:`format_file_contents`.
893 if src.suffix == ".pyi":
894 mode = replace(mode, is_pyi=True)
896 then = datetime.utcfromtimestamp(src.stat().st_mtime)
897 with open(src, "rb") as buf:
898 src_contents, encoding, newline = decode_bytes(buf.read())
900 dst_contents = format_file_contents(src_contents, fast=fast, mode=mode)
901 except NothingChanged:
904 if write_back == WriteBack.YES:
905 with open(src, "w", encoding=encoding, newline=newline) as f:
906 f.write(dst_contents)
907 elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
908 now = datetime.utcnow()
909 src_name = f"{src}\t{then} +0000"
910 dst_name = f"{src}\t{now} +0000"
911 diff_contents = diff(src_contents, dst_contents, src_name, dst_name)
913 if write_back == WriteBack.COLOR_DIFF:
914 diff_contents = color_diff(diff_contents)
916 with lock or nullcontext():
917 f = io.TextIOWrapper(
923 f = wrap_stream_for_windows(f)
924 f.write(diff_contents)
930 def color_diff(contents: str) -> str:
931 """Inject the ANSI color codes to the diff."""
932 lines = contents.split("\n")
933 for i, line in enumerate(lines):
934 if line.startswith("+++") or line.startswith("---"):
935 line = "\033[1;37m" + line + "\033[0m" # bold white, reset
936 elif line.startswith("@@"):
937 line = "\033[36m" + line + "\033[0m" # cyan, reset
938 elif line.startswith("+"):
939 line = "\033[32m" + line + "\033[0m" # green, reset
940 elif line.startswith("-"):
941 line = "\033[31m" + line + "\033[0m" # red, reset
943 return "\n".join(lines)
946 def wrap_stream_for_windows(
948 ) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]:
950 Wrap stream with colorama's wrap_stream so colors are shown on Windows.
952 If `colorama` is unavailable, the original stream is returned unmodified.
953 Otherwise, the `wrap_stream()` function determines whether the stream needs
954 to be wrapped for a Windows environment and will accordingly either return
955 an `AnsiToWin32` wrapper or the original stream.
958 from colorama.initialise import wrap_stream
962 # Set `strip=False` to avoid needing to modify test_express_diff_with_color.
963 return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True)
966 def format_stdin_to_stdout(
967 fast: bool, *, write_back: WriteBack = WriteBack.NO, mode: Mode
969 """Format file on stdin. Return True if changed.
971 If `write_back` is YES, write reformatted code back to stdout. If it is DIFF,
972 write a diff to stdout. The `mode` argument is passed to
973 :func:`format_file_contents`.
975 then = datetime.utcnow()
976 src, encoding, newline = decode_bytes(sys.stdin.buffer.read())
979 dst = format_file_contents(src, fast=fast, mode=mode)
982 except NothingChanged:
986 f = io.TextIOWrapper(
987 sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True
989 if write_back == WriteBack.YES:
991 elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
992 now = datetime.utcnow()
993 src_name = f"STDIN\t{then} +0000"
994 dst_name = f"STDOUT\t{now} +0000"
995 d = diff(src, dst, src_name, dst_name)
996 if write_back == WriteBack.COLOR_DIFF:
998 f = wrap_stream_for_windows(f)
1003 def format_file_contents(src_contents: str, *, fast: bool, mode: Mode) -> FileContent:
1004 """Reformat contents of a file and return new contents.
1006 If `fast` is False, additionally confirm that the reformatted code is
1007 valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
1008 `mode` is passed to :func:`format_str`.
1010 if not src_contents.strip():
1011 raise NothingChanged
1013 dst_contents = format_str(src_contents, mode=mode)
1014 if src_contents == dst_contents:
1015 raise NothingChanged
1018 assert_equivalent(src_contents, dst_contents)
1019 assert_stable(src_contents, dst_contents, mode=mode)
1023 def format_str(src_contents: str, *, mode: Mode) -> FileContent:
1024 """Reformat a string and return new contents.
1026 `mode` determines formatting options, such as how many characters per line are
1030 >>> print(black.format_str("def f(arg:str='')->None:...", mode=black.Mode()))
1031 def f(arg: str = "") -> None:
1034 A more complex example:
1037 ... black.format_str(
1038 ... "def f(arg:str='')->None: hey",
1039 ... mode=black.Mode(
1040 ... target_versions={black.TargetVersion.PY36},
1042 ... string_normalization=False,
1053 src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
1055 future_imports = get_future_imports(src_node)
1056 if mode.target_versions:
1057 versions = mode.target_versions
1059 versions = detect_target_versions(src_node)
1060 normalize_fmt_off(src_node)
1061 lines = LineGenerator(
1063 remove_u_prefix="unicode_literals" in future_imports
1064 or supports_feature(versions, Feature.UNICODE_LITERALS),
1066 elt = EmptyLineTracker(is_pyi=mode.is_pyi)
1067 empty_line = Line(mode=mode)
1069 split_line_features = {
1071 for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF}
1072 if supports_feature(versions, feature)
1074 for current_line in lines.visit(src_node):
1075 dst_contents.append(str(empty_line) * after)
1076 before, after = elt.maybe_empty_lines(current_line)
1077 dst_contents.append(str(empty_line) * before)
1078 for line in transform_line(
1079 current_line, mode=mode, features=split_line_features
1081 dst_contents.append(str(line))
1082 return "".join(dst_contents)
1085 def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
1086 """Return a tuple of (decoded_contents, encoding, newline).
1088 `newline` is either CRLF or LF but `decoded_contents` is decoded with
1089 universal newlines (i.e. only contains LF).
1091 srcbuf = io.BytesIO(src)
1092 encoding, lines = tokenize.detect_encoding(srcbuf.readline)
1094 return "", encoding, "\n"
1096 newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n"
1098 with io.TextIOWrapper(srcbuf, encoding) as tiow:
1099 return tiow.read(), encoding, newline
1102 def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
1103 if not target_versions:
1104 # No target_version specified, so try all grammars.
1107 pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,
1109 pygram.python_grammar_no_print_statement_no_exec_statement,
1110 # Python 2.7 with future print_function import
1111 pygram.python_grammar_no_print_statement,
1113 pygram.python_grammar,
1116 if all(version.is_python2() for version in target_versions):
1117 # Python 2-only code, so try Python 2 grammars.
1119 # Python 2.7 with future print_function import
1120 pygram.python_grammar_no_print_statement,
1122 pygram.python_grammar,
1125 # Python 3-compatible code, so only try Python 3 grammar.
1127 # If we have to parse both, try to parse async as a keyword first
1128 if not supports_feature(target_versions, Feature.ASYNC_IDENTIFIERS):
1131 pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords
1133 if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
1135 grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)
1136 # At least one of the above branches must have been taken, because every Python
1137 # version has exactly one of the two 'ASYNC_*' flags
1141 def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
1142 """Given a string with source, return the lib2to3 Node."""
1143 if not src_txt.endswith("\n"):
1146 for grammar in get_grammars(set(target_versions)):
1147 drv = driver.Driver(grammar, pytree.convert)
1149 result = drv.parse_string(src_txt, True)
1152 except ParseError as pe:
1153 lineno, column = pe.context[1]
1154 lines = src_txt.splitlines()
1156 faulty_line = lines[lineno - 1]
1158 faulty_line = "<line number missing in source>"
1159 exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}")
1163 if isinstance(result, Leaf):
1164 result = Node(syms.file_input, [result])
1168 def lib2to3_unparse(node: Node) -> str:
1169 """Given a lib2to3 node, return its string representation."""
1174 class Visitor(Generic[T]):
1175 """Basic lib2to3 visitor that yields things of type `T` on `visit()`."""
1177 def visit(self, node: LN) -> Iterator[T]:
1178 """Main method to visit `node` and its children.
1180 It tries to find a `visit_*()` method for the given `node.type`, like
1181 `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects.
1182 If no dedicated `visit_*()` method is found, chooses `visit_default()`
1185 Then yields objects of type `T` from the selected visitor.
1188 name = token.tok_name[node.type]
1190 name = str(type_repr(node.type))
1191 # We explicitly branch on whether a visitor exists (instead of
1192 # using self.visit_default as the default arg to getattr) in order
1193 # to save needing to create a bound method object and so mypyc can
1194 # generate a native call to visit_default.
1195 visitf = getattr(self, f"visit_{name}", None)
1197 yield from visitf(node)
1199 yield from self.visit_default(node)
1201 def visit_default(self, node: LN) -> Iterator[T]:
1202 """Default `visit_*()` implementation. Recurses to children of `node`."""
1203 if isinstance(node, Node):
1204 for child in node.children:
1205 yield from self.visit(child)
1209 class DebugVisitor(Visitor[T]):
1212 def visit_default(self, node: LN) -> Iterator[T]:
1213 indent = " " * (2 * self.tree_depth)
1214 if isinstance(node, Node):
1215 _type = type_repr(node.type)
1216 out(f"{indent}{_type}", fg="yellow")
1217 self.tree_depth += 1
1218 for child in node.children:
1219 yield from self.visit(child)
1221 self.tree_depth -= 1
1222 out(f"{indent}/{_type}", fg="yellow", bold=False)
1224 _type = token.tok_name.get(node.type, str(node.type))
1225 out(f"{indent}{_type}", fg="blue", nl=False)
1227 # We don't have to handle prefixes for `Node` objects since
1228 # that delegates to the first child anyway.
1229 out(f" {node.prefix!r}", fg="green", bold=False, nl=False)
1230 out(f" {node.value!r}", fg="blue", bold=False)
1233 def show(cls, code: Union[str, Leaf, Node]) -> None:
1234 """Pretty-print the lib2to3 AST of a given string of `code`.
1236 Convenience method for debugging.
1238 v: DebugVisitor[None] = DebugVisitor()
1239 if isinstance(code, str):
1240 code = lib2to3_parse(code)
1244 WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE}
1245 STATEMENT: Final = {
1255 STANDALONE_COMMENT: Final = 153
1256 token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT"
1257 LOGIC_OPERATORS: Final = {"and", "or"}
1258 COMPARATORS: Final = {
1266 MATH_OPERATORS: Final = {
1282 STARS: Final = {token.STAR, token.DOUBLESTAR}
1283 VARARGS_SPECIALS: Final = STARS | {token.SLASH}
1284 VARARGS_PARENTS: Final = {
1286 syms.argument, # double star in arglist
1287 syms.trailer, # single argument to call
1289 syms.varargslist, # lambdas
1291 UNPACKING_PARENTS: Final = {
1292 syms.atom, # single element of a list or set literal
1296 syms.testlist_star_expr,
1298 TEST_DESCENDANTS: Final = {
1315 ASSIGNMENTS: Final = {
1331 COMPREHENSION_PRIORITY: Final = 20
1332 COMMA_PRIORITY: Final = 18
1333 TERNARY_PRIORITY: Final = 16
1334 LOGIC_PRIORITY: Final = 14
1335 STRING_PRIORITY: Final = 12
1336 COMPARATOR_PRIORITY: Final = 10
1337 MATH_PRIORITIES: Final = {
1339 token.CIRCUMFLEX: 8,
1342 token.RIGHTSHIFT: 6,
1347 token.DOUBLESLASH: 4,
1351 token.DOUBLESTAR: 2,
1353 DOT_PRIORITY: Final = 1
1357 class BracketTracker:
1358 """Keeps track of brackets on a line."""
1361 bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
1362 delimiters: Dict[LeafID, Priority] = field(default_factory=dict)
1363 previous: Optional[Leaf] = None
1364 _for_loop_depths: List[int] = field(default_factory=list)
1365 _lambda_argument_depths: List[int] = field(default_factory=list)
1366 invisible: List[Leaf] = field(default_factory=list)
1368 def mark(self, leaf: Leaf) -> None:
1369 """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
1371 All leaves receive an int `bracket_depth` field that stores how deep
1372 within brackets a given leaf is. 0 means there are no enclosing brackets
1373 that started on this line.
1375 If a leaf is itself a closing bracket, it receives an `opening_bracket`
1376 field that it forms a pair with. This is a one-directional link to
1377 avoid reference cycles.
1379 If a leaf is a delimiter (a token on which Black can split the line if
1380 needed) and it's on depth 0, its `id()` is stored in the tracker's
1383 if leaf.type == token.COMMENT:
1386 self.maybe_decrement_after_for_loop_variable(leaf)
1387 self.maybe_decrement_after_lambda_arguments(leaf)
1388 if leaf.type in CLOSING_BRACKETS:
1391 opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
1392 except KeyError as e:
1393 raise BracketMatchError(
1394 "Unable to match a closing bracket to the following opening"
1397 leaf.opening_bracket = opening_bracket
1399 self.invisible.append(leaf)
1400 leaf.bracket_depth = self.depth
1402 delim = is_split_before_delimiter(leaf, self.previous)
1403 if delim and self.previous is not None:
1404 self.delimiters[id(self.previous)] = delim
1406 delim = is_split_after_delimiter(leaf, self.previous)
1408 self.delimiters[id(leaf)] = delim
1409 if leaf.type in OPENING_BRACKETS:
1410 self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
1413 self.invisible.append(leaf)
1414 self.previous = leaf
1415 self.maybe_increment_lambda_arguments(leaf)
1416 self.maybe_increment_for_loop_variable(leaf)
1418 def any_open_brackets(self) -> bool:
1419 """Return True if there is an yet unmatched open bracket on the line."""
1420 return bool(self.bracket_match)
1422 def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
1423 """Return the highest priority of a delimiter found on the line.
1425 Values are consistent with what `is_split_*_delimiter()` return.
1426 Raises ValueError on no delimiters.
1428 return max(v for k, v in self.delimiters.items() if k not in exclude)
1430 def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
1431 """Return the number of delimiters with the given `priority`.
1433 If no `priority` is passed, defaults to max priority on the line.
1435 if not self.delimiters:
1438 priority = priority or self.max_delimiter_priority()
1439 return sum(1 for p in self.delimiters.values() if p == priority)
1441 def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
1442 """In a for loop, or comprehension, the variables are often unpacks.
1444 To avoid splitting on the comma in this situation, increase the depth of
1445 tokens between `for` and `in`.
1447 if leaf.type == token.NAME and leaf.value == "for":
1449 self._for_loop_depths.append(self.depth)
1454 def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
1455 """See `maybe_increment_for_loop_variable` above for explanation."""
1457 self._for_loop_depths
1458 and self._for_loop_depths[-1] == self.depth
1459 and leaf.type == token.NAME
1460 and leaf.value == "in"
1463 self._for_loop_depths.pop()
1468 def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
1469 """In a lambda expression, there might be more than one argument.
1471 To avoid splitting on the comma in this situation, increase the depth of
1472 tokens between `lambda` and `:`.
1474 if leaf.type == token.NAME and leaf.value == "lambda":
1476 self._lambda_argument_depths.append(self.depth)
1481 def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
1482 """See `maybe_increment_lambda_arguments` above for explanation."""
1484 self._lambda_argument_depths
1485 and self._lambda_argument_depths[-1] == self.depth
1486 and leaf.type == token.COLON
1489 self._lambda_argument_depths.pop()
1494 def get_open_lsqb(self) -> Optional[Leaf]:
1495 """Return the most recent opening square bracket (if any)."""
1496 return self.bracket_match.get((self.depth - 1, token.RSQB))
1501 """Holds leaves and comments. Can be printed with `str(line)`."""
1505 leaves: List[Leaf] = field(default_factory=list)
1506 # keys ordered like `leaves`
1507 comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)
1508 bracket_tracker: BracketTracker = field(default_factory=BracketTracker)
1509 inside_brackets: bool = False
1510 should_split_rhs: bool = False
1511 magic_trailing_comma: Optional[Leaf] = None
1513 def append(self, leaf: Leaf, preformatted: bool = False) -> None:
1514 """Add a new `leaf` to the end of the line.
1516 Unless `preformatted` is True, the `leaf` will receive a new consistent
1517 whitespace prefix and metadata applied by :class:`BracketTracker`.
1518 Trailing commas are maybe removed, unpacked for loop variables are
1519 demoted from being delimiters.
1521 Inline comments are put aside.
1523 has_value = leaf.type in BRACKETS or bool(leaf.value.strip())
1527 if token.COLON == leaf.type and self.is_class_paren_empty:
1528 del self.leaves[-2:]
1529 if self.leaves and not preformatted:
1530 # Note: at this point leaf.prefix should be empty except for
1531 # imports, for which we only preserve newlines.
1532 leaf.prefix += whitespace(
1533 leaf, complex_subscript=self.is_complex_subscript(leaf)
1535 if self.inside_brackets or not preformatted:
1536 self.bracket_tracker.mark(leaf)
1537 if self.mode.magic_trailing_comma:
1538 if self.has_magic_trailing_comma(leaf):
1539 self.magic_trailing_comma = leaf
1540 elif self.has_magic_trailing_comma(leaf, ensure_removable=True):
1541 self.remove_trailing_comma()
1542 if not self.append_comment(leaf):
1543 self.leaves.append(leaf)
1545 def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
1546 """Like :func:`append()` but disallow invalid standalone comment structure.
1548 Raises ValueError when any `leaf` is appended after a standalone comment
1549 or when a standalone comment is not the first leaf on the line.
1551 if self.bracket_tracker.depth == 0:
1553 raise ValueError("cannot append to standalone comments")
1555 if self.leaves and leaf.type == STANDALONE_COMMENT:
1557 "cannot append standalone comments to a populated line"
1560 self.append(leaf, preformatted=preformatted)
1563 def is_comment(self) -> bool:
1564 """Is this line a standalone comment?"""
1565 return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
1568 def is_decorator(self) -> bool:
1569 """Is this line a decorator?"""
1570 return bool(self) and self.leaves[0].type == token.AT
1573 def is_import(self) -> bool:
1574 """Is this an import line?"""
1575 return bool(self) and is_import(self.leaves[0])
1578 def is_class(self) -> bool:
1579 """Is this line a class definition?"""
1582 and self.leaves[0].type == token.NAME
1583 and self.leaves[0].value == "class"
1587 def is_stub_class(self) -> bool:
1588 """Is this line a class definition with a body consisting only of "..."?"""
1589 return self.is_class and self.leaves[-3:] == [
1590 Leaf(token.DOT, ".") for _ in range(3)
1594 def is_def(self) -> bool:
1595 """Is this a function definition? (Also returns True for async defs.)"""
1597 first_leaf = self.leaves[0]
1602 second_leaf: Optional[Leaf] = self.leaves[1]
1605 return (first_leaf.type == token.NAME and first_leaf.value == "def") or (
1606 first_leaf.type == token.ASYNC
1607 and second_leaf is not None
1608 and second_leaf.type == token.NAME
1609 and second_leaf.value == "def"
1613 def is_class_paren_empty(self) -> bool:
1614 """Is this a class with no base classes but using parentheses?
1616 Those are unnecessary and should be removed.
1620 and len(self.leaves) == 4
1622 and self.leaves[2].type == token.LPAR
1623 and self.leaves[2].value == "("
1624 and self.leaves[3].type == token.RPAR
1625 and self.leaves[3].value == ")"
1629 def is_triple_quoted_string(self) -> bool:
1630 """Is the line a triple quoted string?"""
1633 and self.leaves[0].type == token.STRING
1634 and self.leaves[0].value.startswith(('"""', "'''"))
1637 def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
1638 """If so, needs to be split before emitting."""
1639 for leaf in self.leaves:
1640 if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit:
1645 def contains_uncollapsable_type_comments(self) -> bool:
1648 last_leaf = self.leaves[-1]
1649 ignored_ids.add(id(last_leaf))
1650 if last_leaf.type == token.COMMA or (
1651 last_leaf.type == token.RPAR and not last_leaf.value
1653 # When trailing commas or optional parens are inserted by Black for
1654 # consistency, comments after the previous last element are not moved
1655 # (they don't have to, rendering will still be correct). So we ignore
1656 # trailing commas and invisible.
1657 last_leaf = self.leaves[-2]
1658 ignored_ids.add(id(last_leaf))
1662 # A type comment is uncollapsable if it is attached to a leaf
1663 # that isn't at the end of the line (since that could cause it
1664 # to get associated to a different argument) or if there are
1665 # comments before it (since that could cause it to get hidden
1667 comment_seen = False
1668 for leaf_id, comments in self.comments.items():
1669 for comment in comments:
1670 if is_type_comment(comment):
1671 if comment_seen or (
1672 not is_type_comment(comment, " ignore")
1673 and leaf_id not in ignored_ids
1681 def contains_unsplittable_type_ignore(self) -> bool:
1685 # If a 'type: ignore' is attached to the end of a line, we
1686 # can't split the line, because we can't know which of the
1687 # subexpressions the ignore was meant to apply to.
1689 # We only want this to apply to actual physical lines from the
1690 # original source, though: we don't want the presence of a
1691 # 'type: ignore' at the end of a multiline expression to
1692 # justify pushing it all onto one line. Thus we
1693 # (unfortunately) need to check the actual source lines and
1694 # only report an unsplittable 'type: ignore' if this line was
1695 # one line in the original code.
1697 # Grab the first and last line numbers, skipping generated leaves
1698 first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0)
1700 (leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0
1703 if first_line == last_line:
1704 # We look at the last two leaves since a comma or an
1705 # invisible paren could have been added at the end of the
1707 for node in self.leaves[-2:]:
1708 for comment in self.comments.get(id(node), []):
1709 if is_type_comment(comment, " ignore"):
1714 def contains_multiline_strings(self) -> bool:
1715 return any(is_multiline_string(leaf) for leaf in self.leaves)
1717 def has_magic_trailing_comma(
1718 self, closing: Leaf, ensure_removable: bool = False
1720 """Return True if we have a magic trailing comma, that is when:
1721 - there's a trailing comma here
1722 - it's not a one-tuple
1723 Additionally, if ensure_removable:
1724 - it's not from square bracket indexing
1727 closing.type in CLOSING_BRACKETS
1729 and self.leaves[-1].type == token.COMMA
1733 if closing.type == token.RBRACE:
1736 if closing.type == token.RSQB:
1737 if not ensure_removable:
1739 comma = self.leaves[-1]
1740 return bool(comma.parent and comma.parent.type == syms.listmaker)
1745 if not is_one_tuple_between(closing.opening_bracket, closing, self.leaves):
1750 def append_comment(self, comment: Leaf) -> bool:
1751 """Add an inline or standalone comment to the line."""
1753 comment.type == STANDALONE_COMMENT
1754 and self.bracket_tracker.any_open_brackets()
1759 if comment.type != token.COMMENT:
1763 comment.type = STANDALONE_COMMENT
1767 last_leaf = self.leaves[-1]
1769 last_leaf.type == token.RPAR
1770 and not last_leaf.value
1771 and last_leaf.parent
1772 and len(list(last_leaf.parent.leaves())) <= 3
1773 and not is_type_comment(comment)
1775 # Comments on an optional parens wrapping a single leaf should belong to
1776 # the wrapped node except if it's a type comment. Pinning the comment like
1777 # this avoids unstable formatting caused by comment migration.
1778 if len(self.leaves) < 2:
1779 comment.type = STANDALONE_COMMENT
1783 last_leaf = self.leaves[-2]
1784 self.comments.setdefault(id(last_leaf), []).append(comment)
1787 def comments_after(self, leaf: Leaf) -> List[Leaf]:
1788 """Generate comments that should appear directly after `leaf`."""
1789 return self.comments.get(id(leaf), [])
1791 def remove_trailing_comma(self) -> None:
1792 """Remove the trailing comma and moves the comments attached to it."""
1793 trailing_comma = self.leaves.pop()
1794 trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
1795 self.comments.setdefault(id(self.leaves[-1]), []).extend(
1796 trailing_comma_comments
1799 def is_complex_subscript(self, leaf: Leaf) -> bool:
1800 """Return True iff `leaf` is part of a slice with non-trivial exprs."""
1801 open_lsqb = self.bracket_tracker.get_open_lsqb()
1802 if open_lsqb is None:
1805 subscript_start = open_lsqb.next_sibling
1807 if isinstance(subscript_start, Node):
1808 if subscript_start.type == syms.listmaker:
1811 if subscript_start.type == syms.subscriptlist:
1812 subscript_start = child_towards(subscript_start, leaf)
1813 return subscript_start is not None and any(
1814 n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
1817 def clone(self) -> "Line":
1821 inside_brackets=self.inside_brackets,
1822 should_split_rhs=self.should_split_rhs,
1823 magic_trailing_comma=self.magic_trailing_comma,
1826 def __str__(self) -> str:
1827 """Render the line."""
1831 indent = " " * self.depth
1832 leaves = iter(self.leaves)
1833 first = next(leaves)
1834 res = f"{first.prefix}{indent}{first.value}"
1837 for comment in itertools.chain.from_iterable(self.comments.values()):
1842 def __bool__(self) -> bool:
1843 """Return True if the line has leaves or comments."""
1844 return bool(self.leaves or self.comments)
1848 class EmptyLineTracker:
1849 """Provides a stateful method that returns the number of potential extra
1850 empty lines needed before and after the currently processed line.
1852 Note: this tracker works on lines that haven't been split yet. It assumes
1853 the prefix of the first leaf consists of optional newlines. Those newlines
1854 are consumed by `maybe_empty_lines()` and included in the computation.
1857 is_pyi: bool = False
1858 previous_line: Optional[Line] = None
1859 previous_after: int = 0
1860 previous_defs: List[int] = field(default_factory=list)
1862 def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
1863 """Return the number of extra empty lines before and after the `current_line`.
1865 This is for separating `def`, `async def` and `class` with extra empty
1866 lines (two on module-level).
1868 before, after = self._maybe_empty_lines(current_line)
1870 # Black should not insert empty lines at the beginning
1873 if self.previous_line is None
1874 else before - self.previous_after
1876 self.previous_after = after
1877 self.previous_line = current_line
1878 return before, after
1880 def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
1882 if current_line.depth == 0:
1883 max_allowed = 1 if self.is_pyi else 2
1884 if current_line.leaves:
1885 # Consume the first leaf's extra newlines.
1886 first_leaf = current_line.leaves[0]
1887 before = first_leaf.prefix.count("\n")
1888 before = min(before, max_allowed)
1889 first_leaf.prefix = ""
1892 depth = current_line.depth
1893 while self.previous_defs and self.previous_defs[-1] >= depth:
1894 self.previous_defs.pop()
1896 before = 0 if depth else 1
1898 before = 1 if depth else 2
1899 if current_line.is_decorator or current_line.is_def or current_line.is_class:
1900 return self._maybe_empty_lines_for_class_or_def(current_line, before)
1904 and self.previous_line.is_import
1905 and not current_line.is_import
1906 and depth == self.previous_line.depth
1908 return (before or 1), 0
1912 and self.previous_line.is_class
1913 and current_line.is_triple_quoted_string
1919 def _maybe_empty_lines_for_class_or_def(
1920 self, current_line: Line, before: int
1921 ) -> Tuple[int, int]:
1922 if not current_line.is_decorator:
1923 self.previous_defs.append(current_line.depth)
1924 if self.previous_line is None:
1925 # Don't insert empty lines before the first line in the file.
1928 if self.previous_line.is_decorator:
1929 if self.is_pyi and current_line.is_stub_class:
1930 # Insert an empty line after a decorated stub class
1935 if self.previous_line.depth < current_line.depth and (
1936 self.previous_line.is_class or self.previous_line.is_def
1941 self.previous_line.is_comment
1942 and self.previous_line.depth == current_line.depth
1948 if self.previous_line.depth > current_line.depth:
1950 elif current_line.is_class or self.previous_line.is_class:
1951 if current_line.is_stub_class and self.previous_line.is_stub_class:
1952 # No blank line between classes with an empty body
1957 current_line.is_def or current_line.is_decorator
1958 ) and not self.previous_line.is_def:
1959 # Blank line between a block of functions (maybe with preceding
1960 # decorators) and a block of non-functions
1966 if current_line.depth and newlines:
1972 class LineGenerator(Visitor[Line]):
1973 """Generates reformatted Line objects. Empty lines are not emitted.
1975 Note: destroys the tree it's visiting by mutating prefixes of its leaves
1976 in ways that will no longer stringify to valid Python code on the tree.
1980 remove_u_prefix: bool = False
1981 current_line: Line = field(init=False)
1983 def line(self, indent: int = 0) -> Iterator[Line]:
1986 If the line is empty, only emit if it makes sense.
1987 If the line is too long, split it first and then generate.
1989 If any lines were generated, set up a new current_line.
1991 if not self.current_line:
1992 self.current_line.depth += indent
1993 return # Line is empty, don't emit. Creating a new one unnecessary.
1995 complete_line = self.current_line
1996 self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent)
1999 def visit_default(self, node: LN) -> Iterator[Line]:
2000 """Default `visit_*()` implementation. Recurses to children of `node`."""
2001 if isinstance(node, Leaf):
2002 any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
2003 for comment in generate_comments(node):
2004 if any_open_brackets:
2005 # any comment within brackets is subject to splitting
2006 self.current_line.append(comment)
2007 elif comment.type == token.COMMENT:
2008 # regular trailing comment
2009 self.current_line.append(comment)
2010 yield from self.line()
2013 # regular standalone comment
2014 yield from self.line()
2016 self.current_line.append(comment)
2017 yield from self.line()
2019 normalize_prefix(node, inside_brackets=any_open_brackets)
2020 if self.mode.string_normalization and node.type == token.STRING:
2021 normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix)
2022 normalize_string_quotes(node)
2023 if node.type == token.NUMBER:
2024 normalize_numeric_literal(node)
2025 if node.type not in WHITESPACE:
2026 self.current_line.append(node)
2027 yield from super().visit_default(node)
2029 def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
2030 """Increase indentation level, maybe yield a line."""
2031 # In blib2to3 INDENT never holds comments.
2032 yield from self.line(+1)
2033 yield from self.visit_default(node)
2035 def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
2036 """Decrease indentation level, maybe yield a line."""
2037 # The current line might still wait for trailing comments. At DEDENT time
2038 # there won't be any (they would be prefixes on the preceding NEWLINE).
2039 # Emit the line then.
2040 yield from self.line()
2042 # While DEDENT has no value, its prefix may contain standalone comments
2043 # that belong to the current indentation level. Get 'em.
2044 yield from self.visit_default(node)
2046 # Finally, emit the dedent.
2047 yield from self.line(-1)
2050 self, node: Node, keywords: Set[str], parens: Set[str]
2051 ) -> Iterator[Line]:
2052 """Visit a statement.
2054 This implementation is shared for `if`, `while`, `for`, `try`, `except`,
2055 `def`, `with`, `class`, `assert` and assignments.
2057 The relevant Python language `keywords` for a given statement will be
2058 NAME leaves within it. This methods puts those on a separate line.
2060 `parens` holds a set of string leaf values immediately after which
2061 invisible parens should be put.
2063 normalize_invisible_parens(node, parens_after=parens)
2064 for child in node.children:
2065 if child.type == token.NAME and child.value in keywords: # type: ignore
2066 yield from self.line()
2068 yield from self.visit(child)
2070 def visit_suite(self, node: Node) -> Iterator[Line]:
2071 """Visit a suite."""
2072 if self.mode.is_pyi and is_stub_suite(node):
2073 yield from self.visit(node.children[2])
2075 yield from self.visit_default(node)
2077 def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
2078 """Visit a statement without nested statements."""
2079 if first_child_is_arith(node):
2080 wrap_in_parentheses(node, node.children[0], visible=False)
2081 is_suite_like = node.parent and node.parent.type in STATEMENT
2083 if self.mode.is_pyi and is_stub_body(node):
2084 yield from self.visit_default(node)
2086 yield from self.line(+1)
2087 yield from self.visit_default(node)
2088 yield from self.line(-1)
2092 not self.mode.is_pyi
2094 or not is_stub_suite(node.parent)
2096 yield from self.line()
2097 yield from self.visit_default(node)
2099 def visit_async_stmt(self, node: Node) -> Iterator[Line]:
2100 """Visit `async def`, `async for`, `async with`."""
2101 yield from self.line()
2103 children = iter(node.children)
2104 for child in children:
2105 yield from self.visit(child)
2107 if child.type == token.ASYNC:
2110 internal_stmt = next(children)
2111 for child in internal_stmt.children:
2112 yield from self.visit(child)
2114 def visit_decorators(self, node: Node) -> Iterator[Line]:
2115 """Visit decorators."""
2116 for child in node.children:
2117 yield from self.line()
2118 yield from self.visit(child)
2120 def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
2121 """Remove a semicolon and put the other statement on a separate line."""
2122 yield from self.line()
2124 def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
2125 """End of file. Process outstanding comments and end with a newline."""
2126 yield from self.visit_default(leaf)
2127 yield from self.line()
2129 def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
2130 if not self.current_line.bracket_tracker.any_open_brackets():
2131 yield from self.line()
2132 yield from self.visit_default(leaf)
2134 def visit_factor(self, node: Node) -> Iterator[Line]:
2135 """Force parentheses between a unary op and a binary power:
2137 -2 ** 8 -> -(2 ** 8)
2139 _operator, operand = node.children
2141 operand.type == syms.power
2142 and len(operand.children) == 3
2143 and operand.children[1].type == token.DOUBLESTAR
2145 lpar = Leaf(token.LPAR, "(")
2146 rpar = Leaf(token.RPAR, ")")
2147 index = operand.remove() or 0
2148 node.insert_child(index, Node(syms.atom, [lpar, operand, rpar]))
2149 yield from self.visit_default(node)
2151 def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
2152 if is_docstring(leaf) and "\\\n" not in leaf.value:
2153 # We're ignoring docstrings with backslash newline escapes because changing
2154 # indentation of those changes the AST representation of the code.
2155 prefix = get_string_prefix(leaf.value)
2156 lead_len = len(prefix) + 3
2158 indent = " " * 4 * self.current_line.depth
2159 docstring = fix_docstring(leaf.value[lead_len:tail_len], indent)
2161 if leaf.value[lead_len - 1] == docstring[0]:
2162 docstring = " " + docstring
2163 if leaf.value[tail_len + 1] == docstring[-1]:
2164 docstring = docstring + " "
2165 leaf.value = leaf.value[0:lead_len] + docstring + leaf.value[tail_len:]
2167 yield from self.visit_default(leaf)
2169 def __post_init__(self) -> None:
2170 """You are in a twisty little maze of passages."""
2171 self.current_line = Line(mode=self.mode)
2175 self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
2176 self.visit_if_stmt = partial(
2177 v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
2179 self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
2180 self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
2181 self.visit_try_stmt = partial(
2182 v, keywords={"try", "except", "else", "finally"}, parens=Ø
2184 self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
2185 self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
2186 self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
2187 self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
2188 self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
2189 self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
2190 self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
2191 self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
2192 self.visit_async_funcdef = self.visit_async_stmt
2193 self.visit_decorated = self.visit_decorators
2196 IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist}
2197 BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE}
2198 OPENING_BRACKETS = set(BRACKET.keys())
2199 CLOSING_BRACKETS = set(BRACKET.values())
2200 BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
2201 ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT}
2204 def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901
2205 """Return whitespace prefix if needed for the given `leaf`.
2207 `complex_subscript` signals whether the given leaf is part of a subscription
2208 which has non-trivial arguments, like arithmetic expressions or function calls.
2216 if t in ALWAYS_NO_SPACE:
2219 if t == token.COMMENT:
2222 assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
2223 if t == token.COLON and p.type not in {
2230 prev = leaf.prev_sibling
2232 prevp = preceding_leaf(p)
2233 if not prevp or prevp.type in OPENING_BRACKETS:
2236 if t == token.COLON:
2237 if prevp.type == token.COLON:
2240 elif prevp.type != token.COMMA and not complex_subscript:
2245 if prevp.type == token.EQUAL:
2247 if prevp.parent.type in {
2255 elif prevp.parent.type == syms.typedargslist:
2256 # A bit hacky: if the equal sign has whitespace, it means we
2257 # previously found it's a typed argument. So, we're using
2261 elif prevp.type in VARARGS_SPECIALS:
2262 if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS):
2265 elif prevp.type == token.COLON:
2266 if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
2267 return SPACE if complex_subscript else NO
2271 and prevp.parent.type == syms.factor
2272 and prevp.type in MATH_OPERATORS
2277 prevp.type == token.RIGHTSHIFT
2279 and prevp.parent.type == syms.shift_expr
2280 and prevp.prev_sibling
2281 and prevp.prev_sibling.type == token.NAME
2282 and prevp.prev_sibling.value == "print" # type: ignore
2284 # Python 2 print chevron
2286 elif prevp.type == token.AT and p.parent and p.parent.type == syms.decorator:
2287 # no space in decorators
2290 elif prev.type in OPENING_BRACKETS:
2293 if p.type in {syms.parameters, syms.arglist}:
2294 # untyped function signatures or calls
2295 if not prev or prev.type != token.COMMA:
2298 elif p.type == syms.varargslist:
2300 if prev and prev.type != token.COMMA:
2303 elif p.type == syms.typedargslist:
2304 # typed function signatures
2308 if t == token.EQUAL:
2309 if prev.type != syms.tname:
2312 elif prev.type == token.EQUAL:
2313 # A bit hacky: if the equal sign has whitespace, it means we
2314 # previously found it's a typed argument. So, we're using that, too.
2317 elif prev.type != token.COMMA:
2320 elif p.type == syms.tname:
2323 prevp = preceding_leaf(p)
2324 if not prevp or prevp.type != token.COMMA:
2327 elif p.type == syms.trailer:
2328 # attributes and calls
2329 if t == token.LPAR or t == token.RPAR:
2334 prevp = preceding_leaf(p)
2335 if not prevp or prevp.type != token.NUMBER:
2338 elif t == token.LSQB:
2341 elif prev.type != token.COMMA:
2344 elif p.type == syms.argument:
2346 if t == token.EQUAL:
2350 prevp = preceding_leaf(p)
2351 if not prevp or prevp.type == token.LPAR:
2354 elif prev.type in {token.EQUAL} | VARARGS_SPECIALS:
2357 elif p.type == syms.decorator:
2361 elif p.type == syms.dotted_name:
2365 prevp = preceding_leaf(p)
2366 if not prevp or prevp.type == token.AT or prevp.type == token.DOT:
2369 elif p.type == syms.classdef:
2373 if prev and prev.type == token.LPAR:
2376 elif p.type in {syms.subscript, syms.sliceop}:
2379 assert p.parent is not None, "subscripts are always parented"
2380 if p.parent.type == syms.subscriptlist:
2385 elif not complex_subscript:
2388 elif p.type == syms.atom:
2389 if prev and t == token.DOT:
2390 # dots, but not the first one.
2393 elif p.type == syms.dictsetmaker:
2395 if prev and prev.type == token.DOUBLESTAR:
2398 elif p.type in {syms.factor, syms.star_expr}:
2401 prevp = preceding_leaf(p)
2402 if not prevp or prevp.type in OPENING_BRACKETS:
2405 prevp_parent = prevp.parent
2406 assert prevp_parent is not None
2407 if prevp.type == token.COLON and prevp_parent.type in {
2413 elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
2416 elif t in {token.NAME, token.NUMBER, token.STRING}:
2419 elif p.type == syms.import_from:
2421 if prev and prev.type == token.DOT:
2424 elif t == token.NAME:
2428 if prev and prev.type == token.DOT:
2431 elif p.type == syms.sliceop:
2437 def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
2438 """Return the first leaf that precedes `node`, if any."""
2440 res = node.prev_sibling
2442 if isinstance(res, Leaf):
2446 return list(res.leaves())[-1]
2455 def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool:
2456 """Return if the `node` and its previous siblings match types against the provided
2457 list of tokens; the provided `node`has its type matched against the last element in
2458 the list. `None` can be used as the first element to declare that the start of the
2459 list is anchored at the start of its parent's children."""
2462 if tokens[-1] is None:
2466 if node.type != tokens[-1]:
2468 return prev_siblings_are(node.prev_sibling, tokens[:-1])
2471 def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]:
2472 """Return the child of `ancestor` that contains `descendant`."""
2473 node: Optional[LN] = descendant
2474 while node and node.parent != ancestor:
2479 def container_of(leaf: Leaf) -> LN:
2480 """Return `leaf` or one of its ancestors that is the topmost container of it.
2482 By "container" we mean a node where `leaf` is the very first child.
2484 same_prefix = leaf.prefix
2485 container: LN = leaf
2487 parent = container.parent
2491 if parent.children[0].prefix != same_prefix:
2494 if parent.type == syms.file_input:
2497 if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS:
2504 def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
2505 """Return the priority of the `leaf` delimiter, given a line break after it.
2507 The delimiter priorities returned here are from those delimiters that would
2508 cause a line break after themselves.
2510 Higher numbers are higher priority.
2512 if leaf.type == token.COMMA:
2513 return COMMA_PRIORITY
2518 def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
2519 """Return the priority of the `leaf` delimiter, given a line break before it.
2521 The delimiter priorities returned here are from those delimiters that would
2522 cause a line break before themselves.
2524 Higher numbers are higher priority.
2526 if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
2527 # * and ** might also be MATH_OPERATORS but in this case they are not.
2528 # Don't treat them as delimiters.
2532 leaf.type == token.DOT
2534 and leaf.parent.type not in {syms.import_from, syms.dotted_name}
2535 and (previous is None or previous.type in CLOSING_BRACKETS)
2540 leaf.type in MATH_OPERATORS
2542 and leaf.parent.type not in {syms.factor, syms.star_expr}
2544 return MATH_PRIORITIES[leaf.type]
2546 if leaf.type in COMPARATORS:
2547 return COMPARATOR_PRIORITY
2550 leaf.type == token.STRING
2551 and previous is not None
2552 and previous.type == token.STRING
2554 return STRING_PRIORITY
2556 if leaf.type not in {token.NAME, token.ASYNC}:
2562 and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
2563 or leaf.type == token.ASYNC
2566 not isinstance(leaf.prev_sibling, Leaf)
2567 or leaf.prev_sibling.value != "async"
2569 return COMPREHENSION_PRIORITY
2574 and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
2576 return COMPREHENSION_PRIORITY
2578 if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test:
2579 return TERNARY_PRIORITY
2581 if leaf.value == "is":
2582 return COMPARATOR_PRIORITY
2587 and leaf.parent.type in {syms.comp_op, syms.comparison}
2589 previous is not None
2590 and previous.type == token.NAME
2591 and previous.value == "not"
2594 return COMPARATOR_PRIORITY
2599 and leaf.parent.type == syms.comp_op
2601 previous is not None
2602 and previous.type == token.NAME
2603 and previous.value == "is"
2606 return COMPARATOR_PRIORITY
2608 if leaf.value in LOGIC_OPERATORS and leaf.parent:
2609 return LOGIC_PRIORITY
2614 FMT_OFF = {"# fmt: off", "# fmt:off", "# yapf: disable"}
2615 FMT_SKIP = {"# fmt: skip", "# fmt:skip"}
2616 FMT_PASS = {*FMT_OFF, *FMT_SKIP}
2617 FMT_ON = {"# fmt: on", "# fmt:on", "# yapf: enable"}
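# The markers above are matched verbatim against comment text. As a purely
# illustrative snippet of user code (hypothetical, not taken from this module):
# a region fenced by "# fmt: off" / "# fmt: on" and a single line marked
# "# fmt: skip" are left untouched by the formatter.
# fmt: off
identity_matrix = [
    1, 0, 0,
    0, 1, 0,
    0, 0, 1,
]
# fmt: on
totals = [1,   2,   3]  # fmt: skip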
2620 def generate_comments(leaf: LN) -> Iterator[Leaf]:
2621 """Clean the prefix of the `leaf` and generate comments from it, if any.
2623 Comments in lib2to3 are shoved into the whitespace prefix. This happens
2624 in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation
2625 move because it does away with modifying the grammar to include all the
2626 possible places in which comments can be placed.
2628 The sad consequence for us though is that comments don't "belong" anywhere.
2629 This is why this function generates simple parentless Leaf objects for
2630 comments. We simply don't know what the correct parent should be.
2632 No matter though, we can live without this. We really only need to
2633 differentiate between inline and standalone comments. The latter don't
2634 share the line with any code.
2636 Inline comments are emitted as regular token.COMMENT leaves. Standalone
2637 comments are emitted with a fake STANDALONE_COMMENT token identifier.
2639 for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
2640 yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines)
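# A small usage sketch (hypothetical leaf value, not from this module): the
# comments hiding in a leaf's whitespace prefix can be pulled out like so.
newline_leaf = Leaf(token.NEWLINE, "\n", prefix="  # a trailing comment")
for comment_leaf in generate_comments(newline_leaf):
    # Each yielded Leaf is parentless; its type is token.COMMENT for inline
    # comments and STANDALONE_COMMENT for comments on a line of their own.
    print(comment_leaf.type, comment_leaf.value)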
2645 """Describes a piece of syntax that is a comment.
2647 It's not a :class:`blib2to3.pytree.Leaf` so that:
2649 * it can be cached (`Leaf` objects should not be reused more than once as
2650 they store their lineno, column, prefix, and parent information);
2651 * `newlines` and `consumed` fields are kept separate from the `value`. This
2652 simplifies handling of special marker comments like ``# fmt: off/on``.
2655 type: int # token.COMMENT or STANDALONE_COMMENT
2656 value: str # content of the comment
2657 newlines: int # how many newlines before the comment
2658 consumed: int # how many characters of the original leaf's prefix did we consume
2661 @lru_cache(maxsize=4096)
2662 def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
2663 """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
2664 result: List[ProtoComment] = []
2665 if not prefix or "#" not in prefix:
2671 for index, line in enumerate(re.split("\r?\n", prefix)):
2672 consumed += len(line) + 1 # adding the length of the split '\n'
2673 line = line.lstrip()
2676 if not line.startswith("#"):
2677 # Escaped newlines outside of a comment are not really newlines at
2678 # all. We treat a single-line comment following an escaped newline
2679 # as a simple trailing comment.
2680 if line.endswith("\\"):
2684 if index == ignored_lines and not is_endmarker:
2685 comment_type = token.COMMENT # simple trailing comment
2687 comment_type = STANDALONE_COMMENT
2688 comment = make_comment(line)
2691 type=comment_type, value=comment, newlines=nlines, consumed=consumed
2698 def make_comment(content: str) -> str:
2699 """Return a consistently formatted comment from the given `content` string.
2701 All comments (except for "##", "#!", "#:", "#'", "#%%") should have a single
2702 space between the hash sign and the content.
2704 If `content` didn't start with a hash sign, one is provided.
2706 content = content.rstrip()
2710 if content[0] == "#":
2711 content = content[1:]
2712 NON_BREAKING_SPACE = " "
2715 and content[0] == NON_BREAKING_SPACE
2716 and not content.lstrip().startswith("type:")
2718 content = " " + content[1:] # Replace NBSP by a simple space
2719 if content and content[0] not in " !:#'%":
2720 content = " " + content
2721 return "#" + content
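# A few illustrative inputs and the results the rules above produce (hypothetical
# examples, not a test suite):
assert make_comment("comment without a hash") == "# comment without a hash"
assert make_comment("#comment") == "# comment"
assert make_comment("# already formatted   ") == "# already formatted"
assert make_comment("#!/usr/bin/env python") == "#!/usr/bin/env python"  # "#!" is exempt
assert make_comment("#: type: int") == "#: type: int"  # "#:" is exempt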
2725 line: Line, mode: Mode, features: Collection[Feature] = ()
2726 ) -> Iterator[Line]:
2727 """Transform a `line`, potentially splitting it into many lines.
2729 They should fit in the allotted `line_length` but might not be able to.
2731 `features` are syntactical features that may be used in the output.
2737 line_str = line_to_string(line)
2739 def init_st(ST: Type[StringTransformer]) -> StringTransformer:
2740 """Initialize StringTransformer"""
2741 return ST(mode.line_length, mode.string_normalization)
2743 string_merge = init_st(StringMerger)
2744 string_paren_strip = init_st(StringParenStripper)
2745 string_split = init_st(StringSplitter)
2746 string_paren_wrap = init_st(StringParenWrapper)
2748 transformers: List[Transformer]
2750 not line.contains_uncollapsable_type_comments()
2751 and not line.should_split_rhs
2752 and not line.magic_trailing_comma
2754 is_line_short_enough(line, line_length=mode.line_length, line_str=line_str)
2755 or line.contains_unsplittable_type_ignore()
2757 and not (line.inside_brackets and line.contains_standalone_comments())
2759 # Only apply basic string preprocessing, since lines shouldn't be split here.
2760 if mode.experimental_string_processing:
2761 transformers = [string_merge, string_paren_strip]
2765 transformers = [left_hand_split]
2768 def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]:
2769 """Wraps calls to `right_hand_split`.
2771 The calls increasingly `omit` right-hand trailers (bracket pairs with
2772 content), meaning the trailers get glued together to split on another
2773 bracket pair instead.
2775 for omit in generate_trailers_to_omit(line, mode.line_length):
2777 right_hand_split(line, mode.line_length, features, omit=omit)
2779 # Note: this check is only able to figure out if the first line of the
2780 # *current* transformation fits in the line length. This is true only
2781 # for simple cases. All others require running more transforms via
2782 # `transform_line()`. This check doesn't know if those would succeed.
2783 if is_line_short_enough(lines[0], line_length=mode.line_length):
2787 # All splits failed, best effort split with no omits.
2788 # This mostly happens to multiline strings that are by definition
2789 # reported as not fitting a single line, as well as lines that contain
2790 # trailing commas (those have to be exploded).
2791 yield from right_hand_split(
2792 line, line_length=mode.line_length, features=features
2795 if mode.experimental_string_processing:
2796 if line.inside_brackets:
2802 standalone_comment_split,
2815 if line.inside_brackets:
2816 transformers = [delimiter_split, standalone_comment_split, rhs]
2818 transformers = [rhs]
2820 for transform in transformers:
2821 # We are accumulating lines in `result` because we might want to abort
2822 # mission and return the original line in the end, or attempt a different
2825 result = run_transformer(line, transform, mode, features, line_str=line_str)
2826 except CannotTransform:
2836 @dataclass # type: ignore
2837 class StringTransformer(ABC):
2839 An implementation of the Transformer protocol that relies on its
2840 subclasses overriding the template methods `do_match(...)` and
2841 `do_transform(...)`.
2843 This Transformer works exclusively on strings (for example, by merging
2846 The following sections can be found among the docstrings of each concrete
2847 StringTransformer subclass.
2850 Which requirements must the given Line meet for this
2851 StringTransformer to be applied?
2854 If the given Line meets all of the above requirements, which string
2855 transformations can you expect to be applied to it by this
2859 What contractual agreements does this StringTransformer have with other
2860 StringTransformers? Such collaborations should be eliminated/minimized
2861 as much as possible.
2865 normalize_strings: bool
2866 __name__ = "StringTransformer"
2869 def do_match(self, line: Line) -> TMatchResult:
2872 * Ok(string_idx) such that `line.leaves[string_idx]` is our target
2873 string, if a match was able to be made.
2875 * Err(CannotTransform), if a match was not able to be made.
2879 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
2882 * Ok(new_line) where new_line is the new transformed line.
2884 * Err(CannotTransform) if the transformation failed for some reason. The
2885 `do_match(...)` template method should usually be used to reject
2886 the form of the given Line, but in some cases it is difficult to
2887 know whether or not a Line meets the StringTransformer's
2888 requirements until the transformation is already midway.
2891 This method should NOT mutate @line directly, but it MAY mutate the
2892 Line's underlying Node structure. (WARNING: If the underlying Node
2893 structure IS altered, then this method should NOT be allowed to
2894 yield a CannotTransform after that point.)
2897 def __call__(self, line: Line, _features: Collection[Feature]) -> Iterator[Line]:
2899 StringTransformer instances have a call signature that mirrors that of
2900 the Transformer type.
2903 CannotTransform(...) if the concrete StringTransformer class is unable
2906 # Optimization to avoid calling `self.do_match(...)` when the line does
2907 # not contain any string.
2908 if not any(leaf.type == token.STRING for leaf in line.leaves):
2909 raise CannotTransform("There are no strings in this line.")
2911 match_result = self.do_match(line)
2913 if isinstance(match_result, Err):
2914 cant_transform = match_result.err()
2915 raise CannotTransform(
2916 f"The string transformer {self.__class__.__name__} does not recognize"
2917 " this line as one that it can transform."
2918 ) from cant_transform
2920 string_idx = match_result.ok()
2922 for line_result in self.do_transform(line, string_idx):
2923 if isinstance(line_result, Err):
2924 cant_transform = line_result.err()
2925 raise CannotTransform(
2926 "StringTransformer failed while attempting to transform string."
2927 ) from cant_transform
2928 line = line_result.ok()
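# To make the template-method protocol above concrete, here is a minimal,
# purely illustrative subclass (hypothetical, not one of the transformers used
# by this module). It matches the first string leaf and yields the line unchanged.
class IdentityStringTransformer(StringTransformer):
    """Illustrative only: demonstrates the Ok/Err contract described above."""

    def do_match(self, line: Line) -> TMatchResult:
        for idx, leaf in enumerate(line.leaves):
            if leaf.type == token.STRING:
                return Ok(idx)
        return Err(CannotTransform("This line contains no string leaves."))

    def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
        # A real transformer would construct and yield new Line objects here.
        yield Ok(line)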
2934 """A custom (i.e. manual) string split.
2936 A single CustomSplit instance represents a single substring.
2939 Consider the following string:
2946 This string will correspond to the following three CustomSplit instances:
2948 CustomSplit(False, 16)
2949 CustomSplit(False, 17)
2950 CustomSplit(True, 16)
2958 class CustomSplitMapMixin:
2960 This mixin class is used to map merged strings to a sequence of
2961 CustomSplits, which will then be used to re-split the strings iff none of
2962 the resultant substrings go over the configured max line length.
2965 _Key = Tuple[StringID, str]
2966 _CUSTOM_SPLIT_MAP: Dict[_Key, Tuple[CustomSplit, ...]] = defaultdict(tuple)
2969 def _get_key(string: str) -> "CustomSplitMapMixin._Key":
2972 A unique identifier that is used internally to map @string to a
2973 group of custom splits.
2975 return (id(string), string)
2977 def add_custom_splits(
2978 self, string: str, custom_splits: Iterable[CustomSplit]
2980 """Custom Split Map Setter Method
2983 Adds a mapping from @string to the custom splits @custom_splits.
2985 key = self._get_key(string)
2986 self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)
2988 def pop_custom_splits(self, string: str) -> List[CustomSplit]:
2989 """Custom Split Map Getter Method
2992 * A list of the custom splits that are mapped to @string, if any
2998 Deletes the mapping between @string and its associated custom
2999 splits (which are returned to the caller).
3001 key = self._get_key(string)
3003 custom_splits = self._CUSTOM_SPLIT_MAP[key]
3004 del self._CUSTOM_SPLIT_MAP[key]
3006 return list(custom_splits)
3008 def has_custom_splits(self, string: str) -> bool:
3011 True iff @string is associated with a set of custom splits.
3013 key = self._get_key(string)
3014 return key in self._CUSTOM_SPLIT_MAP
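# A round trip through the mixin looks like this (illustrative values only; in
# practice StringMerger stores the splits and StringSplitter pops them back out):
split_map = CustomSplitMapMixin()
merged = "a merged string value"
split_map.add_custom_splits(merged, [CustomSplit(False, 9), CustomSplit(False, 16)])
assert split_map.has_custom_splits(merged)
splits = split_map.pop_custom_splits(merged)  # returns the stored splits...
assert splits and not split_map.has_custom_splits(merged)  # ...and clears the mapping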
3017 class StringMerger(CustomSplitMapMixin, StringTransformer):
3018 """StringTransformer that merges strings together.
3021 (A) The line contains adjacent strings such that ALL of the validation checks
3022 listed in StringMerger.__validate_msg(...)'s docstring pass.
3024 (B) The line contains a string which uses line continuation backslashes.
3027 Depending on which of the two requirements above were met, either:
3029 (A) The string group associated with the target string is merged.
3031 (B) All line-continuation backslashes are removed from the target string.
3034 StringMerger provides custom split information to StringSplitter.
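# For illustration (hypothetical input, not from this project's test data),
# requirement (A) turns an adjacent-string group such as
#     x = "Hello" " there," " friend."
# into a single leaf along the lines of
#     x = "Hello there, friend."
# while requirement (B) removes backslash line continuations inside one string.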
3037 def do_match(self, line: Line) -> TMatchResult:
3040 is_valid_index = is_valid_index_factory(LL)
3042 for (i, leaf) in enumerate(LL):
3044 leaf.type == token.STRING
3045 and is_valid_index(i + 1)
3046 and LL[i + 1].type == token.STRING
3050 if leaf.type == token.STRING and "\\\n" in leaf.value:
3053 return TErr("This line has no strings that need merging.")
3055 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
3057 rblc_result = self.__remove_backslash_line_continuation_chars(
3058 new_line, string_idx
3060 if isinstance(rblc_result, Ok):
3061 new_line = rblc_result.ok()
3063 msg_result = self.__merge_string_group(new_line, string_idx)
3064 if isinstance(msg_result, Ok):
3065 new_line = msg_result.ok()
3067 if isinstance(rblc_result, Err) and isinstance(msg_result, Err):
3068 msg_cant_transform = msg_result.err()
3069 rblc_cant_transform = rblc_result.err()
3070 cant_transform = CannotTransform(
3071 "StringMerger failed to merge any strings in this line."
3074 # Chain the errors together using `__cause__`.
3075 msg_cant_transform.__cause__ = rblc_cant_transform
3076 cant_transform.__cause__ = msg_cant_transform
3078 yield Err(cant_transform)
3083 def __remove_backslash_line_continuation_chars(
3084 line: Line, string_idx: int
3087 Merge strings that were split across multiple lines using
3088 line-continuation backslashes.
3091 Ok(new_line), if @line contains backslash line-continuation
3094 Err(CannotTransform), otherwise.
3098 string_leaf = LL[string_idx]
3100 string_leaf.type == token.STRING
3101 and "\\\n" in string_leaf.value
3102 and not has_triple_quotes(string_leaf.value)
3105 f"String leaf {string_leaf} does not contain any backslash line"
3106 " continuation characters."
3109 new_line = line.clone()
3110 new_line.comments = line.comments.copy()
3111 append_leaves(new_line, line, LL)
3113 new_string_leaf = new_line.leaves[string_idx]
3114 new_string_leaf.value = new_string_leaf.value.replace("\\\n", "")
3118 def __merge_string_group(self, line: Line, string_idx: int) -> TResult[Line]:
3120 Merges the string group (i.e. the set of adjacent strings) where the first
3121 string in the group is `line.leaves[string_idx]`.
3124 Ok(new_line), if ALL of the validation checks found in
3125 __validate_msg(...) pass.
3127 Err(CannotTransform), otherwise.
3131 is_valid_index = is_valid_index_factory(LL)
3133 vresult = self.__validate_msg(line, string_idx)
3134 if isinstance(vresult, Err):
3137 # If the string group is wrapped inside an Atom node, we must make sure
3138 # to later replace that Atom with our new (merged) string leaf.
3139 atom_node = LL[string_idx].parent
3141 # We will place BREAK_MARK in between every two substrings that we
3142 # merge. We will then later go through our final result and use the
3143 # various instances of BREAK_MARK we find to add the right values to
3144 # the custom split map.
3145 BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"
3147 QUOTE = LL[string_idx].value[-1]
3149 def make_naked(string: str, string_prefix: str) -> str:
3150 """Strip @string (i.e. make it a "naked" string)
3153 * assert_is_leaf_string(@string)
3156 A string that is identical to @string except that
3157 @string_prefix has been stripped, the surrounding QUOTE
3158 characters have been removed, and any remaining QUOTE
3159 characters have been escaped.
3161 assert_is_leaf_string(string)
3163 RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
3164 naked_string = string[len(string_prefix) + 1 : -1]
3165 naked_string = re.sub(
3166 "(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
3170 # Holds the CustomSplit objects that will later be added to the custom
3174 # Temporary storage for the 'has_prefix' part of the CustomSplit objects.
3177 # Sets the 'prefix' variable. This is the prefix that the final merged
3179 next_str_idx = string_idx
3183 and is_valid_index(next_str_idx)
3184 and LL[next_str_idx].type == token.STRING
3186 prefix = get_string_prefix(LL[next_str_idx].value)
3189 # The next loop merges the string group. The final string will be
3192 # The following convenience variables are used:
3197 # NSS: naked next string
3201 next_str_idx = string_idx
3202 while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
3205 SS = LL[next_str_idx].value
3206 next_prefix = get_string_prefix(SS)
3208 # If this is an f-string group but this substring is not prefixed
3210 if "f" in prefix and "f" not in next_prefix:
3211 # Then we must escape any braces contained in this substring.
3212 SS = re.subf(r"(\{|\})", "{1}{1}", SS)
3214 NSS = make_naked(SS, next_prefix)
3216 has_prefix = bool(next_prefix)
3217 prefix_tracker.append(has_prefix)
3219 S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
3220 NS = make_naked(S, prefix)
3224 S_leaf = Leaf(token.STRING, S)
3225 if self.normalize_strings:
3226 normalize_string_quotes(S_leaf)
3228 # Fill the 'custom_splits' list with the appropriate CustomSplit objects.
3229 temp_string = S_leaf.value[len(prefix) + 1 : -1]
3230 for has_prefix in prefix_tracker:
3231 mark_idx = temp_string.find(BREAK_MARK)
3234 ), "Logic error while filling the custom string breakpoint cache."
3236 temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
3237 breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
3238 custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))
3240 string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))
3242 if atom_node is not None:
3243 replace_child(atom_node, string_leaf)
3245 # Build the final line ('new_line') that this method will later return.
3246 new_line = line.clone()
3247 for (i, leaf) in enumerate(LL):
3249 new_line.append(string_leaf)
3251 if string_idx <= i < string_idx + num_of_strings:
3252 for comment_leaf in line.comments_after(LL[i]):
3253 new_line.append(comment_leaf, preformatted=True)
3256 append_leaves(new_line, line, [leaf])
3258 self.add_custom_splits(string_leaf.value, custom_splits)
3262 def __validate_msg(line: Line, string_idx: int) -> TResult[None]:
3263 """Validate (M)erge (S)tring (G)roup
3265 Transform-time string validation logic for __merge_string_group(...).
3268 * Ok(None), if ALL validation checks (listed below) pass.
3270 * Err(CannotTransform), if any of the following are true:
3271 - The target string group contains inner stand-alone comments (i.e. stand-alone comments with a string leaf both before and after them).
3272 - The target string is not in a string group (i.e. it has no
3274 - The string group has more than one inline comment.
3275 - The string group has an inline comment that appears to be a pragma.
3276 - The set of all string prefixes in the string group is of
3277 length greater than one and is not equal to {"", "f"}.
3278 - The string group consists of raw strings.
3280 # We first check for "inner" stand-alone comments (i.e. stand-alone
3281 # comments that have a string leaf before them AND after them).
3284 found_sa_comment = False
3285 is_valid_index = is_valid_index_factory(line.leaves)
3286 while is_valid_index(i) and line.leaves[i].type in [
3290 if line.leaves[i].type == STANDALONE_COMMENT:
3291 found_sa_comment = True
3292 elif found_sa_comment:
3294 "StringMerger does NOT merge string groups which contain "
3295 "stand-alone comments."
3300 num_of_inline_string_comments = 0
3301 set_of_prefixes = set()
3303 for leaf in line.leaves[string_idx:]:
3304 if leaf.type != token.STRING:
3305 # If the string group is trailed by a comma, we count the
3306 # comments trailing the comma to be one of the string group's
3308 if leaf.type == token.COMMA and id(leaf) in line.comments:
3309 num_of_inline_string_comments += 1
3312 if has_triple_quotes(leaf.value):
3313 return TErr("StringMerger does NOT merge multiline strings.")
3316 prefix = get_string_prefix(leaf.value)
3318 return TErr("StringMerger does NOT merge raw strings.")
3320 set_of_prefixes.add(prefix)
3322 if id(leaf) in line.comments:
3323 num_of_inline_string_comments += 1
3324 if contains_pragma_comment(line.comments[id(leaf)]):
3325 return TErr("Cannot merge strings which have pragma comments.")
3327 if num_of_strings < 2:
3329 f"Not enough strings to merge (num_of_strings={num_of_strings})."
3332 if num_of_inline_string_comments > 1:
3334 f"Too many inline string comments ({num_of_inline_string_comments})."
3337 if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}:
3338 return TErr(f"Too many different prefixes ({set_of_prefixes}).")
3343 class StringParenStripper(StringTransformer):
3344 """StringTransformer that strips surrounding parentheses from strings.
3347 The line contains a string which is surrounded by parentheses and:
3348 - The target string is NOT the only argument to a function call.
3349 - The target string is NOT a "pointless" string.
3350 - If the target string contains a PERCENT, the brackets are not
3351 preceded or followed by an operator with higher precedence than
3355 The parentheses mentioned in the 'Requirements' section are stripped.
3358 StringParenStripper has its own inherent usefulness, but it is also
3359 relied on to clean up the parentheses created by StringParenWrapper (in
3360 the event that they are no longer needed).
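# For illustration (hypothetical input): the transformation described above turns
#     value = ("a string wrapped in redundant parentheses")
# into
#     value = "a string wrapped in redundant parentheses"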
3363 def do_match(self, line: Line) -> TMatchResult:
3366 is_valid_index = is_valid_index_factory(LL)
3368 for (idx, leaf) in enumerate(LL):
3369 # Should be a string...
3370 if leaf.type != token.STRING:
3373 # If this is a "pointless" string...
3376 and leaf.parent.parent
3377 and leaf.parent.parent.type == syms.simple_stmt
3381 # Should be preceded by a non-empty LPAR...
3383 not is_valid_index(idx - 1)
3384 or LL[idx - 1].type != token.LPAR
3385 or is_empty_lpar(LL[idx - 1])
3389 # That LPAR should NOT be preceded by a function name or a closing
3390 # bracket (which could be a function which returns a function or a
3391 # list/dictionary that contains a function)...
3392 if is_valid_index(idx - 2) and (
3393 LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS
3399 # Skip the string trailer, if one exists.
3400 string_parser = StringParser()
3401 next_idx = string_parser.parse(LL, string_idx)
3403 # if the leaves in the parsed string include a PERCENT, we need to
3404 # make sure the initial LPAR is NOT preceded by an operator with
3405 # higher or equal precedence to PERCENT
3406 if is_valid_index(idx - 2):
3407 # mypy can't quite follow unless we name this
3408 before_lpar = LL[idx - 2]
3409 if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and (
3426 # only unary PLUS/MINUS
3428 and before_lpar.parent.type == syms.factor
3429 and (before_lpar.type in {token.PLUS, token.MINUS})
3434 # Should be followed by a non-empty RPAR...
3436 is_valid_index(next_idx)
3437 and LL[next_idx].type == token.RPAR
3438 and not is_empty_rpar(LL[next_idx])
3440 # That RPAR should NOT be followed by anything with higher
3441 # precedence than PERCENT
3442 if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in {
3450 return Ok(string_idx)
3452 return TErr("This line has no strings wrapped in parens.")
3454 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
3457 string_parser = StringParser()
3458 rpar_idx = string_parser.parse(LL, string_idx)
3460 for leaf in (LL[string_idx - 1], LL[rpar_idx]):
3461 if line.comments_after(leaf):
3463 "Will not strip parentheses which have comments attached to them."
3467 new_line = line.clone()
3468 new_line.comments = line.comments.copy()
3470 append_leaves(new_line, line, LL[: string_idx - 1])
3471 except BracketMatchError:
3472 # HACK: I believe there is currently a bug somewhere in
3473 # right_hand_split() that is causing brackets to not be tracked
3474 # properly by a shared BracketTracker.
3475 append_leaves(new_line, line, LL[: string_idx - 1], preformatted=True)
3477 string_leaf = Leaf(token.STRING, LL[string_idx].value)
3478 LL[string_idx - 1].remove()
3479 replace_child(LL[string_idx], string_leaf)
3480 new_line.append(string_leaf)
3483 new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :]
3486 LL[rpar_idx].remove()
3491 class BaseStringSplitter(StringTransformer):
3493 Abstract class for StringTransformers which transform a Line's strings by splitting
3494 them or placing them on their own lines where necessary to avoid going over
3495 the configured line length.
3498 * The target string value is responsible for the line going over the
3499 line length limit. It follows that after all of black's other line
3500 split methods have been exhausted, this line (or one of the resulting
3501 lines after all line splits are performed) would still be over the
3502 line_length limit unless we split this string.
3504 * The target string is NOT a "pointless" string (i.e. a string that has
3505 no parent or siblings).
3507 * The target string is not followed by an inline comment that appears
3510 * The target string is not a multiline (i.e. triple-quote) string.
3514 def do_splitter_match(self, line: Line) -> TMatchResult:
3516 BaseStringSplitter asks its clients to override this method instead of
3517 `StringTransformer.do_match(...)`.
3519 Follows the same protocol as `StringTransformer.do_match(...)`.
3521 Refer to `help(StringTransformer.do_match)` for more information.
3524 def do_match(self, line: Line) -> TMatchResult:
3525 match_result = self.do_splitter_match(line)
3526 if isinstance(match_result, Err):
3529 string_idx = match_result.ok()
3530 vresult = self.__validate(line, string_idx)
3531 if isinstance(vresult, Err):
3536 def __validate(self, line: Line, string_idx: int) -> TResult[None]:
3538 Checks that @line meets all of the requirements listed in this class's
3539 docstring. Refer to `help(BaseStringSplitter)` for a detailed
3540 description of those requirements.
3543 * Ok(None), if ALL of the requirements are met.
3545 * Err(CannotTransform), if ANY of the requirements are NOT met.
3549 string_leaf = LL[string_idx]
3551 max_string_length = self.__get_max_string_length(line, string_idx)
3552 if len(string_leaf.value) <= max_string_length:
3554 "The string itself is not what is causing this line to be too long."
3557 if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [
3562 f"This string ({string_leaf.value}) appears to be pointless (i.e. has"
3566 if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment(
3567 line.comments[id(line.leaves[string_idx])]
3570 "Line appears to end with an inline pragma comment. Splitting the line"
3571 " could modify the pragma's behavior."
3574 if has_triple_quotes(string_leaf.value):
3575 return TErr("We cannot split multiline strings.")
3579 def __get_max_string_length(self, line: Line, string_idx: int) -> int:
3581 Calculates the max string length used when attempting to determine
3582 whether or not the target string is responsible for causing the line to
3583 go over the line length limit.
3585 WARNING: This method is tightly coupled to both StringSplitter and
3586 (especially) StringParenWrapper. There is probably a better way to
3587 accomplish what is being done here.
3590 max_string_length: such that `len(line.leaves[string_idx].value) >
3591 max_string_length` implies that the target string IS responsible
3592 for causing this line to exceed the line length limit.
3596 is_valid_index = is_valid_index_factory(LL)
3598 # We use the shorthand "WMA4" in comments to abbreviate "We must
3599 # account for". When giving examples, we use STRING to mean some/any
3602 # Finally, we use the following convenience variables:
3604 # P: The leaf that is before the target string leaf.
3605 # N: The leaf that is after the target string leaf.
3606 # NN: The leaf that is after N.
3608 # WMA4 the whitespace at the beginning of the line.
3609 offset = line.depth * 4
3611 if is_valid_index(string_idx - 1):
3612 p_idx = string_idx - 1
3614 LL[string_idx - 1].type == token.LPAR
3615 and LL[string_idx - 1].value == ""
3618 # If the previous leaf is an empty LPAR placeholder, we should skip it.
3622 if P.type == token.PLUS:
3623 # WMA4 a space and a '+' character (e.g. `+ STRING`).
3626 if P.type == token.COMMA:
3627 # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`].
3630 if P.type in [token.COLON, token.EQUAL, token.NAME]:
3631 # This conditional branch is meant to handle dictionary keys,
3632 # variable assignments, 'return STRING' statement lines, and
3633 # 'else STRING' ternary expression lines.
3635 # WMA4 a single space.
3638 # WMA4 the lengths of any leaves that came before that space,
3639 # but after any closing bracket before that space.
3640 for leaf in reversed(LL[: p_idx + 1]):
3641 offset += len(str(leaf))
3642 if leaf.type in CLOSING_BRACKETS:
3645 if is_valid_index(string_idx + 1):
3646 N = LL[string_idx + 1]
3647 if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2:
3648 # If the next leaf is an empty RPAR placeholder, we should skip it.
3649 N = LL[string_idx + 2]
3651 if N.type == token.COMMA:
3652 # WMA4 a single comma at the end of the string (e.g `STRING,`).
3655 if is_valid_index(string_idx + 2):
3656 NN = LL[string_idx + 2]
3658 if N.type == token.DOT and NN.type == token.NAME:
3659 # This conditional branch is meant to handle method calls invoked
3660 # off of a string literal up to and including the LPAR character.
3662 # WMA4 the '.' character.
3666 is_valid_index(string_idx + 3)
3667 and LL[string_idx + 3].type == token.LPAR
3669 # WMA4 the left parenthesis character.
3672 # WMA4 the length of the method's name.
3673 offset += len(NN.value)
3675 has_comments = False
3676 for comment_leaf in line.comments_after(LL[string_idx]):
3677 if not has_comments:
3679 # WMA4 two spaces before the '#' character.
3682 # WMA4 the length of the inline comment.
3683 offset += len(comment_leaf.value)
3685 max_string_length = self.line_length - offset
3686 return max_string_length
3689 class StringSplitter(CustomSplitMapMixin, BaseStringSplitter):
3691 StringTransformer that splits "atom" strings (i.e. strings which exist on
3692 lines by themselves).
3695 * The line consists ONLY of a single string (with the exception of a
3696 '+' symbol which MAY exist at the start of the line), MAYBE a string
3697 trailer, and MAYBE a trailing comma.
3699 * All of the requirements listed in BaseStringSplitter's docstring.
3702 The string mentioned in the 'Requirements' section is split into as
3703 many substrings as necessary to adhere to the configured line length.
3705 In the final set of substrings, no substring should be smaller than
3706 MIN_SUBSTR_SIZE characters.
3708 The string will ONLY be split on spaces (i.e. each new substring should
3709 start with a space). Note that the string will NOT be split on a space
3710 which is escaped with a backslash.
3712 If the string is an f-string, it will NOT be split in the middle of an
3713 f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x
3714 else bar()} is an f-expression).
3716 If the string that is being split has an associated set of custom split
3717 records and those custom splits will NOT result in any line going over
3718 the configured line length, those custom splits are used. Otherwise the
3719 string is split as late as possible (from left-to-right) while still
3720 adhering to the transformation rules listed above.
3723 StringSplitter relies on StringMerger to construct the appropriate
3724 CustomSplit objects and add them to the custom split map.
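# For illustration (hypothetical input; the exact split point depends on the
# configured line length), a long "atom" string such as
#     "one long argument string that does not fit on a single line within the limit"
# is re-emitted as implicitly concatenated substrings, each new piece starting
# with a space:
#     "one long argument string that does not fit on a single line within"
#     " the limit"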
3728 # Matches an "f-expression" (e.g. {var}) that might be found in an f-string.
3730 (?<!\{) (?:\{\{)* \{ (?!\{)
3737 (?<!\}) \} (?:\}\})* (?!\})
3740 def do_splitter_match(self, line: Line) -> TMatchResult:
3743 is_valid_index = is_valid_index_factory(LL)
3747 # The first leaf MAY be a '+' symbol...
3748 if is_valid_index(idx) and LL[idx].type == token.PLUS:
3751 # The next/first leaf MAY be an empty LPAR...
3752 if is_valid_index(idx) and is_empty_lpar(LL[idx]):
3755 # The next/first leaf MUST be a string...
3756 if not is_valid_index(idx) or LL[idx].type != token.STRING:
3757 return TErr("Line does not start with a string.")
3761 # Skip the string trailer, if one exists.
3762 string_parser = StringParser()
3763 idx = string_parser.parse(LL, string_idx)
3765 # That string MAY be followed by an empty RPAR...
3766 if is_valid_index(idx) and is_empty_rpar(LL[idx]):
3769 # That string / empty RPAR leaf MAY be followed by a comma...
3770 if is_valid_index(idx) and LL[idx].type == token.COMMA:
3773 # But no more leaves are allowed...
3774 if is_valid_index(idx):
3775 return TErr("This line does not end with a string.")
3777 return Ok(string_idx)
3779 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
3782 QUOTE = LL[string_idx].value[-1]
3784 is_valid_index = is_valid_index_factory(LL)
3785 insert_str_child = insert_str_child_factory(LL[string_idx])
3787 prefix = get_string_prefix(LL[string_idx].value)
3789 # We MAY choose to drop the 'f' prefix from substrings that don't
3790 # contain any f-expressions, but ONLY if the original f-string
3791 # contains at least one f-expression. Otherwise, we will alter the AST
3793 drop_pointless_f_prefix = ("f" in prefix) and re.search(
3794 self.RE_FEXPR, LL[string_idx].value, re.VERBOSE
3797 first_string_line = True
3798 starts_with_plus = LL[0].type == token.PLUS
3800 def line_needs_plus() -> bool:
3801 return first_string_line and starts_with_plus
3803 def maybe_append_plus(new_line: Line) -> None:
3806 If @line starts with a plus and this is the first line we are
3807 constructing, this function appends a PLUS leaf to @new_line
3808 and replaces the old PLUS leaf in the node structure. Otherwise
3809 this function does nothing.
3811 if line_needs_plus():
3812 plus_leaf = Leaf(token.PLUS, "+")
3813 replace_child(LL[0], plus_leaf)
3814 new_line.append(plus_leaf)
3817 is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA
3820 def max_last_string() -> int:
3823 The max allowed length of the string value used for the last
3824 line we will construct.
3826 result = self.line_length
3827 result -= line.depth * 4
3828 result -= 1 if ends_with_comma else 0
3829 result -= 2 if line_needs_plus() else 0
3832 # --- Calculate Max Break Index (for string value)
3833 # We start with the line length limit
3834 max_break_idx = self.line_length
3835 # The last index of a string of length N is N-1.
3837 # Leading whitespace is not present in the string value (e.g. Leaf.value).
3838 max_break_idx -= line.depth * 4
3839 if max_break_idx < 0:
3841 f"Unable to split {LL[string_idx].value} at such high of a line depth:"
3846 # Check if StringMerger registered any custom splits.
3847 custom_splits = self.pop_custom_splits(LL[string_idx].value)
3848 # We use them ONLY if none of them would produce lines that exceed the
3850 use_custom_breakpoints = bool(
3852 and all(csplit.break_idx <= max_break_idx for csplit in custom_splits)
3855 # Temporary storage for the remaining chunk of the string line that
3856 # can't fit onto the line currently being constructed.
3857 rest_value = LL[string_idx].value
3859 def more_splits_should_be_made() -> bool:
3862 True iff `rest_value` (the remaining string value from the last
3863 split) should be split again.
3865 if use_custom_breakpoints:
3866 return len(custom_splits) > 1
3868 return len(rest_value) > max_last_string()
3870 string_line_results: List[Ok[Line]] = []
3871 while more_splits_should_be_made():
3872 if use_custom_breakpoints:
3873 # Custom User Split (manual)
3874 csplit = custom_splits.pop(0)
3875 break_idx = csplit.break_idx
3877 # Algorithmic Split (automatic)
3878 max_bidx = max_break_idx - 2 if line_needs_plus() else max_break_idx
3879 maybe_break_idx = self.__get_break_idx(rest_value, max_bidx)
3880 if maybe_break_idx is None:
3881 # If we are unable to algorithmically determine a good split
3882 # and this string has custom splits registered to it, we
3883 # fall back to using them--which means we have to start
3884 # over from the beginning.
3886 rest_value = LL[string_idx].value
3887 string_line_results = []
3888 first_string_line = True
3889 use_custom_breakpoints = True
3892 # Otherwise, we stop splitting here.
3895 break_idx = maybe_break_idx
3897 # --- Construct `next_value`
3898 next_value = rest_value[:break_idx] + QUOTE
3900 # Are we allowed to try to drop a pointless 'f' prefix?
3901 drop_pointless_f_prefix
3902 # If we are, will we be successful?
3903 and next_value != self.__normalize_f_string(next_value, prefix)
3905 # If the current custom split did NOT originally use a prefix,
3906 # then `csplit.break_idx` will be off by one after removing
3910 if use_custom_breakpoints and not csplit.has_prefix
3913 next_value = rest_value[:break_idx] + QUOTE
3914 next_value = self.__normalize_f_string(next_value, prefix)
3916 # --- Construct `next_leaf`
3917 next_leaf = Leaf(token.STRING, next_value)
3918 insert_str_child(next_leaf)
3919 self.__maybe_normalize_string_quotes(next_leaf)
3921 # --- Construct `next_line`
3922 next_line = line.clone()
3923 maybe_append_plus(next_line)
3924 next_line.append(next_leaf)
3925 string_line_results.append(Ok(next_line))
3927 rest_value = prefix + QUOTE + rest_value[break_idx:]
3928 first_string_line = False
3930 yield from string_line_results
3932 if drop_pointless_f_prefix:
3933 rest_value = self.__normalize_f_string(rest_value, prefix)
3935 rest_leaf = Leaf(token.STRING, rest_value)
3936 insert_str_child(rest_leaf)
3938 # NOTE: I could not find a test case that verifies that the following
3939 # line is actually necessary, but it seems to be. Otherwise we risk
3940 # not normalizing the last substring, right?
3941 self.__maybe_normalize_string_quotes(rest_leaf)
3943 last_line = line.clone()
3944 maybe_append_plus(last_line)
3946 # If there are any leaves to the right of the target string...
3947 if is_valid_index(string_idx + 1):
3948 # We use `temp_value` here to determine how long the last line
3949 # would be if we were to append all the leaves to the right of the
3950 # target string to the last string line.
3951 temp_value = rest_value
3952 for leaf in LL[string_idx + 1 :]:
3953 temp_value += str(leaf)
3954 if leaf.type == token.LPAR:
3957 # Try to fit them all on the same line with the last substring...
3959 len(temp_value) <= max_last_string()
3960 or LL[string_idx + 1].type == token.COMMA
3962 last_line.append(rest_leaf)
3963 append_leaves(last_line, line, LL[string_idx + 1 :])
3965 # Otherwise, place the last substring on one line and everything
3966 # else on a line below that...
3968 last_line.append(rest_leaf)
3971 non_string_line = line.clone()
3972 append_leaves(non_string_line, line, LL[string_idx + 1 :])
3973 yield Ok(non_string_line)
3974 # Else the target string was the last leaf...
3976 last_line.append(rest_leaf)
3977 last_line.comments = line.comments.copy()
3980 def __get_break_idx(self, string: str, max_break_idx: int) -> Optional[int]:
3982 This method contains the algorithm that StringSplitter uses to
3983 determine which character to split each string at.
3986 @string: The substring that we are attempting to split.
3987 @max_break_idx: The ideal break index. We will return this value if it
3988 meets all the necessary conditions. In the likely event that it
3989 doesn't we will try to find the closest index BELOW @max_break_idx
3990 that does. If that fails, we will expand our search by also
3991 considering all valid indices ABOVE @max_break_idx.
3994 * assert_is_leaf_string(@string)
3995 * 0 <= @max_break_idx < len(@string)
3998 break_idx, if an index is able to be found that meets all of the
3999 conditions listed in the 'Transformations' section of this class's
4004 is_valid_index = is_valid_index_factory(string)
4006 assert is_valid_index(max_break_idx)
4007 assert_is_leaf_string(string)
4009 _fexpr_slices: Optional[List[Tuple[Index, Index]]] = None
4011 def fexpr_slices() -> Iterator[Tuple[Index, Index]]:
4014 All ranges of @string which, if @string were to be split there,
4015 would result in the splitting of an f-expression (which is NOT
4018 nonlocal _fexpr_slices
4020 if _fexpr_slices is None:
4022 for match in re.finditer(self.RE_FEXPR, string, re.VERBOSE):
4023 _fexpr_slices.append(match.span())
4025 yield from _fexpr_slices
4027 is_fstring = "f" in get_string_prefix(string)
4029 def breaks_fstring_expression(i: Index) -> bool:
4032 True iff returning @i would result in the splitting of an
4033 f-expression (which is NOT allowed).
4038 for (start, end) in fexpr_slices():
4039 if start <= i < end:
4044 def passes_all_checks(i: Index) -> bool:
4047 True iff ALL of the conditions listed in the 'Transformations'
4048 section of this class's docstring would be met by returning @i.
4050 is_space = string[i] == " "
4052 is_not_escaped = True
4054 while is_valid_index(j) and string[j] == "\\":
4055 is_not_escaped = not is_not_escaped
4059 len(string[i:]) >= self.MIN_SUBSTR_SIZE
4060 and len(string[:i]) >= self.MIN_SUBSTR_SIZE
4066 and not breaks_fstring_expression(i)
4069 # First, we check all indices BELOW @max_break_idx.
4070 break_idx = max_break_idx
4071 while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx):
4074 if not passes_all_checks(break_idx):
4075 # If that fails, we check all indices ABOVE @max_break_idx.
4077 # If we are able to find a valid index here, the next line is going
4078 # to be longer than the specified line length, but it's probably
4079 # better than doing nothing at all.
4080 break_idx = max_break_idx + 1
4081 while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx):
4084 if not is_valid_index(break_idx) or not passes_all_checks(break_idx):
4089 def __maybe_normalize_string_quotes(self, leaf: Leaf) -> None:
4090 if self.normalize_strings:
4091 normalize_string_quotes(leaf)
4093 def __normalize_f_string(self, string: str, prefix: str) -> str:
4096 * assert_is_leaf_string(@string)
4099 * If @string is an f-string that contains no f-expressions, we
4100 return a string identical to @string except that the 'f' prefix
4101 has been stripped and all double braces (i.e. '{{' or '}}') have
4102 been normalized (i.e. turned into '{' or '}').
4104 * Otherwise, we return @string.
4106 assert_is_leaf_string(string)
4108 if "f" in prefix and not re.search(self.RE_FEXPR, string, re.VERBOSE):
4109 new_prefix = prefix.replace("f", "")
4111 temp = string[len(prefix) :]
4112 temp = re.sub(r"\{\{", "{", temp)
4113 temp = re.sub(r"\}\}", "}", temp)
4116 return f"{new_prefix}{new_string}"
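# For illustration (hypothetical value): an f-string that contains no
# f-expressions, such as
#     f"{{these are literal braces}}"
# is returned by the method above as
#     "{these are literal braces}"
# i.e. the pointless 'f' prefix is dropped and the doubled braces are collapsed.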
4121 class StringParenWrapper(CustomSplitMapMixin, BaseStringSplitter):
4123 StringTransformer that splits non-"atom" strings (i.e. strings that do not
4124 exist on lines by themselves).
4127 All of the requirements listed in BaseStringSplitter's docstring in
4128 addition to the requirements listed below:
4130 * The line is a return/yield statement, which returns/yields a string.
4132 * The line is part of a ternary expression (e.g. `x = y if cond else
4133 z`) such that the line starts with `else <string>`, where <string> is
4136 * The line is an assert statement, which ends with a string.
4138 * The line is an assignment statement (e.g. `x = <string>` or `x +=
4139 <string>`) such that the variable is being assigned the value of some
4142 * The line is a dictionary key assignment where some valid key is being
4143 assigned the value of some string.
4146 The chosen string is wrapped in parentheses and then split at the LPAR.
4148 We then have one line which ends with an LPAR and another line that
4149 starts with the chosen string. The latter line is then split again at
4150 the RPAR. This results in the RPAR (and possibly a trailing comma)
4151 being placed on its own line.
4153 NOTE: If any leaves exist to the right of the chosen string (except
4154 for a trailing comma, which would be placed after the RPAR), those
4155 leaves are placed inside the parentheses. In effect, the chosen
4156 string is not necessarily being "wrapped" by parentheses. We can,
4157 however, count on the LPAR being placed directly before the chosen
4160 In other words, StringParenWrapper creates "atom" strings. These
4161 can then be split again by StringSplitter, if necessary.
4164 In the event that a string line split by StringParenWrapper is
4165 changed such that it no longer needs to be given its own line,
4166 StringParenWrapper relies on StringParenStripper to clean up the
4167 parentheses it created.
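# For illustration (hypothetical input): the transformation described above turns
#     return "some fairly long message that pushes this statement over the limit"
# into
#     return (
#         "some fairly long message that pushes this statement over the limit"
#     )
# after which StringSplitter and StringParenStripper may refine the result further.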
4170 def do_splitter_match(self, line: Line) -> TMatchResult:
4174 self._return_match(LL)
4175 or self._else_match(LL)
4176 or self._assert_match(LL)
4177 or self._assign_match(LL)
4178 or self._dict_match(LL)
4181 if string_idx is not None:
4182 string_value = line.leaves[string_idx].value
4183 # If the string has no spaces...
4184 if " " not in string_value:
4185 # And will still violate the line length limit when split...
4186 max_string_length = self.line_length - ((line.depth + 1) * 4)
4187 if len(string_value) > max_string_length:
4188 # And has no associated custom splits...
4189 if not self.has_custom_splits(string_value):
4190 # Then we should NOT put this string on its own line.
4192 "We do not wrap long strings in parentheses when the"
4193 " resultant line would still be over the specified line"
4194 " length and can't be split further by StringSplitter."
4196 return Ok(string_idx)
4198 return TErr("This line does not contain any non-atomic strings.")
4201 def _return_match(LL: List[Leaf]) -> Optional[int]:
4204 string_idx such that @LL[string_idx] is equal to our target (i.e.
4205 matched) string, if this line matches the return/yield statement
4206 requirements listed in the 'Requirements' section of this class's
4211 # If this line is part of a return/yield statement and the first leaf
4212 # contains either the "return" or "yield" keywords...
4213 if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
4215 ].value in ["return", "yield"]:
4216 is_valid_index = is_valid_index_factory(LL)
4218 idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
4219 # The next visible leaf MUST contain a string...
4220 if is_valid_index(idx) and LL[idx].type == token.STRING:
4226 def _else_match(LL: List[Leaf]) -> Optional[int]:
4229 string_idx such that @LL[string_idx] is equal to our target (i.e.
4230 matched) string, if this line matches the ternary expression
4231 requirements listed in the 'Requirements' section of this class's
4236 # If this line is part of a ternary expression and the first leaf
4237 # contains the "else" keyword...
4239 parent_type(LL[0]) == syms.test
4240 and LL[0].type == token.NAME
4241 and LL[0].value == "else"
4243 is_valid_index = is_valid_index_factory(LL)
4245 idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
4246 # The next visible leaf MUST contain a string...
4247 if is_valid_index(idx) and LL[idx].type == token.STRING:
4253 def _assert_match(LL: List[Leaf]) -> Optional[int]:
4256 string_idx such that @LL[string_idx] is equal to our target (i.e.
4257 matched) string, if this line matches the assert statement
4258 requirements listed in the 'Requirements' section of this class's
4263 # If this line is part of an assert statement and the first leaf
4264 # contains the "assert" keyword...
4265 if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert":
4266 is_valid_index = is_valid_index_factory(LL)
4268 for (i, leaf) in enumerate(LL):
4269 # We MUST find a comma...
4270 if leaf.type == token.COMMA:
4271 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
4273 # That comma MUST be followed by a string...
4274 if is_valid_index(idx) and LL[idx].type == token.STRING:
4277 # Skip the string trailer, if one exists.
4278 string_parser = StringParser()
4279 idx = string_parser.parse(LL, string_idx)
4281 # But no more leaves are allowed...
4282 if not is_valid_index(idx):
4288 def _assign_match(LL: List[Leaf]) -> Optional[int]:
4291 string_idx such that @LL[string_idx] is equal to our target (i.e.
4292 matched) string, if this line matches the assignment statement
4293 requirements listed in the 'Requirements' section of this class's
4298 # If this line is part of an expression statement or is a function
4299 # argument AND the first leaf contains a variable name...
4301 parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power]
4302 and LL[0].type == token.NAME
4304 is_valid_index = is_valid_index_factory(LL)
4306 for (i, leaf) in enumerate(LL):
4307 # We MUST find either an '=' or '+=' symbol...
4308 if leaf.type in [token.EQUAL, token.PLUSEQUAL]:
4309 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
4311 # That symbol MUST be followed by a string...
4312 if is_valid_index(idx) and LL[idx].type == token.STRING:
4315 # Skip the string trailer, if one exists.
4316 string_parser = StringParser()
4317 idx = string_parser.parse(LL, string_idx)
4319 # The next leaf MAY be a comma iff this line is part
4320 # of a function argument...
4322 parent_type(LL[0]) == syms.argument
4323 and is_valid_index(idx)
4324 and LL[idx].type == token.COMMA
4328 # But no more leaves are allowed...
4329 if not is_valid_index(idx):
4335 def _dict_match(LL: List[Leaf]) -> Optional[int]:
4338 string_idx such that @LL[string_idx] is equal to our target (i.e.
4339 matched) string, if this line matches the dictionary key assignment
4340 statement requirements listed in the 'Requirements' section of this
4345 # If this line is part of a dictionary key assignment...
4346 if syms.dictsetmaker in [parent_type(LL[0]), parent_type(LL[0].parent)]:
4347 is_valid_index = is_valid_index_factory(LL)
4349 for (i, leaf) in enumerate(LL):
4350 # We MUST find a colon...
4351 if leaf.type == token.COLON:
4352 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
4354 # That colon MUST be followed by a string...
4355 if is_valid_index(idx) and LL[idx].type == token.STRING:
4358 # Skip the string trailer, if one exists.
4359 string_parser = StringParser()
4360 idx = string_parser.parse(LL, string_idx)
4362 # That string MAY be followed by a comma...
4363 if is_valid_index(idx) and LL[idx].type == token.COMMA:
4366 # But no more leaves are allowed...
4367 if not is_valid_index(idx):
4372 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
4375 is_valid_index = is_valid_index_factory(LL)
4376 insert_str_child = insert_str_child_factory(LL[string_idx])
4379 ends_with_comma = False
4380 if LL[comma_idx].type == token.COMMA:
4381 ends_with_comma = True
4383 leaves_to_steal_comments_from = [LL[string_idx]]
4385 leaves_to_steal_comments_from.append(LL[comma_idx])
4388 first_line = line.clone()
4389 left_leaves = LL[:string_idx]
4391 # We have to remember to account for (possibly invisible) LPAR and RPAR
4392 # leaves that already wrapped the target string. If these leaves do
4393 # exist, we will replace them with our own LPAR and RPAR leaves.
4394 old_parens_exist = False
4395 if left_leaves and left_leaves[-1].type == token.LPAR:
4396 old_parens_exist = True
4397 leaves_to_steal_comments_from.append(left_leaves[-1])
4400 append_leaves(first_line, line, left_leaves)
4402 lpar_leaf = Leaf(token.LPAR, "(")
4403 if old_parens_exist:
4404 replace_child(LL[string_idx - 1], lpar_leaf)
4406 insert_str_child(lpar_leaf)
4407 first_line.append(lpar_leaf)
4409 # We throw inline comments that were originally to the right of the
4410 # target string to the top line. They will now be shown to the right of
4412 for leaf in leaves_to_steal_comments_from:
4413 for comment_leaf in line.comments_after(leaf):
4414 first_line.append(comment_leaf, preformatted=True)
4416 yield Ok(first_line)
4418 # --- Middle (String) Line
4419 # We only need to yield one (possibly too long) string line, since the
4420 # `StringSplitter` will break it down further if necessary.
4421 string_value = LL[string_idx].value
4424 depth=line.depth + 1,
4425 inside_brackets=True,
4426 should_split_rhs=line.should_split_rhs,
4427 magic_trailing_comma=line.magic_trailing_comma,
4429 string_leaf = Leaf(token.STRING, string_value)
4430 insert_str_child(string_leaf)
4431 string_line.append(string_leaf)
4433 old_rpar_leaf = None
4434 if is_valid_index(string_idx + 1):
4435 right_leaves = LL[string_idx + 1 :]
4439 if old_parens_exist:
4441 right_leaves and right_leaves[-1].type == token.RPAR
4442 ), "Apparently, old parentheses do NOT exist?!"
4443 old_rpar_leaf = right_leaves.pop()
4445 append_leaves(string_line, line, right_leaves)
4447 yield Ok(string_line)
4450 last_line = line.clone()
4451 last_line.bracket_tracker = first_line.bracket_tracker
4453 new_rpar_leaf = Leaf(token.RPAR, ")")
4454 if old_rpar_leaf is not None:
4455 replace_child(old_rpar_leaf, new_rpar_leaf)
4457 insert_str_child(new_rpar_leaf)
4458 last_line.append(new_rpar_leaf)
4460 # If the target string ended with a comma, we place this comma to the
4461 # right of the RPAR on the last line.
4463 comma_leaf = Leaf(token.COMMA, ",")
4464 replace_child(LL[comma_idx], comma_leaf)
4465 last_line.append(comma_leaf)
4472 A state machine that aids in parsing a string's "trailer", which can be
4473 either non-existent, an old-style formatting sequence (e.g. `% varX` or `%
4474 (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX,
4477 NOTE: A new StringParser object MUST be instantiated for each string
4478 trailer we need to parse.
4481 We shall assume that `line` equals the `Line` object that corresponds
4482 to the following line of python code:
4484 x = "Some {}.".format("String") + some_other_string
4487 Furthermore, we will assume that `string_idx` is some index such that:
4489 assert line.leaves[string_idx].value == "Some {}."
4492 The following code snippet then holds:
4494 string_parser = StringParser()
4495 idx = string_parser.parse(line.leaves, string_idx)
4496 assert line.leaves[idx].type == token.PLUS
4502 # String Parser States
4512 # Lookup Table for Next State
4513 _goto: Dict[Tuple[ParserState, NodeType], ParserState] = {
4514 # A string trailer may start with '.' OR '%'.
4515 (START, token.DOT): DOT,
4516 (START, token.PERCENT): PERCENT,
4517 (START, DEFAULT_TOKEN): DONE,
4518 # A '.' MUST be followed by an attribute or method name.
4519 (DOT, token.NAME): NAME,
4520 # A method name MUST be followed by an '(', whereas an attribute name
4521 # is the last symbol in the string trailer.
4522 (NAME, token.LPAR): LPAR,
4523 (NAME, DEFAULT_TOKEN): DONE,
4524 # A '%' symbol can be followed by an '(' or a single argument (e.g. a
4525 # string or variable name).
4526 (PERCENT, token.LPAR): LPAR,
4527 (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG,
4528 # If a '%' symbol is followed by a single argument, that argument is
4529 # the last leaf in the string trailer.
4530 (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE,
4531 # If present, a ')' symbol is the last symbol in a string trailer.
4532 # (NOTE: LPARS and nested RPARS are not included in this lookup table,
4533 # since they are treated as a special case by the parsing logic in this
4534 # class's implementation.)
4535 (RPAR, DEFAULT_TOKEN): DONE,
4538 def __init__(self) -> None:
4539 self._state = self.START
4540 self._unmatched_lpars = 0
4542 def parse(self, leaves: List[Leaf], string_idx: int) -> int:
4545 * @leaves[@string_idx].type == token.STRING
4548 The index directly after the last leaf which is a part of the string
4549 trailer, if a "trailer" exists.
4551 @string_idx + 1, if no string "trailer" exists.
4553 assert leaves[string_idx].type == token.STRING
4555 idx = string_idx + 1
4556 while idx < len(leaves) and self._next_state(leaves[idx]):
4560 def _next_state(self, leaf: Leaf) -> bool:
4563 * On the first call to this function, @leaf MUST be the leaf that
4564 was directly after the string leaf in question (e.g. if our target
4565 string is `line.leaves[i]` then the first call to this method must
4566 be made with `line.leaves[i + 1]`).
4567 * On the next call to this function, the leaf parameter passed in
4568 MUST be the leaf directly following @leaf.
4571 True iff @leaf is a part of the string's trailer.
4573 # We ignore empty LPAR or RPAR leaves.
4574 if is_empty_par(leaf):
4577 next_token = leaf.type
4578 if next_token == token.LPAR:
4579 self._unmatched_lpars += 1
4581 current_state = self._state
4583 # The LPAR parser state is a special case. We will return True until we
4584 # find the matching RPAR token.
4585 if current_state == self.LPAR:
4586 if next_token == token.RPAR:
4587 self._unmatched_lpars -= 1
4588 if self._unmatched_lpars == 0:
4589 self._state = self.RPAR
4590 # Otherwise, we use a lookup table to determine the next state.
4592 # If the lookup table matches the current state to the next
4593 # token, we use the lookup table.
4594 if (current_state, next_token) in self._goto:
4595 self._state = self._goto[current_state, next_token]
4597 # Otherwise, we check if the current state was assigned a
4599 if (current_state, self.DEFAULT_TOKEN) in self._goto:
4600 self._state = self._goto[current_state, self.DEFAULT_TOKEN]
4601 # If no default has been assigned, then this parser has a logic
4604 raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
4606 if self._state == self.DONE:
4612 def TErr(err_msg: str) -> Err[CannotTransform]:
4615 Convenience function used when working with the TResult type.
4617 cant_transform = CannotTransform(err_msg)
4618 return Err(cant_transform)
4621 def contains_pragma_comment(comment_list: List[Leaf]) -> bool:
4624 True iff one of the comments in @comment_list is a pragma used by one
4625 of the more common static analysis tools for python (e.g. mypy, flake8,
4628 for comment in comment_list:
4629 if comment.value.startswith(("# type:", "# noqa", "# pylint:")):
4635 def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]:
4637 Factory for a convenience function that is used to orphan @string_leaf
4638 and then insert multiple new leaves into the same part of the node
4639 structure that @string_leaf had originally occupied.
4642 Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N =
4643 string_leaf.parent`. Assume the node `N` has the following
4650 Leaf(STRING, '"foo"'),
4654 We then run the code snippet shown below.
4656 insert_str_child = insert_str_child_factory(string_leaf)
4658 lpar = Leaf(token.LPAR, '(')
4659 insert_str_child(lpar)
4661 bar = Leaf(token.STRING, '"bar"')
4662 insert_str_child(bar)
4664 rpar = Leaf(token.RPAR, ')')
4665 insert_str_child(rpar)
4668 After which point, it follows that `string_leaf.parent is None` and
4669 the node `N` now has the following structure:
4676 Leaf(STRING, '"bar"'),
4681 string_parent = string_leaf.parent
4682 string_child_idx = string_leaf.remove()
4684 def insert_str_child(child: LN) -> None:
4685 nonlocal string_child_idx
4687 assert string_parent is not None
4688 assert string_child_idx is not None
4690 string_parent.insert_child(string_child_idx, child)
4691 string_child_idx += 1
4693 return insert_str_child
4696 def has_triple_quotes(string: str) -> bool:
4699 True iff @string starts with three quotation characters.
4701 raw_string = string.lstrip(STRING_PREFIX_CHARS)
4702 return raw_string[:3] in {'"""', "'''"}
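
# Illustrative sketch (added in review, not part of the original module): the
# prefix is stripped before checking, so raw-/f-string prefixes do not hide
# triple quotes. The helper name below is hypothetical.
def _example_has_triple_quotes() -> None:
    assert has_triple_quotes('"""docstring"""')
    assert has_triple_quotes("rf'''raw f-string'''")
    assert not has_triple_quotes('"just one pair of quotes"')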
4705 def parent_type(node: Optional[LN]) -> Optional[NodeType]:
4708 @node.parent.type, if @node is not None and has a parent.
4712 if node is None or node.parent is None:
4715 return node.parent.type
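
# Illustrative sketch (hypothetical example leaves, not part of the original
# module): parent_type() is a None-safe shortcut for `node.parent.type`.
def _example_parent_type() -> None:
    orphan = Leaf(token.NAME, "x")
    assert parent_type(None) is None     # None-safe
    assert parent_type(orphan) is None   # no parent yet
    atom = Node(syms.atom, [orphan])     # attaching a parent changes the answer
    assert parent_type(orphan) == atom.type == syms.atom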
4718 def is_empty_par(leaf: Leaf) -> bool:
4719 return is_empty_lpar(leaf) or is_empty_rpar(leaf)
4722 def is_empty_lpar(leaf: Leaf) -> bool:
4723 return leaf.type == token.LPAR and leaf.value == ""
4726 def is_empty_rpar(leaf: Leaf) -> bool:
4727 return leaf.type == token.RPAR and leaf.value == ""
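
# Illustrative sketch (hypothetical leaves): "invisible" parentheses are
# LPAR/RPAR leaves whose value is the empty string; these helpers detect them.
def _example_is_empty_par() -> None:
    assert is_empty_par(Leaf(token.LPAR, ""))
    assert is_empty_par(Leaf(token.RPAR, ""))
    assert not is_empty_par(Leaf(token.LPAR, "("))
    assert not is_empty_par(Leaf(token.RPAR, ")"))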
4730 def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]:
4736 is_valid_index = is_valid_index_factory(my_list)
4738 assert is_valid_index(0)
4739 assert is_valid_index(2)
4741 assert not is_valid_index(3)
4742 assert not is_valid_index(-1)
4746 def is_valid_index(idx: int) -> bool:
4749 True iff @idx is non-negative AND seq[@idx] does NOT raise an
4752 return 0 <= idx < len(seq)
4754 return is_valid_index
4757 def line_to_string(line: Line) -> str:
4758 """Returns the string representation of @line.
4760 WARNING: This is known to be computationally expensive.
4762 return str(line).strip("\n")
4766 new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False
4769 Append leaves (taken from @old_line) to @new_line, making sure to fix the
4770 underlying Node structure where appropriate.
4772 All of the leaves in @leaves are duplicated. The duplicates are then
4773 appended to @new_line and used to replace their originals in the underlying
4774 Node structure. Any comments attached to the old leaves are reattached to
4778 set(@leaves) is a subset of set(@old_line.leaves).
4780 for old_leaf in leaves:
4781 new_leaf = Leaf(old_leaf.type, old_leaf.value)
4782 replace_child(old_leaf, new_leaf)
4783 new_line.append(new_leaf, preformatted=preformatted)
4785 for comment_leaf in old_line.comments_after(old_leaf):
4786 new_line.append(comment_leaf, preformatted=True)
4789 def replace_child(old_child: LN, new_child: LN) -> None:
4792 * If @old_child.parent is set, replace @old_child with @new_child in
4793 @old_child's underlying Node structure.
4795 * Otherwise, this function does nothing.
4797 parent = old_child.parent
4801 child_idx = old_child.remove()
4802 if child_idx is not None:
4803 parent.insert_child(child_idx, new_child)
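
# Illustrative sketch (hypothetical nodes, not part of the original module):
# swapping a visible RPAR for an invisible one inside a freshly built atom.
def _example_replace_child() -> None:
    lpar = Leaf(token.LPAR, "(")
    name = Leaf(token.NAME, "x")
    rpar = Leaf(token.RPAR, ")")
    atom = Node(syms.atom, [lpar, name, rpar])
    invisible_rpar = Leaf(token.RPAR, "")
    replace_child(rpar, invisible_rpar)
    assert atom.children[-1] is invisible_rpar  # new child took the old slot
    assert rpar.parent is None                  # old child was orphaned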
4806 def get_string_prefix(string: str) -> str:
4809 * assert_is_leaf_string(@string)
4812 @string's prefix (e.g. '', 'r', 'f', or 'rf').
4814 assert_is_leaf_string(string)
4818 while string[prefix_idx] in STRING_PREFIX_CHARS:
4819 prefix += string[prefix_idx].lower()
4825 def assert_is_leaf_string(string: str) -> None:
4827 Checks the pre-condition that @string has the format that you would expect
4828 of `leaf.value` where `leaf` is some Leaf such that `leaf.type ==
4829 token.STRING`. A more precise description of the pre-conditions that are
4830 checked is listed below.
4833 * @string starts with either ', ", <prefix>', or <prefix>" where
4834 `set(<prefix>)` is some subset of `set(STRING_PREFIX_CHARS)`.
4835 * @string ends with a quote character (' or ").
4838 AssertionError(...) if the pre-conditions listed above are not
4841 dquote_idx = string.find('"')
4842 squote_idx = string.find("'")
4843 if -1 in [dquote_idx, squote_idx]:
4844 quote_idx = max(dquote_idx, squote_idx)
4846 quote_idx = min(squote_idx, dquote_idx)
4849 0 <= quote_idx < len(string) - 1
4850 ), f"{string!r} is missing a starting quote character (' or \")."
4851 assert string[-1] in (
4854 ), f"{string!r} is missing an ending quote character (' or \")."
4855 assert set(string[:quote_idx]).issubset(
4856 set(STRING_PREFIX_CHARS)
4857 ), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}."
4860 def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]:
4861 """Split line into many lines, starting with the first matching bracket pair.
4863 Note: this usually looks weird; only use it for function definitions.
4864 Prefer RHS otherwise. This is why this function is not symmetrical with
4865 :func:`right_hand_split` which also handles optional parentheses.
4867 tail_leaves: List[Leaf] = []
4868 body_leaves: List[Leaf] = []
4869 head_leaves: List[Leaf] = []
4870 current_leaves = head_leaves
4871 matching_bracket: Optional[Leaf] = None
4872 for leaf in line.leaves:
4874 current_leaves is body_leaves
4875 and leaf.type in CLOSING_BRACKETS
4876 and leaf.opening_bracket is matching_bracket
4878 current_leaves = tail_leaves if body_leaves else head_leaves
4879 current_leaves.append(leaf)
4880 if current_leaves is head_leaves:
4881 if leaf.type in OPENING_BRACKETS:
4882 matching_bracket = leaf
4883 current_leaves = body_leaves
4884 if not matching_bracket:
4885 raise CannotSplit("No brackets found")
4887 head = bracket_split_build_line(head_leaves, line, matching_bracket)
4888 body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True)
4889 tail = bracket_split_build_line(tail_leaves, line, matching_bracket)
4890 bracket_split_succeeded_or_raise(head, body, tail)
4891 for result in (head, body, tail):
4896 def right_hand_split(
4899 features: Collection[Feature] = (),
4900 omit: Collection[LeafID] = (),
4901 ) -> Iterator[Line]:
4902 """Split line into many lines, starting with the last matching bracket pair.
4904 If the split was by optional parentheses, attempt splitting without them, too.
4905 `omit` is a collection of closing bracket IDs that shouldn't be considered for
4908 Note: running this function modifies `bracket_depth` on the leaves of `line`.
4910 tail_leaves: List[Leaf] = []
4911 body_leaves: List[Leaf] = []
4912 head_leaves: List[Leaf] = []
4913 current_leaves = tail_leaves
4914 opening_bracket: Optional[Leaf] = None
4915 closing_bracket: Optional[Leaf] = None
4916 for leaf in reversed(line.leaves):
4917 if current_leaves is body_leaves:
4918 if leaf is opening_bracket:
4919 current_leaves = head_leaves if body_leaves else tail_leaves
4920 current_leaves.append(leaf)
4921 if current_leaves is tail_leaves:
4922 if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
4923 opening_bracket = leaf.opening_bracket
4924 closing_bracket = leaf
4925 current_leaves = body_leaves
4926 if not (opening_bracket and closing_bracket and head_leaves):
4927 # If there is no opening or closing_bracket that means the split failed and
4928 # all content is in the tail. Otherwise, if `head_leaves` are empty, it means
4929 # the matching `opening_bracket` wasn't available on `line` anymore.
4930 raise CannotSplit("No brackets found")
4932 tail_leaves.reverse()
4933 body_leaves.reverse()
4934 head_leaves.reverse()
4935 head = bracket_split_build_line(head_leaves, line, opening_bracket)
4936 body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True)
4937 tail = bracket_split_build_line(tail_leaves, line, opening_bracket)
4938 bracket_split_succeeded_or_raise(head, body, tail)
4940 Feature.FORCE_OPTIONAL_PARENTHESES not in features
4941 # the opening bracket is an optional paren
4942 and opening_bracket.type == token.LPAR
4943 and not opening_bracket.value
4944 # the closing bracket is an optional paren
4945 and closing_bracket.type == token.RPAR
4946 and not closing_bracket.value
4947 # it's not an import (optional parens are the only thing we can split on
4948 # in this case; attempting a split without them is a waste of time)
4949 and not line.is_import
4950 # there are no standalone comments in the body
4951 and not body.contains_standalone_comments(0)
4952 # and we can actually remove the parens
4953 and can_omit_invisible_parens(body, line_length, omit_on_explode=omit)
4955 omit = {id(closing_bracket), *omit}
4957 yield from right_hand_split(line, line_length, features=features, omit=omit)
4963 or is_line_short_enough(body, line_length=line_length)
4966 "Splitting failed, body is still too long and can't be split."
4969 elif head.contains_multiline_strings() or tail.contains_multiline_strings():
4971 "The current optional pair of parentheses is bound to fail to"
4972 " satisfy the splitting algorithm because the head or the tail"
4973 " contains multiline strings which by definition never fit one"
4977 ensure_visible(opening_bracket)
4978 ensure_visible(closing_bracket)
4979 for result in (head, body, tail):
4984 def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
4985 """Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
4987 Do nothing otherwise.
4989 A left- or right-hand split is based on a pair of brackets. Content before
4990 (and including) the opening bracket is left on one line, content inside the
4991 brackets is put on a separate line, and finally content starting with and
4992 following the closing bracket is put on a separate line.
4994 Those are called `head`, `body`, and `tail`, respectively. If the split
4995 produced the same line (all content in `head`) or ended up with an empty `body`
4996 and the `tail` is just the closing bracket, then it's considered failed.
4998 tail_len = len(str(tail).strip())
5001 raise CannotSplit("Splitting brackets produced the same line")
5005 f"Splitting brackets on an empty body to save {tail_len} characters is"
5010 def bracket_split_build_line(
5011 leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False
5013 """Return a new line with given `leaves` and respective comments from `original`.
5015 If `is_body` is True, the result line is one-indented inside brackets and as such
5016 has its first leaf's prefix normalized and a trailing comma added when expected.
5018 result = Line(mode=original.mode, depth=original.depth)
5020 result.inside_brackets = True
5023 # Since body is a new indent level, remove spurious leading whitespace.
5024 normalize_prefix(leaves[0], inside_brackets=True)
5025 # Ensure a trailing comma for imports and standalone function arguments, but
5026 # be careful not to add one after any comments or within type annotations.
5029 and opening_bracket.value == "("
5030 and not any(leaf.type == token.COMMA for leaf in leaves)
5033 if original.is_import or no_commas:
5034 for i in range(len(leaves) - 1, -1, -1):
5035 if leaves[i].type == STANDALONE_COMMENT:
5038 if leaves[i].type != token.COMMA:
5039 new_comma = Leaf(token.COMMA, ",")
5040 leaves.insert(i + 1, new_comma)
5045 result.append(leaf, preformatted=True)
5046 for comment_after in original.comments_after(leaf):
5047 result.append(comment_after, preformatted=True)
5048 if is_body and should_split_line(result, opening_bracket):
5049 result.should_split_rhs = True
5053 def dont_increase_indentation(split_func: Transformer) -> Transformer:
5054 """Normalize prefix of the first leaf in every line returned by `split_func`.
5056 This is a decorator over relevant split functions.
5060 def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
5061 for line in split_func(line, features):
5062 normalize_prefix(line.leaves[0], inside_brackets=True)
5065 return split_wrapper
5068 @dont_increase_indentation
5069 def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
5070 """Split according to delimiters of the highest priority.
5072 If the appropriate Features are given, the split will add trailing commas
5073 also in function signatures and calls that contain `*` and `**`.
5076 last_leaf = line.leaves[-1]
5078 raise CannotSplit("Line empty")
5080 bt = line.bracket_tracker
5082 delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
5084 raise CannotSplit("No delimiters found")
5086 if delimiter_priority == DOT_PRIORITY:
5087 if bt.delimiter_count_with_priority(delimiter_priority) == 1:
5088 raise CannotSplit("Splitting a single attribute from its owner looks wrong")
5090 current_line = Line(
5091 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
5093 lowest_depth = sys.maxsize
5094 trailing_comma_safe = True
5096 def append_to_line(leaf: Leaf) -> Iterator[Line]:
5097 """Append `leaf` to current line or to new line if appending impossible."""
5098 nonlocal current_line
5100 current_line.append_safe(leaf, preformatted=True)
5104 current_line = Line(
5105 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
5107 current_line.append(leaf)
5109 for leaf in line.leaves:
5110 yield from append_to_line(leaf)
5112 for comment_after in line.comments_after(leaf):
5113 yield from append_to_line(comment_after)
5115 lowest_depth = min(lowest_depth, leaf.bracket_depth)
5116 if leaf.bracket_depth == lowest_depth:
5117 if is_vararg(leaf, within={syms.typedargslist}):
5118 trailing_comma_safe = (
5119 trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features
5121 elif is_vararg(leaf, within={syms.arglist, syms.argument}):
5122 trailing_comma_safe = (
5123 trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features
5126 leaf_priority = bt.delimiters.get(id(leaf))
5127 if leaf_priority == delimiter_priority:
5130 current_line = Line(
5131 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
5136 and delimiter_priority == COMMA_PRIORITY
5137 and current_line.leaves[-1].type != token.COMMA
5138 and current_line.leaves[-1].type != STANDALONE_COMMENT
5140 new_comma = Leaf(token.COMMA, ",")
5141 current_line.append(new_comma)
5145 @dont_increase_indentation
5146 def standalone_comment_split(
5147 line: Line, features: Collection[Feature] = ()
5148 ) -> Iterator[Line]:
5149 """Split standalone comments from the rest of the line."""
5150 if not line.contains_standalone_comments(0):
5151 raise CannotSplit("Line does not have any standalone comments")
5153 current_line = Line(
5154 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
5157 def append_to_line(leaf: Leaf) -> Iterator[Line]:
5158 """Append `leaf` to current line or to new line if appending impossible."""
5159 nonlocal current_line
5161 current_line.append_safe(leaf, preformatted=True)
5165 current_line = Line(
5166 line.mode, depth=line.depth, inside_brackets=line.inside_brackets
5168 current_line.append(leaf)
5170 for leaf in line.leaves:
5171 yield from append_to_line(leaf)
5173 for comment_after in line.comments_after(leaf):
5174 yield from append_to_line(comment_after)
5180 def is_import(leaf: Leaf) -> bool:
5181 """Return True if the given leaf starts an import statement."""
5188 (v == "import" and p and p.type == syms.import_name)
5189 or (v == "from" and p and p.type == syms.import_from)
5194 def is_type_comment(leaf: Leaf, suffix: str = "") -> bool:
5195 """Return True if the given leaf is a special comment.
5196 Only returns true for type comments for now."""
5199 return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix)
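
# Illustrative sketch (hypothetical comment leaves): the optional `suffix`
# narrows the match, e.g. " ignore" selects only `# type: ignore` comments.
def _example_is_type_comment() -> None:
    assert is_type_comment(Leaf(token.COMMENT, "# type: int"))
    assert is_type_comment(Leaf(token.COMMENT, "# type: ignore"), suffix=" ignore")
    assert not is_type_comment(Leaf(token.COMMENT, "# noqa: E501"))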
5202 def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
5203 """Leave existing extra newlines if not `inside_brackets`. Remove everything
5206 Note: don't use backslashes for formatting or you'll lose your voting rights.
5208 if not inside_brackets:
5209 spl = leaf.prefix.split("#")
5210 if "\\" not in spl[0]:
5211 nl_count = spl[-1].count("\n")
5214 leaf.prefix = "\n" * nl_count
5220 def normalize_string_prefix(leaf: Leaf, remove_u_prefix: bool = False) -> None:
5221 """Make all string prefixes lowercase.
5223 If remove_u_prefix is given, also removes any u prefix from the string.
5225 Note: Mutates its argument.
5227 match = re.match(r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", leaf.value, re.DOTALL)
5228 assert match is not None, f"failed to match string {leaf.value!r}"
5229 orig_prefix = match.group(1)
5230 new_prefix = orig_prefix.replace("F", "f").replace("B", "b").replace("U", "u")
5232 new_prefix = new_prefix.replace("u", "")
5233 leaf.value = f"{new_prefix}{match.group(2)}"
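
# Illustrative sketch (hypothetical leaves): prefixes are lower-cased in place,
# and the legacy u-prefix can be dropped on request.
def _example_normalize_string_prefix() -> None:
    leaf = Leaf(token.STRING, 'F"{x}"')
    normalize_string_prefix(leaf)
    assert leaf.value == 'f"{x}"'
    leaf = Leaf(token.STRING, 'u"text"')
    normalize_string_prefix(leaf, remove_u_prefix=True)
    assert leaf.value == '"text"'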
5236 def normalize_string_quotes(leaf: Leaf) -> None:
5237 """Prefer double quotes but only if it doesn't cause more escaping.
5239 Adds or removes backslashes as appropriate. Doesn't parse and fix
5240 strings nested in f-strings (yet).
5242 Note: Mutates its argument.
5244 value = leaf.value.lstrip(STRING_PREFIX_CHARS)
5245 if value[:3] == '"""':
5248 elif value[:3] == "'''":
5251 elif value[0] == '"':
5257 first_quote_pos = leaf.value.find(orig_quote)
5258 if first_quote_pos == -1:
5259 return # There's an internal error
5261 prefix = leaf.value[:first_quote_pos]
5262 unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
5263 escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
5264 escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}")
5265 body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)]
5266 if "r" in prefix.casefold():
5267 if unescaped_new_quote.search(body):
5268 # There's at least one unescaped new_quote in this raw string
5269 # so converting is impossible
5272 # Do not introduce or remove backslashes in raw strings
5275 # remove unnecessary escapes
5276 new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
5277 if body != new_body:
5278 # Consider the string without unnecessary escapes as the original
5280 leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}"
5281 new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
5282 new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)
5283 if "f" in prefix.casefold():
5284 matches = re.findall(
5286 (?:[^{]|^)\{ # start of the string or a non-{ followed by a single {
5287 ([^{].*?) # contents of the brackets, except if it begins with {{
5288 \}(?:[^}]|$) # A } followed by end of the string or a non-}
5295 # Do not introduce backslashes in interpolated expressions
5298 if new_quote == '"""' and new_body[-1:] == '"':
5300 new_body = new_body[:-1] + '\\"'
5301 orig_escape_count = body.count("\\")
5302 new_escape_count = new_body.count("\\")
5303 if new_escape_count > orig_escape_count:
5304 return # Do not introduce more escaping
5306 if new_escape_count == orig_escape_count and orig_quote == '"':
5307 return # Prefer double quotes
5309 leaf.value = f"{prefix}{new_quote}{new_body}{new_quote}"
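
# Illustrative sketch (hypothetical leaves): single quotes become double quotes
# unless the conversion would introduce more escaping.
def _example_normalize_string_quotes() -> None:
    leaf = Leaf(token.STRING, "'hello'")
    normalize_string_quotes(leaf)
    assert leaf.value == '"hello"'
    leaf = Leaf(token.STRING, "'it depends on \"context\"'")
    normalize_string_quotes(leaf)
    assert leaf.value == "'it depends on \"context\"'"  # unchanged: fewer escapes as-is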
5312 def normalize_numeric_literal(leaf: Leaf) -> None:
5313 """Normalizes numeric (float, int, and complex) literals.
5315 All letters used in the representation are normalized to lowercase (except
5316 in Python 2 long literals).
5318 text = leaf.value.lower()
5319 if text.startswith(("0o", "0b")):
5320 # Leave octal and binary literals alone.
5322 elif text.startswith("0x"):
5323 text = format_hex(text)
5325 text = format_scientific_notation(text)
5326 elif text.endswith(("j", "l")):
5327 text = format_long_or_complex_number(text)
5329 text = format_float_or_int_string(text)
5333 def format_hex(text: str) -> str:
5335 Formats a hexadecimal string like "0x12b3"
5337 Uses lowercase because of similarity between "B" and "8", which
5338 can cause security issues.
5339 see: https://github.com/psf/black/issues/1692
5342 before, after = text[:2], text[2:]
5343 return f"{before}{after.lower()}"
5346 def format_scientific_notation(text: str) -> str:
5347 """Formats a numeric string utilizing scentific notation"""
5348 before, after = text.split("e")
5350 if after.startswith("-"):
5353 elif after.startswith("+"):
5355 before = format_float_or_int_string(before)
5356 return f"{before}e{sign}{after}"
5359 def format_long_or_complex_number(text: str) -> str:
5360 """Formats a long or complex string like `10L` or `10j`"""
5363 # Capitalize in "2L" because "l" looks too similar to "1".
5366 return f"{format_float_or_int_string(number)}{suffix}"
5369 def format_float_or_int_string(text: str) -> str:
5370 """Formats a float string like "1.0"."""
5374 before, after = text.split(".")
5375 return f"{before or 0}.{after or 0}"
5378 def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
5379 """Make existing optional parentheses invisible or create new ones.
5381 `parens_after` is a set of string leaf values immediately after which parens
5384 Standardizes on visible parentheses for single-element tuples, and keeps
5385 existing visible parentheses for other tuples and generator expressions.
5387 for pc in list_comments(node.prefix, is_endmarker=False):
5388 if pc.value in FMT_OFF:
5389 # This `node` has a prefix with `# fmt: off`, don't mess with parens.
5392 for index, child in enumerate(list(node.children)):
5393 # Fixes a bug where invisible parens are not properly stripped from
5394 # assignment statements that contain type annotations.
5395 if isinstance(child, Node) and child.type == syms.annassign:
5396 normalize_invisible_parens(child, parens_after=parens_after)
5398 # Add parentheses around long tuple unpacking in assignments.
5401 and isinstance(child, Node)
5402 and child.type == syms.testlist_star_expr
5407 if child.type == syms.atom:
5408 if maybe_make_parens_invisible_in_atom(child, parent=node):
5409 wrap_in_parentheses(node, child, visible=False)
5410 elif is_one_tuple(child):
5411 wrap_in_parentheses(node, child, visible=True)
5412 elif node.type == syms.import_from:
5413 # "import from" nodes store parentheses directly as part of
5415 if child.type == token.LPAR:
5416 # make parentheses invisible
5417 child.value = "" # type: ignore
5418 node.children[-1].value = "" # type: ignore
5419 elif child.type != token.STAR:
5420 # insert invisible parentheses
5421 node.insert_child(index, Leaf(token.LPAR, ""))
5422 node.append_child(Leaf(token.RPAR, ""))
5425 elif not (isinstance(child, Leaf) and is_multiline_string(child)):
5426 wrap_in_parentheses(node, child, visible=False)
5428 check_lpar = isinstance(child, Leaf) and child.value in parens_after
5431 def normalize_fmt_off(node: Node) -> None:
5432 """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
5435 try_again = convert_one_fmt_off_pair(node)
5438 def convert_one_fmt_off_pair(node: Node) -> bool:
5439 """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
5441 Returns True if a pair was converted.
5443 for leaf in node.leaves():
5444 previous_consumed = 0
5445 for comment in list_comments(leaf.prefix, is_endmarker=False):
5446 if comment.value not in FMT_PASS:
5447 previous_consumed = comment.consumed
5449 # We only want standalone comments. If there's no previous leaf or
5450 # the previous leaf is indentation, it's a standalone comment in
5452 if comment.value in FMT_PASS and comment.type != STANDALONE_COMMENT:
5453 prev = preceding_leaf(leaf)
5455 if comment.value in FMT_OFF and prev.type not in WHITESPACE:
5457 if comment.value in FMT_SKIP and prev.type in WHITESPACE:
5460 ignored_nodes = list(generate_ignored_nodes(leaf, comment))
5461 if not ignored_nodes:
5464 first = ignored_nodes[0] # Can be a container node with the `leaf`.
5465 parent = first.parent
5466 prefix = first.prefix
5467 first.prefix = prefix[comment.consumed :]
5468 hidden_value = "".join(str(n) for n in ignored_nodes)
5469 if comment.value in FMT_OFF:
5470 hidden_value = comment.value + "\n" + hidden_value
5471 if comment.value in FMT_SKIP:
5472 hidden_value += " " + comment.value
5473 if hidden_value.endswith("\n"):
5474 # That happens when one of the `ignored_nodes` ended with a NEWLINE
5475 # leaf (possibly followed by a DEDENT).
5476 hidden_value = hidden_value[:-1]
5477 first_idx: Optional[int] = None
5478 for ignored in ignored_nodes:
5479 index = ignored.remove()
5480 if first_idx is None:
5482 assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
5483 assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
5484 parent.insert_child(
5489 prefix=prefix[:previous_consumed] + "\n" * comment.newlines,
5497 def generate_ignored_nodes(leaf: Leaf, comment: ProtoComment) -> Iterator[LN]:
5498 """Starting from the container of `leaf`, generate all leaves until `# fmt: on`.
5500 If comment is skip, returns leaf only.
5501 Stops at the end of the block.
5503 container: Optional[LN] = container_of(leaf)
5504 if comment.value in FMT_SKIP:
5505 prev_sibling = leaf.prev_sibling
5506 if comment.value in leaf.prefix and prev_sibling is not None:
5507 leaf.prefix = leaf.prefix.replace(comment.value, "")
5508 siblings = [prev_sibling]
5510 "\n" not in prev_sibling.prefix
5511 and prev_sibling.prev_sibling is not None
5513 prev_sibling = prev_sibling.prev_sibling
5514 siblings.insert(0, prev_sibling)
5515 for sibling in siblings:
5517 elif leaf.parent is not None:
5520 while container is not None and container.type != token.ENDMARKER:
5521 if is_fmt_on(container):
5524 # fix for fmt: on in children
5525 if contains_fmt_on_at_column(container, leaf.column):
5526 for child in container.children:
5527 if contains_fmt_on_at_column(child, leaf.column):
5532 container = container.next_sibling
5535 def is_fmt_on(container: LN) -> bool:
5536 """Determine whether formatting is switched on within a container.
5537 Determined by whether the last `# fmt:` comment is `on` or `off`.
5540 for comment in list_comments(container.prefix, is_endmarker=False):
5541 if comment.value in FMT_ON:
5543 elif comment.value in FMT_OFF:
5548 def contains_fmt_on_at_column(container: LN, column: int) -> bool:
5549 """Determine if children at a given column have formatting switched on."""
5550 for child in container.children:
5552 isinstance(child, Node)
5553 and first_leaf_column(child) == column
5554 or isinstance(child, Leaf)
5555 and child.column == column
5557 if is_fmt_on(child):
5563 def first_leaf_column(node: Node) -> Optional[int]:
5564 """Returns the column of the first leaf child of a node."""
5565 for child in node.children:
5566 if isinstance(child, Leaf):
5571 def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool:
5572 """If it's safe, make the parens in the atom `node` invisible, recursively.
5573 Additionally, remove repeated, adjacent invisible parens from the atom `node`
5574 as they are redundant.
5576 Returns whether the node should itself be wrapped in invisible parentheses.
5581 node.type != syms.atom
5582 or is_empty_tuple(node)
5583 or is_one_tuple(node)
5584 or (is_yield(node) and parent.type != syms.expr_stmt)
5585 or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
5589 if is_walrus_assignment(node):
5590 if parent.type in [syms.annassign, syms.expr_stmt]:
5593 first = node.children[0]
5594 last = node.children[-1]
5595 if first.type == token.LPAR and last.type == token.RPAR:
5596 middle = node.children[1]
5597 # make parentheses invisible
5598 first.value = "" # type: ignore
5599 last.value = "" # type: ignore
5600 maybe_make_parens_invisible_in_atom(middle, parent=parent)
5602 if is_atom_with_invisible_parens(middle):
5603 # Strip the invisible parens from `middle` by replacing
5604 # it with the child in-between the invisible parens
5605 middle.replace(middle.children[1])
5612 def is_atom_with_invisible_parens(node: LN) -> bool:
5613 """Given a `LN`, determines whether it's an atom `node` with invisible
5614 parens. Useful in deduplicating and normalizing parens.
5616 if isinstance(node, Leaf) or node.type != syms.atom:
5619 first, last = node.children[0], node.children[-1]
5621 isinstance(first, Leaf)
5622 and first.type == token.LPAR
5623 and first.value == ""
5624 and isinstance(last, Leaf)
5625 and last.type == token.RPAR
5626 and last.value == ""
5630 def is_empty_tuple(node: LN) -> bool:
5631 """Return True if `node` holds an empty tuple."""
5633 node.type == syms.atom
5634 and len(node.children) == 2
5635 and node.children[0].type == token.LPAR
5636 and node.children[1].type == token.RPAR
5640 def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]:
5641 """Returns `wrapped` if `node` is of the shape ( wrapped ).
5643 Parentheses can be optional. Returns None otherwise."""
5644 if len(node.children) != 3:
5647 lpar, wrapped, rpar = node.children
5648 if not (lpar.type == token.LPAR and rpar.type == token.RPAR):
5654 def first_child_is_arith(node: Node) -> bool:
5655 """Whether first child is an arithmetic or a binary arithmetic expression"""
5662 return bool(node.children and node.children[0].type in expr_types)
5665 def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None:
5666 """Wrap `child` in parentheses.
5668 This replaces `child` with an atom holding the parentheses and the old
5669 child. That requires moving the prefix.
5671 If `visible` is False, the leaves will be valueless (and thus invisible).
5673 lpar = Leaf(token.LPAR, "(" if visible else "")
5674 rpar = Leaf(token.RPAR, ")" if visible else "")
5675 prefix = child.prefix
5677 index = child.remove() or 0
5678 new_child = Node(syms.atom, [lpar, child, rpar])
5679 new_child.prefix = prefix
5680 parent.insert_child(index, new_child)
5683 def is_one_tuple(node: LN) -> bool:
5684 """Return True if `node` holds a tuple with one element, with or without parens."""
5685 if node.type == syms.atom:
5686 gexp = unwrap_singleton_parenthesis(node)
5687 if gexp is None or gexp.type != syms.testlist_gexp:
5690 return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA
5693 node.type in IMPLICIT_TUPLE
5694 and len(node.children) == 2
5695 and node.children[1].type == token.COMMA
5699 def is_walrus_assignment(node: LN) -> bool:
5700 """Return True iff `node` is of the shape ( test := test )"""
5701 inner = unwrap_singleton_parenthesis(node)
5702 return inner is not None and inner.type == syms.namedexpr_test
5705 def is_simple_decorator_trailer(node: LN, last: bool = False) -> bool:
5706 """Return True iff `node` is a trailer valid in a simple decorator"""
5707 return node.type == syms.trailer and (
5709 len(node.children) == 2
5710 and node.children[0].type == token.DOT
5711 and node.children[1].type == token.NAME
5713 # last trailer can be arguments
5716 and len(node.children) == 3
5717 and node.children[0].type == token.LPAR
5718 # and node.children[1].type == syms.argument
5719 and node.children[2].type == token.RPAR
5724 def is_simple_decorator_expression(node: LN) -> bool:
5725 """Return True iff `node` could be a 'dotted name' decorator
5727 This function takes the node of the 'namedexpr_test' of the new decorator
5728 grammar and tests if it would be valid under the old decorator grammar.
5730 The old grammar was: decorator: @ dotted_name [arguments] NEWLINE
5731 The new grammar is : decorator: @ namedexpr_test NEWLINE
5733 if node.type == token.NAME:
5735 if node.type == syms.power:
5738 node.children[0].type == token.NAME
5739 and all(map(is_simple_decorator_trailer, node.children[1:-1]))
5741 len(node.children) < 2
5742 or is_simple_decorator_trailer(node.children[-1], last=True)
5748 def is_yield(node: LN) -> bool:
5749 """Return True if `node` holds a `yield` or `yield from` expression."""
5750 if node.type == syms.yield_expr:
5753 if node.type == token.NAME and node.value == "yield": # type: ignore
5756 if node.type != syms.atom:
5759 if len(node.children) != 3:
5762 lpar, expr, rpar = node.children
5763 if lpar.type == token.LPAR and rpar.type == token.RPAR:
5764 return is_yield(expr)
5769 def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
5770 """Return True if `leaf` is a star or double star in a vararg or kwarg.
5772 If `within` includes VARARGS_PARENTS, this applies to function signatures.
5773 If `within` includes UNPACKING_PARENTS, it applies to right hand-side
5774 extended iterable unpacking (PEP 3132) and additional unpacking
5775 generalizations (PEP 448).
5777 if leaf.type not in VARARGS_SPECIALS or not leaf.parent:
5781 if p.type == syms.star_expr:
5782 # Star expressions are also used as assignment targets in extended
5783 # iterable unpacking (PEP 3132). See what its parent is instead.
5789 return p.type in within
5792 def is_multiline_string(leaf: Leaf) -> bool:
5793 """Return True if `leaf` is a multiline string that actually spans many lines."""
5794 return has_triple_quotes(leaf.value) and "\n" in leaf.value
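
# Illustrative sketch (hypothetical leaves): the string must both use triple
# quotes and actually contain a newline to count as multiline.
def _example_is_multiline_string() -> None:
    assert is_multiline_string(Leaf(token.STRING, '"""first\nsecond"""'))
    assert not is_multiline_string(Leaf(token.STRING, '"""single line"""'))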
5797 def is_stub_suite(node: Node) -> bool:
5798 """Return True if `node` is a suite with a stub body."""
5800 len(node.children) != 4
5801 or node.children[0].type != token.NEWLINE
5802 or node.children[1].type != token.INDENT
5803 or node.children[3].type != token.DEDENT
5807 return is_stub_body(node.children[2])
5810 def is_stub_body(node: LN) -> bool:
5811 """Return True if `node` is a simple statement containing an ellipsis."""
5812 if not isinstance(node, Node) or node.type != syms.simple_stmt:
5815 if len(node.children) != 2:
5818 child = node.children[0]
5820 child.type == syms.atom
5821 and len(child.children) == 3
5822 and all(leaf == Leaf(token.DOT, ".") for leaf in child.children)
5826 def max_delimiter_priority_in_atom(node: LN) -> Priority:
5827 """Return maximum delimiter priority inside `node`.
5829 This is specific to atoms with contents contained in a pair of parentheses.
5830 If `node` isn't an atom or there are no enclosing parentheses, returns 0.
5832 if node.type != syms.atom:
5835 first = node.children[0]
5836 last = node.children[-1]
5837 if not (first.type == token.LPAR and last.type == token.RPAR):
5840 bt = BracketTracker()
5841 for c in node.children[1:-1]:
5842 if isinstance(c, Leaf):
5845 for leaf in c.leaves():
5848 return bt.max_delimiter_priority()
5854 def ensure_visible(leaf: Leaf) -> None:
5855 """Make sure parentheses are visible.
5857 They could be invisible as part of some statements (see
5858 :func:`normalize_invisible_parens` and :func:`visit_import_from`).
5860 if leaf.type == token.LPAR:
5862 elif leaf.type == token.RPAR:
5866 def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
5867 """Should `line` be immediately split with `delimiter_split()` after RHS?"""
5869 if not (opening_bracket.parent and opening_bracket.value in "[{("):
5872 # We're essentially checking if the body is delimited by commas and there's more
5873 # than one of them (we exclude the trailing comma; if the delimiter priority
5874 # is still commas, that means there's more).
5876 trailing_comma = False
5878 last_leaf = line.leaves[-1]
5879 if last_leaf.type == token.COMMA:
5880 trailing_comma = True
5881 exclude.add(id(last_leaf))
5882 max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
5883 except (IndexError, ValueError):
5886 return max_priority == COMMA_PRIORITY and (
5887 (line.mode.magic_trailing_comma and trailing_comma)
5888 # always explode imports
5889 or opening_bracket.parent.type in {syms.atom, syms.import_from}
5893 def is_one_tuple_between(opening: Leaf, closing: Leaf, leaves: List[Leaf]) -> bool:
5894 """Return True if content between `opening` and `closing` looks like a one-tuple."""
5895 if opening.type != token.LPAR and closing.type != token.RPAR:
5898 depth = closing.bracket_depth + 1
5899 for _opening_index, leaf in enumerate(leaves):
5904 raise LookupError("Opening paren not found in `leaves`")
5908 for leaf in leaves[_opening_index:]:
5912 bracket_depth = leaf.bracket_depth
5913 if bracket_depth == depth and leaf.type == token.COMMA:
5915 if leaf.parent and leaf.parent.type in {
5925 def get_features_used(node: Node) -> Set[Feature]:
5926 """Return a set of (relatively) new Python features used in this file.
5928 Currently looking for:
5930 - underscores in numeric literals;
5931 - trailing commas after * or ** in function signatures and calls;
5932 - positional-only arguments in function signatures and lambdas;
5933 - assignment expressions;
5934 - relaxed decorator syntax;
5936 features: Set[Feature] = set()
5937 for n in node.pre_order():
5938 if n.type == token.STRING:
5939 value_head = n.value[:2] # type: ignore
5940 if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}:
5941 features.add(Feature.F_STRINGS)
5943 elif n.type == token.NUMBER:
5944 if "_" in n.value: # type: ignore
5945 features.add(Feature.NUMERIC_UNDERSCORES)
5947 elif n.type == token.SLASH:
5948 if n.parent and n.parent.type in {syms.typedargslist, syms.arglist}:
5949 features.add(Feature.POS_ONLY_ARGUMENTS)
5951 elif n.type == token.COLONEQUAL:
5952 features.add(Feature.ASSIGNMENT_EXPRESSIONS)
5954 elif n.type == syms.decorator:
5955 if len(n.children) > 1 and not is_simple_decorator_expression(
5958 features.add(Feature.RELAXED_DECORATORS)
5961 n.type in {syms.typedargslist, syms.arglist}
5963 and n.children[-1].type == token.COMMA
5965 if n.type == syms.typedargslist:
5966 feature = Feature.TRAILING_COMMA_IN_DEF
5968 feature = Feature.TRAILING_COMMA_IN_CALL
5970 for ch in n.children:
5971 if ch.type in STARS:
5972 features.add(feature)
5974 if ch.type == syms.argument:
5975 for argch in ch.children:
5976 if argch.type in STARS:
5977 features.add(feature)
5982 def detect_target_versions(node: Node) -> Set[TargetVersion]:
5983 """Detect the version to target based on the nodes used."""
5984 features = get_features_used(node)
5986 version for version in TargetVersion if features <= VERSION_TO_FEATURES[version]
5990 def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
5991 """Generate sets of closing bracket IDs that should be omitted in a RHS.
5993 Brackets can be omitted if the entire trailer up to and including
5994 a preceding closing bracket fits in one line.
5996 Yielded sets are cumulative (contain results of previous yields, too). First
5997 set is empty, unless the line should explode, in which case bracket pairs until
5998 the one that needs to explode are omitted.
6001 omit: Set[LeafID] = set()
6002 if not line.magic_trailing_comma:
6005 length = 4 * line.depth
6006 opening_bracket: Optional[Leaf] = None
6007 closing_bracket: Optional[Leaf] = None
6008 inner_brackets: Set[LeafID] = set()
6009 for index, leaf, leaf_length in enumerate_with_length(line, reversed=True):
6010 length += leaf_length
6011 if length > line_length:
6014 has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
6015 if leaf.type == STANDALONE_COMMENT or has_inline_comment:
6019 if leaf is opening_bracket:
6020 opening_bracket = None
6021 elif leaf.type in CLOSING_BRACKETS:
6022 prev = line.leaves[index - 1] if index > 0 else None
6025 and prev.type == token.COMMA
6026 and not is_one_tuple_between(
6027 leaf.opening_bracket, leaf, line.leaves
6030 # Never omit bracket pairs with trailing commas.
6031 # We need to explode on those.
6034 inner_brackets.add(id(leaf))
6035 elif leaf.type in CLOSING_BRACKETS:
6036 prev = line.leaves[index - 1] if index > 0 else None
6037 if prev and prev.type in OPENING_BRACKETS:
6038 # Empty brackets would fail a split, so treat them as "inner"
6039 # brackets (e.g. only add them to the `omit` set if another
6040 # pair of brackets was good enough).
6041 inner_brackets.add(id(leaf))
6045 omit.add(id(closing_bracket))
6046 omit.update(inner_brackets)
6047 inner_brackets.clear()
6052 and prev.type == token.COMMA
6053 and not is_one_tuple_between(leaf.opening_bracket, leaf, line.leaves)
6055 # Never omit bracket pairs with trailing commas.
6056 # We need to explode on those.
6060 opening_bracket = leaf.opening_bracket
6061 closing_bracket = leaf
6064 def get_future_imports(node: Node) -> Set[str]:
6065 """Return a set of __future__ imports in the file."""
6066 imports: Set[str] = set()
6068 def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
6069 for child in children:
6070 if isinstance(child, Leaf):
6071 if child.type == token.NAME:
6074 elif child.type == syms.import_as_name:
6075 orig_name = child.children[0]
6076 assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports"
6077 assert orig_name.type == token.NAME, "Invalid syntax parsing imports"
6078 yield orig_name.value
6080 elif child.type == syms.import_as_names:
6081 yield from get_imports_from_children(child.children)
6084 raise AssertionError("Invalid syntax parsing imports")
6086 for child in node.children:
6087 if child.type != syms.simple_stmt:
6090 first_child = child.children[0]
6091 if isinstance(first_child, Leaf):
6092 # Continue looking if we see a docstring; otherwise stop.
6094 len(child.children) == 2
6095 and first_child.type == token.STRING
6096 and child.children[1].type == token.NEWLINE
6102 elif first_child.type == syms.import_from:
6103 module_name = first_child.children[1]
6104 if not isinstance(module_name, Leaf) or module_name.value != "__future__":
6107 imports |= set(get_imports_from_children(first_child.children[3:]))
6115 def get_gitignore(root: Path) -> PathSpec:
6116 """ Return a PathSpec matching gitignore content if present."""
6117 gitignore = root / ".gitignore"
6118 lines: List[str] = []
6119 if gitignore.is_file():
6120 with gitignore.open() as gf:
6121 lines = gf.readlines()
6122 return PathSpec.from_lines("gitwildmatch", lines)
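
# Illustrative sketch (hypothetical temporary directory; the behaviour of the
# `pathspec` "gitwildmatch" patterns is assumed here): a missing .gitignore
# yields an empty PathSpec that matches nothing.
def _example_get_gitignore() -> None:
    import tempfile  # local import, only needed for this sketch

    with tempfile.TemporaryDirectory() as tmp:
        assert not get_gitignore(Path(tmp)).match_file("some/file.py")
        (Path(tmp) / ".gitignore").write_text("*.pyc\nbuild/\n")
        spec = get_gitignore(Path(tmp))
        assert spec.match_file("pkg/module.pyc")
        assert spec.match_file("build/output.txt")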
6125 def normalize_path_maybe_ignore(
6126 path: Path, root: Path, report: "Report"
6128 """Normalize `path`. May return `None` if `path` was ignored.
6130 `report` is where "path ignored" output goes.
6133 abspath = path if path.is_absolute() else Path.cwd() / path
6134 normalized_path = abspath.resolve().relative_to(root).as_posix()
6135 except OSError as e:
6136 report.path_ignored(path, f"cannot be read because {e}")
6140 if path.is_symlink():
6141 report.path_ignored(path, f"is a symbolic link that points outside {root}")
6146 return normalized_path
6149 def path_is_excluded(
6150 normalized_path: str,
6151 pattern: Optional[Pattern[str]],
6153 match = pattern.search(normalized_path) if pattern else None
6154 return bool(match and match.group(0))
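
# Illustrative sketch (hypothetical paths and regex): a missing pattern never
# excludes, and only a non-empty match counts as an exclusion.
def _example_path_is_excluded() -> None:
    exclude = re.compile(r"/(\.venv|build)/")
    assert path_is_excluded("/project/build/gen.py", exclude)
    assert not path_is_excluded("/project/src/app.py", exclude)
    assert not path_is_excluded("/project/src/app.py", None)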
6157 def gen_python_files(
6158 paths: Iterable[Path],
6160 include: Optional[Pattern[str]],
6161 exclude: Pattern[str],
6162 extend_exclude: Optional[Pattern[str]],
6163 force_exclude: Optional[Pattern[str]],
6165 gitignore: PathSpec,
6166 ) -> Iterator[Path]:
6167 """Generate all files under `path` whose paths are not excluded by the
6168 `exclude_regex`, `extend_exclude`, or `force_exclude` regexes,
6169 but are included by the `include` regex.
6171 Symbolic links pointing outside of the `root` directory are ignored.
6173 `report` is where output about exclusions goes.
6175 assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
6177 normalized_path = normalize_path_maybe_ignore(child, root, report)
6178 if normalized_path is None:
6181 # First ignore files matching .gitignore
6182 if gitignore.match_file(normalized_path):
6183 report.path_ignored(child, "matches the .gitignore file content")
6186 # Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options.
6187 normalized_path = "/" + normalized_path
6189 normalized_path += "/"
6191 if path_is_excluded(normalized_path, exclude):
6192 report.path_ignored(child, "matches the --exclude regular expression")
6195 if path_is_excluded(normalized_path, extend_exclude):
6196 report.path_ignored(
6197 child, "matches the --extend-exclude regular expression"
6201 if path_is_excluded(normalized_path, force_exclude):
6202 report.path_ignored(child, "matches the --force-exclude regular expression")
6206 yield from gen_python_files(
6217 elif child.is_file():
6218 include_match = include.search(normalized_path) if include else True
6224 def find_project_root(srcs: Tuple[str, ...]) -> Path:
6225 """Return a directory containing .git, .hg, or pyproject.toml.
6227 That directory will be a common parent of all files and directories
6230 If no directory in the tree contains a marker that would specify it's the
6231 project root, the root of the file system is returned.
6234 return Path("/").resolve()
6236 path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs]
6238 # A list of lists of parents for each 'src'. 'src' is included as a
6239 # "parent" of itself if it is a directory
6241 list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs
6245 set.intersection(*(set(parents) for parents in src_parents)),
6246 key=lambda path: path.parts,
6249 for directory in (common_base, *common_base.parents):
6250 if (directory / ".git").exists():
6253 if (directory / ".hg").is_dir():
6256 if (directory / "pyproject.toml").is_file():
6263 def find_user_pyproject_toml() -> Path:
6264 r"""Return the path to the top-level user configuration for black.
6266 This looks for ~\.black on Windows and ~/.config/black on Linux and other
6269 if sys.platform == "win32":
6271 user_config_path = Path.home() / ".black"
6273 config_root = os.environ.get("XDG_CONFIG_HOME", "~/.config")
6274 user_config_path = Path(config_root).expanduser() / "black"
6275 return user_config_path.resolve()
6280 """Provides a reformatting counter. Can be rendered with `str(report)`."""
6285 verbose: bool = False
6286 change_count: int = 0
6288 failure_count: int = 0
6290 def done(self, src: Path, changed: Changed) -> None:
6291 """Increment the counter for successful reformatting. Write out a message."""
6292 if changed is Changed.YES:
6293 reformatted = "would reformat" if self.check or self.diff else "reformatted"
6294 if self.verbose or not self.quiet:
6295 out(f"{reformatted} {src}")
6296 self.change_count += 1
6299 if changed is Changed.NO:
6300 msg = f"{src} already well formatted, good job."
6302 msg = f"{src} wasn't modified on disk since last run."
6303 out(msg, bold=False)
6304 self.same_count += 1
6306 def failed(self, src: Path, message: str) -> None:
6307 """Increment the counter for failed reformatting. Write out a message."""
6308 err(f"error: cannot format {src}: {message}")
6309 self.failure_count += 1
6311 def path_ignored(self, path: Path, message: str) -> None:
6313 out(f"{path} ignored: {message}", bold=False)
6316 def return_code(self) -> int:
6317 """Return the exit code that the app should use.
6319 This considers the current state of changed files and failures:
6320 - if there were any failures, return 123;
6321 - if any files were changed and --check is being used, return 1;
6322 - otherwise return 0.
6324 # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with
6325 # 126 we have special return codes reserved by the shell.
6326 if self.failure_count:
6329 elif self.change_count and self.check:
6334 def __str__(self) -> str:
6335 """Render a color report of the current state.
6337 Use `click.unstyle` to remove colors.
6339 if self.check or self.diff:
6340 reformatted = "would be reformatted"
6341 unchanged = "would be left unchanged"
6342 failed = "would fail to reformat"
6344 reformatted = "reformatted"
6345 unchanged = "left unchanged"
6346 failed = "failed to reformat"
6348 if self.change_count:
6349 s = "s" if self.change_count > 1 else ""
6351 click.style(f"{self.change_count} file{s} {reformatted}", bold=True)
6354 s = "s" if self.same_count > 1 else ""
6355 report.append(f"{self.same_count} file{s} {unchanged}")
6356 if self.failure_count:
6357 s = "s" if self.failure_count > 1 else ""
6359 click.style(f"{self.failure_count} file{s} {failed}", fg="red")
6361 return ", ".join(report) + "."
6364 def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]:
6365 filename = "<unknown>"
6366 if sys.version_info >= (3, 8):
6367 # TODO: support Python 4+ ;)
6368 for minor_version in range(sys.version_info[1], 4, -1):
6370 return ast.parse(src, filename, feature_version=(3, minor_version))
6374 for feature_version in (7, 6):
6376 return ast3.parse(src, filename, feature_version=feature_version)
6379 if ast27.__name__ == "ast":
6381 "The requested source code has invalid Python 3 syntax.\n"
6382 "If you are trying to format Python 2 files please reinstall Black"
6383 " with the 'python2' extra: `python3 -m pip install black[python2]`."
6385 return ast27.parse(src)
6388 def _fixup_ast_constants(
6389 node: Union[ast.AST, ast3.AST, ast27.AST]
6390 ) -> Union[ast.AST, ast3.AST, ast27.AST]:
6391 """Map ast nodes deprecated in 3.8 to Constant."""
6392 if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)):
6393 return ast.Constant(value=node.s)
6395 if isinstance(node, (ast.Num, ast3.Num, ast27.Num)):
6396 return ast.Constant(value=node.n)
6398 if isinstance(node, (ast.NameConstant, ast3.NameConstant)):
6399 return ast.Constant(value=node.value)
def _stringify_ast(
    node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0
) -> Iterator[str]:
    """Simple visitor generating strings to compare ASTs by content."""

    node = _fixup_ast_constants(node)

    yield f"{' ' * depth}{node.__class__.__name__}("

    for field in sorted(node._fields):  # noqa: F402
        # TypeIgnore has only one field 'lineno' which breaks this comparison
        type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore)
        if sys.version_info >= (3, 8):
            type_ignore_classes += (ast.TypeIgnore,)
        if isinstance(node, type_ignore_classes):
            break

        try:
            value = getattr(node, field)
        except AttributeError:
            continue

        yield f"{' ' * (depth+1)}{field}="

        if isinstance(value, list):
            for item in value:
                # Ignore nested tuples within del statements, because we may insert
                # parentheses and they change the AST.
                if (
                    field == "targets"
                    and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete))
                    and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple))
                ):
                    for item in item.elts:
                        yield from _stringify_ast(item, depth + 2)

                elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)):
                    yield from _stringify_ast(item, depth + 2)

        elif isinstance(value, (ast.AST, ast3.AST, ast27.AST)):
            yield from _stringify_ast(value, depth + 2)

        else:
            # Constant strings may be indented across newlines, if they are
            # docstrings; fold spaces after newlines when comparing. Similarly,
            # trailing and leading space may be removed.
            if (
                isinstance(node, ast.Constant)
                and field == "value"
                and isinstance(value, str)
            ):
                normalized = re.sub(r" *\n[ \t]*", "\n", value).strip()
            else:
                normalized = value

            yield f"{' ' * (depth+2)}{normalized!r},  # {value.__class__.__name__}"

    yield f"{' ' * depth})  # /{node.__class__.__name__}"


def assert_equivalent(src: str, dst: str) -> None:
    """Raise AssertionError if `src` and `dst` aren't equivalent."""
    try:
        src_ast = parse_ast(src)
    except Exception as exc:
        raise AssertionError(
            "cannot use --safe with this file; failed to parse source file. AST"
            f" error message: {exc}"
        )

    try:
        dst_ast = parse_ast(dst)
    except Exception as exc:
        log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
        raise AssertionError(
            f"INTERNAL ERROR: Black produced invalid code: {exc}. Please report a bug"
            " on https://github.com/psf/black/issues. This invalid output might be"
            f" helpful: {log}"
        ) from None

    src_ast_str = "\n".join(_stringify_ast(src_ast))
    dst_ast_str = "\n".join(_stringify_ast(dst_ast))
    if src_ast_str != dst_ast_str:
        log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
        raise AssertionError(
            "INTERNAL ERROR: Black produced code that is not equivalent to the"
            " source. Please report a bug on https://github.com/psf/black/issues. "
            f" This diff might be helpful: {log}"
        ) from None


def assert_stable(src: str, dst: str, mode: Mode) -> None:
    """Raise AssertionError if `dst` reformats differently the second time."""
    newdst = format_str(dst, mode=mode)
    if dst != newdst:
        log = dump_to_file(
            str(mode),
            diff(src, dst, "source", "first pass"),
            diff(dst, newdst, "first pass", "second pass"),
        )
        raise AssertionError(
            "INTERNAL ERROR: Black produced different code on the second pass of the"
            " formatter. Please report a bug on https://github.com/psf/black/issues."
            f" This diff might be helpful: {log}"
        ) from None


@mypyc_attr(patchable=True)
def dump_to_file(*output: str, ensure_final_newline: bool = True) -> str:
    """Dump `output` to a temporary file. Return path to the file."""
    with tempfile.NamedTemporaryFile(
        mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
    ) as f:
        for lines in output:
            f.write(lines)
            if ensure_final_newline and lines and lines[-1] != "\n":
                f.write("\n")
    return f.name


@contextmanager
def nullcontext() -> Iterator[None]:
    """Return an empty context manager.

    To be used like `nullcontext` in Python 3.7.
    """
    yield


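# Example (illustrative): handy when a `with` statement is required syntactically
# but there is nothing to manage, e.g. taking a lock only when one exists
# (`lock` below is a hypothetical threading.Lock or None):
#
#     with lock if lock is not None else nullcontext():
#         ...
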
def diff(a: str, b: str, a_name: str, b_name: str) -> str:
    """Return a unified diff string between strings `a` and `b`."""
    import difflib

    a_lines = [line for line in a.splitlines(keepends=True)]
    b_lines = [line for line in b.splitlines(keepends=True)]
    diff_lines = []
    for line in difflib.unified_diff(
        a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5
    ):
        # Work around https://bugs.python.org/issue2142
        # See https://www.gnu.org/software/diffutils/manual/html_node/Incomplete-Lines.html
        if line[-1] == "\n":
            diff_lines.append(line)
        else:
            diff_lines.append(line + "\n")
            diff_lines.append("\\ No newline at end of file\n")
    return "".join(diff_lines)


def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None:
    """asyncio signal handler that cancels all `tasks` and reports to stderr."""
    err("Aborted!")
    for task in tasks:
        task.cancel()


def shutdown(loop: asyncio.AbstractEventLoop) -> None:
    """Cancel all pending tasks on `loop`, wait for them, and close the loop."""
    try:
        if sys.version_info[:2] >= (3, 7):
            all_tasks = asyncio.all_tasks
        else:
            all_tasks = asyncio.Task.all_tasks
        # This part is borrowed from asyncio/runners.py in Python 3.7b2.
        to_cancel = [task for task in all_tasks(loop) if not task.done()]
        if not to_cancel:
            return

        for task in to_cancel:
            task.cancel()
        loop.run_until_complete(
            asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)
        )
    finally:
        # `concurrent.futures.Future` objects cannot be cancelled once they
        # are already running. There might be some when the `shutdown()` happened.
        # Silence their logger's spew about the event loop being closed.
        cf_logger = logging.getLogger("concurrent.futures")
        cf_logger.setLevel(logging.CRITICAL)
        loop.close()


def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
    """Replace `regex` with `replacement` twice on `original`.

    This is used by string normalization to perform replaces on
    overlapping matches.
    """
    return regex.sub(replacement, regex.sub(replacement, original))


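# Example (illustrative): a single `re.sub` pass misses overlapping matches,
# so the substitution is applied twice:
#
#     sub_twice(re.compile("aa"), "a", "aaaa")  # -> "a" (re.sub alone yields "aa")
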
def re_compile_maybe_verbose(regex: str) -> Pattern[str]:
    """Compile a regular expression string in `regex`.

    If it contains newlines, use verbose mode.
    """
    if "\n" in regex:
        regex = "(?x)" + regex
    compiled: Pattern[str] = re.compile(regex)
    return compiled


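# Example (illustrative): a multi-line --include/--exclude pattern is compiled
# in verbose mode, so the embedded whitespace is ignored:
#
#     pattern = re_compile_maybe_verbose("\\.pyi?$\n|\\.ipynb$")
#     bool(pattern.search("nb.ipynb"))  # -> True
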
def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
    """Like `reversed(enumerate(sequence))` if that were possible."""
    index = len(sequence) - 1
    for element in reversed(sequence):
        yield (index, element)
        index -= 1


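# Example (illustrative):
#
#     list(enumerate_reversed("abc"))  # -> [(2, 'c'), (1, 'b'), (0, 'a')]
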
def enumerate_with_length(
    line: Line, reversed: bool = False
) -> Iterator[Tuple[Index, Leaf, int]]:
    """Return an enumeration of leaves with their length.

    Stops prematurely on multiline strings and standalone comments.
    """
    op = cast(
        Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
        enumerate_reversed if reversed else enumerate,
    )
    for index, leaf in op(line.leaves):
        length = len(leaf.prefix) + len(leaf.value)
        if "\n" in leaf.value:
            return  # Multiline strings, we can't continue.

        for comment in line.comments_after(leaf):
            length += len(comment.value)

        yield index, leaf, length


def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool:
    """Return True if `line` is no longer than `line_length`.

    Uses the provided `line_str` rendering, if any, otherwise computes a new one.
    """
    if not line_str:
        line_str = line_to_string(line)
    return (
        len(line_str) <= line_length
        and "\n" not in line_str  # multiline strings
        and not line.contains_standalone_comments()
    )


def can_be_split(line: Line) -> bool:
    """Return False if the line cannot be split *for sure*.

    This is not an exhaustive search but a cheap heuristic that we can use to
    avoid some unfortunate formattings (mostly around wrapping unsplittable code
    in unnecessary parentheses).
    """
    leaves = line.leaves
    if len(leaves) < 2:
        return False

    if leaves[0].type == token.STRING and leaves[1].type == token.DOT:
        call_count = 0
        dot_count = 0
        next = leaves[-1]
        for leaf in leaves[-2::-1]:
            if leaf.type in OPENING_BRACKETS:
                if next.type not in CLOSING_BRACKETS:
                    return False
                call_count += 1
            elif leaf.type == token.DOT:
                dot_count += 1
            elif leaf.type == token.NAME:
                if not (next.type == token.DOT or next.type in OPENING_BRACKETS):
                    return False
            elif leaf.type not in CLOSING_BRACKETS:
                return False
            if dot_count > 1 and call_count > 1:
                return False
            next = leaf

    return True


def can_omit_invisible_parens(
    line: Line,
    line_length: int,
    omit_on_explode: Collection[LeafID] = (),
) -> bool:
    """Does `line` have a shape safe to reformat without optional parens around it?

    Returns True for only a subset of potentially nice looking formattings but
    the point is to not return false positives that end up producing lines that
    are too long.
    """
    bt = line.bracket_tracker
    if not bt.delimiters:
        # Without delimiters the optional parentheses are useless.
        return True

    max_priority = bt.max_delimiter_priority()
    if bt.delimiter_count_with_priority(max_priority) > 1:
        # With more than one delimiter of a kind the optional parentheses read better.
        return False

    if max_priority == DOT_PRIORITY:
        # A single stranded method call doesn't require optional parentheses.
        return True

    assert len(line.leaves) >= 2, "Stranded delimiter"

    # With a single delimiter, omit if the expression starts or ends with
    # a bracket.
    first = line.leaves[0]
    second = line.leaves[1]
    if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:
        if _can_omit_opening_paren(line, first=first, line_length=line_length):
            return True

        # Note: we are not returning False here because a line might have *both*
        # a leading opening bracket and a trailing closing bracket. If the
        # opening bracket doesn't match our rule, maybe the closing will.

    penultimate = line.leaves[-2]
    last = line.leaves[-1]
    if line.magic_trailing_comma:
        try:
            penultimate, last = last_two_except(line.leaves, omit=omit_on_explode)
        except LookupError:
            # Turns out we'd omit everything. We cannot skip the optional parentheses.
            return False

    if (
        last.type == token.RPAR
        or last.type == token.RBRACE
        or (
            # don't use indexing for omitting optional parentheses;
            # it looks weird
            last.type == token.RSQB
            and last.parent
            and last.parent.type != syms.trailer
        )
    ):
        if penultimate.type in OPENING_BRACKETS:
            # Empty brackets don't help.
            return False

        if is_multiline_string(first):
            # Additional wrapping of a multiline string in this situation is
            # almost always a pessimization.
            return False

        if line.magic_trailing_comma and penultimate.type == token.COMMA:
            # The rightmost non-omitted bracket pair is the one we want to explode on.
            return True

        if _can_omit_closing_paren(line, last=last, line_length=line_length):
            return True

    return False


def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool:
    """See `can_omit_invisible_parens`."""
    remainder = False
    length = 4 * line.depth
    _index = -1
    for _index, leaf, leaf_length in enumerate_with_length(line):
        if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:
            remainder = True
        if remainder:
            length += leaf_length
            if length > line_length:
                break

            if leaf.type in OPENING_BRACKETS:
                # There are brackets we can further split on.
                remainder = False

    else:
        # checked the entire string and line length wasn't exceeded
        if len(line.leaves) == _index + 1:
            return True

    return False


def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool:
    """See `can_omit_invisible_parens`."""
    length = 4 * line.depth
    seen_other_brackets = False
    for _index, leaf, leaf_length in enumerate_with_length(line):
        length += leaf_length
        if leaf is last.opening_bracket:
            if seen_other_brackets or length <= line_length:
                return True

        elif leaf.type in OPENING_BRACKETS:
            # There are brackets we can further split on.
            seen_other_brackets = True

    return False


def last_two_except(leaves: List[Leaf], omit: Collection[LeafID]) -> Tuple[Leaf, Leaf]:
    """Return (penultimate, last) leaves skipping brackets in `omit` and contents."""
    stop_after = None
    last = None
    for leaf in reversed(leaves):
        if stop_after:
            if leaf is stop_after:
                stop_after = None
            continue

        if last:
            return leaf, last

        if id(leaf) in omit:
            stop_after = leaf.opening_bracket
        else:
            last = leaf
    else:
        raise LookupError("Last two leaves were also skipped")


def run_transformer(
    line: Line,
    transform: Transformer,
    mode: Mode,
    features: Collection[Feature],
    *,
    line_str: str = "",
) -> List[Line]:
    if not line_str:
        line_str = line_to_string(line)
    result: List[Line] = []
    for transformed_line in transform(line, features):
        if str(transformed_line).strip("\n") == line_str:
            raise CannotTransform("Line transformer returned an unchanged result")

        result.extend(transform_line(transformed_line, mode=mode, features=features))

    if not (
        transform.__name__ == "rhs"
        and line.bracket_tracker.invisible
        and not any(bracket.value for bracket in line.bracket_tracker.invisible)
        and not line.contains_multiline_strings()
        and not result[0].contains_uncollapsable_type_comments()
        and not result[0].contains_unsplittable_type_ignore()
        and not is_line_short_enough(result[0], line_length=mode.line_length)
    ):
        return result

    line_copy = line.clone()
    append_leaves(line_copy, line, line.leaves)
    features_fop = set(features) | {Feature.FORCE_OPTIONAL_PARENTHESES}
    second_opinion = run_transformer(
        line_copy, transform, mode, features_fop, line_str=line_str
    )
    if all(
        is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion
    ):
        result = second_opinion
    return result


def get_cache_file(mode: Mode) -> Path:
    return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"


def read_cache(mode: Mode) -> Cache:
    """Read the cache if it exists and is well formed.

    If it is not well formed, the call to write_cache later should resolve the issue.
    """
    cache_file = get_cache_file(mode)
    if not cache_file.exists():
        return {}

    with cache_file.open("rb") as fobj:
        try:
            cache: Cache = pickle.load(fobj)
        except (pickle.UnpicklingError, ValueError):
            return {}

    return cache


def get_cache_info(path: Path) -> CacheInfo:
    """Return the information used to check if a file is already formatted or not."""
    stat = path.stat()
    return stat.st_mtime, stat.st_size


def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
    """Split an iterable of paths in `sources` into two sets.

    The first contains paths of files that were modified on disk or are not in
    the cache. The other contains paths to non-modified files.
    """
    todo, done = set(), set()
    for src in sources:
        res_src = src.resolve()
        if cache.get(str(res_src)) != get_cache_info(res_src):
            todo.add(src)
        else:
            done.add(src)
    return todo, done


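# Example (illustrative, assuming both files exist on disk): cache entries are
# keyed on the resolved path and compared as (mtime, size) pairs, so a file
# counts as "done" only if neither has changed since the cache was written:
#
#     cache = {str(Path("a.py").resolve()): get_cache_info(Path("a.py"))}
#     todo, done = filter_cached(cache, [Path("a.py"), Path("b.py")])
#     # -> todo == {Path("b.py")}, done == {Path("a.py")}
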
def write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None:
    """Update the cache file."""
    cache_file = get_cache_file(mode)
    try:
        CACHE_DIR.mkdir(parents=True, exist_ok=True)
        new_cache = {
            **cache,
            **{str(src.resolve()): get_cache_info(src) for src in sources},
        }
        with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:
            pickle.dump(new_cache, f, protocol=4)
        os.replace(f.name, cache_file)
    except OSError:
        pass


def patch_click() -> None:
    """Make Click not crash.

    On certain misconfigured environments, Python 3 selects the ASCII encoding as the
    default which restricts paths that it can access during the lifetime of the
    application. Click refuses to work in this scenario by raising a RuntimeError.

    In case of Black the likelihood that non-ASCII characters are going to be used in
    file paths is minimal since it's Python source code. Moreover, this crash was
    spurious on Python 3.7 thanks to PEP 538 and PEP 540.
    """
    try:
        from click import core
        from click import _unicodefun  # type: ignore
    except ModuleNotFoundError:
        return

    for module in (core, _unicodefun):
        if hasattr(module, "_verify_python3_env"):
            module._verify_python3_env = lambda: None  # type: ignore


def patched_main() -> None:
    freeze_support()
    patch_click()
    main()


def is_docstring(leaf: Leaf) -> bool:
    if not is_multiline_string(leaf):
        # For the purposes of docstring re-indentation, we don't need to do anything
        # with single-line docstrings.
        return False

    if prev_siblings_are(
        leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt]
    ):
        return True

    # Multiline docstring on the same line as the `def`.
    if prev_siblings_are(leaf.parent, [syms.parameters, token.COLON, syms.simple_stmt]):
        # `syms.parameters` is only used in funcdefs and async_funcdefs in the Python
        # grammar. We're safe to return True without further checks.
        return True

    return False


def lines_with_leading_tabs_expanded(s: str) -> List[str]:
    """
    Splits string into lines and expands only leading tabs (following the normal
    Python rules)
    """
    lines = []
    for line in s.splitlines():
        # Find the index of the first non-whitespace character after a string of
        # whitespace that includes at least one tab
        match = re.match(r"\s*\t+\s*(\S)", line)
        if match:
            first_non_whitespace_idx = match.start(1)

            lines.append(
                line[:first_non_whitespace_idx].expandtabs()
                + line[first_non_whitespace_idx:]
            )
        else:
            lines.append(line)
    return lines


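# Example (illustrative): only the leading whitespace run is expanded, a tab
# appearing after the first non-whitespace character is preserved:
#
#     lines_with_leading_tabs_expanded("\tfoo\tbar\n    baz")
#     # -> ["        foo\tbar", "    baz"]
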
def fix_docstring(docstring: str, prefix: str) -> str:
    # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
    if not docstring:
        return docstring
    lines = lines_with_leading_tabs_expanded(docstring)
    # Determine minimum indentation (first line doesn't count):
    indent = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        last_line_idx = len(lines) - 2
        for i, line in enumerate(lines[1:]):
            stripped_line = line[indent:].rstrip()
            if stripped_line or i == last_line_idx:
                trimmed.append(prefix + stripped_line)
            else:
                trimmed.append("")
    return "\n".join(trimmed)


if __name__ == "__main__":
    patched_main()