import asyncio
from abc import ABC, abstractmethod
from collections import defaultdict
from concurrent.futures import Executor, ThreadPoolExecutor, ProcessPoolExecutor
from contextlib import contextmanager
from datetime import datetime
from enum import Enum
from functools import lru_cache, partial, wraps
import io
import itertools
from multiprocessing import Manager, freeze_support
import os
from pathlib import Path
import regex as re
import signal
import sys
import tokenize
import traceback
from typing import (
    Any, Callable, Collection, Dict, Generic, Iterable, Iterator, List,
    Optional, Pattern, Set, Sized, Tuple, TypeVar, Union, TYPE_CHECKING,
)

from mypy_extensions import mypyc_attr
from appdirs import user_cache_dir
from dataclasses import dataclass, field, replace
import click
import toml

try:
    from typed_ast import ast3, ast27
except ImportError:
    if sys.version_info < (3, 8):
        print(
            "The typed_ast package is not installed.\n"
            "You can install it with `python3 -m pip install typed-ast`.",
            file=sys.stderr,
        )
        sys.exit(1)

from pathspec import PathSpec

# lib2to3 fork
from blib2to3.pytree import Node, Leaf, type_repr
from blib2to3 import pygram, pytree
from blib2to3.pgen2 import driver, token
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pgen2.parse import ParseError

from _black_version import version as __version__

if sys.version_info < (3, 8):
    from typing_extensions import Final
else:
    from typing import Final

if TYPE_CHECKING:
    import colorama  # noqa: F401

DEFAULT_LINE_LENGTH = 88
DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|\.svn|_build|buck-out|build|dist)/"  # noqa: B950
DEFAULT_INCLUDES = r"\.pyi?$"
CACHE_DIR = Path(user_cache_dir("black", version=__version__))
STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__"

STRING_PREFIX_CHARS: Final = "furbFURB"  # All possible string prefix characters.

# types
T = TypeVar("T")
FileContent = str
Encoding = str
NewLine = str
Depth = int
NodeType = int
LeafID = int
Priority = int
Index = int
LN = Union[Leaf, Node]
Transformer = Callable[["Line", Collection["Feature"]], Iterator["Line"]]
Timestamp = float
FileSize = int
CacheInfo = Tuple[Timestamp, FileSize]
Cache = Dict[str, CacheInfo]
out = partial(click.secho, bold=True, err=True)
err = partial(click.secho, fg="red", err=True)

pygram.initialize(CACHE_DIR)
syms = pygram.python_symbols


class NothingChanged(UserWarning):
    """Raised when reformatted code is the same as source."""


class CannotTransform(Exception):
    """Base class for errors raised by Transformers."""


class CannotSplit(CannotTransform):
    """A readable split that fits the allotted line length is impossible."""


class InvalidInput(ValueError):
    """Raised when input source code fails all parse attempts."""


class BracketMatchError(KeyError):
    """Raised when an opening bracket is unable to be matched to a closing bracket."""


E = TypeVar("E", bound=Exception)


class Ok(Generic[T]):
    def __init__(self, value: T) -> None:
        self._value = value

    def ok(self) -> T:
        return self._value


class Err(Generic[E]):
    def __init__(self, e: E) -> None:
        self._e = e

    def err(self) -> E:
        return self._e


# The 'Result' return type is used to implement an error-handling model heavily
# influenced by that used by the Rust programming language
# (see https://doc.rust-lang.org/book/ch09-00-error-handling.html).
Result = Union[Ok[T], Err[E]]
TResult = Result[T, CannotTransform]  # (T)ransform Result
TMatchResult = TResult[Index]
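# Illustrative sketch, not part of the original module: a function returning
# TMatchResult might look like the hypothetical helper below, with callers
# branching on Ok/Err instead of catching exceptions.
#
#     def first_comma_index(line: "Line") -> TMatchResult:
#         for i, leaf in enumerate(line.leaves):
#             if leaf.type == token.COMMA:
#                 return Ok(i)
#         return Err(CannotTransform("no comma found on this line"))
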
class WriteBack(Enum):
    NO = 0
    YES = 1
    DIFF = 2
    CHECK = 3
    COLOR_DIFF = 4

    @classmethod
    def from_configuration(
        cls, *, check: bool, diff: bool, color: bool = False
    ) -> "WriteBack":
        if check and not diff:
            return cls.CHECK

        if diff and color:
            return cls.COLOR_DIFF

        return cls.DIFF if diff else cls.YES


class Changed(Enum):
    NO = 0
    CACHED = 1
    YES = 2


class TargetVersion(Enum):
    PY27 = 2
    PY33 = 3
    PY34 = 4
    PY35 = 5
    PY36 = 6
    PY37 = 7
    PY38 = 8
    PY39 = 9

    def is_python2(self) -> bool:
        return self is TargetVersion.PY27


class Feature(Enum):
    # All string literals are unicode
    UNICODE_LITERALS = 1
    F_STRINGS = 2
    NUMERIC_UNDERSCORES = 3
    TRAILING_COMMA_IN_CALL = 4
    TRAILING_COMMA_IN_DEF = 5
    # The following two feature-flags are mutually exclusive, and exactly one should be
    # set for every version of python.
    ASYNC_IDENTIFIERS = 6
    ASYNC_KEYWORDS = 7
    ASSIGNMENT_EXPRESSIONS = 8
    POS_ONLY_ARGUMENTS = 9
    RELAXED_DECORATORS = 10
    FORCE_OPTIONAL_PARENTHESES = 50


VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
    TargetVersion.PY27: {Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY33: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY34: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY35: {
        Feature.UNICODE_LITERALS,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.ASYNC_IDENTIFIERS,
    },
    TargetVersion.PY36: {
        Feature.UNICODE_LITERALS,
        Feature.F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_IDENTIFIERS,
    },
    TargetVersion.PY37: {
        Feature.UNICODE_LITERALS,
        Feature.F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
    },
    TargetVersion.PY38: {
        Feature.UNICODE_LITERALS,
        Feature.F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.POS_ONLY_ARGUMENTS,
    },
    TargetVersion.PY39: {
        Feature.UNICODE_LITERALS,
        Feature.F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
    },
}


@dataclass
class Mode:
    target_versions: Set[TargetVersion] = field(default_factory=set)
    line_length: int = DEFAULT_LINE_LENGTH
    string_normalization: bool = True
    magic_trailing_comma: bool = True
    experimental_string_processing: bool = False
    is_pyi: bool = False

    def get_cache_key(self) -> str:
        if self.target_versions:
            version_str = ",".join(
                str(version.value)
                for version in sorted(self.target_versions, key=lambda v: v.value)
            )
        else:
            version_str = "-"
        parts = [
            version_str,
            str(self.line_length),
            str(int(self.string_normalization)),
            str(int(self.is_pyi)),
        ]
        return ".".join(parts)


# Legacy name, left for integrations.
FileMode = Mode


def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
    return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)
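# For example (derived from the VERSION_TO_FEATURES table above):
# supports_feature({TargetVersion.PY36, TargetVersion.PY37}, Feature.NUMERIC_UNDERSCORES)
# is True because every listed version has the feature, while adding
# TargetVersion.PY27 to the set would make it False.
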
def find_pyproject_toml(path_search_start: Iterable[str]) -> Optional[str]:
    """Find the absolute filepath to a pyproject.toml if it exists"""
    path_project_root = find_project_root(path_search_start)
    path_pyproject_toml = path_project_root / "pyproject.toml"
    if path_pyproject_toml.is_file():
        return str(path_pyproject_toml)

    path_user_pyproject_toml = find_user_pyproject_toml()
    return str(path_user_pyproject_toml) if path_user_pyproject_toml.is_file() else None


def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
    """Parse a pyproject.toml file, pulling out relevant parts for Black.

    If parsing fails, will raise a toml.TomlDecodeError.
    """
    pyproject_toml = toml.load(path_config)
    config = pyproject_toml.get("tool", {}).get("black", {})
    return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
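# For instance, a pyproject.toml containing
#
#     [tool.black]
#     line-length = 100
#     target-version = ["py37"]
#
# comes back as {"line_length": 100, "target_version": ["py37"]}: the key
# mangling above turns CLI-style dashes into the snake_case names Click expects.
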
def read_pyproject_toml(
    ctx: click.Context, param: click.Parameter, value: Optional[str]
) -> Optional[str]:
    """Inject Black configuration from "pyproject.toml" into defaults in `ctx`.

    Returns the path to a successfully found and read configuration file, None
    otherwise.
    """
    if not value:
        value = find_pyproject_toml(ctx.params.get("src", ()))
        if value is None:
            return None

    try:
        config = parse_pyproject_toml(value)
    except (toml.TomlDecodeError, OSError) as e:
        raise click.FileError(
            filename=value, hint=f"Error reading configuration file: {e}"
        ) from None

    if not config:
        return None
    else:
        # Sanitize the values to be Click friendly. For more information please see:
        # https://github.com/psf/black/issues/1458
        # https://github.com/pallets/click/issues/1567
        config = {
            k: str(v) if not isinstance(v, (list, dict)) else v
            for k, v in config.items()
        }

    target_version = config.get("target_version")
    if target_version is not None and not isinstance(target_version, list):
        raise click.BadOptionUsage(
            "target-version", "Config key target-version must be a list"
        )

    default_map: Dict[str, Any] = {}
    if ctx.default_map:
        default_map.update(ctx.default_map)
    default_map.update(config)

    ctx.default_map = default_map
    return value


def target_version_option_callback(
    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
) -> List[TargetVersion]:
    """Compute the target versions from a --target-version flag.

    This is its own function because mypy couldn't infer the type correctly
    when it was a lambda, causing mypyc trouble.
    """
    return [TargetVersion[val.upper()] for val in v]


def validate_regex(
    ctx: click.Context,
    param: click.Parameter,
    value: Optional[str],
) -> Optional[Pattern]:
    try:
        return re_compile_maybe_verbose(value) if value is not None else None
    except re.error:
        raise click.BadParameter("Not a valid regular expression")


@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
@click.option("-c", "--code", type=str, help="Format the code passed in as a string.")
@click.option(
    "-l", "--line-length", type=int,
    default=DEFAULT_LINE_LENGTH,
    help="How many characters per line to allow.",
    show_default=True,
)
@click.option(
    "-t", "--target-version",
    type=click.Choice([v.name.lower() for v in TargetVersion]),
    callback=target_version_option_callback,
    multiple=True,
    help=(
        "Python versions that should be supported by Black's output. [default: per-file"
        " auto-detection]"
    ),
)
@click.option(
    "--pyi", is_flag=True,
    help=(
        "Format all input files like typing stubs regardless of file extension (useful"
        " when piping source on standard input)."
    ),
)
@click.option(
    "-S",
    "--skip-string-normalization",
    is_flag=True,
    help="Don't normalize string quotes or prefixes.",
)
@click.option(
    "-C",
    "--skip-magic-trailing-comma",
    is_flag=True,
    help="Don't use trailing commas as a reason to split lines.",
)
@click.option(
    "--experimental-string-processing",
    is_flag=True, hidden=True,
    help=(
        "Experimental option that performs more normalization on string literals."
        " Currently disabled because it leads to some crashes."
    ),
)
@click.option(
    "--check", is_flag=True,
    help=(
        "Don't write the files back, just return the status. Return code 0 means"
        " nothing would change. Return code 1 means some files would be reformatted."
        " Return code 123 means there was an internal error."
    ),
)
@click.option(
    "--diff", is_flag=True,
    help="Don't write the files back, just output a diff for each file on stdout.",
)
@click.option(
    "--color/--no-color", is_flag=True,
    help="Show colored diff. Only applies when `--diff` is given.",
)
@click.option(
    "--fast/--safe", is_flag=True,
    help="If --fast given, skip temporary sanity checks. [default: --safe]",
)
@click.option(
    "--include", type=str,
    default=DEFAULT_INCLUDES,
    callback=validate_regex,
    help=(
        "A regular expression that matches files and directories that should be"
        " included on recursive searches. An empty value means all files are included"
        " regardless of the name. Use forward slashes for directories on all platforms"
        " (Windows, too). Exclusions are calculated first, inclusions later."
    ),
    show_default=True,
)
@click.option(
    "--exclude", type=str,
    default=DEFAULT_EXCLUDES,
    callback=validate_regex,
    help=(
        "A regular expression that matches files and directories that should be"
        " excluded on recursive searches. An empty value means no paths are excluded."
        " Use forward slashes for directories on all platforms (Windows, too)."
        " Exclusions are calculated first, inclusions later."
    ),
    show_default=True,
)
@click.option(
    "--extend-exclude", type=str,
    callback=validate_regex,
    help=(
        "Like --exclude, but adds additional files and directories on top of the"
        " excluded ones. (Useful if you simply want to add to the default)"
    ),
)
@click.option(
    "--force-exclude", type=str,
    callback=validate_regex,
    help=(
        "Like --exclude, but files and directories matching this regex will be"
        " excluded even when they are passed explicitly as arguments."
    ),
)
@click.option(
    "--stdin-filename", type=str,
    help=(
        "The name of the file when passing it through stdin. Useful to make sure"
        " Black will respect --force-exclude option on some editors that rely on"
        " using stdin."
    ),
)
@click.option(
    "-q", "--quiet", is_flag=True,
    help=(
        "Don't emit non-error messages to stderr. Errors are still emitted; silence"
        " those with 2>/dev/null."
    ),
)
@click.option(
    "-v", "--verbose", is_flag=True,
    help=(
        "Also emit messages to stderr about files that were not changed or were ignored"
        " due to exclusion patterns."
    ),
)
@click.version_option(version=__version__)
@click.argument(
    "src", nargs=-1,
    type=click.Path(
        exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
    ),
    is_eager=True,
)
@click.option(
    "--config",
    type=click.Path(
        exists=True, file_okay=True, dir_okay=False, readable=True, allow_dash=False,
        path_type=str,
    ),
    is_eager=True,
    callback=read_pyproject_toml,
    help="Read configuration from FILE path.",
)
@click.pass_context
def main(
    ctx: click.Context,
    code: Optional[str],
    line_length: int,
    target_version: List[TargetVersion],
    check: bool,
    diff: bool,
    color: bool,
    fast: bool,
    pyi: bool,
    skip_string_normalization: bool,
    skip_magic_trailing_comma: bool,
    experimental_string_processing: bool,
    quiet: bool,
    verbose: bool,
    include: Pattern,
    exclude: Pattern,
    extend_exclude: Optional[Pattern],
    force_exclude: Optional[Pattern],
    stdin_filename: Optional[str],
    src: Tuple[str, ...],
    config: Optional[str],
) -> None:
    """The uncompromising code formatter."""
    write_back = WriteBack.from_configuration(check=check, diff=diff, color=color)
    if target_version:
        versions = set(target_version)
    else:
        # We'll autodetect later.
        versions = set()
    mode = Mode(
        target_versions=versions,
        line_length=line_length,
        is_pyi=pyi,
        string_normalization=not skip_string_normalization,
        magic_trailing_comma=not skip_magic_trailing_comma,
        experimental_string_processing=experimental_string_processing,
    )
    if config and verbose:
        out(f"Using configuration from {config}.", bold=False, fg="blue")
    if code is not None:
        print(format_str(code, mode=mode))
        ctx.exit(0)
    report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose)
    sources = get_sources(
        ctx=ctx,
        src=src,
        quiet=quiet,
        verbose=verbose,
        include=include,
        exclude=exclude,
        extend_exclude=extend_exclude,
        force_exclude=force_exclude,
        report=report,
        stdin_filename=stdin_filename,
    )

    path_empty(
        sources,
        "No Python files are present to be formatted. Nothing to do 😴",
        quiet,
        verbose,
        ctx,
    )

    if len(sources) == 1:
        reformat_one(
            src=sources.pop(),
            fast=fast,
            write_back=write_back,
            mode=mode,
            report=report,
        )
    else:
        reformat_many(
            sources=sources, fast=fast, write_back=write_back, mode=mode, report=report
        )

    if verbose or not quiet:
        out("Oh no! 💥 💔 💥" if report.return_code else "All done! ✨ 🍰 ✨")
        click.secho(str(report), err=True)
    ctx.exit(report.return_code)


def get_sources(
    *,
    ctx: click.Context,
    src: Tuple[str, ...],
    quiet: bool,
    verbose: bool,
    include: Pattern[str],
    exclude: Pattern[str],
    extend_exclude: Optional[Pattern[str]],
    force_exclude: Optional[Pattern[str]],
    report: "Report",
    stdin_filename: Optional[str],
) -> Set[Path]:
    """Compute the set of files to be formatted."""
    root = find_project_root(src)
    sources: Set[Path] = set()
    path_empty(src, "No Path provided. Nothing to do 😴", quiet, verbose, ctx)
    gitignore = get_gitignore(root)

    for s in src:
        if s == "-" and stdin_filename:
            p = Path(stdin_filename)
            is_stdin = True
        else:
            p = Path(s)
            is_stdin = False

        if is_stdin or p.is_file():
            normalized_path = normalize_path_maybe_ignore(p, root, report)
            if normalized_path is None:
                continue

            normalized_path = "/" + normalized_path
            # Hard-exclude any files that match the `--force-exclude` regex.
            if force_exclude:
                force_exclude_match = force_exclude.search(normalized_path)
            else:
                force_exclude_match = None
            if force_exclude_match and force_exclude_match.group(0):
                report.path_ignored(p, "matches the --force-exclude regular expression")
                continue

            if is_stdin:
                p = Path(f"{STDIN_PLACEHOLDER}{str(p)}")

            sources.add(p)
        elif p.is_dir():
            sources.update(
                gen_python_files(
                    p.iterdir(),
                    root,
                    include,
                    exclude,
                    extend_exclude,
                    force_exclude,
                    report,
                    gitignore,
                )
            )
        elif s == "-":
            sources.add(p)
        else:
            err(f"invalid path: {s}")
    return sources


def path_empty(
    src: Sized, msg: str, quiet: bool, verbose: bool, ctx: click.Context
) -> None:
    """
    Exit if there is no `src` provided for formatting.
    """
    if not src and (verbose or not quiet):
        out(msg)
        ctx.exit(0)


def reformat_one(
    src: Path, fast: bool, write_back: WriteBack, mode: Mode, report: "Report"
) -> None:
    """Reformat a single file under `src` without spawning child processes.

    `fast`, `write_back`, and `mode` options are passed to
    :func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
    """
    try:
        changed = Changed.NO

        if str(src) == "-":
            is_stdin = True
        elif str(src).startswith(STDIN_PLACEHOLDER):
            is_stdin = True
            # Use the original name again in case we want to print something
            # to the user.
            src = Path(str(src)[len(STDIN_PLACEHOLDER) :])
        else:
            is_stdin = False

        if is_stdin:
            if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode):
                changed = Changed.YES
        else:
            cache: Cache = {}
            if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
                cache = read_cache(mode)
                res_src = src.resolve()
                res_src_s = str(res_src)
                if res_src_s in cache and cache[res_src_s] == get_cache_info(res_src):
                    changed = Changed.CACHED
            if changed is not Changed.CACHED and format_file_in_place(
                src, fast=fast, write_back=write_back, mode=mode
            ):
                changed = Changed.YES
            if (write_back is WriteBack.YES and changed is not Changed.CACHED) or (
                write_back is WriteBack.CHECK and changed is Changed.NO
            ):
                write_cache(cache, [src], mode)
        report.done(src, changed)
    except Exception as exc:
        if report.verbose:
            traceback.print_exc()
        report.failed(src, str(exc))


def reformat_many(
    sources: Set[Path], fast: bool, write_back: WriteBack, mode: Mode, report: "Report"
) -> None:
    """Reformat multiple files using a ProcessPoolExecutor."""
    executor: Executor
    loop = asyncio.get_event_loop()
    worker_count = os.cpu_count()
    if sys.platform == "win32":
        # Work around https://bugs.python.org/issue26903
        worker_count = min(worker_count, 60)
    try:
        executor = ProcessPoolExecutor(max_workers=worker_count)
    except (ImportError, OSError):
        # we arrive here if the underlying system does not support multi-processing
        # like in AWS Lambda or Termux, in which case we gracefully fall back to
        # a ThreadPoolExecutor with just a single worker (more workers would not do us
        # any good due to the Global Interpreter Lock)
        executor = ThreadPoolExecutor(max_workers=1)

    try:
        loop.run_until_complete(
            schedule_formatting(
                sources=sources,
                fast=fast,
                write_back=write_back,
                mode=mode,
                report=report,
                loop=loop,
                executor=executor,
            )
        )
    finally:
        shutdown(loop)
        if executor is not None:
            executor.shutdown()


async def schedule_formatting(
    sources: Set[Path],
    fast: bool,
    write_back: WriteBack,
    mode: Mode,
    report: "Report",
    loop: asyncio.AbstractEventLoop,
    executor: Executor,
) -> None:
    """Run formatting of `sources` in parallel using the provided `executor`.

    (Use ProcessPoolExecutors for actual parallelism.)

    `write_back`, `fast`, and `mode` options are passed to
    :func:`format_file_in_place`.
    """
    cache: Cache = {}
    if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
        cache = read_cache(mode)
        sources, cached = filter_cached(cache, sources)
        for src in sorted(cached):
            report.done(src, Changed.CACHED)
    if not sources:
        return

    cancelled = []
    sources_to_cache = []
    lock = None
    if write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
        # For diff output, we need locks to ensure we don't interleave output
        # from different processes.
        manager = Manager()
        lock = manager.Lock()
    tasks = {
        asyncio.ensure_future(
            loop.run_in_executor(
                executor, format_file_in_place, src, fast, mode, write_back, lock
            )
        ): src
        for src in sorted(sources)
    }
    pending: Iterable["asyncio.Future[bool]"] = tasks.keys()
    try:
        loop.add_signal_handler(signal.SIGINT, cancel, pending)
        loop.add_signal_handler(signal.SIGTERM, cancel, pending)
    except NotImplementedError:
        # There are no good alternatives for these on Windows.
        pass
    while pending:
        done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            src = tasks.pop(task)
            if task.cancelled():
                cancelled.append(task)
            elif task.exception():
                report.failed(src, str(task.exception()))
            else:
                changed = Changed.YES if task.result() else Changed.NO
                # If the file was written back or was successfully checked as
                # well-formatted, store this information in the cache.
                if write_back is WriteBack.YES or (
                    write_back is WriteBack.CHECK and changed is Changed.NO
                ):
                    sources_to_cache.append(src)
                report.done(src, changed)
    if cancelled:
        await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
    if sources_to_cache:
        write_cache(cache, sources_to_cache, mode)


def format_file_in_place(
    src: Path,
    fast: bool,
    mode: Mode,
    write_back: WriteBack = WriteBack.NO,
    lock: Any = None,  # multiprocessing.Manager().Lock() is some crazy proxy
) -> bool:
    """Format file under `src` path. Return True if changed.

    If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted
    code to the file.
    `mode` and `fast` options are passed to :func:`format_file_contents`.
    """
    if src.suffix == ".pyi":
        mode = replace(mode, is_pyi=True)

    then = datetime.utcfromtimestamp(src.stat().st_mtime)
    with open(src, "rb") as buf:
        src_contents, encoding, newline = decode_bytes(buf.read())
    try:
        dst_contents = format_file_contents(src_contents, fast=fast, mode=mode)
    except NothingChanged:
        return False

    if write_back == WriteBack.YES:
        with open(src, "w", encoding=encoding, newline=newline) as f:
            f.write(dst_contents)
    elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
        now = datetime.utcnow()
        src_name = f"{src}\t{then} +0000"
        dst_name = f"{src}\t{now} +0000"
        diff_contents = diff(src_contents, dst_contents, src_name, dst_name)

        if write_back == WriteBack.COLOR_DIFF:
            diff_contents = color_diff(diff_contents)

        with lock or nullcontext():
            f = io.TextIOWrapper(
                sys.stdout.buffer,
                encoding=encoding,
                newline=newline,
                write_through=True,
            )
            f = wrap_stream_for_windows(f)
            f.write(diff_contents)
            f.detach()

    return True


def color_diff(contents: str) -> str:
    """Inject the ANSI color codes to the diff."""
    lines = contents.split("\n")
    for i, line in enumerate(lines):
        if line.startswith("+++") or line.startswith("---"):
            line = "\033[1;37m" + line + "\033[0m"  # bold white, reset
        elif line.startswith("@@"):
            line = "\033[36m" + line + "\033[0m"  # cyan, reset
        elif line.startswith("+"):
            line = "\033[32m" + line + "\033[0m"  # green, reset
        elif line.startswith("-"):
            line = "\033[31m" + line + "\033[0m"  # red, reset
        lines[i] = line
    return "\n".join(lines)
def wrap_stream_for_windows(
    f: io.TextIOWrapper,
) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]:
    """
    Wrap stream with colorama's wrap_stream so colors are shown on Windows.

    If `colorama` is unavailable, the original stream is returned unmodified.
    Otherwise, the `wrap_stream()` function determines whether the stream needs
    to be wrapped for a Windows environment and will accordingly either return
    an `AnsiToWin32` wrapper or the original stream.
    """
    try:
        from colorama.initialise import wrap_stream
    except ImportError:
        return f
    else:
        # Set `strip=False` to avoid needing to modify test_express_diff_with_color.
        return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True)


def format_stdin_to_stdout(
    fast: bool, *, write_back: WriteBack = WriteBack.NO, mode: Mode
) -> bool:
    """Format file on stdin. Return True if changed.

    If `write_back` is YES, write reformatted code back to stdout. If it is DIFF,
    write a diff to stdout. The `mode` argument is passed to
    :func:`format_file_contents`.
    """
    then = datetime.utcnow()
    src, encoding, newline = decode_bytes(sys.stdin.buffer.read())
    dst = src
    try:
        dst = format_file_contents(src, fast=fast, mode=mode)
        return True

    except NothingChanged:
        return False

    finally:
        f = io.TextIOWrapper(
            sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True
        )
        if write_back == WriteBack.YES:
            f.write(dst)
        elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
            now = datetime.utcnow()
            src_name = f"STDIN\t{then} +0000"
            dst_name = f"STDOUT\t{now} +0000"
            d = diff(src, dst, src_name, dst_name)
            if write_back == WriteBack.COLOR_DIFF:
                d = color_diff(d)
                f = wrap_stream_for_windows(f)
            f.write(d)
        f.detach()


def format_file_contents(src_contents: str, *, fast: bool, mode: Mode) -> FileContent:
    """Reformat contents of a file and return new contents.

    If `fast` is False, additionally confirm that the reformatted code is
    valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
    `mode` is passed to :func:`format_str`.
    """
    if not src_contents.strip():
        raise NothingChanged

    dst_contents = format_str(src_contents, mode=mode)
    if src_contents == dst_contents:
        raise NothingChanged

    if not fast:
        assert_equivalent(src_contents, dst_contents)
        assert_stable(src_contents, dst_contents, mode=mode)
    return dst_contents
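# A minimal sketch of direct use (assumed, mirroring the docstring above):
#
#     try:
#         new = format_file_contents("x=1\n", fast=False, mode=Mode())
#     except NothingChanged:
#         new = "x=1\n"
#     # new == "x = 1\n"; with fast=False the result was also re-parsed and
#     # re-formatted once more to prove equivalence and stability.
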
def format_str(src_contents: str, *, mode: Mode) -> FileContent:
    """Reformat a string and return new contents.

    `mode` determines formatting options, such as how many characters per line are
    allowed.  Example:

    >>> import black
    >>> print(black.format_str("def f(arg:str='')->None:...", mode=black.Mode()))
    def f(arg: str = "") -> None:
        ...

    A more complex example:

    >>> print(
    ...   black.format_str(
    ...     "def f(arg:str='')->None: hey",
    ...     mode=black.Mode(
    ...       target_versions={black.TargetVersion.PY36},
    ...       line_length=10,
    ...       string_normalization=False,
    ...       is_pyi=False,
    ...     ),
    ...   ),
    ... )
    def f(
        arg: str = '',
    ) -> None:
        hey

    """
    src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
    dst_contents = []
    future_imports = get_future_imports(src_node)
    if mode.target_versions:
        versions = mode.target_versions
    else:
        versions = detect_target_versions(src_node)
    normalize_fmt_off(src_node)
    lines = LineGenerator(
        mode=mode,
        remove_u_prefix="unicode_literals" in future_imports
        or supports_feature(versions, Feature.UNICODE_LITERALS),
    )
    elt = EmptyLineTracker(is_pyi=mode.is_pyi)
    empty_line = Line(mode=mode)
    after = 0
    split_line_features = {
        feature
        for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF}
        if supports_feature(versions, feature)
    }
    for current_line in lines.visit(src_node):
        dst_contents.append(str(empty_line) * after)
        before, after = elt.maybe_empty_lines(current_line)
        dst_contents.append(str(empty_line) * before)
        for line in transform_line(
            current_line, mode=mode, features=split_line_features
        ):
            dst_contents.append(str(line))
    return "".join(dst_contents)


def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
    """Return a tuple of (decoded_contents, encoding, newline).

    `newline` is either CRLF or LF but `decoded_contents` is decoded with
    universal newlines (i.e. only contains LF).
    """
    srcbuf = io.BytesIO(src)
    encoding, lines = tokenize.detect_encoding(srcbuf.readline)
    if not lines:
        return "", encoding, "\n"

    newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n"
    srcbuf.seek(0)
    with io.TextIOWrapper(srcbuf, encoding) as tiow:
        return tiow.read(), encoding, newline
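# Illustrative round trip (not from the original): CRLF input is detected but
# decoded to LF-only text, so the caller can later write it back out unchanged.
#
#     contents, encoding, newline = decode_bytes(b"x = 1\r\n")
#     # contents == "x = 1\n", encoding == "utf-8", newline == "\r\n"
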
def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
    if not target_versions:
        # No target_version specified, so try all grammars.
        return [
            # Python 3.7+
            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,
            # Python 3.0-3.6
            pygram.python_grammar_no_print_statement_no_exec_statement,
            # Python 2.7 with future print_function import
            pygram.python_grammar_no_print_statement,
            # Python 2.7
            pygram.python_grammar,
        ]

    if all(version.is_python2() for version in target_versions):
        # Python 2-only code, so try Python 2 grammars.
        return [
            # Python 2.7 with future print_function import
            pygram.python_grammar_no_print_statement,
            # Python 2.7
            pygram.python_grammar,
        ]

    # Python 3-compatible code, so only try Python 3 grammar.
    grammars = []
    # If we have to parse both, try to parse async as a keyword first
    if not supports_feature(target_versions, Feature.ASYNC_IDENTIFIERS):
        # Python 3.7+
        grammars.append(
            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords
        )
    if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
        # Python 3.0-3.6
        grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)
    # At least one of the above branches must have been taken, because every Python
    # version has exactly one of the two 'ASYNC_*' flags.
    return grammars


def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
    """Given a string with source, return the lib2to3 Node."""
    if not src_txt.endswith("\n"):
        src_txt += "\n"

    for grammar in get_grammars(set(target_versions)):
        drv = driver.Driver(grammar, pytree.convert)
        try:
            result = drv.parse_string(src_txt, True)
            break

        except ParseError as pe:
            lineno, column = pe.context[1]
            lines = src_txt.splitlines()
            try:
                faulty_line = lines[lineno - 1]
            except IndexError:
                faulty_line = "<line number missing in source>"
            exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}")
    else:
        raise exc from None

    if isinstance(result, Leaf):
        result = Node(syms.file_input, [result])
    return result


def lib2to3_unparse(node: Node) -> str:
    """Given a lib2to3 node, return its string representation."""
    code = str(node)
    return code


class Visitor(Generic[T]):
    """Basic lib2to3 visitor that yields things of type `T` on `visit()`."""

    def visit(self, node: LN) -> Iterator[T]:
        """Main method to visit `node` and its children.

        It tries to find a `visit_*()` method for the given `node.type`, like
        `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects.
        If no dedicated `visit_*()` method is found, chooses `visit_default()`
        instead.

        Then yields objects of type `T` from the selected visitor.
        """
        if node.type < 256:
            name = token.tok_name[node.type]
        else:
            name = str(type_repr(node.type))
        # We explicitly branch on whether a visitor exists (instead of
        # using self.visit_default as the default arg to getattr) in order
        # to save needing to create a bound method object and so mypyc can
        # generate a native call to visit_default.
        visitf = getattr(self, f"visit_{name}", None)
        if visitf:
            yield from visitf(node)
        else:
            yield from self.visit_default(node)

    def visit_default(self, node: LN) -> Iterator[T]:
        """Default `visit_*()` implementation. Recurses to children of `node`."""
        if isinstance(node, Node):
            for child in node.children:
                yield from self.visit(child)
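# Illustrative subclass (not part of the original module): a visitor that
# yields every NAME leaf in a tree; all other node types fall through to
# visit_default, which simply recurses.
#
#     class NameCollector(Visitor[Leaf]):
#         def visit_NAME(self, node: Leaf) -> Iterator[Leaf]:
#             yield node
#
#     names = list(NameCollector().visit(lib2to3_parse("x = y + 1\n")))
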
@dataclass
class DebugVisitor(Visitor[T]):
    tree_depth: int = 0

    def visit_default(self, node: LN) -> Iterator[T]:
        indent = " " * (2 * self.tree_depth)
        if isinstance(node, Node):
            _type = type_repr(node.type)
            out(f"{indent}{_type}", fg="yellow")
            self.tree_depth += 1
            for child in node.children:
                yield from self.visit(child)

            self.tree_depth -= 1
            out(f"{indent}/{_type}", fg="yellow", bold=False)
        else:
            _type = token.tok_name.get(node.type, str(node.type))
            out(f"{indent}{_type}", fg="blue", nl=False)
            if node.prefix:
                # We don't have to handle prefixes for `Node` objects since
                # that delegates to the first child anyway.
                out(f" {node.prefix!r}", fg="green", bold=False, nl=False)
            out(f" {node.value!r}", fg="blue", bold=False)

    @classmethod
    def show(cls, code: Union[str, Leaf, Node]) -> None:
        """Pretty-print the lib2to3 AST of a given string of `code`.

        Convenience method for debugging.
        """
        v: DebugVisitor[None] = DebugVisitor()
        if isinstance(code, str):
            code = lib2to3_parse(code)
        list(v.visit(code))


WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE}
STATEMENT: Final = {
    syms.if_stmt, syms.while_stmt, syms.for_stmt, syms.try_stmt,
    syms.except_clause, syms.with_stmt, syms.funcdef, syms.classdef,
}
STANDALONE_COMMENT: Final = 153
token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT"
LOGIC_OPERATORS: Final = {"and", "or"}
COMPARATORS: Final = {
    token.GREATER, token.GREATEREQUAL, token.LESS, token.LESSEQUAL,
    token.EQEQUAL, token.NOTEQUAL,
}
MATH_OPERATORS: Final = {
    token.VBAR, token.CIRCUMFLEX, token.AMPER, token.LEFTSHIFT, token.RIGHTSHIFT,
    token.PLUS, token.MINUS, token.STAR, token.SLASH, token.DOUBLESLASH,
    token.PERCENT, token.AT, token.TILDE, token.DOUBLESTAR,
}
STARS: Final = {token.STAR, token.DOUBLESTAR}
VARARGS_SPECIALS: Final = STARS | {token.SLASH}
VARARGS_PARENTS: Final = {
    syms.arglist,
    syms.argument,  # double star in arglist
    syms.trailer,  # single argument to call
    syms.typedargslist,
    syms.varargslist,  # lambdas
}
UNPACKING_PARENTS: Final = {
    syms.atom,  # single element of a list or set literal
    syms.dictsetmaker,
    syms.listmaker,
    syms.testlist_gexp,
    syms.testlist_star_expr,
}
TEST_DESCENDANTS: Final = {
    syms.test, syms.lambdef, syms.or_test, syms.and_test, syms.not_test,
    syms.comparison, syms.star_expr, syms.expr, syms.xor_expr, syms.and_expr,
    syms.shift_expr, syms.arith_expr, syms.trailer, syms.term, syms.power,
}
ASSIGNMENTS: Final = {
    "=", "*=", "/=", "//=", "+=", "-=", "**=", "@=", "%=", ">>=", "<<=",
    "&=", "^=", "|=",
}
COMPREHENSION_PRIORITY: Final = 20
COMMA_PRIORITY: Final = 18
TERNARY_PRIORITY: Final = 16
LOGIC_PRIORITY: Final = 14
STRING_PRIORITY: Final = 12
COMPARATOR_PRIORITY: Final = 10
MATH_PRIORITIES: Final = {
    token.VBAR: 9,
    token.CIRCUMFLEX: 8,
    token.AMPER: 7,
    token.LEFTSHIFT: 6,
    token.RIGHTSHIFT: 6,
    token.PLUS: 5,
    token.MINUS: 5,
    token.STAR: 4,
    token.SLASH: 4,
    token.DOUBLESLASH: 4,
    token.PERCENT: 4,
    token.AT: 4,
    token.TILDE: 3,
    token.DOUBLESTAR: 2,
}
DOT_PRIORITY: Final = 1


@dataclass
class BracketTracker:
    """Keeps track of brackets on a line."""

    depth: int = 0
    bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
    delimiters: Dict[LeafID, Priority] = field(default_factory=dict)
    previous: Optional[Leaf] = None
    _for_loop_depths: List[int] = field(default_factory=list)
    _lambda_argument_depths: List[int] = field(default_factory=list)
    invisible: List[Leaf] = field(default_factory=list)

    def mark(self, leaf: Leaf) -> None:
        """Mark `leaf` with bracket-related metadata. Keep track of delimiters.

        All leaves receive an int `bracket_depth` field that stores how deep
        within brackets a given leaf is. 0 means there are no enclosing brackets
        that started on this line.

        If a leaf is itself a closing bracket, it receives an `opening_bracket`
        field that it forms a pair with. This is a one-directional link to
        avoid reference cycles.

        If a leaf is a delimiter (a token on which Black can split the line if
        needed) and it's on depth 0, its `id()` is stored in the tracker's
        `delimiters` field.
        """
        if leaf.type == token.COMMENT:
            return

        self.maybe_decrement_after_for_loop_variable(leaf)
        self.maybe_decrement_after_lambda_arguments(leaf)
        if leaf.type in CLOSING_BRACKETS:
            self.depth -= 1
            try:
                opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
            except KeyError as e:
                raise BracketMatchError(
                    "Unable to match a closing bracket to the following opening"
                    f" bracket: {leaf}"
                ) from e
            leaf.opening_bracket = opening_bracket
            if not leaf.value:
                self.invisible.append(leaf)
        leaf.bracket_depth = self.depth
        if self.depth == 0:
            delim = is_split_before_delimiter(leaf, self.previous)
            if delim and self.previous is not None:
                self.delimiters[id(self.previous)] = delim
            delim = is_split_after_delimiter(leaf, self.previous)
            if delim:
                self.delimiters[id(leaf)] = delim
        if leaf.type in OPENING_BRACKETS:
            self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
            self.depth += 1
            if not leaf.value:
                self.invisible.append(leaf)
        self.previous = leaf
        self.maybe_increment_lambda_arguments(leaf)
        self.maybe_increment_for_loop_variable(leaf)
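    # For example (illustrative, not from the original): marking the leaves of
    # `f(a, b)` one by one gives `a`, the comma, and `b` a bracket_depth of 1,
    # links the closing `)` back to its `(` via `opening_bracket`, and records
    # no entry in `delimiters` because the comma sits at depth 1, not depth 0.
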
    def any_open_brackets(self) -> bool:
        """Return True if there is a yet unmatched open bracket on the line."""
        return bool(self.bracket_match)

    def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
        """Return the highest priority of a delimiter found on the line.

        Values are consistent with what `is_split_*_delimiter()` return.
        Raises ValueError on no delimiters.
        """
        return max(v for k, v in self.delimiters.items() if k not in exclude)

    def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
        """Return the number of delimiters with the given `priority`.

        If no `priority` is passed, defaults to max priority on the line.
        """
        if not self.delimiters:
            return 0

        priority = priority or self.max_delimiter_priority()
        return sum(1 for p in self.delimiters.values() if p == priority)

    def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
        """In a for loop, or comprehension, the variables are often unpacks.

        To avoid splitting on the comma in this situation, increase the depth of
        tokens between `for` and `in`.
        """
        if leaf.type == token.NAME and leaf.value == "for":
            self.depth += 1
            self._for_loop_depths.append(self.depth)
            return True

        return False
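    # For example (illustrative, not from the original): in `for x, y in pairs:`
    # the tokens between `for` and `in` are bumped one bracket level deeper, so
    # the comma in the unpacked `x, y` is never treated as a depth-0 split point.
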
    def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
        """See `maybe_increment_for_loop_variable` above for explanation."""
        if (
            self._for_loop_depths
            and self._for_loop_depths[-1] == self.depth
            and leaf.type == token.NAME
            and leaf.value == "in"
        ):
            self.depth -= 1
            self._for_loop_depths.pop()
            return True

        return False

    def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
        """In a lambda expression, there might be more than one argument.

        To avoid splitting on the comma in this situation, increase the depth of
        tokens between `lambda` and `:`.
        """
        if leaf.type == token.NAME and leaf.value == "lambda":
            self.depth += 1
            self._lambda_argument_depths.append(self.depth)
            return True

        return False

    def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
        """See `maybe_increment_lambda_arguments` above for explanation."""
        if (
            self._lambda_argument_depths
            and self._lambda_argument_depths[-1] == self.depth
            and leaf.type == token.COLON
        ):
            self.depth -= 1
            self._lambda_argument_depths.pop()
            return True

        return False

    def get_open_lsqb(self) -> Optional[Leaf]:
        """Return the most recent opening square bracket (if any)."""
        return self.bracket_match.get((self.depth - 1, token.RSQB))


@dataclass
class Line:
    """Holds leaves and comments. Can be printed with `str(line)`."""

    mode: Mode
    depth: int = 0
    leaves: List[Leaf] = field(default_factory=list)
    # keys ordered like `leaves`
    comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)
    bracket_tracker: BracketTracker = field(default_factory=BracketTracker)
    inside_brackets: bool = False
    should_split_rhs: bool = False
    magic_trailing_comma: Optional[Leaf] = None

    def append(self, leaf: Leaf, preformatted: bool = False) -> None:
        """Add a new `leaf` to the end of the line.

        Unless `preformatted` is True, the `leaf` will receive a new consistent
        whitespace prefix and metadata applied by :class:`BracketTracker`.
        Trailing commas are maybe removed, unpacked for loop variables are
        demoted from being delimiters.

        Inline comments are put aside.
        """
        has_value = leaf.type in BRACKETS or bool(leaf.value.strip())
        if not has_value:
            return

        if token.COLON == leaf.type and self.is_class_paren_empty:
            del self.leaves[-2:]
        if self.leaves and not preformatted:
            # Note: at this point leaf.prefix should be empty except for
            # imports, for which we only preserve newlines.
            leaf.prefix += whitespace(
                leaf, complex_subscript=self.is_complex_subscript(leaf)
            )
        if self.inside_brackets or not preformatted:
            self.bracket_tracker.mark(leaf)
            if self.mode.magic_trailing_comma:
                if self.has_magic_trailing_comma(leaf):
                    self.magic_trailing_comma = leaf
            elif self.has_magic_trailing_comma(leaf, ensure_removable=True):
                self.remove_trailing_comma()
        if not self.append_comment(leaf):
            self.leaves.append(leaf)

    def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
        """Like :func:`append()` but disallow invalid standalone comment structure.

        Raises ValueError when any `leaf` is appended after a standalone comment
        or when a standalone comment is not the first leaf on the line.
        """
        if self.bracket_tracker.depth == 0:
            if self.is_comment:
                raise ValueError("cannot append to standalone comments")

            if self.leaves and leaf.type == STANDALONE_COMMENT:
                raise ValueError(
                    "cannot append standalone comments to a populated line"
                )

        self.append(leaf, preformatted=preformatted)

    @property
    def is_comment(self) -> bool:
        """Is this line a standalone comment?"""
        return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT

    @property
    def is_decorator(self) -> bool:
        """Is this line a decorator?"""
        return bool(self) and self.leaves[0].type == token.AT

    @property
    def is_import(self) -> bool:
        """Is this an import line?"""
        return bool(self) and is_import(self.leaves[0])

    @property
    def is_class(self) -> bool:
        """Is this line a class definition?"""
        return (
            bool(self)
            and self.leaves[0].type == token.NAME
            and self.leaves[0].value == "class"
        )

    @property
    def is_stub_class(self) -> bool:
        """Is this line a class definition with a body consisting only of "..."?"""
        return self.is_class and self.leaves[-3:] == [
            Leaf(token.DOT, ".") for _ in range(3)
        ]

    @property
    def is_def(self) -> bool:
        """Is this a function definition? (Also returns True for async defs.)"""
        try:
            first_leaf = self.leaves[0]
        except IndexError:
            return False

        try:
            second_leaf: Optional[Leaf] = self.leaves[1]
        except IndexError:
            second_leaf = None
        return (first_leaf.type == token.NAME and first_leaf.value == "def") or (
            first_leaf.type == token.ASYNC
            and second_leaf is not None
            and second_leaf.type == token.NAME
            and second_leaf.value == "def"
        )

    @property
    def is_class_paren_empty(self) -> bool:
        """Is this a class with no base classes but using parentheses?

        Those are unnecessary and should be removed.
        """
        return (
            bool(self)
            and len(self.leaves) == 4
            and self.is_class
            and self.leaves[2].type == token.LPAR
            and self.leaves[2].value == "("
            and self.leaves[3].type == token.RPAR
            and self.leaves[3].value == ")"
        )

    @property
    def is_triple_quoted_string(self) -> bool:
        """Is the line a triple quoted string?"""
        return (
            bool(self)
            and self.leaves[0].type == token.STRING
            and self.leaves[0].value.startswith(('"""', "'''"))
        )

    def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
        """If so, needs to be split before emitting."""
        for leaf in self.leaves:
            if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit:
                return True

        return False

    def contains_uncollapsable_type_comments(self) -> bool:
        ignored_ids = set()
        try:
            last_leaf = self.leaves[-1]
            ignored_ids.add(id(last_leaf))
            if last_leaf.type == token.COMMA or (
                last_leaf.type == token.RPAR and not last_leaf.value
            ):
                # When trailing commas or optional parens are inserted by Black for
                # consistency, comments after the previous last element are not moved
                # (they don't have to, rendering will still be correct). So we ignore
                # trailing commas and invisible parens.
                last_leaf = self.leaves[-2]
                ignored_ids.add(id(last_leaf))
        except IndexError:
            return False

        # A type comment is uncollapsable if it is attached to a leaf
        # that isn't at the end of the line (since that could cause it
        # to get associated to a different argument) or if there are
        # comments before it (since that could cause it to get hidden
        # behind a comment).
        comment_seen = False
        for leaf_id, comments in self.comments.items():
            for comment in comments:
                if is_type_comment(comment):
                    if comment_seen or (
                        not is_type_comment(comment, " ignore")
                        and leaf_id not in ignored_ids
                    ):
                        return True

                comment_seen = True

        return False

    def contains_unsplittable_type_ignore(self) -> bool:
        if not self.leaves:
            return False

        # If a 'type: ignore' is attached to the end of a line, we
        # can't split the line, because we can't know which of the
        # subexpressions the ignore was meant to apply to.
        #
        # We only want this to apply to actual physical lines from the
        # original source, though: we don't want the presence of a
        # 'type: ignore' at the end of a multiline expression to
        # justify pushing it all onto one line. Thus we
        # (unfortunately) need to check the actual source lines and
        # only report an unsplittable 'type: ignore' if this line was
        # one line in the original code.

        # Grab the first and last line numbers, skipping generated leaves
        first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0)
        last_line = next(
            (leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0
        )

        if first_line == last_line:
            # We look at the last two leaves since a comma or an
            # invisible paren could have been added at the end of the
            # line.
            for node in self.leaves[-2:]:
                for comment in self.comments.get(id(node), []):
                    if is_type_comment(comment, " ignore"):
                        return True

        return False

    def contains_multiline_strings(self) -> bool:
        return any(is_multiline_string(leaf) for leaf in self.leaves)

    def has_magic_trailing_comma(
        self, closing: Leaf, ensure_removable: bool = False
    ) -> bool:
        """Return True if we have a magic trailing comma, that is when:
        - there's a trailing comma here
        - it's not a one-tuple
        Additionally, if ensure_removable:
        - it's not from square bracket indexing
        """
        if not (
            closing.type in CLOSING_BRACKETS
            and self.leaves
            and self.leaves[-1].type == token.COMMA
        ):
            return False

        if closing.type == token.RBRACE:
            return True

        if closing.type == token.RSQB:
            if not ensure_removable:
                return True
            comma = self.leaves[-1]
            return bool(comma.parent and comma.parent.type == syms.listmaker)

        if self.is_import:
            return True

        if not is_one_tuple_between(closing.opening_bracket, closing, self.leaves):
            return True

        return False
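    # For example (illustrative, not from the original): `foo(a, b,)` has a
    # magic trailing comma, so Black keeps the call exploded one argument per
    # line, while the one-tuple `(1,)` is exempt because removing that comma
    # would change the value's meaning.
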
    def append_comment(self, comment: Leaf) -> bool:
        """Add an inline or standalone comment to the line."""
        if (
            comment.type == STANDALONE_COMMENT
            and self.bracket_tracker.any_open_brackets()
        ):
            comment.prefix = ""
            return False

        if comment.type != token.COMMENT:
            return False

        if not self.leaves:
            comment.type = STANDALONE_COMMENT
            comment.prefix = ""
            return False

        last_leaf = self.leaves[-1]
        if (
            last_leaf.type == token.RPAR
            and not last_leaf.value
            and last_leaf.parent
            and len(list(last_leaf.parent.leaves())) <= 3
            and not is_type_comment(comment)
        ):
            # Comments on an optional parens wrapping a single leaf should belong to
            # the wrapped node except if it's a type comment. Pinning the comment like
            # this avoids unstable formatting caused by comment migration.
            if len(self.leaves) < 2:
                comment.type = STANDALONE_COMMENT
                comment.prefix = ""
                return False

            last_leaf = self.leaves[-2]
        self.comments.setdefault(id(last_leaf), []).append(comment)
        return True

    def comments_after(self, leaf: Leaf) -> List[Leaf]:
        """Generate comments that should appear directly after `leaf`."""
        return self.comments.get(id(leaf), [])

    def remove_trailing_comma(self) -> None:
        """Remove the trailing comma and move the comments attached to it."""
        trailing_comma = self.leaves.pop()
        trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
        self.comments.setdefault(id(self.leaves[-1]), []).extend(
            trailing_comma_comments
        )

    def is_complex_subscript(self, leaf: Leaf) -> bool:
        """Return True iff `leaf` is part of a slice with non-trivial exprs."""
        open_lsqb = self.bracket_tracker.get_open_lsqb()
        if open_lsqb is None:
            return False

        subscript_start = open_lsqb.next_sibling
        if isinstance(subscript_start, Node):
            if subscript_start.type == syms.listmaker:
                return False

            if subscript_start.type == syms.subscriptlist:
                subscript_start = child_towards(subscript_start, leaf)
        return subscript_start is not None and any(
            n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
        )

    def clone(self) -> "Line":
        return Line(
            mode=self.mode,
            depth=self.depth,
            inside_brackets=self.inside_brackets,
            should_split_rhs=self.should_split_rhs,
            magic_trailing_comma=self.magic_trailing_comma,
        )

    def __str__(self) -> str:
        """Render the line."""
        if not self:
            return "\n"

        indent = "    " * self.depth
        leaves = iter(self.leaves)
        first = next(leaves)
        res = f"{first.prefix}{indent}{first.value}"
        for leaf in leaves:
            res += str(leaf)
        for comment in itertools.chain.from_iterable(self.comments.values()):
            res += str(comment)

        return res + "\n"

    def __bool__(self) -> bool:
        """Return True if the line has leaves or comments."""
        return bool(self.leaves or self.comments)


@dataclass
class EmptyLineTracker:
    """Provides a stateful method that returns the number of potential extra
    empty lines needed before and after the currently processed line.

    Note: this tracker works on lines that haven't been split yet. It assumes
    the prefix of the first leaf consists of optional newlines. Those newlines
    are consumed by `maybe_empty_lines()` and included in the computation.
    """

    is_pyi: bool = False
    previous_line: Optional[Line] = None
    previous_after: int = 0
    previous_defs: List[int] = field(default_factory=list)
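    # Example of the resulting policy (derived from _maybe_empty_lines below):
    # at module level a `def` that follows other code is preceded by two blank
    # lines, while a nested `def` gets one; .pyi stubs cap everything at one.
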
1862 def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
1863 """Return the number of extra empty lines before and after the `current_line`.
1865 This is for separating `def`, `async def` and `class` with extra empty
1866 lines (two on module-level).
1868 before, after = self._maybe_empty_lines(current_line)
1870 # Black should not insert empty lines at the beginning
1873 if self.previous_line is None
1874 else before - self.previous_after
1876 self.previous_after = after
1877 self.previous_line = current_line
1878 return before, after
1880 def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
1882 if current_line.depth == 0:
1883 max_allowed = 1 if self.is_pyi else 2
1884 if current_line.leaves:
1885 # Consume the first leaf's extra newlines.
1886 first_leaf = current_line.leaves[0]
1887 before = first_leaf.prefix.count("\n")
1888 before = min(before, max_allowed)
1889 first_leaf.prefix = ""
1892 depth = current_line.depth
1893 while self.previous_defs and self.previous_defs[-1] >= depth:
1894 self.previous_defs.pop()
1896 before = 0 if depth else 1
1898 before = 1 if depth else 2
1899 if current_line.is_decorator or current_line.is_def or current_line.is_class:
1900 return self._maybe_empty_lines_for_class_or_def(current_line, before)
1904 and self.previous_line.is_import
1905 and not current_line.is_import
1906 and depth == self.previous_line.depth
1908 return (before or 1), 0
1912 and self.previous_line.is_class
1913 and current_line.is_triple_quoted_string
1919 def _maybe_empty_lines_for_class_or_def(
1920 self, current_line: Line, before: int
1921 ) -> Tuple[int, int]:
1922 if not current_line.is_decorator:
1923 self.previous_defs.append(current_line.depth)
1924 if self.previous_line is None:
1925 # Don't insert empty lines before the first line in the file.
1928 if self.previous_line.is_decorator:
1929 if self.is_pyi and current_line.is_stub_class:
1930 # Insert an empty line after a decorated stub class
1935 if self.previous_line.depth < current_line.depth and (
1936 self.previous_line.is_class or self.previous_line.is_def
1941 self.previous_line.is_comment
1942 and self.previous_line.depth == current_line.depth
1948 if self.previous_line.depth > current_line.depth:
1950 elif current_line.is_class or self.previous_line.is_class:
1951 if current_line.is_stub_class and self.previous_line.is_stub_class:
1952 # No blank line between classes with an empty body
1957 current_line.is_def or current_line.is_decorator
1958 ) and not self.previous_line.is_def:
1959 # Blank line between a block of functions (maybe with preceding
1960 # decorators) and a block of non-functions
1966 if current_line.depth and newlines:
1972 class LineGenerator(Visitor[Line]):
1973 """Generates reformatted Line objects. Empty lines are not emitted.
1975 Note: destroys the tree it's visiting by mutating prefixes of its leaves
1976 in ways that will no longer stringify to valid Python code on the tree.
1980 remove_u_prefix: bool = False
1981 current_line: Line = field(init=False)
1983 def line(self, indent: int = 0) -> Iterator[Line]:
1986 If the line is empty, only emit if it makes sense.
1987 If the line is too long, split it first and then generate.
1989 If any lines were generated, set up a new current_line.
1991 if not self.current_line:
1992 self.current_line.depth += indent
1993 return # Line is empty, don't emit. Creating a new one unnecessary.
1995 complete_line = self.current_line
1996 self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent)
1999 def visit_default(self, node: LN) -> Iterator[Line]:
2000 """Default `visit_*()` implementation. Recurses to children of `node`."""
2001 if isinstance(node, Leaf):
2002 any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
2003 for comment in generate_comments(node):
2004 if any_open_brackets:
2005 # any comment within brackets is subject to splitting
2006 self.current_line.append(comment)
2007 elif comment.type == token.COMMENT:
2008 # regular trailing comment
2009 self.current_line.append(comment)
2010 yield from self.line()
2013 # regular standalone comment
2014 yield from self.line()
2016 self.current_line.append(comment)
2017 yield from self.line()
2019 normalize_prefix(node, inside_brackets=any_open_brackets)
2020 if self.mode.string_normalization and node.type == token.STRING:
2021 normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix)
2022 normalize_string_quotes(node)
2023 if node.type == token.NUMBER:
2024 normalize_numeric_literal(node)
2025 if node.type not in WHITESPACE:
2026 self.current_line.append(node)
2027 yield from super().visit_default(node)
2029 def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
2030 """Increase indentation level, maybe yield a line."""
2031 # In blib2to3 INDENT never holds comments.
2032 yield from self.line(+1)
2033 yield from self.visit_default(node)
2035 def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
2036 """Decrease indentation level, maybe yield a line."""
2037 # The current line might still wait for trailing comments. At DEDENT time
2038 # there won't be any (they would be prefixes on the preceding NEWLINE).
2039 # Emit the line then.
2040 yield from self.line()
2042 # While DEDENT has no value, its prefix may contain standalone comments
2043 # that belong to the current indentation level. Get 'em.
2044 yield from self.visit_default(node)
2046 # Finally, emit the dedent.
2047 yield from self.line(-1)
2050 self, node: Node, keywords: Set[str], parens: Set[str]
2051 ) -> Iterator[Line]:
2052 """Visit a statement.
2054 This implementation is shared for `if`, `while`, `for`, `try`, `except`,
2055 `def`, `with`, `class`, `assert` and assignments.
2057 The relevant Python language `keywords` for a given statement will be
2058 NAME leaves within it. This methods puts those on a separate line.
2060 `parens` holds a set of string leaf values immediately after which
2061 invisible parens should be put.
2063 normalize_invisible_parens(node, parens_after=parens)
2064 for child in node.children:
2065 if child.type == token.NAME and child.value in keywords: # type: ignore
2066 yield from self.line()
2068 yield from self.visit(child)
2070 def visit_suite(self, node: Node) -> Iterator[Line]:
2071 """Visit a suite."""
2072 if self.mode.is_pyi and is_stub_suite(node):
2073 yield from self.visit(node.children[2])
2075 yield from self.visit_default(node)
2077 def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
2078 """Visit a statement without nested statements."""
2079 if first_child_is_arith(node):
2080 wrap_in_parentheses(node, node.children[0], visible=False)
2081 is_suite_like = node.parent and node.parent.type in STATEMENT
2083 if self.mode.is_pyi and is_stub_body(node):
2084 yield from self.visit_default(node)
2086 yield from self.line(+1)
2087 yield from self.visit_default(node)
2088 yield from self.line(-1)
2092 not self.mode.is_pyi
2094 or not is_stub_suite(node.parent)
2096 yield from self.line()
2097 yield from self.visit_default(node)
2099 def visit_async_stmt(self, node: Node) -> Iterator[Line]:
2100 """Visit `async def`, `async for`, `async with`."""
2101 yield from self.line()
2103 children = iter(node.children)
2104 for child in children:
2105 yield from self.visit(child)
2107 if child.type == token.ASYNC:
2110 internal_stmt = next(children)
2111 for child in internal_stmt.children:
2112 yield from self.visit(child)
2114 def visit_decorators(self, node: Node) -> Iterator[Line]:
2115 """Visit decorators."""
2116 for child in node.children:
2117 yield from self.line()
2118 yield from self.visit(child)
2120 def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
2121 """Remove a semicolon and put the other statement on a separate line."""
2122 yield from self.line()
2124 def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
2125 """End of file. Process outstanding comments and end with a newline."""
2126 yield from self.visit_default(leaf)
2127 yield from self.line()
2129 def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
2130 if not self.current_line.bracket_tracker.any_open_brackets():
2131 yield from self.line()
2132 yield from self.visit_default(leaf)
2134 def visit_factor(self, node: Node) -> Iterator[Line]:
2135 """Force parentheses between a unary op and a binary power:
2137 -2 ** 8 -> -(2 ** 8)
2139 _operator, operand = node.children
2141 operand.type == syms.power
2142 and len(operand.children) == 3
2143 and operand.children[1].type == token.DOUBLESTAR
2145 lpar = Leaf(token.LPAR, "(")
2146 rpar = Leaf(token.RPAR, ")")
2147 index = operand.remove() or 0
2148 node.insert_child(index, Node(syms.atom, [lpar, operand, rpar]))
2149 yield from self.visit_default(node)
2151 def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
2152 if is_docstring(leaf) and "\\\n" not in leaf.value:
2153 # We're ignoring docstrings with backslash newline escapes because changing
2154 # indentation of those changes the AST representation of the code.
2155 prefix = get_string_prefix(leaf.value)
2156 lead_len = len(prefix) + 3
2158 indent = " " * 4 * self.current_line.depth
2159 docstring = fix_docstring(leaf.value[lead_len:tail_len], indent)
2161 if leaf.value[lead_len - 1] == docstring[0]:
2162 docstring = " " + docstring
2163 if leaf.value[tail_len + 1] == docstring[-1]:
2164 docstring = docstring + " "
2165 leaf.value = leaf.value[0:lead_len] + docstring + leaf.value[tail_len:]
2167 yield from self.visit_default(leaf)
2169 def __post_init__(self) -> None:
2170 """You are in a twisty little maze of passages."""
2171 self.current_line = Line(mode=self.mode)
2175 self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
2176 self.visit_if_stmt = partial(
2177 v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
2179 self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
2180 self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
2181 self.visit_try_stmt = partial(
2182 v, keywords={"try", "except", "else", "finally"}, parens=Ø
2184 self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
2185 self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
2186 self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
2187 self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
2188 self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
2189 self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
2190 self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
2191 self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
2192 self.visit_async_funcdef = self.visit_async_stmt
2193 self.visit_decorated = self.visit_decorators
2196 IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist}
2197 BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE}
2198 OPENING_BRACKETS = set(BRACKET.keys())
2199 CLOSING_BRACKETS = set(BRACKET.values())
2200 BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
2201 ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT}
2204 def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901
2205 """Return whitespace prefix if needed for the given `leaf`.
2207 `complex_subscript` signals whether the given leaf is part of a subscription
2208 which has non-trivial arguments, like arithmetic expressions or function calls.
2216 if t in ALWAYS_NO_SPACE:
2219 if t == token.COMMENT:
2222 assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
2223 if t == token.COLON and p.type not in {
2230 prev = leaf.prev_sibling
2232 prevp = preceding_leaf(p)
2233 if not prevp or prevp.type in OPENING_BRACKETS:
2236 if t == token.COLON:
2237 if prevp.type == token.COLON:
2240 elif prevp.type != token.COMMA and not complex_subscript:
2245 if prevp.type == token.EQUAL:
2247 if prevp.parent.type in {
2255 elif prevp.parent.type == syms.typedargslist:
2256 # A bit hacky: if the equal sign has whitespace, it means we
2257 # previously found it's a typed argument. So, we're using
2261 elif prevp.type in VARARGS_SPECIALS:
2262 if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS):
2265 elif prevp.type == token.COLON:
2266 if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
2267 return SPACE if complex_subscript else NO
2271 and prevp.parent.type == syms.factor
2272 and prevp.type in MATH_OPERATORS
2277 prevp.type == token.RIGHTSHIFT
2279 and prevp.parent.type == syms.shift_expr
2280 and prevp.prev_sibling
2281 and prevp.prev_sibling.type == token.NAME
2282 and prevp.prev_sibling.value == "print" # type: ignore
2284 # Python 2 print chevron
2286 elif prevp.type == token.AT and p.parent and p.parent.type == syms.decorator:
2287 # no space in decorators
2290 elif prev.type in OPENING_BRACKETS:
2293 if p.type in {syms.parameters, syms.arglist}:
2294 # untyped function signatures or calls
2295 if not prev or prev.type != token.COMMA:
2298 elif p.type == syms.varargslist:
2300 if prev and prev.type != token.COMMA:
2303 elif p.type == syms.typedargslist:
2304 # typed function signatures
2308 if t == token.EQUAL:
2309 if prev.type != syms.tname:
2312 elif prev.type == token.EQUAL:
2313 # A bit hacky: if the equal sign has whitespace, it means we
2314 # previously found it's a typed argument. So, we're using that, too.
2317 elif prev.type != token.COMMA:
2320 elif p.type == syms.tname:
2323 prevp = preceding_leaf(p)
2324 if not prevp or prevp.type != token.COMMA:
2327 elif p.type == syms.trailer:
2328 # attributes and calls
2329 if t == token.LPAR or t == token.RPAR:
2334 prevp = preceding_leaf(p)
2335 if not prevp or prevp.type != token.NUMBER:
2338 elif t == token.LSQB:
2341 elif prev.type != token.COMMA:
2344 elif p.type == syms.argument:
2346 if t == token.EQUAL:
2350 prevp = preceding_leaf(p)
2351 if not prevp or prevp.type == token.LPAR:
2354 elif prev.type in {token.EQUAL} | VARARGS_SPECIALS:
2357 elif p.type == syms.decorator:
2361 elif p.type == syms.dotted_name:
2365 prevp = preceding_leaf(p)
2366 if not prevp or prevp.type == token.AT or prevp.type == token.DOT:
2369 elif p.type == syms.classdef:
2373 if prev and prev.type == token.LPAR:
2376 elif p.type in {syms.subscript, syms.sliceop}:
2379 assert p.parent is not None, "subscripts are always parented"
2380 if p.parent.type == syms.subscriptlist:
2385 elif not complex_subscript:
2388 elif p.type == syms.atom:
2389 if prev and t == token.DOT:
2390 # dots, but not the first one.
2393 elif p.type == syms.dictsetmaker:
2395 if prev and prev.type == token.DOUBLESTAR:
2398 elif p.type in {syms.factor, syms.star_expr}:
2401 prevp = preceding_leaf(p)
2402 if not prevp or prevp.type in OPENING_BRACKETS:
2405 prevp_parent = prevp.parent
2406 assert prevp_parent is not None
2407 if prevp.type == token.COLON and prevp_parent.type in {
2413 elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
2416 elif t in {token.NAME, token.NUMBER, token.STRING}:
2419 elif p.type == syms.import_from:
2421 if prev and prev.type == token.DOT:
2424 elif t == token.NAME:
2428 if prev and prev.type == token.DOT:
2431 elif p.type == syms.sliceop:
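# A minimal illustration of the rules above; `_demo_whitespace` is a
# hypothetical helper, not part of black. Commas and closing brackets are
# handled before any parent lookup, so bare leaves suffice here.
def _demo_whitespace() -> None:
    # Members of ALWAYS_NO_SPACE never receive a whitespace prefix.
    assert whitespace(Leaf(token.COMMA, ","), complex_subscript=False) == ""
    assert whitespace(Leaf(token.RPAR, ")"), complex_subscript=False) == ""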
2437 def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
2438 """Return the first leaf that precedes `node`, if any."""
2440 res = node.prev_sibling
2442 if isinstance(res, Leaf):
2446 return list(res.leaves())[-1]
2455 def prev_siblings_are(node: Optional[LN], tokens: List[Optional[NodeType]]) -> bool:
2456 """Return if the `node` and its previous siblings match types against the provided
2457 list of tokens; the provided `node`has its type matched against the last element in
2458 the list. `None` can be used as the first element to declare that the start of the
2459 list is anchored at the start of its parent's children."""
2462 if tokens[-1] is None:
2466 if node.type != tokens[-1]:
2468 return prev_siblings_are(node.prev_sibling, tokens[:-1])
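# A minimal illustration of the matching rules above;
# `_demo_prev_siblings_are` is a hypothetical helper, not part of black.
def _demo_prev_siblings_are() -> None:
    stmt = Node(
        syms.expr_stmt,
        [Leaf(token.NAME, "x"), Leaf(token.EQUAL, "="), Leaf(token.STRING, '"y"')],
    )
    string_leaf = stmt.children[-1]
    # Matching runs right-to-left; a leading None anchors the pattern at the
    # parent's first child.
    assert prev_siblings_are(
        string_leaf, [None, token.NAME, token.EQUAL, token.STRING]
    )
    # Without the anchor, any suffix of the sibling chain may match.
    assert prev_siblings_are(string_leaf, [token.EQUAL, token.STRING])
    assert not prev_siblings_are(string_leaf, [None, token.EQUAL, token.STRING])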
2471 def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]:
2472 """Return the child of `ancestor` that contains `descendant`."""
2473 node: Optional[LN] = descendant
2474 while node and node.parent != ancestor:
2479 def container_of(leaf: Leaf) -> LN:
2480 """Return `leaf` or one of its ancestors that is the topmost container of it.
2482 By "container" we mean a node where `leaf` is the very first child.
2484 same_prefix = leaf.prefix
2485 container: LN = leaf
2487 parent = container.parent
2491 if parent.children[0].prefix != same_prefix:
2494 if parent.type == syms.file_input:
2497 if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS:
2504 def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
2505 """Return the priority of the `leaf` delimiter, given a line break after it.
2507 The delimiter priorities returned here are from those delimiters that would
2508 cause a line break after themselves.
2510 Higher numbers are higher priority.
2512 if leaf.type == token.COMMA:
2513 return COMMA_PRIORITY
2518 def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
2519 """Return the priority of the `leaf` delimiter, given a line break before it.
2521 The delimiter priorities returned here are from those delimiters that would
2522 cause a line break before themselves.
2524 Higher numbers are higher priority.
2526 if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
2527 # * and ** might also be MATH_OPERATORS but in this case they are not.
2528 # Don't treat them as a delimiter.
2532 leaf.type == token.DOT
2534 and leaf.parent.type not in {syms.import_from, syms.dotted_name}
2535 and (previous is None or previous.type in CLOSING_BRACKETS)
2540 leaf.type in MATH_OPERATORS
2542 and leaf.parent.type not in {syms.factor, syms.star_expr}
2544 return MATH_PRIORITIES[leaf.type]
2546 if leaf.type in COMPARATORS:
2547 return COMPARATOR_PRIORITY
2550 leaf.type == token.STRING
2551 and previous is not None
2552 and previous.type == token.STRING
2554 return STRING_PRIORITY
2556 if leaf.type not in {token.NAME, token.ASYNC}:
2562 and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
2563 or leaf.type == token.ASYNC
2566 not isinstance(leaf.prev_sibling, Leaf)
2567 or leaf.prev_sibling.value != "async"
2569 return COMPREHENSION_PRIORITY
2574 and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
2576 return COMPREHENSION_PRIORITY
2578 if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test:
2579 return TERNARY_PRIORITY
2581 if leaf.value == "is":
2582 return COMPARATOR_PRIORITY
2587 and leaf.parent.type in {syms.comp_op, syms.comparison}
2589 previous is not None
2590 and previous.type == token.NAME
2591 and previous.value == "not"
2594 return COMPARATOR_PRIORITY
2599 and leaf.parent.type == syms.comp_op
2601 previous is not None
2602 and previous.type == token.NAME
2603 and previous.value == "is"
2606 return COMPARATOR_PRIORITY
2608 if leaf.value in LOGIC_OPERATORS and leaf.parent:
2609 return LOGIC_PRIORITY
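# A minimal illustration of the priorities above;
# `_demo_delimiter_priorities` is a hypothetical helper, not part of black.
def _demo_delimiter_priorities() -> None:
    # A comma splits *after* itself at the highest priority...
    assert is_split_after_delimiter(Leaf(token.COMMA, ",")) == COMMA_PRIORITY
    # ...while a comparison operator splits *before* itself.
    assert is_split_before_delimiter(Leaf(token.LESS, "<")) == COMPARATOR_PRIORITY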
2614 FMT_OFF = {"# fmt: off", "# fmt:off", "# yapf: disable"}
2615 FMT_SKIP = {"# fmt: skip", "# fmt:skip"}
2616 FMT_PASS = {*FMT_OFF, *FMT_SKIP}
2617 FMT_ON = {"# fmt: on", "# fmt:on", "# yapf: enable"}
2620 def generate_comments(leaf: LN) -> Iterator[Leaf]:
2621 """Clean the prefix of the `leaf` and generate comments from it, if any.
2623 Comments in lib2to3 are shoved into the whitespace prefix. This happens
2624 in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation
2625 move because it does away with modifying the grammar to include all the
2626 possible places in which comments can be placed.
2628 The sad consequence for us though is that comments don't "belong" anywhere.
2629 This is why this function generates simple parentless Leaf objects for
2630 comments. We simply don't know what the correct parent should be.
2632 No matter though, we can live without this. We really only need to
2633 differentiate between inline and standalone comments. The latter don't
2634 share the line with any code.
2636 Inline comments are emitted as regular token.COMMENT leaves. Standalone
2637 are emitted with a fake STANDALONE_COMMENT token identifier.
2639 for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
2640 yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines)
2645 """Describes a piece of syntax that is a comment.
2647 It's not a :class:`blib2to3.pytree.Leaf` so that:
2649 * it can be cached (`Leaf` objects should not be reused more than once as
2650 they store their lineno, column, prefix, and parent information);
2651 * `newlines` and `consumed` fields are kept separate from the `value`. This
2652 simplifies handling of special marker comments like ``# fmt: off/on``.
2655 type: int # token.COMMENT or STANDALONE_COMMENT
2656 value: str # content of the comment
2657 newlines: int # how many newlines before the comment
2658 consumed: int # how many characters of the original leaf's prefix did we consume
2661 @lru_cache(maxsize=4096)
2662 def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
2663 """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
2664 result: List[ProtoComment] = []
2665 if not prefix or "#" not in prefix:
2671 for index, line in enumerate(re.split("\r?\n", prefix)):
2672 consumed += len(line) + 1 # adding the length of the split '\n'
2673 line = line.lstrip()
2676 if not line.startswith("#"):
2677 # Escaped newlines outside of a comment are not really newlines at
2678 # all. We treat a single-line comment following an escaped newline
2679 # as a simple trailing comment.
2680 if line.endswith("\\"):
2684 if index == ignored_lines and not is_endmarker:
2685 comment_type = token.COMMENT # simple trailing comment
2687 comment_type = STANDALONE_COMMENT
2688 comment = make_comment(line)
2691 type=comment_type, value=comment, newlines=nlines, consumed=consumed
2698 def make_comment(content: str) -> str:
2699 """Return a consistently formatted comment from the given `content` string.
2701 All comments (except for "##", "#!", "#:", "#'", "#%%") should have a single
2702 space between the hash sign and the content.
2704 If `content` didn't start with a hash sign, one is provided.
2706 content = content.rstrip()
2710 if content[0] == "#":
2711 content = content[1:]
2712 if content and content[0] not in " !:#'%":
2713 content = " " + content
2714 return "#" + content
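# A minimal illustration of the comment machinery above;
# `_demo_comment_parsing` is a hypothetical helper, not part of black.
def _demo_comment_parsing() -> None:
    assert make_comment("#comment") == "# comment"   # space inserted
    assert make_comment("comment") == "# comment"    # hash sign provided
    assert make_comment("#!special") == "#!special"  # "#!" left untouched
    comments = list_comments("  # first\n  # second\n", is_endmarker=False)
    assert [c.value for c in comments] == ["# first", "# second"]
    # Only the first comment in a prefix can be a trailing comment; the
    # rest become standalone comments.
    assert comments[0].type == token.COMMENT
    assert comments[1].type == STANDALONE_COMMENT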
2717 def transform_line(
2718 line: Line, mode: Mode, features: Collection[Feature] = ()
2719 ) -> Iterator[Line]:
2720 """Transform a `line`, potentially splitting it into many lines.
2722 They should fit in the allotted `line_length` but might not be able to.
2724 `features` are syntactical features that may be used in the output.
2730 line_str = line_to_string(line)
2732 def init_st(ST: Type[StringTransformer]) -> StringTransformer:
2733 """Initialize StringTransformer"""
2734 return ST(mode.line_length, mode.string_normalization)
2736 string_merge = init_st(StringMerger)
2737 string_paren_strip = init_st(StringParenStripper)
2738 string_split = init_st(StringSplitter)
2739 string_paren_wrap = init_st(StringParenWrapper)
2741 transformers: List[Transformer]
2743 not line.contains_uncollapsable_type_comments()
2744 and not line.should_split_rhs
2745 and not line.magic_trailing_comma
2747 is_line_short_enough(line, line_length=mode.line_length, line_str=line_str)
2748 or line.contains_unsplittable_type_ignore()
2750 and not (line.inside_brackets and line.contains_standalone_comments())
2752 # Only apply basic string preprocessing, since lines shouldn't be split here.
2753 if mode.experimental_string_processing:
2754 transformers = [string_merge, string_paren_strip]
2758 transformers = [left_hand_split]
2761 def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]:
2762 """Wraps calls to `right_hand_split`.
2764 The calls increasingly `omit` right-hand trailers (bracket pairs with
2765 content), meaning the trailers get glued together to split on another
2766 bracket pair instead.
2768 for omit in generate_trailers_to_omit(line, mode.line_length):
2770 right_hand_split(line, mode.line_length, features, omit=omit)
2772 # Note: this check is only able to figure out if the first line of the
2773 # *current* transformation fits in the line length. This is true only
2774 # for simple cases. All others require running more transforms via
2775 # `transform_line()`. This check doesn't know if those would succeed.
2776 if is_line_short_enough(lines[0], line_length=mode.line_length):
2780 # All splits failed, best effort split with no omits.
2781 # This mostly happens to multiline strings that are by definition
2782 # reported as not fitting a single line, as well as lines that contain
2783 # trailing commas (those have to be exploded).
2784 yield from right_hand_split(
2785 line, line_length=mode.line_length, features=features
2788 if mode.experimental_string_processing:
2789 if line.inside_brackets:
2795 standalone_comment_split,
2808 if line.inside_brackets:
2809 transformers = [delimiter_split, standalone_comment_split, rhs]
2811 transformers = [rhs]
2813 for transform in transformers:
2814 # We are accumulating lines in `result` because we might want to abort
2815 # mission and return the original line in the end, or attempt a different
2816 # split altogether.
2817 try:
2818 result = run_transformer(line, transform, mode, features, line_str=line_str)
2819 except CannotTransform:
2829 @dataclass # type: ignore
2830 class StringTransformer(ABC):
2832 An implementation of the Transformer protocol that relies on its
2833 subclasses overriding the template methods `do_match(...)` and
2834 `do_transform(...)`.
2836 This Transformer works exclusively on strings (for example, by merging
2839 The following sections can be found among the docstrings of each concrete
2840 StringTransformer subclass.
2842 Requirements:
2843 Which requirements must be met by the given Line for this
2844 StringTransformer to be applied?
2846 Transformations:
2847 If the given Line meets all of the above requirements, which string
2848 transformations can you expect to be applied to it by this
2849 StringTransformer?
2851 Collaborations:
2852 What contractual agreements does this StringTransformer have with other
2853 StringTransformers? Such collaborations should be eliminated/minimized
2854 as much as possible.
2855 """
2857 line_length: int
2858 normalize_strings: bool
2859 __name__ = "StringTransformer"
2862 def do_match(self, line: Line) -> TMatchResult:
2865 * Ok(string_idx) such that `line.leaves[string_idx]` is our target
2866 string, if a match was able to be made.
2868 * Err(CannotTransform), if a match was not able to be made.
2872 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
2875 * Ok(new_line) where new_line is the new transformed line.
2877 * Err(CannotTransform) if the transformation failed for some reason. The
2878 `do_match(...)` template method should usually be used to reject
2879 the form of the given Line, but in some cases it is difficult to
2880 know whether or not a Line meets the StringTransformer's
2881 requirements until the transformation is already midway.
2884 This method should NOT mutate @line directly, but it MAY mutate the
2885 Line's underlying Node structure. (WARNING: If the underlying Node
2886 structure IS altered, then this method should NOT be allowed to
2887 yield a CannotTransform after that point.)
2890 def __call__(self, line: Line, _features: Collection[Feature]) -> Iterator[Line]:
2892 StringTransformer instances have a call signature that mirrors that of
2893 the Transformer type.
2896 CannotTransform(...) if the concrete StringTransformer class is unable
2899 # Optimization to avoid calling `self.do_match(...)` when the line does
2900 # not contain any string.
2901 if not any(leaf.type == token.STRING for leaf in line.leaves):
2902 raise CannotTransform("There are no strings in this line.")
2904 match_result = self.do_match(line)
2906 if isinstance(match_result, Err):
2907 cant_transform = match_result.err()
2908 raise CannotTransform(
2909 f"The string transformer {self.__class__.__name__} does not recognize"
2910 " this line as one that it can transform."
2911 ) from cant_transform
2913 string_idx = match_result.ok()
2915 for line_result in self.do_transform(line, string_idx):
2916 if isinstance(line_result, Err):
2917 cant_transform = line_result.err()
2918 raise CannotTransform(
2919 "StringTransformer failed while attempting to transform string."
2920 ) from cant_transform
2921 line = line_result.ok()
2927 """A custom (i.e. manual) string split.
2929 A single CustomSplit instance represents a single substring.
2931 Examples:
2932 Consider the following string:
2933 ```
2934 "Hi there friend."
2935 " This is a custom"
2936 f" string {split}."
2937 ```
2939 This string will correspond to the following three CustomSplit instances:
2940 ```
2941 CustomSplit(False, 16)
2942 CustomSplit(False, 17)
2943 CustomSplit(True, 16)
2944 ```
2945 """
2947 has_prefix: bool
2948 break_idx: int
2951 class CustomSplitMapMixin:
2953 This mixin class is used to map merged strings to a sequence of
2954 CustomSplits, which will then be used to re-split the strings iff none of
2955 the resultant substrings go over the configured max line length.
2958 _Key = Tuple[StringID, str]
2959 _CUSTOM_SPLIT_MAP: Dict[_Key, Tuple[CustomSplit, ...]] = defaultdict(tuple)
2962 def _get_key(string: str) -> "CustomSplitMapMixin._Key":
2965 A unique identifier that is used internally to map @string to a
2966 group of custom splits.
2968 return (id(string), string)
2970 def add_custom_splits(
2971 self, string: str, custom_splits: Iterable[CustomSplit]
2973 """Custom Split Map Setter Method
2976 Adds a mapping from @string to the custom splits @custom_splits.
2978 key = self._get_key(string)
2979 self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)
2981 def pop_custom_splits(self, string: str) -> List[CustomSplit]:
2982 """Custom Split Map Getter Method
2985 * A list of the custom splits that are mapped to @string, if any
2991 Deletes the mapping between @string and its associated custom
2992 splits (which are returned to the caller).
2994 key = self._get_key(string)
2996 custom_splits = self._CUSTOM_SPLIT_MAP[key]
2997 del self._CUSTOM_SPLIT_MAP[key]
2999 return list(custom_splits)
3001 def has_custom_splits(self, string: str) -> bool:
3004 True iff @string is associated with a set of custom splits.
3006 key = self._get_key(string)
3007 return key in self._CUSTOM_SPLIT_MAP
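# A minimal illustration of the mixin above; `_demo_custom_split_map` is a
# hypothetical helper, not part of black.
def _demo_custom_split_map() -> None:
    registry = CustomSplitMapMixin()
    merged = "'one two'"
    registry.add_custom_splits(merged, [CustomSplit(False, 4)])
    assert registry.has_custom_splits(merged)
    # Popping returns the registered splits and clears the entry.
    assert registry.pop_custom_splits(merged) == [CustomSplit(False, 4)]
    assert not registry.has_custom_splits(merged)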
3010 class StringMerger(CustomSplitMapMixin, StringTransformer):
3011 """StringTransformer that merges strings together.
3014 (A) The line contains adjacent strings such that ALL of the validation checks
3015 listed in StringMerger.__validate_msg(...)'s docstring pass.
3017 (B) The line contains a string which uses line continuation backslashes.
3020 Depending on which of the two requirements above were met, either:
3022 (A) The string group associated with the target string is merged.
3024 (B) All line-continuation backslashes are removed from the target string.
3027 StringMerger provides custom split information to StringSplitter.
3030 def do_match(self, line: Line) -> TMatchResult:
3033 is_valid_index = is_valid_index_factory(LL)
3035 for (i, leaf) in enumerate(LL):
3037 leaf.type == token.STRING
3038 and is_valid_index(i + 1)
3039 and LL[i + 1].type == token.STRING
3043 if leaf.type == token.STRING and "\\\n" in leaf.value:
3046 return TErr("This line has no strings that need merging.")
3048 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
3050 rblc_result = self.__remove_backslash_line_continuation_chars(
3051 new_line, string_idx
3053 if isinstance(rblc_result, Ok):
3054 new_line = rblc_result.ok()
3056 msg_result = self.__merge_string_group(new_line, string_idx)
3057 if isinstance(msg_result, Ok):
3058 new_line = msg_result.ok()
3060 if isinstance(rblc_result, Err) and isinstance(msg_result, Err):
3061 msg_cant_transform = msg_result.err()
3062 rblc_cant_transform = rblc_result.err()
3063 cant_transform = CannotTransform(
3064 "StringMerger failed to merge any strings in this line."
3067 # Chain the errors together using `__cause__`.
3068 msg_cant_transform.__cause__ = rblc_cant_transform
3069 cant_transform.__cause__ = msg_cant_transform
3071 yield Err(cant_transform)
3076 def __remove_backslash_line_continuation_chars(
3077 line: Line, string_idx: int
3080 Merge strings that were split across multiple lines using
3081 line-continuation backslashes.
3084 Ok(new_line), if @line contains backslash line-continuation
3087 Err(CannotTransform), otherwise.
3091 string_leaf = LL[string_idx]
3093 string_leaf.type == token.STRING
3094 and "\\\n" in string_leaf.value
3095 and not has_triple_quotes(string_leaf.value)
3098 f"String leaf {string_leaf} does not contain any backslash line"
3099 " continuation characters."
3102 new_line = line.clone()
3103 new_line.comments = line.comments.copy()
3104 append_leaves(new_line, line, LL)
3106 new_string_leaf = new_line.leaves[string_idx]
3107 new_string_leaf.value = new_string_leaf.value.replace("\\\n", "")
3111 def __merge_string_group(self, line: Line, string_idx: int) -> TResult[Line]:
3113 Merges string group (i.e. set of adjacent strings) where the first
3114 string in the group is `line.leaves[string_idx]`.
3117 Ok(new_line), if ALL of the validation checks found in
3118 __validate_msg(...) pass.
3120 Err(CannotTransform), otherwise.
3124 is_valid_index = is_valid_index_factory(LL)
3126 vresult = self.__validate_msg(line, string_idx)
3127 if isinstance(vresult, Err):
3130 # If the string group is wrapped inside an Atom node, we must make sure
3131 # to later replace that Atom with our new (merged) string leaf.
3132 atom_node = LL[string_idx].parent
3134 # We will place BREAK_MARK in between every two substrings that we
3135 # merge. We will then later go through our final result and use the
3136 # various instances of BREAK_MARK we find to add the right values to
3137 # the custom split map.
3138 BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"
3140 QUOTE = LL[string_idx].value[-1]
3142 def make_naked(string: str, string_prefix: str) -> str:
3143 """Strip @string (i.e. make it a "naked" string)
3146 * assert_is_leaf_string(@string)
3149 A string that is identical to @string except that
3150 @string_prefix has been stripped, the surrounding QUOTE
3151 characters have been removed, and any remaining QUOTE
3152 characters have been escaped.
3154 assert_is_leaf_string(string)
3156 RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
3157 naked_string = string[len(string_prefix) + 1 : -1]
3158 naked_string = re.sub(
3159 "(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
3163 # Holds the CustomSplit objects that will later be added to the custom
3164 # split map.
3165 custom_splits = []
3167 # Temporary storage for the 'has_prefix' part of the CustomSplit objects.
3168 prefix_tracker = []
3170 # Sets the 'prefix' variable. This is the prefix that the final merged
3171 # string will have.
3172 next_str_idx = string_idx
3176 and is_valid_index(next_str_idx)
3177 and LL[next_str_idx].type == token.STRING
3179 prefix = get_string_prefix(LL[next_str_idx].value)
3182 # The next loop merges the string group. The final string will be
3183 # contained in 'S'.
3185 # The following convenience variables are used:
3187 # S: string
3188 # NS: naked string
3189 # SS: next string
3190 # NSS: naked next string
3194 next_str_idx = string_idx
3195 while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
3198 SS = LL[next_str_idx].value
3199 next_prefix = get_string_prefix(SS)
3201 # If this is an f-string group but this substring is not prefixed
3202 # with 'f'...
3203 if "f" in prefix and "f" not in next_prefix:
3204 # Then we must escape any braces contained in this substring.
3205 SS = re.subf(r"(\{|\})", "{1}{1}", SS)
3207 NSS = make_naked(SS, next_prefix)
3209 has_prefix = bool(next_prefix)
3210 prefix_tracker.append(has_prefix)
3212 S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
3213 NS = make_naked(S, prefix)
3217 S_leaf = Leaf(token.STRING, S)
3218 if self.normalize_strings:
3219 normalize_string_quotes(S_leaf)
3221 # Fill the 'custom_splits' list with the appropriate CustomSplit objects.
3222 temp_string = S_leaf.value[len(prefix) + 1 : -1]
3223 for has_prefix in prefix_tracker:
3224 mark_idx = temp_string.find(BREAK_MARK)
3227 ), "Logic error while filling the custom string breakpoint cache."
3229 temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
3230 breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
3231 custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))
3233 string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))
3235 if atom_node is not None:
3236 replace_child(atom_node, string_leaf)
3238 # Build the final line ('new_line') that this method will later return.
3239 new_line = line.clone()
3240 for (i, leaf) in enumerate(LL):
3242 new_line.append(string_leaf)
3244 if string_idx <= i < string_idx + num_of_strings:
3245 for comment_leaf in line.comments_after(LL[i]):
3246 new_line.append(comment_leaf, preformatted=True)
3249 append_leaves(new_line, line, [leaf])
3251 self.add_custom_splits(string_leaf.value, custom_splits)
3255 def __validate_msg(line: Line, string_idx: int) -> TResult[None]:
3256 """Validate (M)erge (S)tring (G)roup
3258 Transform-time string validation logic for __merge_string_group(...).
3261 * Ok(None), if ALL validation checks (listed below) pass.
3263 * Err(CannotTransform), if any of the following are true:
3264 - The target string group does not contain ANY stand-alone comments.
3265 - The target string is not in a string group (i.e. it has no
3266 adjacent strings).
3267 - The string group has more than one inline comment.
3268 - The string group has an inline comment that appears to be a pragma.
3269 - The set of all string prefixes in the string group is of
3270 length greater than one and is not equal to {"", "f"}.
3271 - The string group consists of raw strings.
3273 # We first check for "inner" stand-alone comments (i.e. stand-alone
3274 # comments that have a string leaf before them AND after them).
3277 found_sa_comment = False
3278 is_valid_index = is_valid_index_factory(line.leaves)
3279 while is_valid_index(i) and line.leaves[i].type in [
3283 if line.leaves[i].type == STANDALONE_COMMENT:
3284 found_sa_comment = True
3285 elif found_sa_comment:
3287 "StringMerger does NOT merge string groups which contain "
3288 "stand-alone comments."
3293 num_of_inline_string_comments = 0
3294 set_of_prefixes = set()
3296 for leaf in line.leaves[string_idx:]:
3297 if leaf.type != token.STRING:
3298 # If the string group is trailed by a comma, we count the
3299 # comments trailing the comma to be one of the string group's
3300 # comments.
3301 if leaf.type == token.COMMA and id(leaf) in line.comments:
3302 num_of_inline_string_comments += 1
3305 if has_triple_quotes(leaf.value):
3306 return TErr("StringMerger does NOT merge multiline strings.")
3309 prefix = get_string_prefix(leaf.value)
3311 return TErr("StringMerger does NOT merge raw strings.")
3313 set_of_prefixes.add(prefix)
3315 if id(leaf) in line.comments:
3316 num_of_inline_string_comments += 1
3317 if contains_pragma_comment(line.comments[id(leaf)]):
3318 return TErr("Cannot merge strings which have pragma comments.")
3320 if num_of_strings < 2:
3322 f"Not enough strings to merge (num_of_strings={num_of_strings})."
3325 if num_of_inline_string_comments > 1:
3327 f"Too many inline string comments ({num_of_inline_string_comments})."
3330 if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}:
3331 return TErr(f"Too many different prefixes ({set_of_prefixes}).")
3336 class StringParenStripper(StringTransformer):
3337 """StringTransformer that strips surrounding parentheses from strings.
3340 The line contains a string which is surrounded by parentheses and:
3341 - The target string is NOT the only argument to a function call.
3342 - The target string is NOT a "pointless" string.
3343 - If the target string contains a PERCENT, the brackets are not
3344 preceded or followed by an operator with higher precedence than
3348 The parentheses mentioned in the 'Requirements' section are stripped.
3351 StringParenStripper has its own inherent usefulness, but it is also
3352 relied on to clean up the parentheses created by StringParenWrapper (in
3353 the event that they are no longer needed).
3356 def do_match(self, line: Line) -> TMatchResult:
3359 is_valid_index = is_valid_index_factory(LL)
3361 for (idx, leaf) in enumerate(LL):
3362 # Should be a string...
3363 if leaf.type != token.STRING:
3366 # If this is a "pointless" string...
3369 and leaf.parent.parent
3370 and leaf.parent.parent.type == syms.simple_stmt
3374 # Should be preceded by a non-empty LPAR...
3376 not is_valid_index(idx - 1)
3377 or LL[idx - 1].type != token.LPAR
3378 or is_empty_lpar(LL[idx - 1])
3382 # That LPAR should NOT be preceded by a function name or a closing
3383 # bracket (which could be a function which returns a function or a
3384 # list/dictionary that contains a function)...
3385 if is_valid_index(idx - 2) and (
3386 LL[idx - 2].type == token.NAME or LL[idx - 2].type in CLOSING_BRACKETS
3392 # Skip the string trailer, if one exists.
3393 string_parser = StringParser()
3394 next_idx = string_parser.parse(LL, string_idx)
3396 # if the leaves in the parsed string include a PERCENT, we need to
3397 # make sure the initial LPAR is NOT preceded by an operator with
3398 # higher or equal precedence to PERCENT
3399 if is_valid_index(idx - 2):
3400 # mypy can't quite follow unless we name this
3401 before_lpar = LL[idx - 2]
3402 if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and (
3419 # only unary PLUS/MINUS
3421 and before_lpar.parent.type == syms.factor
3422 and (before_lpar.type in {token.PLUS, token.MINUS})
3427 # Should be followed by a non-empty RPAR...
3429 is_valid_index(next_idx)
3430 and LL[next_idx].type == token.RPAR
3431 and not is_empty_rpar(LL[next_idx])
3433 # That RPAR should NOT be followed by anything with higher
3434 # precedence than PERCENT
3435 if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in {
3443 return Ok(string_idx)
3445 return TErr("This line has no strings wrapped in parens.")
3447 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
3450 string_parser = StringParser()
3451 rpar_idx = string_parser.parse(LL, string_idx)
3453 for leaf in (LL[string_idx - 1], LL[rpar_idx]):
3454 if line.comments_after(leaf):
3456 "Will not strip parentheses which have comments attached to them."
3460 new_line = line.clone()
3461 new_line.comments = line.comments.copy()
3463 append_leaves(new_line, line, LL[: string_idx - 1])
3464 except BracketMatchError:
3465 # HACK: I believe there is currently a bug somewhere in
3466 # right_hand_split() that is causing brackets to not be tracked
3467 # properly by a shared BracketTracker.
3468 append_leaves(new_line, line, LL[: string_idx - 1], preformatted=True)
3470 string_leaf = Leaf(token.STRING, LL[string_idx].value)
3471 LL[string_idx - 1].remove()
3472 replace_child(LL[string_idx], string_leaf)
3473 new_line.append(string_leaf)
3476 new_line, line, LL[string_idx + 1 : rpar_idx] + LL[rpar_idx + 1 :]
3479 LL[rpar_idx].remove()
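# The net effect of StringParenStripper (a sketch): a line such as
#
#     value = ("a string that does not need wrapping")
#
# is rewritten as
#
#     value = "a string that does not need wrapping"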
3484 class BaseStringSplitter(StringTransformer):
3486 Abstract class for StringTransformers which transform a Line's strings by splitting
3487 them or placing them on their own lines where necessary to avoid going over
3488 the configured line length.
3491 * The target string value is responsible for the line going over the
3492 line length limit. It follows that after all of black's other line
3493 split methods have been exhausted, this line (or one of the resulting
3494 lines after all line splits are performed) would still be over the
3495 line_length limit unless we split this string.
3497 * The target string is NOT a "pointless" string (i.e. a string that has
3498 no parent or siblings).
3500 * The target string is not followed by an inline comment that appears
3503 * The target string is not a multiline (i.e. triple-quote) string.
3507 def do_splitter_match(self, line: Line) -> TMatchResult:
3509 BaseStringSplitter asks its clients to override this method instead of
3510 `StringTransformer.do_match(...)`.
3512 Follows the same protocol as `StringTransformer.do_match(...)`.
3514 Refer to `help(StringTransformer.do_match)` for more information.
3517 def do_match(self, line: Line) -> TMatchResult:
3518 match_result = self.do_splitter_match(line)
3519 if isinstance(match_result, Err):
3522 string_idx = match_result.ok()
3523 vresult = self.__validate(line, string_idx)
3524 if isinstance(vresult, Err):
3529 def __validate(self, line: Line, string_idx: int) -> TResult[None]:
3531 Checks that @line meets all of the requirements listed in this class's
3532 docstring. Refer to `help(BaseStringSplitter)` for a detailed
3533 description of those requirements.
3536 * Ok(None), if ALL of the requirements are met.
3538 * Err(CannotTransform), if ANY of the requirements are NOT met.
3542 string_leaf = LL[string_idx]
3544 max_string_length = self.__get_max_string_length(line, string_idx)
3545 if len(string_leaf.value) <= max_string_length:
3547 "The string itself is not what is causing this line to be too long."
3550 if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [
3555 f"This string ({string_leaf.value}) appears to be pointless (i.e. has"
3559 if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment(
3560 line.comments[id(line.leaves[string_idx])]
3563 "Line appears to end with an inline pragma comment. Splitting the line"
3564 " could modify the pragma's behavior."
3567 if has_triple_quotes(string_leaf.value):
3568 return TErr("We cannot split multiline strings.")
3572 def __get_max_string_length(self, line: Line, string_idx: int) -> int:
3574 Calculates the max string length used when attempting to determine
3575 whether or not the target string is responsible for causing the line to
3576 go over the line length limit.
3578 WARNING: This method is tightly coupled to both StringSplitter and
3579 (especially) StringParenWrapper. There is probably a better way to
3580 accomplish what is being done here.
3583 max_string_length: such that `len(line.leaves[string_idx].value) >
3584 max_string_length` implies that the target string IS responsible
3585 for causing this line to exceed the line length limit.
3589 is_valid_index = is_valid_index_factory(LL)
3591 # We use the shorthand "WMA4" in comments to abbreviate "We must
3592 # account for". When giving examples, we use STRING to mean some/any
3593 # valid string.
3595 # Finally, we use the following convenience variables:
3597 # P: The leaf that is before the target string leaf.
3598 # N: The leaf that is after the target string leaf.
3599 # NN: The leaf that is after N.
3601 # WMA4 the whitespace at the beginning of the line.
3602 offset = line.depth * 4
3604 if is_valid_index(string_idx - 1):
3605 p_idx = string_idx - 1
3607 LL[string_idx - 1].type == token.LPAR
3608 and LL[string_idx - 1].value == ""
3611 # If the previous leaf is an empty LPAR placeholder, we should skip it.
3615 if P.type == token.PLUS:
3616 # WMA4 a space and a '+' character (e.g. `+ STRING`).
3619 if P.type == token.COMMA:
3620 # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`].
3623 if P.type in [token.COLON, token.EQUAL, token.NAME]:
3624 # This conditional branch is meant to handle dictionary keys,
3625 # variable assignments, 'return STRING' statement lines, and
3626 # 'else STRING' ternary expression lines.
3628 # WMA4 a single space.
3631 # WMA4 the lengths of any leaves that came before that space,
3632 # but after any closing bracket before that space.
3633 for leaf in reversed(LL[: p_idx + 1]):
3634 offset += len(str(leaf))
3635 if leaf.type in CLOSING_BRACKETS:
3638 if is_valid_index(string_idx + 1):
3639 N = LL[string_idx + 1]
3640 if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2:
3641 # If the next leaf is an empty RPAR placeholder, we should skip it.
3642 N = LL[string_idx + 2]
3644 if N.type == token.COMMA:
3645 # WMA4 a single comma at the end of the string (e.g `STRING,`).
3648 if is_valid_index(string_idx + 2):
3649 NN = LL[string_idx + 2]
3651 if N.type == token.DOT and NN.type == token.NAME:
3652 # This conditional branch is meant to handle method calls invoked
3653 # off of a string literal up to and including the LPAR character.
3655 # WMA4 the '.' character.
3659 is_valid_index(string_idx + 3)
3660 and LL[string_idx + 3].type == token.LPAR
3662 # WMA4 the left parenthesis character.
3665 # WMA4 the length of the method's name.
3666 offset += len(NN.value)
3668 has_comments = False
3669 for comment_leaf in line.comments_after(LL[string_idx]):
3670 if not has_comments:
3672 # WMA4 two spaces before the '#' character.
3675 # WMA4 the length of the inline comment.
3676 offset += len(comment_leaf.value)
3678 max_string_length = self.line_length - offset
3679 return max_string_length
3682 class StringSplitter(CustomSplitMapMixin, BaseStringSplitter):
3684 StringTransformer that splits "atom" strings (i.e. strings which exist on
3685 lines by themselves).
3688 * The line consists ONLY of a single string (with the exception of a
3689 '+' symbol which MAY exist at the start of the line), MAYBE a string
3690 trailer, and MAYBE a trailing comma.
3692 * All of the requirements listed in BaseStringSplitter's docstring.
3695 The string mentioned in the 'Requirements' section is split into as
3696 many substrings as necessary to adhere to the configured line length.
3698 In the final set of substrings, no substring should be smaller than
3699 MIN_SUBSTR_SIZE characters.
3701 The string will ONLY be split on spaces (i.e. each new substring should
3702 start with a space). Note that the string will NOT be split on a space
3703 which is escaped with a backslash.
3705 If the string is an f-string, it will NOT be split in the middle of an
3706 f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x
3707 else bar()} is an f-expression).
3709 If the string that is being split has an associated set of custom split
3710 records and those custom splits will NOT result in any line going over
3711 the configured line length, those custom splits are used. Otherwise the
3712 string is split as late as possible (from left-to-right) while still
3713 adhering to the transformation rules listed above.
3716 StringSplitter relies on StringMerger to construct the appropriate
3717 CustomSplit objects and add them to the custom split map.
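# A sketch of the transformation (assuming a small enough line length): an
# atom string such as
#
#     "one two three four five six seven"
#
# is split into adjacent substrings, each new substring beginning with the
# space it was split on, roughly:
#
#     "one two three four"
#     " five six seven"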
3721 # Matches an "f-expression" (e.g. {var}) that might be found in an f-string.
3723 (?<!\{) (?:\{\{)* \{ (?!\{)
3730 (?<!\}) \} (?:\}\})* (?!\})
3733 def do_splitter_match(self, line: Line) -> TMatchResult:
3736 is_valid_index = is_valid_index_factory(LL)
3740 # The first leaf MAY be a '+' symbol...
3741 if is_valid_index(idx) and LL[idx].type == token.PLUS:
3744 # The next/first leaf MAY be an empty LPAR...
3745 if is_valid_index(idx) and is_empty_lpar(LL[idx]):
3748 # The next/first leaf MUST be a string...
3749 if not is_valid_index(idx) or LL[idx].type != token.STRING:
3750 return TErr("Line does not start with a string.")
3754 # Skip the string trailer, if one exists.
3755 string_parser = StringParser()
3756 idx = string_parser.parse(LL, string_idx)
3758 # That string MAY be followed by an empty RPAR...
3759 if is_valid_index(idx) and is_empty_rpar(LL[idx]):
3762 # That string / empty RPAR leaf MAY be followed by a comma...
3763 if is_valid_index(idx) and LL[idx].type == token.COMMA:
3766 # But no more leaves are allowed...
3767 if is_valid_index(idx):
3768 return TErr("This line does not end with a string.")
3770 return Ok(string_idx)
3772 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
3775 QUOTE = LL[string_idx].value[-1]
3777 is_valid_index = is_valid_index_factory(LL)
3778 insert_str_child = insert_str_child_factory(LL[string_idx])
3780 prefix = get_string_prefix(LL[string_idx].value)
3782 # We MAY choose to drop the 'f' prefix from substrings that don't
3783 # contain any f-expressions, but ONLY if the original f-string
3784 # contains at least one f-expression. Otherwise, we will alter the AST
3785 # of the program.
3786 drop_pointless_f_prefix = ("f" in prefix) and re.search(
3787 self.RE_FEXPR, LL[string_idx].value, re.VERBOSE
3790 first_string_line = True
3791 starts_with_plus = LL[0].type == token.PLUS
3793 def line_needs_plus() -> bool:
3794 return first_string_line and starts_with_plus
3796 def maybe_append_plus(new_line: Line) -> None:
3799 If @line starts with a plus and this is the first line we are
3800 constructing, this function appends a PLUS leaf to @new_line
3801 and replaces the old PLUS leaf in the node structure. Otherwise
3802 this function does nothing.
3804 if line_needs_plus():
3805 plus_leaf = Leaf(token.PLUS, "+")
3806 replace_child(LL[0], plus_leaf)
3807 new_line.append(plus_leaf)
3810 is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA
3813 def max_last_string() -> int:
3816 The max allowed length of the string value used for the last
3817 line we will construct.
3819 result = self.line_length
3820 result -= line.depth * 4
3821 result -= 1 if ends_with_comma else 0
3822 result -= 2 if line_needs_plus() else 0
3825 # --- Calculate Max Break Index (for string value)
3826 # We start with the line length limit
3827 max_break_idx = self.line_length
3828 # The last index of a string of length N is N-1.
3830 # Leading whitespace is not present in the string value (e.g. Leaf.value).
3831 max_break_idx -= line.depth * 4
3832 if max_break_idx < 0:
3834 f"Unable to split {LL[string_idx].value} at such high of a line depth:"
3839 # Check if StringMerger registered any custom splits.
3840 custom_splits = self.pop_custom_splits(LL[string_idx].value)
3841 # We use them ONLY if none of them would produce lines that exceed the
3842 # line length limit.
3843 use_custom_breakpoints = bool(
3845 and all(csplit.break_idx <= max_break_idx for csplit in custom_splits)
3848 # Temporary storage for the remaining chunk of the string line that
3849 # can't fit onto the line currently being constructed.
3850 rest_value = LL[string_idx].value
3852 def more_splits_should_be_made() -> bool:
3855 True iff `rest_value` (the remaining string value from the last
3856 split), should be split again.
3858 if use_custom_breakpoints:
3859 return len(custom_splits) > 1
3861 return len(rest_value) > max_last_string()
3863 string_line_results: List[Ok[Line]] = []
3864 while more_splits_should_be_made():
3865 if use_custom_breakpoints:
3866 # Custom User Split (manual)
3867 csplit = custom_splits.pop(0)
3868 break_idx = csplit.break_idx
3870 # Algorithmic Split (automatic)
3871 max_bidx = max_break_idx - 2 if line_needs_plus() else max_break_idx
3872 maybe_break_idx = self.__get_break_idx(rest_value, max_bidx)
3873 if maybe_break_idx is None:
3874 # If we are unable to algorithmically determine a good split
3875 # and this string has custom splits registered to it, we
3876 # fall back to using them--which means we have to start
3877 # over from the beginning.
3879 rest_value = LL[string_idx].value
3880 string_line_results = []
3881 first_string_line = True
3882 use_custom_breakpoints = True
3885 # Otherwise, we stop splitting here.
3888 break_idx = maybe_break_idx
3890 # --- Construct `next_value`
3891 next_value = rest_value[:break_idx] + QUOTE
3893 # Are we allowed to try to drop a pointless 'f' prefix?
3894 drop_pointless_f_prefix
3895 # If we are, will we be successful?
3896 and next_value != self.__normalize_f_string(next_value, prefix)
3898 # If the current custom split did NOT originally use a prefix,
3899 # then `csplit.break_idx` will be off by one after removing
3900 # the 'f' prefix.
3903 if use_custom_breakpoints and not csplit.has_prefix
3906 next_value = rest_value[:break_idx] + QUOTE
3907 next_value = self.__normalize_f_string(next_value, prefix)
3909 # --- Construct `next_leaf`
3910 next_leaf = Leaf(token.STRING, next_value)
3911 insert_str_child(next_leaf)
3912 self.__maybe_normalize_string_quotes(next_leaf)
3914 # --- Construct `next_line`
3915 next_line = line.clone()
3916 maybe_append_plus(next_line)
3917 next_line.append(next_leaf)
3918 string_line_results.append(Ok(next_line))
3920 rest_value = prefix + QUOTE + rest_value[break_idx:]
3921 first_string_line = False
3923 yield from string_line_results
3925 if drop_pointless_f_prefix:
3926 rest_value = self.__normalize_f_string(rest_value, prefix)
3928 rest_leaf = Leaf(token.STRING, rest_value)
3929 insert_str_child(rest_leaf)
3931 # NOTE: I could not find a test case that verifies that the following
3932 # line is actually necessary, but it seems to be. Otherwise we risk
3933 # not normalizing the last substring, right?
3934 self.__maybe_normalize_string_quotes(rest_leaf)
3936 last_line = line.clone()
3937 maybe_append_plus(last_line)
3939 # If there are any leaves to the right of the target string...
3940 if is_valid_index(string_idx + 1):
3941 # We use `temp_value` here to determine how long the last line
3942 # would be if we were to append all the leaves to the right of the
3943 # target string to the last string line.
3944 temp_value = rest_value
3945 for leaf in LL[string_idx + 1 :]:
3946 temp_value += str(leaf)
3947 if leaf.type == token.LPAR:
3950 # Try to fit them all on the same line with the last substring...
3952 len(temp_value) <= max_last_string()
3953 or LL[string_idx + 1].type == token.COMMA
3955 last_line.append(rest_leaf)
3956 append_leaves(last_line, line, LL[string_idx + 1 :])
3958 # Otherwise, place the last substring on one line and everything
3959 # else on a line below that...
3961 last_line.append(rest_leaf)
3964 non_string_line = line.clone()
3965 append_leaves(non_string_line, line, LL[string_idx + 1 :])
3966 yield Ok(non_string_line)
3967 # Else the target string was the last leaf...
3969 last_line.append(rest_leaf)
3970 last_line.comments = line.comments.copy()
3973 def __get_break_idx(self, string: str, max_break_idx: int) -> Optional[int]:
3975 This method contains the algorithm that StringSplitter uses to
3976 determine which character to split each string at.
3979 @string: The substring that we are attempting to split.
3980 @max_break_idx: The ideal break index. We will return this value if it
3981 meets all the necessary conditions. In the likely event that it
3982 doesn't, we will try to find the closest index BELOW @max_break_idx
3983 that does. If that fails, we will expand our search by also
3984 considering all valid indices ABOVE @max_break_idx.
3987 * assert_is_leaf_string(@string)
3988 * 0 <= @max_break_idx < len(@string)
3991 break_idx, if an index is able to be found that meets all of the
3992 conditions listed in the 'Transformations' section of this class's
3997 is_valid_index = is_valid_index_factory(string)
3999 assert is_valid_index(max_break_idx)
4000 assert_is_leaf_string(string)
4002 _fexpr_slices: Optional[List[Tuple[Index, Index]]] = None
4004 def fexpr_slices() -> Iterator[Tuple[Index, Index]]:
4007 All ranges of @string which, if @string were to be split there,
4008 would result in the splitting of an f-expression (which is NOT
4009 allowed).
4011 nonlocal _fexpr_slices
4013 if _fexpr_slices is None:
4015 for match in re.finditer(self.RE_FEXPR, string, re.VERBOSE):
4016 _fexpr_slices.append(match.span())
4018 yield from _fexpr_slices
4020 is_fstring = "f" in get_string_prefix(string)
4022 def breaks_fstring_expression(i: Index) -> bool:
4025 True iff returning @i would result in the splitting of an
4026 f-expression (which is NOT allowed).
4031 for (start, end) in fexpr_slices():
4032 if start <= i < end:
4037 def passes_all_checks(i: Index) -> bool:
4040 True iff ALL of the conditions listed in the 'Transformations'
4041 section of this class's docstring would be met by returning @i.
4043 is_space = string[i] == " "
4045 is_not_escaped = True
4047 while is_valid_index(j) and string[j] == "\\":
4048 is_not_escaped = not is_not_escaped
4052 len(string[i:]) >= self.MIN_SUBSTR_SIZE
4053 and len(string[:i]) >= self.MIN_SUBSTR_SIZE
4059 and not breaks_fstring_expression(i)
4062 # First, we check all indices BELOW @max_break_idx.
4063 break_idx = max_break_idx
4064 while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx):
4067 if not passes_all_checks(break_idx):
4068 # If that fails, we check all indices ABOVE @max_break_idx.
4070 # If we are able to find a valid index here, the next line is going
4071 # to be longer than the specified line length, but it's probably
4072 # better than doing nothing at all.
4073 break_idx = max_break_idx + 1
4074 while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx):
4077 if not is_valid_index(break_idx) or not passes_all_checks(break_idx):
4082 def __maybe_normalize_string_quotes(self, leaf: Leaf) -> None:
4083 if self.normalize_strings:
4084 normalize_string_quotes(leaf)
4086 def __normalize_f_string(self, string: str, prefix: str) -> str:
4089 * assert_is_leaf_string(@string)
4092 * If @string is an f-string that contains no f-expressions, we
4093 return a string identical to @string except that the 'f' prefix
4094 has been stripped and all double braces (i.e. '{{' or '}}') have
4095 been normalized (i.e. turned into '{' or '}').
4097 * Otherwise, we return @string.
4099 assert_is_leaf_string(string)
4101 if "f" in prefix and not re.search(self.RE_FEXPR, string, re.VERBOSE):
4102 new_prefix = prefix.replace("f", "")
4104 temp = string[len(prefix) :]
4105 temp = re.sub(r"\{\{", "{", temp)
4106 temp = re.sub(r"\}\}", "}", temp)
4109 return f"{new_prefix}{new_string}"
4114 class StringParenWrapper(CustomSplitMapMixin, BaseStringSplitter):
4116 StringTransformer that splits non-"atom" strings (i.e. strings that do not
4117 exist on lines by themselves).
4120 All of the requirements listed in BaseStringSplitter's docstring in
4121 addition to the requirements listed below:
4123 * The line is a return/yield statement, which returns/yields a string.
4125 * The line is part of a ternary expression (e.g. `x = y if cond else
4126 z`) such that the line starts with `else <string>`, where <string> is
4129 * The line is an assert statement, which ends with a string.
4131 * The line is an assignment statement (e.g. `x = <string>` or `x +=
4132 <string>`) such that the variable is being assigned the value of some
4135 * The line is a dictionary key assignment where some valid key is being
4136 assigned the value of some string.
4139 The chosen string is wrapped in parentheses and then split at the LPAR.
4141 We then have one line which ends with an LPAR and another line that
4142 starts with the chosen string. The latter line is then split again at
4143 the RPAR. This results in the RPAR (and possibly a trailing comma)
4144 being placed on its own line.
4146 NOTE: If any leaves exist to the right of the chosen string (except
4147 for a trailing comma, which would be placed after the RPAR), those
4148 leaves are placed inside the parentheses. In effect, the chosen
4149 string is not necessarily being "wrapped" by parentheses. We can,
4150 however, count on the LPAR being placed directly before the chosen
4153 In other words, StringParenWrapper creates "atom" strings. These
4154 can then be split again by StringSplitter, if necessary.
4157 In the event that a string line split by StringParenWrapper is
4158 changed such that it no longer needs to be given its own line,
4159 StringParenWrapper relies on StringParenStripper to clean up the
4160 parentheses it created.
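# The net effect of StringParenWrapper (a sketch, assuming the string
# exceeds the line length): a statement such as
#
#     return "this is a fairly long string"
#
# is rewritten (before StringSplitter runs further) as
#
#     return (
#         "this is a fairly long string"
#     )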
4163 def do_splitter_match(self, line: Line) -> TMatchResult:
4167 self._return_match(LL)
4168 or self._else_match(LL)
4169 or self._assert_match(LL)
4170 or self._assign_match(LL)
4171 or self._dict_match(LL)
4174 if string_idx is not None:
4175 string_value = line.leaves[string_idx].value
4176 # If the string has no spaces...
4177 if " " not in string_value:
4178 # And will still violate the line length limit when split...
4179 max_string_length = self.line_length - ((line.depth + 1) * 4)
4180 if len(string_value) > max_string_length:
4181 # And has no associated custom splits...
4182 if not self.has_custom_splits(string_value):
4183 # Then we should NOT put this string on its own line.
4185 "We do not wrap long strings in parentheses when the"
4186 " resultant line would still be over the specified line"
4187 " length and can't be split further by StringSplitter."
4189 return Ok(string_idx)
4191 return TErr("This line does not contain any non-atomic strings.")
4194 def _return_match(LL: List[Leaf]) -> Optional[int]:
4197 string_idx such that @LL[string_idx] is equal to our target (i.e.
4198 matched) string, if this line matches the return/yield statement
4199 requirements listed in the 'Requirements' section of this class's
4204 # If this line is part of a return/yield statement and the first leaf
4205 # contains either the "return" or "yield" keywords...
4206 if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
4208 ].value in ["return", "yield"]:
4209 is_valid_index = is_valid_index_factory(LL)
4211 idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
4212 # The next visible leaf MUST contain a string...
4213 if is_valid_index(idx) and LL[idx].type == token.STRING:
4219 def _else_match(LL: List[Leaf]) -> Optional[int]:
4222 string_idx such that @LL[string_idx] is equal to our target (i.e.
4223 matched) string, if this line matches the ternary expression
4224 requirements listed in the 'Requirements' section of this class's
4229 # If this line is part of a ternary expression and the first leaf
4230 # contains the "else" keyword...
4232 parent_type(LL[0]) == syms.test
4233 and LL[0].type == token.NAME
4234 and LL[0].value == "else"
4236 is_valid_index = is_valid_index_factory(LL)
4238 idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
4239 # The next visible leaf MUST contain a string...
4240 if is_valid_index(idx) and LL[idx].type == token.STRING:
4246 def _assert_match(LL: List[Leaf]) -> Optional[int]:
4249 string_idx such that @LL[string_idx] is equal to our target (i.e.
4250 matched) string, if this line matches the assert statement
4251 requirements listed in the 'Requirements' section of this class's
4256 # If this line is part of an assert statement and the first leaf
4257 # contains the "assert" keyword...
4258 if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert":
4259 is_valid_index = is_valid_index_factory(LL)
4261 for (i, leaf) in enumerate(LL):
4262 # We MUST find a comma...
4263 if leaf.type == token.COMMA:
4264 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
4266 # That comma MUST be followed by a string...
4267 if is_valid_index(idx) and LL[idx].type == token.STRING:
4270 # Skip the string trailer, if one exists.
4271 string_parser = StringParser()
4272 idx = string_parser.parse(LL, string_idx)
4274 # But no more leaves are allowed...
4275 if not is_valid_index(idx):
4281 def _assign_match(LL: List[Leaf]) -> Optional[int]:
4284 string_idx such that @LL[string_idx] is equal to our target (i.e.
4285 matched) string, if this line matches the assignment statement
4286 requirements listed in the 'Requirements' section of this class's
4291 # If this line is part of an expression statement or is a function
4292 # argument AND the first leaf contains a variable name...
4294 parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power]
4295 and LL[0].type == token.NAME
4297 is_valid_index = is_valid_index_factory(LL)
4299 for (i, leaf) in enumerate(LL):
4300 # We MUST find either an '=' or '+=' symbol...
4301 if leaf.type in [token.EQUAL, token.PLUSEQUAL]:
4302 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
4304 # That symbol MUST be followed by a string...
4305 if is_valid_index(idx) and LL[idx].type == token.STRING:
4308 # Skip the string trailer, if one exists.
4309 string_parser = StringParser()
4310 idx = string_parser.parse(LL, string_idx)
4312 # The next leaf MAY be a comma iff this line is part
4313 # of a function argument...
4315 parent_type(LL[0]) == syms.argument
4316 and is_valid_index(idx)
4317 and LL[idx].type == token.COMMA
4321 # But no more leaves are allowed...
4322 if not is_valid_index(idx):
4328 def _dict_match(LL: List[Leaf]) -> Optional[int]:
4331 string_idx such that @LL[string_idx] is equal to our target (i.e.
4332 matched) string, if this line matches the dictionary key assignment
4333 statement requirements listed in the 'Requirements' section of this
4334 class's docstring.
4338 # If this line is part of a dictionary key assignment...
4339 if syms.dictsetmaker in [parent_type(LL[0]), parent_type(LL[0].parent)]:
4340 is_valid_index = is_valid_index_factory(LL)
4342 for (i, leaf) in enumerate(LL):
4343 # We MUST find a colon...
4344 if leaf.type == token.COLON:
4345 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
4347 # That colon MUST be followed by a string...
4348 if is_valid_index(idx) and LL[idx].type == token.STRING:
4351 # Skip the string trailer, if one exists.
4352 string_parser = StringParser()
4353 idx = string_parser.parse(LL, string_idx)
4355 # That string MAY be followed by a comma...
4356 if is_valid_index(idx) and LL[idx].type == token.COMMA:
4359 # But no more leaves are allowed...
4360 if not is_valid_index(idx):
4365 def do_transform(self, line: Line, string_idx: int) -> Iterator[TResult[Line]]:
4368 is_valid_index = is_valid_index_factory(LL)
4369 insert_str_child = insert_str_child_factory(LL[string_idx])
4372 ends_with_comma = False
4373 if LL[comma_idx].type == token.COMMA:
4374 ends_with_comma = True
4376 leaves_to_steal_comments_from = [LL[string_idx]]
4378 leaves_to_steal_comments_from.append(LL[comma_idx])
4381 first_line = line.clone()
4382 left_leaves = LL[:string_idx]
4384 # We have to remember to account for (possibly invisible) LPAR and RPAR
4385 # leaves that already wrapped the target string. If these leaves do
4386 # exist, we will replace them with our own LPAR and RPAR leaves.
4387 old_parens_exist = False
4388 if left_leaves and left_leaves[-1].type == token.LPAR:
4389 old_parens_exist = True
4390 leaves_to_steal_comments_from.append(left_leaves[-1])
4393 append_leaves(first_line, line, left_leaves)
4395 lpar_leaf = Leaf(token.LPAR, "(")
4396 if old_parens_exist:
4397 replace_child(LL[string_idx - 1], lpar_leaf)
4399 insert_str_child(lpar_leaf)
4400 first_line.append(lpar_leaf)
4402 # We throw inline comments that were originally to the right of the
4403 # target string to the top line. They will now be shown to the right of
4405 for leaf in leaves_to_steal_comments_from:
4406 for comment_leaf in line.comments_after(leaf):
4407 first_line.append(comment_leaf, preformatted=True)
4409 yield Ok(first_line)
4411 # --- Middle (String) Line
4412 # We only need to yield one (possibly too long) string line, since the
4413 # `StringSplitter` will break it down further if necessary.
4414 string_value = LL[string_idx].value
4417 depth=line.depth + 1,
4418 inside_brackets=True,
4419 should_split_rhs=line.should_split_rhs,
4420 magic_trailing_comma=line.magic_trailing_comma,
4422 string_leaf = Leaf(token.STRING, string_value)
4423 insert_str_child(string_leaf)
4424 string_line.append(string_leaf)
4426 old_rpar_leaf = None
4427 if is_valid_index(string_idx + 1):
4428 right_leaves = LL[string_idx + 1 :]
4432 if old_parens_exist:
4434 right_leaves and right_leaves[-1].type == token.RPAR
4435 ), "Apparently, old parentheses do NOT exist?!"
4436 old_rpar_leaf = right_leaves.pop()
4438 append_leaves(string_line, line, right_leaves)
4440 yield Ok(string_line)
4443 last_line = line.clone()
4444 last_line.bracket_tracker = first_line.bracket_tracker
4446 new_rpar_leaf = Leaf(token.RPAR, ")")
4447 if old_rpar_leaf is not None:
4448 replace_child(old_rpar_leaf, new_rpar_leaf)
4450 insert_str_child(new_rpar_leaf)
4451 last_line.append(new_rpar_leaf)
4453 # If the target string ended with a comma, we place this comma to the
4454 # right of the RPAR on the last line.
4456 comma_leaf = Leaf(token.COMMA, ",")
4457 replace_child(LL[comma_idx], comma_leaf)
4458 last_line.append(comma_leaf)
4465 A state machine that aids in parsing a string's "trailer", which can be
4466 either non-existent, an old-style formatting sequence (e.g. `% varX` or `%
4467 (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX,
4470 NOTE: A new StringParser object MUST be instantiated for each string
4471 trailer we need to parse.
4474 We shall assume that `line` equals the `Line` object that corresponds
4475 to the following line of Python code:
4477 x = "Some {}.".format("String") + some_other_string
4480 Furthermore, we will assume that `string_idx` is some index such that:
4482 assert line.leaves[string_idx].value == "Some {}."
4485 The following code snippet then holds:
4487 string_parser = StringParser()
4488 idx = string_parser.parse(line.leaves, string_idx)
4489 assert line.leaves[idx].type == token.PLUS
4495 # String Parser States
4505 # Lookup Table for Next State
4506 _goto: Dict[Tuple[ParserState, NodeType], ParserState] = {
4507 # A string trailer may start with '.' OR '%'.
4508 (START, token.DOT): DOT,
4509 (START, token.PERCENT): PERCENT,
4510 (START, DEFAULT_TOKEN): DONE,
4511 # A '.' MUST be followed by an attribute or method name.
4512 (DOT, token.NAME): NAME,
4513 # A method name MUST be followed by an '(', whereas an attribute name
4514 # is the last symbol in the string trailer.
4515 (NAME, token.LPAR): LPAR,
4516 (NAME, DEFAULT_TOKEN): DONE,
4517 # A '%' symbol can be followed by an '(' or a single argument (e.g. a
4518 # string or variable name).
4519 (PERCENT, token.LPAR): LPAR,
4520 (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG,
4521 # If a '%' symbol is followed by a single argument, that argument is
4522 # the last leaf in the string trailer.
4523 (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE,
4524 # If present, a ')' symbol is the last symbol in a string trailer.
4525 # (NOTE: LPARS and nested RPARS are not included in this lookup table,
4526 # since they are treated as a special case by the parsing logic in this
4527 # class's implementation.)
4528 (RPAR, DEFAULT_TOKEN): DONE,
4531 def __init__(self) -> None:
4532 self._state = self.START
4533 self._unmatched_lpars = 0
4535 def parse(self, leaves: List[Leaf], string_idx: int) -> int:
4538 * @leaves[@string_idx].type == token.STRING
4541 The index directly after the last leaf which is a part of the string
4542 trailer, if a "trailer" exists.
4544 @string_idx + 1, if no string "trailer" exists.
4546 assert leaves[string_idx].type == token.STRING
4548 idx = string_idx + 1
4549 while idx < len(leaves) and self._next_state(leaves[idx]):
4553 def _next_state(self, leaf: Leaf) -> bool:
4556 * On the first call to this function, @leaf MUST be the leaf that
4557 was directly after the string leaf in question (e.g. if our target
4558 string is `line.leaves[i]`, then this method must first be called
4559 with `line.leaves[i + 1]`).
4560 * On each subsequent call to this function, the leaf parameter passed in
4561 MUST be the leaf directly following @leaf.
4564 True iff @leaf is a part of the string's trailer.
4566 # We ignore empty LPAR or RPAR leaves.
4567 if is_empty_par(leaf):
4570 next_token = leaf.type
4571 if next_token == token.LPAR:
4572 self._unmatched_lpars += 1
4574 current_state = self._state
4576 # The LPAR parser state is a special case. We will return True until we
4577 # find the matching RPAR token.
4578 if current_state == self.LPAR:
4579 if next_token == token.RPAR:
4580 self._unmatched_lpars -= 1
4581 if self._unmatched_lpars == 0:
4582 self._state = self.RPAR
4583 # Otherwise, we use a lookup table to determine the next state.
4585 # If the lookup table matches the current state to the next
4586 # token, we use the lookup table.
4587 if (current_state, next_token) in self._goto:
4588 self._state = self._goto[current_state, next_token]
4590 # Otherwise, we check if the current state was assigned a
4592 if (current_state, self.DEFAULT_TOKEN) in self._goto:
4593 self._state = self._goto[current_state, self.DEFAULT_TOKEN]
4594 # If no default has been assigned, then this parser has a logic
4597 raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
4599 if self._state == self.DONE:
4605 def TErr(err_msg: str) -> Err[CannotTransform]:
4608 Convenience function used when working with the TResult type.
4610 cant_transform = CannotTransform(err_msg)
4611 return Err(cant_transform)
4614 def contains_pragma_comment(comment_list: List[Leaf]) -> bool:
4617 True iff one of the comments in @comment_list is a pragma used by one
4618 of the more common static analysis tools for Python (e.g. mypy, flake8,
4621 for comment in comment_list:
4622 if comment.value.startswith(("# type:", "# noqa", "# pylint:")):
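# Illustrative sketch (hypothetical leaves, relying only on names defined in
# this module):
#
#   pragma = Leaf(token.COMMENT, "# noqa: E501")
#   plain = Leaf(token.COMMENT, "# just a note")
#   contains_pragma_comment([pragma])  # -> True
#   contains_pragma_comment([plain])   # -> False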
4628 def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]:
4630 Factory for a convenience function that is used to orphan @string_leaf
4631 and then insert multiple new leaves into the same part of the node
4632 structure that @string_leaf had originally occupied.
4635 Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N =
4636 string_leaf.parent`. Assume the node `N` has the following
4643 Leaf(STRING, '"foo"'),
4647 We then run the code snippet shown below.
4649 insert_str_child = insert_str_child_factory(string_leaf)
4651 lpar = Leaf(token.LPAR, '(')
4652 insert_str_child(lpar)
4654 bar = Leaf(token.STRING, '"bar"')
4655 insert_str_child(bar)
4657 rpar = Leaf(token.RPAR, ')')
4658 insert_str_child(rpar)
4661 After which point, it follows that `string_leaf.parent is None` and
4662 the node `N` now has the following structure:
4669 Leaf(STRING, '"bar"'),
4674 string_parent = string_leaf.parent
4675 string_child_idx = string_leaf.remove()
4677 def insert_str_child(child: LN) -> None:
4678 nonlocal string_child_idx
4680 assert string_parent is not None
4681 assert string_child_idx is not None
4683 string_parent.insert_child(string_child_idx, child)
4684 string_child_idx += 1
4686 return insert_str_child
4689 def has_triple_quotes(string: str) -> bool:
4692 True iff @string starts with three quotation characters.
4694 raw_string = string.lstrip(STRING_PREFIX_CHARS)
4695 return raw_string[:3] in {'"""', "'''"}
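# Illustrative examples (string values are hypothetical):
#
#   has_triple_quotes('"""docstring"""')  # -> True
#   has_triple_quotes("rb'''data'''")     # -> True; the prefix is stripped first
#   has_triple_quotes('"short"')          # -> False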
4698 def parent_type(node: Optional[LN]) -> Optional[NodeType]:
4701 @node.parent.type, if @node is not None and has a parent.
4705 if node is None or node.parent is None:
4708 return node.parent.type
4711 def is_empty_par(leaf: Leaf) -> bool:
4712 return is_empty_lpar(leaf) or is_empty_rpar(leaf)
4715 def is_empty_lpar(leaf: Leaf) -> bool:
4716 return leaf.type == token.LPAR and leaf.value == ""
4719 def is_empty_rpar(leaf: Leaf) -> bool:
4720 return leaf.type == token.RPAR and leaf.value == ""
4723 def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]:
4729 is_valid_index = is_valid_index_factory(my_list)
4731 assert is_valid_index(0)
4732 assert is_valid_index(2)
4734 assert not is_valid_index(3)
4735 assert not is_valid_index(-1)
4739 def is_valid_index(idx: int) -> bool:
4742 True iff @idx is non-negative AND seq[@idx] does NOT raise an
4745 return 0 <= idx < len(seq)
4747 return is_valid_index
4750 def line_to_string(line: Line) -> str:
4751 """Returns the string representation of @line.
4753 WARNING: This is known to be computationally expensive.
4755 return str(line).strip("\n")
4759 new_line: Line, old_line: Line, leaves: List[Leaf], preformatted: bool = False
4762 Append leaves (taken from @old_line) to @new_line, making sure to fix the
4763 underlying Node structure where appropriate.
4765 All of the leaves in @leaves are duplicated. The duplicates are then
4766 appended to @new_line and used to replace their originals in the underlying
4767 Node structure. Any comments attached to the old leaves are reattached to
4771 set(@leaves) is a subset of set(@old_line.leaves).
4773 for old_leaf in leaves:
4774 new_leaf = Leaf(old_leaf.type, old_leaf.value)
4775 replace_child(old_leaf, new_leaf)
4776 new_line.append(new_leaf, preformatted=preformatted)
4778 for comment_leaf in old_line.comments_after(old_leaf):
4779 new_line.append(comment_leaf, preformatted=True)
4782 def replace_child(old_child: LN, new_child: LN) -> None:
4785 * If @old_child.parent is set, replace @old_child with @new_child in
4786 @old_child's underlying Node structure.
4788 * Otherwise, this function does nothing.
4790 parent = old_child.parent
4794 child_idx = old_child.remove()
4795 if child_idx is not None:
4796 parent.insert_child(child_idx, new_child)
4799 def get_string_prefix(string: str) -> str:
4802 * assert_is_leaf_string(@string)
4805 @string's prefix (e.g. '', 'r', 'f', or 'rf').
4807 assert_is_leaf_string(string)
4811 while string[prefix_idx] in STRING_PREFIX_CHARS:
4812 prefix += string[prefix_idx].lower()
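# Illustrative examples (the returned prefix is always lowercased):
#
#   get_string_prefix('rb"payload"')  # -> "rb"
#   get_string_prefix('F"{x}"')       # -> "f"
#   get_string_prefix('"plain"')      # -> ""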
4818 def assert_is_leaf_string(string: str) -> None:
4820 Checks the pre-condition that @string has the format that you would expect
4821 of `leaf.value` where `leaf` is some Leaf such that `leaf.type ==
4822 token.STRING`. A more precise description of the pre-conditions that are
4823 checked is listed below.
4826 * @string starts with either ', ", <prefix>', or <prefix>" where
4827 `set(<prefix>)` is some subset of `set(STRING_PREFIX_CHARS)`.
4828 * @string ends with a quote character (' or ").
4831 AssertionError(...) if the pre-conditions listed above are not
4834 dquote_idx = string.find('"')
4835 squote_idx = string.find("'")
4836 if -1 in [dquote_idx, squote_idx]:
4837 quote_idx = max(dquote_idx, squote_idx)
4839 quote_idx = min(squote_idx, dquote_idx)
4842 0 <= quote_idx < len(string) - 1
4843 ), f"{string!r} is missing a starting quote character (' or \")."
4844 assert string[-1] in (
4847 ), f"{string!r} is missing an ending quote character (' or \")."
4848 assert set(string[:quote_idx]).issubset(
4849 set(STRING_PREFIX_CHARS)
4850 ), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}."
4853 def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]:
4854 """Split line into many lines, starting with the first matching bracket pair.
4856 Note: this usually looks weird; only use it for function definitions.
4857 Prefer RHS otherwise. This is why this function is not symmetrical with
4858 :func:`right_hand_split` which also handles optional parentheses.
4860 tail_leaves: List[Leaf] = []
4861 body_leaves: List[Leaf] = []
4862 head_leaves: List[Leaf] = []
4863 current_leaves = head_leaves
4864 matching_bracket: Optional[Leaf] = None
4865 for leaf in line.leaves:
4867 current_leaves is body_leaves
4868 and leaf.type in CLOSING_BRACKETS
4869 and leaf.opening_bracket is matching_bracket
4871 current_leaves = tail_leaves if body_leaves else head_leaves
4872 current_leaves.append(leaf)
4873 if current_leaves is head_leaves:
4874 if leaf.type in OPENING_BRACKETS:
4875 matching_bracket = leaf
4876 current_leaves = body_leaves
4877 if not matching_bracket:
4878 raise CannotSplit("No brackets found")
4880 head = bracket_split_build_line(head_leaves, line, matching_bracket)
4881 body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True)
4882 tail = bracket_split_build_line(tail_leaves, line, matching_bracket)
4883 bracket_split_succeeded_or_raise(head, body, tail)
4884 for result in (head, body, tail):
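# Illustrative sketch (hypothetical input): splitting the too-long line
#
#     def process(self, user_id: int, payload: str) -> None:
#
# on its first bracket pair yields three lines: the head "def process(",
# the one-indented body "self, user_id: int, payload: str", and the tail
# ") -> None:".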
4889 def right_hand_split(
4892 features: Collection[Feature] = (),
4893 omit: Collection[LeafID] = (),
4894 ) -> Iterator[Line]:
4895 """Split line into many lines, starting with the last matching bracket pair.
4897 If the split was by optional parentheses, attempt splitting without them, too.
4898 `omit` is a collection of closing bracket IDs that shouldn't be considered for
4901 Note: running this function modifies `bracket_depth` on the leaves of `line`.
4903 tail_leaves: List[Leaf] = []
4904 body_leaves: List[Leaf] = []
4905 head_leaves: List[Leaf] = []
4906 current_leaves = tail_leaves
4907 opening_bracket: Optional[Leaf] = None
4908 closing_bracket: Optional[Leaf] = None
4909 for leaf in reversed(line.leaves):
4910 if current_leaves is body_leaves:
4911 if leaf is opening_bracket:
4912 current_leaves = head_leaves if body_leaves else tail_leaves
4913 current_leaves.append(leaf)
4914 if current_leaves is tail_leaves:
4915 if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
4916 opening_bracket = leaf.opening_bracket
4917 closing_bracket = leaf
4918 current_leaves = body_leaves
4919 if not (opening_bracket and closing_bracket and head_leaves):
4920 # If there is no opening_bracket or closing_bracket, the split failed and
4921 # all content is in the tail. Otherwise, if `head_leaves` are empty, it means
4922 # the matching `opening_bracket` wasn't available on `line` anymore.
4923 raise CannotSplit("No brackets found")
4925 tail_leaves.reverse()
4926 body_leaves.reverse()
4927 head_leaves.reverse()
4928 head = bracket_split_build_line(head_leaves, line, opening_bracket)
4929 body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True)
4930 tail = bracket_split_build_line(tail_leaves, line, opening_bracket)
4931 bracket_split_succeeded_or_raise(head, body, tail)
4933 Feature.FORCE_OPTIONAL_PARENTHESES not in features
4934 # the opening bracket is an optional paren
4935 and opening_bracket.type == token.LPAR
4936 and not opening_bracket.value
4937 # the closing bracket is an optional paren
4938 and closing_bracket.type == token.RPAR
4939 and not closing_bracket.value
4940 # it's not an import (optional parens are the only thing we can split on
4941 # in this case; attempting a split without them is a waste of time)
4942 and not line.is_import
4943 # there are no standalone comments in the body
4944 and not body.contains_standalone_comments(0)
4945 # and we can actually remove the parens
4946 and can_omit_invisible_parens(body, line_length, omit_on_explode=omit)
4948 omit = {id(closing_bracket), *omit}
4950 yield from right_hand_split(line, line_length, features=features, omit=omit)
4956 or is_line_short_enough(body, line_length=line_length)
4959 "Splitting failed, body is still too long and can't be split."
4962 elif head.contains_multiline_strings() or tail.contains_multiline_strings():
4964 "The current optional pair of parentheses is bound to fail to"
4965 " satisfy the splitting algorithm because the head or the tail"
4966 " contains multiline strings which by definition never fit one"
4970 ensure_visible(opening_bracket)
4971 ensure_visible(closing_bracket)
4972 for result in (head, body, tail):
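# Illustrative sketch (hypothetical input): splitting
#
#     result = compute(alpha, beta)
#
# on its last bracket pair yields the head "result = compute(", the body
# "alpha, beta", and the tail ")". When the brackets are optional (invisible)
# parentheses, the function first retries the split without them via the
# recursive call above.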
4977 def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
4978 """Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
4980 Do nothing otherwise.
4982 A left- or right-hand split is based on a pair of brackets. Content before
4983 (and including) the opening bracket is left on one line, content inside the
4984 brackets is put on a separate line, and finally content starting with and
4985 following the closing bracket is put on a separate line.
4987 Those are called `head`, `body`, and `tail`, respectively. If the split
4988 produced the same line (all content in `head`) or ended up with an empty `body`
4989 and the `tail` is just the closing bracket, then it's considered failed.
4991 tail_len = len(str(tail).strip())
4994 raise CannotSplit("Splitting brackets produced the same line")
4998 f"Splitting brackets on an empty body to save {tail_len} characters is"
5003 def bracket_split_build_line(
5004 leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False
5006 """Return a new line with given `leaves` and respective comments from `original`.
5008 If `is_body` is True, the result line is one-indented inside brackets and as such
5009 has its first leaf's prefix normalized and a trailing comma added when expected.
5011 result = Line(mode=original.mode, depth=original.depth)
5013 result.inside_brackets = True
5016 # Since body is a new indent level, remove spurious leading whitespace.
5017 normalize_prefix(leaves[0], inside_brackets=True)
5018 # Ensure a trailing comma for imports and standalone function arguments, but
5019 # be careful not to add one after any comments or within type annotations.
5022 and opening_bracket.value == "("
5023 and not any(leaf.type == token.COMMA for leaf in leaves)
5026 if original.is_import or no_commas:
5027 for i in range(len(leaves) - 1, -1, -1):
5028 if leaves[i].type == STANDALONE_COMMENT:
5031 if leaves[i].type != token.COMMA:
5032 new_comma = Leaf(token.COMMA, ",")
5033 leaves.insert(i + 1, new_comma)
5038 result.append(leaf, preformatted=True)
5039 for comment_after in original.comments_after(leaf):
5040 result.append(comment_after, preformatted=True)
5041 if is_body and should_split_line(result, opening_bracket):
5042 result.should_split_rhs = True
5046 def dont_increase_indentation(split_func: Transformer) -> Transformer:
5047 """Normalize prefix of the first leaf in every line returned by `split_func`.
5049 This is a decorator over relevant split functions.
5053 def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
5054 for line in split_func(line, features):
5055 normalize_prefix(line.leaves[0], inside_brackets=True)
5058 return split_wrapper
5061 @dont_increase_indentation
5062 def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
5063 """Split according to delimiters of the highest priority.
5065 If the appropriate Features are given, the split will add trailing commas
5066 also in function signatures and calls that contain `*` and `**`.
5069 last_leaf = line.leaves[-1]
5071 raise CannotSplit("Line empty")
5073 bt = line.bracket_tracker
5075 delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
5077 raise CannotSplit("No delimiters found")
5079 if delimiter_priority == DOT_PRIORITY:
5080 if bt.delimiter_count_with_priority(delimiter_priority) == 1:
5081 raise CannotSplit("Splitting a single attribute from its owner looks wrong")
5083 current_line = Line(
5084 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
5086 lowest_depth = sys.maxsize
5087 trailing_comma_safe = True
5089 def append_to_line(leaf: Leaf) -> Iterator[Line]:
5090 """Append `leaf` to current line or to new line if appending impossible."""
5091 nonlocal current_line
5093 current_line.append_safe(leaf, preformatted=True)
5097 current_line = Line(
5098 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
5100 current_line.append(leaf)
5102 for leaf in line.leaves:
5103 yield from append_to_line(leaf)
5105 for comment_after in line.comments_after(leaf):
5106 yield from append_to_line(comment_after)
5108 lowest_depth = min(lowest_depth, leaf.bracket_depth)
5109 if leaf.bracket_depth == lowest_depth:
5110 if is_vararg(leaf, within={syms.typedargslist}):
5111 trailing_comma_safe = (
5112 trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features
5114 elif is_vararg(leaf, within={syms.arglist, syms.argument}):
5115 trailing_comma_safe = (
5116 trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features
5119 leaf_priority = bt.delimiters.get(id(leaf))
5120 if leaf_priority == delimiter_priority:
5123 current_line = Line(
5124 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
5129 and delimiter_priority == COMMA_PRIORITY
5130 and current_line.leaves[-1].type != token.COMMA
5131 and current_line.leaves[-1].type != STANDALONE_COMMENT
5133 new_comma = Leaf(token.COMMA, ",")
5134 current_line.append(new_comma)
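# Illustrative sketch (hypothetical input): for a bracketed body whose
# highest-priority delimiter is the comma, e.g.
#
#     alpha, beta, gamma
#
# delimiter_split yields "alpha,", "beta,", and "gamma," as separate lines,
# adding the final trailing comma only while trailing_comma_safe stayed True.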
5138 @dont_increase_indentation
5139 def standalone_comment_split(
5140 line: Line, features: Collection[Feature] = ()
5141 ) -> Iterator[Line]:
5142 """Split standalone comments from the rest of the line."""
5143 if not line.contains_standalone_comments(0):
5144 raise CannotSplit("Line does not have any standalone comments")
5146 current_line = Line(
5147 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
5150 def append_to_line(leaf: Leaf) -> Iterator[Line]:
5151 """Append `leaf` to current line or to new line if appending impossible."""
5152 nonlocal current_line
5154 current_line.append_safe(leaf, preformatted=True)
5158 current_line = Line(
5159 line.mode, depth=line.depth, inside_brackets=line.inside_brackets
5161 current_line.append(leaf)
5163 for leaf in line.leaves:
5164 yield from append_to_line(leaf)
5166 for comment_after in line.comments_after(leaf):
5167 yield from append_to_line(comment_after)
5173 def is_import(leaf: Leaf) -> bool:
5174 """Return True if the given leaf starts an import statement."""
5181 (v == "import" and p and p.type == syms.import_name)
5182 or (v == "from" and p and p.type == syms.import_from)
5187 def is_type_comment(leaf: Leaf, suffix: str = "") -> bool:
5188 """Return True if the given leaf is a special comment.
5189 Only returns True for type comments for now."""
5192 return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix)
5195 def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
5196 """Leave existing extra newlines if not `inside_brackets`. Remove everything
5199 Note: don't use backslashes for formatting or you'll lose your voting rights.
5201 if not inside_brackets:
5202 spl = leaf.prefix.split("#")
5203 if "\\" not in spl[0]:
5204 nl_count = spl[-1].count("\n")
5207 leaf.prefix = "\n" * nl_count
5213 def normalize_string_prefix(leaf: Leaf, remove_u_prefix: bool = False) -> None:
5214 """Make all string prefixes lowercase.
5216 If remove_u_prefix is given, also removes any u prefix from the string.
5218 Note: Mutates its argument.
5220 match = re.match(r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", leaf.value, re.DOTALL)
5221 assert match is not None, f"failed to match string {leaf.value!r}"
5222 orig_prefix = match.group(1)
5223 new_prefix = orig_prefix.replace("F", "f").replace("B", "b").replace("U", "u")
5225 new_prefix = new_prefix.replace("u", "")
5226 leaf.value = f"{new_prefix}{match.group(2)}"
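# Illustrative sketch (hypothetical leaves; mutates the leaf in place):
#
#   leaf = Leaf(token.STRING, 'F"hi"')
#   normalize_string_prefix(leaf)
#   assert leaf.value == 'f"hi"'
#
#   leaf = Leaf(token.STRING, 'u"hi"')
#   normalize_string_prefix(leaf, remove_u_prefix=True)
#   assert leaf.value == '"hi"'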
5229 def normalize_string_quotes(leaf: Leaf) -> None:
5230 """Prefer double quotes but only if it doesn't cause more escaping.
5232 Adds or removes backslashes as appropriate. Doesn't parse and fix
5233 strings nested in f-strings (yet).
5235 Note: Mutates its argument.
5237 value = leaf.value.lstrip(STRING_PREFIX_CHARS)
5238 if value[:3] == '"""':
5241 elif value[:3] == "'''":
5244 elif value[0] == '"':
5250 first_quote_pos = leaf.value.find(orig_quote)
5251 if first_quote_pos == -1:
5252 return # There's an internal error
5254 prefix = leaf.value[:first_quote_pos]
5255 unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
5256 escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
5257 escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}")
5258 body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)]
5259 if "r" in prefix.casefold():
5260 if unescaped_new_quote.search(body):
5261 # There's at least one unescaped new_quote in this raw string
5262 # so converting is impossible
5265 # Do not introduce or remove backslashes in raw strings
5268 # remove unnecessary escapes
5269 new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
5270 if body != new_body:
5271 # Consider the string without unnecessary escapes as the original
5273 leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}"
5274 new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
5275 new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)
5276 if "f" in prefix.casefold():
5277 matches = re.findall(
5279 (?:[^{]|^)\{ # start of the string or a non-{ followed by a single {
5280 ([^{].*?) # contents of the brackets except if begins with {{
5281 \}(?:[^}]|$) # A } followed by end of the string or a non-}
5288 # Do not introduce backslashes in interpolated expressions
5291 if new_quote == '"""' and new_body[-1:] == '"':
5293 new_body = new_body[:-1] + '\\"'
5294 orig_escape_count = body.count("\\")
5295 new_escape_count = new_body.count("\\")
5296 if new_escape_count > orig_escape_count:
5297 return # Do not introduce more escaping
5299 if new_escape_count == orig_escape_count and orig_quote == '"':
5300 return # Prefer double quotes
5302 leaf.value = f"{prefix}{new_quote}{new_body}{new_quote}"
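# Illustrative sketch (hypothetical leaves; mutates the leaf in place):
#
#   leaf = Leaf(token.STRING, "'hello'")
#   normalize_string_quotes(leaf)
#   assert leaf.value == '"hello"'
#
#   leaf = Leaf(token.STRING, '\'say "hi"\'')
#   normalize_string_quotes(leaf)  # left alone: double quotes would add escapes
#   assert leaf.value == '\'say "hi"\''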
5305 def normalize_numeric_literal(leaf: Leaf) -> None:
5306 """Normalizes numeric (float, int, and complex) literals.
5308 All letters used in the representation are normalized to lowercase (except
5309 in Python 2 long literals).
5311 text = leaf.value.lower()
5312 if text.startswith(("0o", "0b")):
5313 # Leave octal and binary literals alone.
5315 elif text.startswith("0x"):
5316 text = format_hex(text)
5318 text = format_scientific_notation(text)
5319 elif text.endswith(("j", "l")):
5320 text = format_long_or_complex_number(text)
5322 text = format_float_or_int_string(text)
5326 def format_hex(text: str) -> str:
5328 Formats a hexadecimal string like "0x12b3".
5330 Uses lowercase because of similarity between "B" and "8", which
5331 can cause security issues.
5332 see: https://github.com/psf/black/issues/1692
5335 before, after = text[:2], text[2:]
5336 return f"{before}{after.lower()}"
5339 def format_scientific_notation(text: str) -> str:
5340 """Formats a numeric string utilizing scentific notation"""
5341 before, after = text.split("e")
5343 if after.startswith("-"):
5346 elif after.startswith("+"):
5348 before = format_float_or_int_string(before)
5349 return f"{before}e{sign}{after}"
5352 def format_long_or_complex_number(text: str) -> str:
5353 """Formats a long or complex string like `10L` or `10j`"""
5356 # Capitalize in "2L" because "l" looks too similar to "1".
5359 return f"{format_float_or_int_string(number)}{suffix}"
5362 def format_float_or_int_string(text: str) -> str:
5363 """Formats a float string like "1.0"."""
5367 before, after = text.split(".")
5368 return f"{before or 0}.{after or 0}"
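# Illustrative examples (hypothetical inputs):
#
#   format_hex("0x12B3")                   # -> "0x12b3"
#   format_scientific_notation("1.5e+10")  # -> "1.5e10"
#   format_scientific_notation("5e-3")     # -> "5e-3"
#   format_float_or_int_string(".5")       # -> "0.5"
#   format_float_or_int_string("10.")      # -> "10.0"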
5371 def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
5372 """Make existing optional parentheses invisible or create new ones.
5374 `parens_after` is a set of string leaf values immediately after which parens
5377 Standardizes on visible parentheses for single-element tuples, and keeps
5378 existing visible parentheses for other tuples and generator expressions.
5380 for pc in list_comments(node.prefix, is_endmarker=False):
5381 if pc.value in FMT_OFF:
5382 # This `node` has a prefix with `# fmt: off`, don't mess with parens.
5385 for index, child in enumerate(list(node.children)):
5386 # Fixes a bug where invisible parens are not properly stripped from
5387 # assignment statements that contain type annotations.
5388 if isinstance(child, Node) and child.type == syms.annassign:
5389 normalize_invisible_parens(child, parens_after=parens_after)
5391 # Add parentheses around long tuple unpacking in assignments.
5394 and isinstance(child, Node)
5395 and child.type == syms.testlist_star_expr
5400 if child.type == syms.atom:
5401 if maybe_make_parens_invisible_in_atom(child, parent=node):
5402 wrap_in_parentheses(node, child, visible=False)
5403 elif is_one_tuple(child):
5404 wrap_in_parentheses(node, child, visible=True)
5405 elif node.type == syms.import_from:
5406 # "import from" nodes store parentheses directly as part of
5408 if child.type == token.LPAR:
5409 # make parentheses invisible
5410 child.value = "" # type: ignore
5411 node.children[-1].value = "" # type: ignore
5412 elif child.type != token.STAR:
5413 # insert invisible parentheses
5414 node.insert_child(index, Leaf(token.LPAR, ""))
5415 node.append_child(Leaf(token.RPAR, ""))
5418 elif not (isinstance(child, Leaf) and is_multiline_string(child)):
5419 wrap_in_parentheses(node, child, visible=False)
5421 check_lpar = isinstance(child, Leaf) and child.value in parens_after
5424 def normalize_fmt_off(node: Node) -> None:
5425 """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
5428 try_again = convert_one_fmt_off_pair(node)
5431 def convert_one_fmt_off_pair(node: Node) -> bool:
5432 """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
5434 Returns True if a pair was converted.
5436 for leaf in node.leaves():
5437 previous_consumed = 0
5438 for comment in list_comments(leaf.prefix, is_endmarker=False):
5439 if comment.value not in FMT_PASS:
5440 previous_consumed = comment.consumed
5442 # We only want standalone comments. If there's no previous leaf or
5443 # the previous leaf is indentation, it's a standalone comment in
5445 if comment.value in FMT_PASS and comment.type != STANDALONE_COMMENT:
5446 prev = preceding_leaf(leaf)
5448 if comment.value in FMT_OFF and prev.type not in WHITESPACE:
5450 if comment.value in FMT_SKIP and prev.type in WHITESPACE:
5453 ignored_nodes = list(generate_ignored_nodes(leaf, comment))
5454 if not ignored_nodes:
5457 first = ignored_nodes[0] # Can be a container node with the `leaf`.
5458 parent = first.parent
5459 prefix = first.prefix
5460 first.prefix = prefix[comment.consumed :]
5461 hidden_value = "".join(str(n) for n in ignored_nodes)
5462 if comment.value in FMT_OFF:
5463 hidden_value = comment.value + "\n" + hidden_value
5464 if comment.value in FMT_SKIP:
5465 hidden_value += " " + comment.value
5466 if hidden_value.endswith("\n"):
5467 # That happens when one of the `ignored_nodes` ended with a NEWLINE
5468 # leaf (possibly followed by a DEDENT).
5469 hidden_value = hidden_value[:-1]
5470 first_idx: Optional[int] = None
5471 for ignored in ignored_nodes:
5472 index = ignored.remove()
5473 if first_idx is None:
5475 assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
5476 assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
5477 parent.insert_child(
5482 prefix=prefix[:previous_consumed] + "\n" * comment.newlines,
5490 def generate_ignored_nodes(leaf: Leaf, comment: ProtoComment) -> Iterator[LN]:
5491 """Starting from the container of `leaf`, generate all leaves until `# fmt: on`.
5493 If the comment is a `# fmt: skip` comment, returns the leaf only.
5494 Stops at the end of the block.
5496 container: Optional[LN] = container_of(leaf)
5497 if comment.value in FMT_SKIP:
5498 prev_sibling = leaf.prev_sibling
5499 if comment.value in leaf.prefix and prev_sibling is not None:
5500 leaf.prefix = leaf.prefix.replace(comment.value, "")
5501 siblings = [prev_sibling]
5503 "\n" not in prev_sibling.prefix
5504 and prev_sibling.prev_sibling is not None
5506 prev_sibling = prev_sibling.prev_sibling
5507 siblings.insert(0, prev_sibling)
5508 for sibling in siblings:
5510 elif leaf.parent is not None:
5513 while container is not None and container.type != token.ENDMARKER:
5514 if is_fmt_on(container):
5517 # fix for fmt: on in children
5518 if contains_fmt_on_at_column(container, leaf.column):
5519 for child in container.children:
5520 if contains_fmt_on_at_column(child, leaf.column):
5525 container = container.next_sibling
5528 def is_fmt_on(container: LN) -> bool:
5529 """Determine whether formatting is switched on within a container.
5530 Determined by whether the last `# fmt:` comment is `on` or `off`.
5533 for comment in list_comments(container.prefix, is_endmarker=False):
5534 if comment.value in FMT_ON:
5536 elif comment.value in FMT_OFF:
5541 def contains_fmt_on_at_column(container: LN, column: int) -> bool:
5542 """Determine if children at a given column have formatting switched on."""
5543 for child in container.children:
5545 isinstance(child, Node)
5546 and first_leaf_column(child) == column
5547 or isinstance(child, Leaf)
5548 and child.column == column
5550 if is_fmt_on(child):
5556 def first_leaf_column(node: Node) -> Optional[int]:
5557 """Returns the column of the first leaf child of a node."""
5558 for child in node.children:
5559 if isinstance(child, Leaf):
5564 def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool:
5565 """If it's safe, make the parens in the atom `node` invisible, recursively.
5566 Additionally, remove repeated, adjacent invisible parens from the atom `node`
5567 as they are redundant.
5569 Returns whether the node should itself be wrapped in invisible parentheses.
5574 node.type != syms.atom
5575 or is_empty_tuple(node)
5576 or is_one_tuple(node)
5577 or (is_yield(node) and parent.type != syms.expr_stmt)
5578 or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
5582 if is_walrus_assignment(node):
5583 if parent.type in [syms.annassign, syms.expr_stmt]:
5586 first = node.children[0]
5587 last = node.children[-1]
5588 if first.type == token.LPAR and last.type == token.RPAR:
5589 middle = node.children[1]
5590 # make parentheses invisible
5591 first.value = "" # type: ignore
5592 last.value = "" # type: ignore
5593 maybe_make_parens_invisible_in_atom(middle, parent=parent)
5595 if is_atom_with_invisible_parens(middle):
5596 # Strip the invisible parens from `middle` by replacing
5597 # it with the child in-between the invisible parens
5598 middle.replace(middle.children[1])
5605 def is_atom_with_invisible_parens(node: LN) -> bool:
5606 """Given a `LN`, determines whether it's an atom `node` with invisible
5607 parens. Useful in deduplicating and normalizing parens.
5609 if isinstance(node, Leaf) or node.type != syms.atom:
5612 first, last = node.children[0], node.children[-1]
5614 isinstance(first, Leaf)
5615 and first.type == token.LPAR
5616 and first.value == ""
5617 and isinstance(last, Leaf)
5618 and last.type == token.RPAR
5619 and last.value == ""
5623 def is_empty_tuple(node: LN) -> bool:
5624 """Return True if `node` holds an empty tuple."""
5626 node.type == syms.atom
5627 and len(node.children) == 2
5628 and node.children[0].type == token.LPAR
5629 and node.children[1].type == token.RPAR
5633 def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]:
5634 """Returns `wrapped` if `node` is of the shape ( wrapped ).
5636 Parentheses can be optional. Returns None otherwise."""
5637 if len(node.children) != 3:
5640 lpar, wrapped, rpar = node.children
5641 if not (lpar.type == token.LPAR and rpar.type == token.RPAR):
5647 def first_child_is_arith(node: Node) -> bool:
5648 """Whether first child is an arithmetic or a binary arithmetic expression"""
5655 return bool(node.children and node.children[0].type in expr_types)
5658 def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None:
5659 """Wrap `child` in parentheses.
5661 This replaces `child` with an atom holding the parentheses and the old
5662 child. That requires moving the prefix.
5664 If `visible` is False, the leaves will be valueless (and thus invisible).
5666 lpar = Leaf(token.LPAR, "(" if visible else "")
5667 rpar = Leaf(token.RPAR, ")" if visible else "")
5668 prefix = child.prefix
5670 index = child.remove() or 0
5671 new_child = Node(syms.atom, [lpar, child, rpar])
5672 new_child.prefix = prefix
5673 parent.insert_child(index, new_child)
5676 def is_one_tuple(node: LN) -> bool:
5677 """Return True if `node` holds a tuple with one element, with or without parens."""
5678 if node.type == syms.atom:
5679 gexp = unwrap_singleton_parenthesis(node)
5680 if gexp is None or gexp.type != syms.testlist_gexp:
5683 return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA
5686 node.type in IMPLICIT_TUPLE
5687 and len(node.children) == 2
5688 and node.children[1].type == token.COMMA
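# Illustrative sketch (hypothetical sources): for the node parsed from each of
# these expressions, is_one_tuple returns:
#
#     (1,)    -> True   # parenthesized one-tuple
#     1,      -> True   # bare one-tuple (IMPLICIT_TUPLE branch)
#     (1, 2)  -> False
#     (1)     -> False  # just a parenthesized expression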
5692 def is_walrus_assignment(node: LN) -> bool:
5693 """Return True iff `node` is of the shape ( test := test )"""
5694 inner = unwrap_singleton_parenthesis(node)
5695 return inner is not None and inner.type == syms.namedexpr_test
5698 def is_simple_decorator_trailer(node: LN, last: bool = False) -> bool:
5699 """Return True iff `node` is a trailer valid in a simple decorator"""
5700 return node.type == syms.trailer and (
5702 len(node.children) == 2
5703 and node.children[0].type == token.DOT
5704 and node.children[1].type == token.NAME
5706 # last trailer can be arguments
5709 and len(node.children) == 3
5710 and node.children[0].type == token.LPAR
5711 # and node.children[1].type == syms.argument
5712 and node.children[2].type == token.RPAR
5717 def is_simple_decorator_expression(node: LN) -> bool:
5718 """Return True iff `node` could be a 'dotted name' decorator
5720 This function takes the node of the 'namedexpr_test' of the new decorator
5721 grammar and tests whether it would be valid under the old decorator grammar.
5723 The old grammar was: decorator: @ dotted_name [arguments] NEWLINE
5724 The new grammar is : decorator: @ namedexpr_test NEWLINE
5726 if node.type == token.NAME:
5728 if node.type == syms.power:
5731 node.children[0].type == token.NAME
5732 and all(map(is_simple_decorator_trailer, node.children[1:-1]))
5734 len(node.children) < 2
5735 or is_simple_decorator_trailer(node.children[-1], last=True)
5741 def is_yield(node: LN) -> bool:
5742 """Return True if `node` holds a `yield` or `yield from` expression."""
5743 if node.type == syms.yield_expr:
5746 if node.type == token.NAME and node.value == "yield": # type: ignore
5749 if node.type != syms.atom:
5752 if len(node.children) != 3:
5755 lpar, expr, rpar = node.children
5756 if lpar.type == token.LPAR and rpar.type == token.RPAR:
5757 return is_yield(expr)
5762 def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
5763 """Return True if `leaf` is a star or double star in a vararg or kwarg.
5765 If `within` includes VARARGS_PARENTS, this applies to function signatures.
5766 If `within` includes UNPACKING_PARENTS, it applies to right-hand side
5767 extended iterable unpacking (PEP 3132) and additional unpacking
5768 generalizations (PEP 448).
5770 if leaf.type not in VARARGS_SPECIALS or not leaf.parent:
5774 if p.type == syms.star_expr:
5775 # Star expressions are also used as assignment targets in extended
5776 # iterable unpacking (PEP 3132). See what its parent is instead.
5782 return p.type in within
5785 def is_multiline_string(leaf: Leaf) -> bool:
5786 """Return True if `leaf` is a multiline string that actually spans many lines."""
5787 return has_triple_quotes(leaf.value) and "\n" in leaf.value
5790 def is_stub_suite(node: Node) -> bool:
5791 """Return True if `node` is a suite with a stub body."""
5793 len(node.children) != 4
5794 or node.children[0].type != token.NEWLINE
5795 or node.children[1].type != token.INDENT
5796 or node.children[3].type != token.DEDENT
5800 return is_stub_body(node.children[2])
5803 def is_stub_body(node: LN) -> bool:
5804 """Return True if `node` is a simple statement containing an ellipsis."""
5805 if not isinstance(node, Node) or node.type != syms.simple_stmt:
5808 if len(node.children) != 2:
5811 child = node.children[0]
5813 child.type == syms.atom
5814 and len(child.children) == 3
5815 and all(leaf == Leaf(token.DOT, ".") for leaf in child.children)
5819 def max_delimiter_priority_in_atom(node: LN) -> Priority:
5820 """Return maximum delimiter priority inside `node`.
5822 This is specific to atoms with contents contained in a pair of parentheses.
5823 If `node` isn't an atom or there are no enclosing parentheses, returns 0.
5825 if node.type != syms.atom:
5828 first = node.children[0]
5829 last = node.children[-1]
5830 if not (first.type == token.LPAR and last.type == token.RPAR):
5833 bt = BracketTracker()
5834 for c in node.children[1:-1]:
5835 if isinstance(c, Leaf):
5838 for leaf in c.leaves():
5841 return bt.max_delimiter_priority()
5847 def ensure_visible(leaf: Leaf) -> None:
5848 """Make sure parentheses are visible.
5850 They could be invisible as part of some statements (see
5851 :func:`normalize_invisible_parens` and :func:`visit_import_from`).
5853 if leaf.type == token.LPAR:
5855 elif leaf.type == token.RPAR:
5859 def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
5860 """Should `line` be immediately split with `delimiter_split()` after RHS?"""
5862 if not (opening_bracket.parent and opening_bracket.value in "[{("):
5865 # We're essentially checking whether the body is delimited by commas and there
5866 # is more than one of them (we exclude the trailing comma; if the delimiter
5867 # priority is still commas, that means there is at least one more).
5869 trailing_comma = False
5871 last_leaf = line.leaves[-1]
5872 if last_leaf.type == token.COMMA:
5873 trailing_comma = True
5874 exclude.add(id(last_leaf))
5875 max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
5876 except (IndexError, ValueError):
5879 return max_priority == COMMA_PRIORITY and (
5880 (line.mode.magic_trailing_comma and trailing_comma)
5881 # always explode imports
5882 or opening_bracket.parent.type in {syms.atom, syms.import_from}
5886 def is_one_tuple_between(opening: Leaf, closing: Leaf, leaves: List[Leaf]) -> bool:
5887 """Return True if content between `opening` and `closing` looks like a one-tuple."""
5888 if opening.type != token.LPAR or closing.type != token.RPAR:
5891 depth = closing.bracket_depth + 1
5892 for _opening_index, leaf in enumerate(leaves):
5897 raise LookupError("Opening paren not found in `leaves`")
5901 for leaf in leaves[_opening_index:]:
5905 bracket_depth = leaf.bracket_depth
5906 if bracket_depth == depth and leaf.type == token.COMMA:
5908 if leaf.parent and leaf.parent.type in {
5918 def get_features_used(node: Node) -> Set[Feature]:
5919 """Return a set of (relatively) new Python features used in this file.
5921 Currently looking for:
5923 - underscores in numeric literals;
5924 - trailing commas after * or ** in function signatures and calls;
5925 - positional only arguments in function signatures and lambdas;
5926 - assignment expression;
5927 - relaxed decorator syntax;
5929 features: Set[Feature] = set()
5930 for n in node.pre_order():
5931 if n.type == token.STRING:
5932 value_head = n.value[:2] # type: ignore
5933 if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}:
5934 features.add(Feature.F_STRINGS)
5936 elif n.type == token.NUMBER:
5937 if "_" in n.value: # type: ignore
5938 features.add(Feature.NUMERIC_UNDERSCORES)
5940 elif n.type == token.SLASH:
5941 if n.parent and n.parent.type in {syms.typedargslist, syms.arglist}:
5942 features.add(Feature.POS_ONLY_ARGUMENTS)
5944 elif n.type == token.COLONEQUAL:
5945 features.add(Feature.ASSIGNMENT_EXPRESSIONS)
5947 elif n.type == syms.decorator:
5948 if len(n.children) > 1 and not is_simple_decorator_expression(
5951 features.add(Feature.RELAXED_DECORATORS)
5954 n.type in {syms.typedargslist, syms.arglist}
5956 and n.children[-1].type == token.COMMA
5958 if n.type == syms.typedargslist:
5959 feature = Feature.TRAILING_COMMA_IN_DEF
5961 feature = Feature.TRAILING_COMMA_IN_CALL
5963 for ch in n.children:
5964 if ch.type in STARS:
5965 features.add(feature)
5967 if ch.type == syms.argument:
5968 for argch in ch.children:
5969 if argch.type in STARS:
5970 features.add(feature)
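# Illustrative sketch (hypothetical module): for source code containing
#
#     if (n := 10) > 5:
#         print(f"{n}")
#
# get_features_used returns {Feature.ASSIGNMENT_EXPRESSIONS, Feature.F_STRINGS}.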
5975 def detect_target_versions(node: Node) -> Set[TargetVersion]:
5976 """Detect the version to target based on the nodes used."""
5977 features = get_features_used(node)
5979 version for version in TargetVersion if features <= VERSION_TO_FEATURES[version]
5983 def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
5984 """Generate sets of closing bracket IDs that should be omitted in a RHS.
5986 Brackets can be omitted if the entire trailer up to and including
5987 a preceding closing bracket fits in one line.
5989 Yielded sets are cumulative (contain results of previous yields, too). First
5990 set is empty, unless the line should explode, in which case bracket pairs until
5991 the one that needs to explode are omitted.
5994 omit: Set[LeafID] = set()
5995 if not line.magic_trailing_comma:
5998 length = 4 * line.depth
5999 opening_bracket: Optional[Leaf] = None
6000 closing_bracket: Optional[Leaf] = None
6001 inner_brackets: Set[LeafID] = set()
6002 for index, leaf, leaf_length in enumerate_with_length(line, reversed=True):
6003 length += leaf_length
6004 if length > line_length:
6007 has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
6008 if leaf.type == STANDALONE_COMMENT or has_inline_comment:
6012 if leaf is opening_bracket:
6013 opening_bracket = None
6014 elif leaf.type in CLOSING_BRACKETS:
6015 prev = line.leaves[index - 1] if index > 0 else None
6018 and prev.type == token.COMMA
6019 and not is_one_tuple_between(
6020 leaf.opening_bracket, leaf, line.leaves
6023 # Never omit bracket pairs with trailing commas.
6024 # We need to explode on those.
6027 inner_brackets.add(id(leaf))
6028 elif leaf.type in CLOSING_BRACKETS:
6029 prev = line.leaves[index - 1] if index > 0 else None
6030 if prev and prev.type in OPENING_BRACKETS:
6031 # Empty brackets would fail a split so treat them as "inner"
6032 # brackets (e.g. only add them to the `omit` set if another
6033 # pair of brackets was good enough).
6034 inner_brackets.add(id(leaf))
6038 omit.add(id(closing_bracket))
6039 omit.update(inner_brackets)
6040 inner_brackets.clear()
6045 and prev.type == token.COMMA
6046 and not is_one_tuple_between(leaf.opening_bracket, leaf, line.leaves)
6048 # Never omit bracket pairs with trailing commas.
6049 # We need to explode on those.
6053 opening_bracket = leaf.opening_bracket
6054 closing_bracket = leaf
6057 def get_future_imports(node: Node) -> Set[str]:
6058 """Return a set of __future__ imports in the file."""
6059 imports: Set[str] = set()
6061 def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
6062 for child in children:
6063 if isinstance(child, Leaf):
6064 if child.type == token.NAME:
6067 elif child.type == syms.import_as_name:
6068 orig_name = child.children[0]
6069 assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports"
6070 assert orig_name.type == token.NAME, "Invalid syntax parsing imports"
6071 yield orig_name.value
6073 elif child.type == syms.import_as_names:
6074 yield from get_imports_from_children(child.children)
6077 raise AssertionError("Invalid syntax parsing imports")
6079 for child in node.children:
6080 if child.type != syms.simple_stmt:
6083 first_child = child.children[0]
6084 if isinstance(first_child, Leaf):
6085 # Continue looking if we see a docstring; otherwise stop.
6087 len(child.children) == 2
6088 and first_child.type == token.STRING
6089 and child.children[1].type == token.NEWLINE
6095 elif first_child.type == syms.import_from:
6096 module_name = first_child.children[1]
6097 if not isinstance(module_name, Leaf) or module_name.value != "__future__":
6100 imports |= set(get_imports_from_children(first_child.children[3:]))
6108 def get_gitignore(root: Path) -> PathSpec:
6109 """ Return a PathSpec matching gitignore content if present."""
6110 gitignore = root / ".gitignore"
6111 lines: List[str] = []
6112 if gitignore.is_file():
6113 with gitignore.open() as gf:
6114 lines = gf.readlines()
6115 return PathSpec.from_lines("gitwildmatch", lines)
6118 def normalize_path_maybe_ignore(
6119 path: Path, root: Path, report: "Report"
6121 """Normalize `path`. May return `None` if `path` was ignored.
6123 `report` is where "path ignored" output goes.
6126 abspath = path if path.is_absolute() else Path.cwd() / path
6127 normalized_path = abspath.resolve().relative_to(root).as_posix()
6128 except OSError as e:
6129 report.path_ignored(path, f"cannot be read because {e}")
6133 if path.is_symlink():
6134 report.path_ignored(path, f"is a symbolic link that points outside {root}")
6139 return normalized_path
6142 def path_is_excluded(
6143 normalized_path: str,
6144 pattern: Optional[Pattern[str]],
6146 match = pattern.search(normalized_path) if pattern else None
6147 return bool(match and match.group(0))
6150 def gen_python_files(
6151 paths: Iterable[Path],
6153 include: Optional[Pattern[str]],
6154 exclude: Pattern[str],
6155 extend_exclude: Optional[Pattern[str]],
6156 force_exclude: Optional[Pattern[str]],
6158 gitignore: PathSpec,
6159 ) -> Iterator[Path]:
6160 """Generate all files under `path` whose paths are not excluded by the
6161 `exclude_regex`, `extend_exclude`, or `force_exclude` regexes,
6162 but are included by the `include` regex.
6164 Symbolic links pointing outside of the `root` directory are ignored.
6166 `report` is where output about exclusions goes.
6168 assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
6170 normalized_path = normalize_path_maybe_ignore(child, root, report)
6171 if normalized_path is None:
6174 # First ignore files matching .gitignore
6175 if gitignore.match_file(normalized_path):
6176 report.path_ignored(child, "matches the .gitignore file content")
6179 # Then ignore with `--exclude`, `--extend-exclude`, and `--force-exclude` options.
6180 normalized_path = "/" + normalized_path
6182 normalized_path += "/"
6184 if path_is_excluded(normalized_path, exclude):
6185 report.path_ignored(child, "matches the --exclude regular expression")
6188 if path_is_excluded(normalized_path, extend_exclude):
6189 report.path_ignored(
6190 child, "matches the --extend-exclude regular expression"
6194 if path_is_excluded(normalized_path, force_exclude):
6195 report.path_ignored(child, "matches the --force-exclude regular expression")
6199 yield from gen_python_files(
6210 elif child.is_file():
6211 include_match = include.search(normalized_path) if include else True
6217 def find_project_root(srcs: Iterable[str]) -> Path:
6218 """Return a directory containing .git, .hg, or pyproject.toml.
6220 That directory will be a common parent of all files and directories
6223 If no directory in the tree contains a marker that would specify it's the
6224 project root, the root of the file system is returned.
6227 return Path("/").resolve()
6229 path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs]
6231 # A list of lists of parents for each 'src'. 'src' is included as a
6232 # "parent" of itself if it is a directory
6234 list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs
6238 set.intersection(*(set(parents) for parents in src_parents)),
6239 key=lambda path: path.parts,
6242 for directory in (common_base, *common_base.parents):
6243 if (directory / ".git").exists():
6246 if (directory / ".hg").is_dir():
6249 if (directory / "pyproject.toml").is_file():
6256 def find_user_pyproject_toml() -> Path:
6257 r"""Return the path to the top-level user configuration for black.
6259 This looks for ~\.black on Windows and ~/.config/black on Linux and other
6262 if sys.platform == "win32":
6264 user_config_path = Path.home() / ".black"
6266 config_root = os.environ.get("XDG_CONFIG_HOME", "~/.config")
6267 user_config_path = Path(config_root).expanduser() / "black"
6268 return user_config_path.resolve()
6273 """Provides a reformatting counter. Can be rendered with `str(report)`."""
6278 verbose: bool = False
6279 change_count: int = 0
6281 failure_count: int = 0
6283 def done(self, src: Path, changed: Changed) -> None:
6284 """Increment the counter for successful reformatting. Write out a message."""
6285 if changed is Changed.YES:
6286 reformatted = "would reformat" if self.check or self.diff else "reformatted"
6287 if self.verbose or not self.quiet:
6288 out(f"{reformatted} {src}")
6289 self.change_count += 1
6292 if changed is Changed.NO:
6293 msg = f"{src} already well formatted, good job."
6295 msg = f"{src} wasn't modified on disk since last run."
6296 out(msg, bold=False)
6297 self.same_count += 1
6299 def failed(self, src: Path, message: str) -> None:
6300 """Increment the counter for failed reformatting. Write out a message."""
6301 err(f"error: cannot format {src}: {message}")
6302 self.failure_count += 1
6304 def path_ignored(self, path: Path, message: str) -> None:
6306 out(f"{path} ignored: {message}", bold=False)
6309 def return_code(self) -> int:
6310 """Return the exit code that the app should use.
6312 This considers the current state of changed files and failures:
6313 - if there were any failures, return 123;
6314 - if any files were changed and --check is being used, return 1;
6315 - otherwise return 0.
6317 # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with
6318 # 126 we have special return codes reserved by the shell.
6319 if self.failure_count:
6322 elif self.change_count and self.check:
6327 def __str__(self) -> str:
6328 """Render a color report of the current state.
6330 Use `click.unstyle` to remove colors.
6332 if self.check or self.diff:
6333 reformatted = "would be reformatted"
6334 unchanged = "would be left unchanged"
6335 failed = "would fail to reformat"
6337 reformatted = "reformatted"
6338 unchanged = "left unchanged"
6339 failed = "failed to reformat"
6341 if self.change_count:
6342 s = "s" if self.change_count > 1 else ""
6344 click.style(f"{self.change_count} file{s} {reformatted}", bold=True)
6347 s = "s" if self.same_count > 1 else ""
6348 report.append(f"{self.same_count} file{s} {unchanged}")
6349 if self.failure_count:
6350 s = "s" if self.failure_count > 1 else ""
6352 click.style(f"{self.failure_count} file{s} {failed}", fg="red")
6354 return ", ".join(report) + "."
6357 def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]:
6358 filename = "<unknown>"
6359 if sys.version_info >= (3, 8):
6360 # TODO: support Python 4+ ;)
6361 for minor_version in range(sys.version_info[1], 4, -1):
6363 return ast.parse(src, filename, feature_version=(3, minor_version))
6367 for feature_version in (7, 6):
6369 return ast3.parse(src, filename, feature_version=feature_version)
6372 if ast27.__name__ == "ast":
6374 "The requested source code has invalid Python 3 syntax.\n"
6375 "If you are trying to format Python 2 files please reinstall Black"
6376 " with the 'python2' extra: `python3 -m pip install black[python2]`."
6378 return ast27.parse(src)
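# Illustrative behavior sketch: parse_ast("x: int = 1") returns an ast.AST on
# Python 3.8+ (trying successively lower feature versions), falls back to
# typed_ast's ast3 for Python 3 syntax otherwise, and finally to ast27 for
# Python 2 sources.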
6381 def _fixup_ast_constants(
6382 node: Union[ast.AST, ast3.AST, ast27.AST]
6383 ) -> Union[ast.AST, ast3.AST, ast27.AST]:
6384 """Map ast nodes deprecated in 3.8 to Constant."""
6385 if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)):
6386 return ast.Constant(value=node.s)
6388 if isinstance(node, (ast.Num, ast3.Num, ast27.Num)):
6389 return ast.Constant(value=node.n)
6391 if isinstance(node, (ast.NameConstant, ast3.NameConstant)):
6392 return ast.Constant(value=node.value)


def _stringify_ast(
    node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0
) -> Iterator[str]:
    """Simple visitor generating strings to compare ASTs by content."""

    node = _fixup_ast_constants(node)

    yield f"{'  ' * depth}{node.__class__.__name__}("

    for field in sorted(node._fields):  # noqa: F402
        # TypeIgnore has only one field 'lineno' which breaks this comparison
        type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore)
        if sys.version_info >= (3, 8):
            type_ignore_classes += (ast.TypeIgnore,)
        if isinstance(node, type_ignore_classes):
            break

        try:
            value = getattr(node, field)
        except AttributeError:
            continue

        yield f"{'  ' * (depth+1)}{field}="

        if isinstance(value, list):
            for item in value:
                # Ignore nested tuples within del statements, because we may insert
                # parentheses and they change the AST.
                if (
                    field == "targets"
                    and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete))
                    and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple))
                ):
                    for item in item.elts:
                        yield from _stringify_ast(item, depth + 2)

                elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)):
                    yield from _stringify_ast(item, depth + 2)

        elif isinstance(value, (ast.AST, ast3.AST, ast27.AST)):
            yield from _stringify_ast(value, depth + 2)

        else:
            # Constant strings may be indented across newlines, if they are
            # docstrings; fold spaces after newlines when comparing. Similarly,
            # trailing and leading space may be removed.
            if (
                isinstance(node, ast.Constant)
                and field == "value"
                and isinstance(value, str)
            ):
                normalized = re.sub(r" *\n[ \t]*", "\n", value).strip()
            else:
                normalized = value
            yield f"{'  ' * (depth+2)}{normalized!r},  # {value.__class__.__name__}"

    yield f"{'  ' * depth}) # /{node.__class__.__name__}"
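
# Sketch of the comparison format (abbreviated, illustrative; indentation is
# two spaces per depth level). For `x = 1` the generator yields lines roughly
# like:
#
#     Module(
#       body=
#         Assign(
#           ...
#             1,  # int
#     ) # /Module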


def assert_equivalent(src: str, dst: str) -> None:
    """Raise AssertionError if `src` and `dst` aren't equivalent."""
    try:
        src_ast = parse_ast(src)
    except Exception as exc:
        raise AssertionError(
            "cannot use --safe with this file; failed to parse source file. AST"
            f" error message: {exc}"
        )

    try:
        dst_ast = parse_ast(dst)
    except Exception as exc:
        log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
        raise AssertionError(
            f"INTERNAL ERROR: Black produced invalid code: {exc}. Please report a bug"
            " on https://github.com/psf/black/issues. This invalid output might be"
            f" helpful: {log}"
        ) from None

    src_ast_str = "\n".join(_stringify_ast(src_ast))
    dst_ast_str = "\n".join(_stringify_ast(dst_ast))
    if src_ast_str != dst_ast_str:
        log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
        raise AssertionError(
            "INTERNAL ERROR: Black produced code that is not equivalent to the"
            " source. Please report a bug on https://github.com/psf/black/issues."
            f" This diff might be helpful: {log}"
        ) from None


def assert_stable(src: str, dst: str, mode: Mode) -> None:
    """Raise AssertionError if `dst` reformats differently the second time."""
    newdst = format_str(dst, mode=mode)
    if dst != newdst:
        log = dump_to_file(
            str(mode),
            diff(src, dst, "source", "first pass"),
            diff(dst, newdst, "first pass", "second pass"),
        )
        raise AssertionError(
            "INTERNAL ERROR: Black produced different code on the second pass of the"
            " formatter. Please report a bug on https://github.com/psf/black/issues."
            f" This diff might be helpful: {log}"
        ) from None


@mypyc_attr(patchable=True)
def dump_to_file(*output: str, ensure_final_newline: bool = True) -> str:
    """Dump `output` to a temporary file. Return path to the file."""
    with tempfile.NamedTemporaryFile(
        mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
    ) as f:
        for lines in output:
            f.write(lines)
            if ensure_final_newline and lines and lines[-1] != "\n":
                f.write("\n")
    return f.name
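
# For example (illustrative): dump_to_file("first\n", "second") writes
# "first\nsecond\n" to a file named like .../blk_xxxxxxxx.log and returns
# that path; the missing trailing newline on "second" is added automatically.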


@contextmanager
def nullcontext() -> Iterator[None]:
    """Return an empty context manager.

    A stand-in for `contextlib.nullcontext`, which is only available from
    Python 3.7 onward.
    """
    yield


def diff(a: str, b: str, a_name: str, b_name: str) -> str:
    """Return a unified diff string between strings `a` and `b`."""
    import difflib

    a_lines = [line for line in a.splitlines(keepends=True)]
    b_lines = [line for line in b.splitlines(keepends=True)]
    diff_lines = []
    for line in difflib.unified_diff(
        a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5
    ):
        # Work around https://bugs.python.org/issue2142
        # See https://www.gnu.org/software/diffutils/manual/html_node/Incomplete-Lines.html
        if line[-1] == "\n":
            diff_lines.append(line)
        else:
            diff_lines.append(line + "\n")
            diff_lines.append("\\ No newline at end of file\n")
    return "".join(diff_lines)
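
# Illustrative behaviour: when an input lacks a final newline, the diff marks
# it the way GNU diff does. Doctest sketch:
#
#     >>> print(diff("a\n", "a", "src", "dst"))
#     --- src
#     +++ dst
#     @@ -1 +1 @@
#     -a
#     +a
#     \ No newline at end of file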


def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None:
    """asyncio signal handler that cancels all `tasks` and reports to stderr."""
    err("Aborted!")
    for task in tasks:
        task.cancel()


def shutdown(loop: asyncio.AbstractEventLoop) -> None:
    """Cancel all pending tasks on `loop`, wait for them, and close the loop."""
    try:
        if sys.version_info[:2] >= (3, 7):
            all_tasks = asyncio.all_tasks
        else:
            all_tasks = asyncio.Task.all_tasks
        # This part is borrowed from asyncio/runners.py in Python 3.7b2.
        to_cancel = [task for task in all_tasks(loop) if not task.done()]
        if not to_cancel:
            return

        for task in to_cancel:
            task.cancel()
        loop.run_until_complete(
            asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)
        )
    finally:
        # `concurrent.futures.Future` objects cannot be cancelled once they
        # are already running. There might be some when the `shutdown()` happened.
        # Silence their logger's spew about the event loop being closed.
        cf_logger = logging.getLogger("concurrent.futures")
        cf_logger.setLevel(logging.CRITICAL)
        loop.close()
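
# Typical call-site sketch (illustrative; `some_coroutine` is a hypothetical
# placeholder for whatever work the loop runs):
#
#     loop = asyncio.get_event_loop()
#     try:
#         loop.run_until_complete(some_coroutine())
#     finally:
#         shutdown(loop)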


def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
    """Replace `regex` with `replacement` twice on `original`.

    This is used by string normalization to perform replaces on
    overlapping matches.
    """
    return regex.sub(replacement, regex.sub(replacement, original))
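
# Why two passes (illustrative): `re.sub` resumes scanning after each match,
# so occurrences that overlap a previous match survive a single pass.
#
#     >>> import re
#     >>> pat = re.compile(r" x ")
#     >>> pat.sub(" y ", " x x ")          # second "x" overlaps the first match
#     ' y x '
#     >>> sub_twice(pat, " y ", " x x ")
#     ' y y '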


def re_compile_maybe_verbose(regex: str) -> Pattern[str]:
    """Compile a regular expression string in `regex`.

    If it contains newlines, use verbose mode.
    """
    if "\n" in regex:
        regex = "(?x)" + regex
    compiled: Pattern[str] = re.compile(regex)
    return compiled
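
# For example (illustrative): a multiline pattern gets the inline verbose
# flag prepended, so whitespace and `#` comments inside it are ignored.
#
#     >>> re_compile_maybe_verbose("a\n|b").pattern
#     '(?x)a\n|b'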


def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
    """Like `reversed(enumerate(sequence))` if that were possible."""
    index = len(sequence) - 1
    for element in reversed(sequence):
        yield (index, element)
        index -= 1
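
# Illustrative doctest:
#
#     >>> list(enumerate_reversed("abc"))
#     [(2, 'c'), (1, 'b'), (0, 'a')]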


def enumerate_with_length(
    line: Line, reversed: bool = False
) -> Iterator[Tuple[Index, Leaf, int]]:
    """Return an enumeration of leaves with their length.

    Stops prematurely on multiline strings and standalone comments.
    """
    op = cast(
        Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
        enumerate_reversed if reversed else enumerate,
    )
    for index, leaf in op(line.leaves):
        length = len(leaf.prefix) + len(leaf.value)
        if "\n" in leaf.value:
            return  # Multiline strings, we can't continue.

        for comment in line.comments_after(leaf):
            length += len(comment.value)

        yield index, leaf, length


def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool:
    """Return True if `line` is no longer than `line_length`.

    Uses the provided `line_str` rendering, if any, otherwise computes a new one.
    """
    if not line_str:
        line_str = line_to_string(line)
    return (
        len(line_str) <= line_length
        and "\n" not in line_str  # multiline strings
        and not line.contains_standalone_comments()
    )


def can_be_split(line: Line) -> bool:
    """Return False if the line cannot be split *for sure*.

    This is not an exhaustive search but a cheap heuristic that we can use to
    avoid some unfortunate formattings (mostly around wrapping unsplittable code
    in unnecessary parentheses).
    """
    leaves = line.leaves
    if len(leaves) < 2:
        return False

    if leaves[0].type == token.STRING and leaves[1].type == token.DOT:
        call_count = 0
        dot_count = 0
        next = leaves[-1]
        for leaf in leaves[-2::-1]:
            if leaf.type in OPENING_BRACKETS:
                if next.type not in CLOSING_BRACKETS:
                    return False

                call_count += 1
            elif leaf.type == token.DOT:
                dot_count += 1
            elif leaf.type == token.NAME:
                if not (next.type == token.DOT or next.type in OPENING_BRACKETS):
                    return False

            elif leaf.type not in CLOSING_BRACKETS:
                return False

            if dot_count > 1 and call_count > 1:
                return False

            next = leaf

    return True


def can_omit_invisible_parens(
    line: Line,
    line_length: int,
    omit_on_explode: Collection[LeafID] = (),
) -> bool:
    """Does `line` have a shape safe to reformat without optional parens around it?

    Returns True for only a subset of potentially nice looking formattings but
    the point is to not return false positives that end up producing lines that
    are too long.
    """
    bt = line.bracket_tracker
    if not bt.delimiters:
        # Without delimiters the optional parentheses are useless.
        return True

    max_priority = bt.max_delimiter_priority()
    if bt.delimiter_count_with_priority(max_priority) > 1:
        # With more than one delimiter of a kind the optional parentheses read better.
        return False

    if max_priority == DOT_PRIORITY:
        # A single stranded method call doesn't require optional parentheses.
        return True

    assert len(line.leaves) >= 2, "Stranded delimiter"

    # With a single delimiter, omit if the expression starts or ends with
    # a bracket.
    first = line.leaves[0]
    second = line.leaves[1]
    if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:
        if _can_omit_opening_paren(line, first=first, line_length=line_length):
            return True

        # Note: we are not returning False here because a line might have *both*
        # a leading opening bracket and a trailing closing bracket. If the
        # opening bracket doesn't match our rule, maybe the closing will.

    penultimate = line.leaves[-2]
    last = line.leaves[-1]
    if line.magic_trailing_comma:
        try:
            penultimate, last = last_two_except(line.leaves, omit=omit_on_explode)
        except LookupError:
            # Turns out we'd omit everything. We cannot skip the optional parentheses.
            return False

    if (
        last.type == token.RPAR
        or last.type == token.RBRACE
        or (
            # don't use indexing for omitting optional parentheses;
            # it looks weird
            last.type == token.RSQB
            and last.parent
            and last.parent.type != syms.trailer
        )
    ):
        if penultimate.type in OPENING_BRACKETS:
            # Empty brackets don't help.
            return False

        if is_multiline_string(first):
            # Additional wrapping of a multiline string in this situation is
            # unnecessary.
            return True

        if line.magic_trailing_comma and penultimate.type == token.COMMA:
            # The rightmost non-omitted bracket pair is the one we want to explode on.
            return True

        if _can_omit_closing_paren(line, last=last, line_length=line_length):
            return True

    return False


def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool:
    """See `can_omit_invisible_parens`."""
    remainder = False
    length = 4 * line.depth
    _index = len(line.leaves) - 1
    for _index, leaf, leaf_length in enumerate_with_length(line):
        if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:
            remainder = True
        if remainder:
            length += leaf_length
            if length > line_length:
                break

            if leaf.type in OPENING_BRACKETS:
                # There are brackets we can further split on.
                remainder = False

    else:
        # checked the entire string and line length wasn't exceeded
        if len(line.leaves) == _index + 1:
            return True

    return False


def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool:
    """See `can_omit_invisible_parens`."""
    length = 4 * line.depth
    seen_other_brackets = False
    for _index, leaf, leaf_length in enumerate_with_length(line):
        length += leaf_length
        if leaf is last.opening_bracket:
            if seen_other_brackets or length <= line_length:
                return True

        elif leaf.type in OPENING_BRACKETS:
            # There are brackets we can further split on.
            seen_other_brackets = True

    return False


def last_two_except(leaves: List[Leaf], omit: Collection[LeafID]) -> Tuple[Leaf, Leaf]:
    """Return (penultimate, last) leaves skipping brackets in `omit` and contents."""
    stop_after = None
    last = None
    for leaf in reversed(leaves):
        if stop_after:
            if leaf is stop_after:
                stop_after = None

            continue

        if last:
            return leaf, last

        if id(leaf) in omit:
            stop_after = leaf.opening_bracket
        else:
            last = leaf
    else:
        raise LookupError("Last two leaves were also skipped")


def run_transformer(
    line: Line,
    transform: Transformer,
    mode: Mode,
    features: Collection[Feature],
    *,
    line_str: str = "",
) -> List[Line]:
    if not line_str:
        line_str = line_to_string(line)
    result: List[Line] = []
    for transformed_line in transform(line, features):
        if str(transformed_line).strip("\n") == line_str:
            raise CannotTransform("Line transformer returned an unchanged result")

        result.extend(transform_line(transformed_line, mode=mode, features=features))

    if not (
        transform.__name__ == "rhs"
        and line.bracket_tracker.invisible
        and not any(bracket.value for bracket in line.bracket_tracker.invisible)
        and not line.contains_multiline_strings()
        and not result[0].contains_uncollapsable_type_comments()
        and not result[0].contains_unsplittable_type_ignore()
        and not is_line_short_enough(result[0], line_length=mode.line_length)
    ):
        return result

    line_copy = line.clone()
    append_leaves(line_copy, line, line.leaves)
    features_fop = set(features) | {Feature.FORCE_OPTIONAL_PARENTHESES}
    second_opinion = run_transformer(
        line_copy, transform, mode, features_fop, line_str=line_str
    )
    if all(
        is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion
    ):
        result = second_opinion

    return result


def get_cache_file(mode: Mode) -> Path:
    return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"


def read_cache(mode: Mode) -> Cache:
    """Read the cache if it exists and is well formed.

    If it is not well formed, the call to write_cache later should resolve the issue.
    """
    cache_file = get_cache_file(mode)
    if not cache_file.exists():
        return {}

    with cache_file.open("rb") as fobj:
        try:
            cache: Cache = pickle.load(fobj)
        except (pickle.UnpicklingError, ValueError):
            return {}

    return cache


def get_cache_info(path: Path) -> CacheInfo:
    """Return the information used to check if a file is already formatted or not."""
    stat = path.stat()
    return stat.st_mtime, stat.st_size


def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
    """Split an iterable of paths in `sources` into two sets.

    The first contains paths of files that were modified on disk or are not in
    the cache. The other contains paths to non-modified files.
    """
    todo, done = set(), set()
    for src in sources:
        res_src = src.resolve()
        if cache.get(str(res_src)) != get_cache_info(res_src):
            todo.add(src)
        else:
            done.add(src)
    return todo, done


def write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None:
    """Update the cache file."""
    cache_file = get_cache_file(mode)
    try:
        CACHE_DIR.mkdir(parents=True, exist_ok=True)
        new_cache = {
            **cache,
            **{str(src.resolve()): get_cache_info(src) for src in sources},
        }
        with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:
            pickle.dump(new_cache, f, protocol=4)
        os.replace(f.name, cache_file)
    except OSError:
        pass
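
# Round-trip sketch (illustrative; `Mode` and the helpers above are real,
# while `sources` and `succeeded` are hypothetical variables standing in for
# Black's main loop):
#
#     mode = Mode()
#     cache = read_cache(mode)                    # {} on a cold start
#     todo, done = filter_cached(cache, sources)  # skip unchanged files
#     # ... reformat everything in `todo` ...
#     write_cache(cache, succeeded, mode)         # record (mtime, size) pairs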


def patch_click() -> None:
    """Make Click not crash.

    On certain misconfigured environments, Python 3 selects the ASCII encoding as the
    default which restricts paths that it can access during the lifetime of the
    application. Click refuses to work in this scenario by raising a RuntimeError.

    In case of Black the likelihood that non-ASCII characters are going to be used in
    file paths is minimal since it's Python source code. Moreover, this crash was
    spurious on Python 3.7 thanks to PEP 538 and PEP 540.
    """
    try:
        from click import core
        from click import _unicodefun  # type: ignore
    except ModuleNotFoundError:
        return

    for module in (core, _unicodefun):
        if hasattr(module, "_verify_python3_env"):
            module._verify_python3_env = lambda: None


def patched_main() -> None:
    freeze_support()
    patch_click()
    main()


def is_docstring(leaf: Leaf) -> bool:
    if not is_multiline_string(leaf):
        # For the purposes of docstring re-indentation, we don't need to do anything
        # with single-line docstrings.
        return False

    if prev_siblings_are(
        leaf.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt]
    ):
        return True

    # Multiline docstring on the same line as the `def`.
    if prev_siblings_are(leaf.parent, [syms.parameters, token.COLON, syms.simple_stmt]):
        # `syms.parameters` is only used in funcdefs and async_funcdefs in the Python
        # grammar. We're safe to return True without further checks.
        return True

    return False


def lines_with_leading_tabs_expanded(s: str) -> List[str]:
    """
    Split a string into lines, expanding only leading tabs (following the
    normal Python rules).
    """
    lines: List[str] = []
    for line in s.splitlines():
        # Find the index of the first non-whitespace character after a string of
        # whitespace that includes at least one tab
        match = re.match(r"\s*\t+\s*(\S)", line)
        if match:
            first_non_whitespace_idx = match.start(1)

            lines.append(
                line[:first_non_whitespace_idx].expandtabs()
                + line[first_non_whitespace_idx:]
            )
        else:
            lines.append(line)
    return lines
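
# Leading tabs expand to the next multiple of 8 columns; the rest of the line
# is left untouched. Illustrative doctest:
#
#     >>> lines_with_leading_tabs_expanded("\thello\n\t\tworld")
#     ['        hello', '                world']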


def fix_docstring(docstring: str, prefix: str) -> str:
    # https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
    if not docstring:
        return docstring
    lines = lines_with_leading_tabs_expanded(docstring)
    # Determine minimum indentation (first line doesn't count):
    indent = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        last_line_idx = len(lines) - 2
        for i, line in enumerate(lines[1:]):
            stripped_line = line[indent:].rstrip()
            if stripped_line or i == last_line_idx:
                trimmed.append(prefix + stripped_line)
            else:
                trimmed.append("")
    return "\n".join(trimmed)
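
# PEP 257 trimming in action (illustrative): common leading indentation is
# removed and the given `prefix` is re-applied to continuation lines.
#
#     >>> fix_docstring("Line one.\n        Line two.", "    ")
#     'Line one.\n    Line two.'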


if __name__ == "__main__":