madduck's git repository

Every project in this repository is available at the canonical URL git://git.madduck.net/madduck/pub/<projectpath>; see each project's metadata for the exact URL.

All patches and comments are welcome. Please squash your changes into logical commits before using git-format-patch and git-send-email to send them to patches@git.madduck.net. I would be especially grateful if you read over the Git project's submission guidelines and adhere to them.
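
For example (the branch name origin/master is only illustrative), preparing and sending a series might look like:

  git format-patch origin/master                       # one patch file per logical commit
  git send-email --to=patches@git.madduck.net *.patch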

SSH access, as well as push access, can be arranged individually.

If you use my repositories frequently, consider adding the following snippet to ~/.gitconfig and using the third clone URL listed for each project:

[url "git://git.madduck.net/madduck/"]
  insteadOf = madduck:
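
With that snippet in place, the madduck: prefix expands to git://git.madduck.net/madduck/, so cloning a project can be written as (the project path is a placeholder):

  git clone madduck:pub/<projectpath>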

Update heredoc marker case to conform with vim patch 8.1.1723 (#1348)
[etc/vim.git] / black.py
index 24c57ca4aee1d54ae862e8e879a4d27f9577f029..d9348a37b42d4c45a4bf4fd126f23c8c6626d848 100644 (file)
--- a/black.py
+++ b/black.py
-#!/usr/bin/env python3
+import ast
 import asyncio
-from asyncio.base_events import BaseEventLoop
 from concurrent.futures import Executor, ProcessPoolExecutor
-from functools import partial
-import keyword
+from contextlib import contextmanager
+from datetime import datetime
+from enum import Enum
+from functools import lru_cache, partial, wraps
+import io
+import itertools
+import logging
+from multiprocessing import Manager, freeze_support
 import os
 from pathlib import Path
+import pickle
+import regex as re
+import signal
+import sys
+import tempfile
 import tokenize
+import traceback
 from typing import (
-    Dict, Generic, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar, Union
+    Any,
+    Callable,
+    Collection,
+    Dict,
+    Generator,
+    Generic,
+    Iterable,
+    Iterator,
+    List,
+    Optional,
+    Pattern,
+    Sequence,
+    Set,
+    Tuple,
+    TypeVar,
+    Union,
+    cast,
 )
+from typing_extensions import Final
+from mypy_extensions import mypyc_attr
 
-from attr import attrib, dataclass, Factory
+from appdirs import user_cache_dir
+from dataclasses import dataclass, field, replace
 import click
+import toml
+from typed_ast import ast3, ast27
+from pathspec import PathSpec
 
 # lib2to3 fork
 from blib2to3.pytree import Node, Leaf, type_repr
 from blib2to3 import pygram, pytree
 from blib2to3.pgen2 import driver, token
+from blib2to3.pgen2.grammar import Grammar
 from blib2to3.pgen2.parse import ParseError
 
-__version__ = "18.3a0"
+from _black_version import version as __version__
+
 DEFAULT_LINE_LENGTH = 88
+DEFAULT_EXCLUDES = r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|\.svn|_build|buck-out|build|dist)/"  # noqa: B950
+DEFAULT_INCLUDES = r"\.pyi?$"
+CACHE_DIR = Path(user_cache_dir("black", version=__version__))
+
+
 # types
-syms = pygram.python_symbols
 FileContent = str
 Encoding = str
+NewLine = str
 Depth = int
 NodeType = int
 LeafID = int
 Priority = int
+Index = int
 LN = Union[Leaf, Node]
+SplitFunc = Callable[["Line", Collection["Feature"]], Iterator["Line"]]
+Timestamp = float
+FileSize = int
+CacheInfo = Tuple[Timestamp, FileSize]
+Cache = Dict[Path, CacheInfo]
 out = partial(click.secho, bold=True, err=True)
-err = partial(click.secho, fg='red', err=True)
+err = partial(click.secho, fg="red", err=True)
+
+pygram.initialize(CACHE_DIR)
+syms = pygram.python_symbols
 
 
 class NothingChanged(UserWarning):
-    """Raised by `format_file` when the reformatted code is the same as source."""
+    """Raised when reformatted code is the same as source."""
 
 
 class CannotSplit(Exception):
-    """A readable split that fits the allotted line length is impossible.
+    """A readable split that fits the allotted line length is impossible."""
+
+
+class InvalidInput(ValueError):
+    """Raised when input source code fails all parse attempts."""
+
+
+class WriteBack(Enum):
+    NO = 0
+    YES = 1
+    DIFF = 2
+    CHECK = 3
+
+    @classmethod
+    def from_configuration(cls, *, check: bool, diff: bool) -> "WriteBack":
+        if check and not diff:
+            return cls.CHECK
+
+        return cls.DIFF if diff else cls.YES
+
+
+class Changed(Enum):
+    NO = 0
+    CACHED = 1
+    YES = 2
+
+
+class TargetVersion(Enum):
+    PY27 = 2
+    PY33 = 3
+    PY34 = 4
+    PY35 = 5
+    PY36 = 6
+    PY37 = 7
+    PY38 = 8
+
+    def is_python2(self) -> bool:
+        return self is TargetVersion.PY27
+
+
+PY36_VERSIONS = {TargetVersion.PY36, TargetVersion.PY37, TargetVersion.PY38}
+
+
+class Feature(Enum):
+    # All string literals are unicode
+    UNICODE_LITERALS = 1
+    F_STRINGS = 2
+    NUMERIC_UNDERSCORES = 3
+    TRAILING_COMMA_IN_CALL = 4
+    TRAILING_COMMA_IN_DEF = 5
+    # The following two feature-flags are mutually exclusive, and exactly one should be
+    # set for every version of python.
+    ASYNC_IDENTIFIERS = 6
+    ASYNC_KEYWORDS = 7
+    ASSIGNMENT_EXPRESSIONS = 8
+    POS_ONLY_ARGUMENTS = 9
+
+
+VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
+    TargetVersion.PY27: {Feature.ASYNC_IDENTIFIERS},
+    TargetVersion.PY33: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS},
+    TargetVersion.PY34: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS},
+    TargetVersion.PY35: {
+        Feature.UNICODE_LITERALS,
+        Feature.TRAILING_COMMA_IN_CALL,
+        Feature.ASYNC_IDENTIFIERS,
+    },
+    TargetVersion.PY36: {
+        Feature.UNICODE_LITERALS,
+        Feature.F_STRINGS,
+        Feature.NUMERIC_UNDERSCORES,
+        Feature.TRAILING_COMMA_IN_CALL,
+        Feature.TRAILING_COMMA_IN_DEF,
+        Feature.ASYNC_IDENTIFIERS,
+    },
+    TargetVersion.PY37: {
+        Feature.UNICODE_LITERALS,
+        Feature.F_STRINGS,
+        Feature.NUMERIC_UNDERSCORES,
+        Feature.TRAILING_COMMA_IN_CALL,
+        Feature.TRAILING_COMMA_IN_DEF,
+        Feature.ASYNC_KEYWORDS,
+    },
+    TargetVersion.PY38: {
+        Feature.UNICODE_LITERALS,
+        Feature.F_STRINGS,
+        Feature.NUMERIC_UNDERSCORES,
+        Feature.TRAILING_COMMA_IN_CALL,
+        Feature.TRAILING_COMMA_IN_DEF,
+        Feature.ASYNC_KEYWORDS,
+        Feature.ASSIGNMENT_EXPRESSIONS,
+        Feature.POS_ONLY_ARGUMENTS,
+    },
+}
+
+
+@dataclass
+class Mode:
+    target_versions: Set[TargetVersion] = field(default_factory=set)
+    line_length: int = DEFAULT_LINE_LENGTH
+    string_normalization: bool = True
+    is_pyi: bool = False
+
+    def get_cache_key(self) -> str:
+        if self.target_versions:
+            version_str = ",".join(
+                str(version.value)
+                for version in sorted(self.target_versions, key=lambda v: v.value)
+            )
+        else:
+            version_str = "-"
+        parts = [
+            version_str,
+            str(self.line_length),
+            str(int(self.string_normalization)),
+            str(int(self.is_pyi)),
+        ]
+        return ".".join(parts)
+
+
+# Legacy name, left for integrations.
+FileMode = Mode
+
+
+def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
+    return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)
+
+
+def find_pyproject_toml(path_search_start: str) -> Optional[str]:
+    """Find the absolute filepath to a pyproject.toml if it exists"""
+    path_project_root = find_project_root(path_search_start)
+    path_pyproject_toml = path_project_root / "pyproject.toml"
+    return str(path_pyproject_toml) if path_pyproject_toml.is_file() else None
+
+
+def parse_pyproject_toml(path_config: str) -> Dict[str, Any]:
+    """Parse a pyproject toml file, pulling out relevant parts for Black
+
+    If parsing fails, will raise a toml.TomlDecodeError
+    """
+    pyproject_toml = toml.load(path_config)
+    config = pyproject_toml.get("tool", {}).get("black", {})
+    return {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
+
+
+def read_pyproject_toml(
+    ctx: click.Context, param: click.Parameter, value: Union[str, int, bool, None]
+) -> Optional[str]:
+    """Inject Black configuration from "pyproject.toml" into defaults in `ctx`.
+
+    Returns the path to a successfully found and read configuration file, None
+    otherwise.
+    """
+    assert not isinstance(value, (int, bool)), "Invalid parameter type passed"
+    if not value:
+        value = find_pyproject_toml(ctx.params.get("src", ()))
+        if value is None:
+            return None
+
+    try:
+        config = parse_pyproject_toml(value)
+    except (toml.TomlDecodeError, OSError) as e:
+        raise click.FileError(
+            filename=value, hint=f"Error reading configuration file: {e}"
+        )
+
+    if not config:
+        return None
+
+    if ctx.default_map is None:
+        ctx.default_map = {}
+    ctx.default_map.update(config)  # type: ignore  # bad types in .pyi
+    return value
+
 
-    Raised by `left_hand_split()` and `right_hand_split()`.
+def target_version_option_callback(
+    c: click.Context, p: Union[click.Option, click.Parameter], v: Tuple[str, ...]
+) -> List[TargetVersion]:
+    """Compute the target versions from a --target-version flag.
+
+    This is its own function because mypy couldn't infer the type correctly
+    when it was a lambda, causing mypyc trouble.
     """
     """
+    return [TargetVersion[val.upper()] for val in v]
 
 
-@click.command()
+@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
+@click.option("-c", "--code", type=str, help="Format the code passed in as a string.")
 @click.option(
-    '-l',
-    '--line-length',
+    "-l",
+    "--line-length",
     type=int,
     default=DEFAULT_LINE_LENGTH,
-    help='How many character per line to allow.',
+    help="How many characters per line to allow.",
+    show_default=True,
+)
+@click.option(
+    "-t",
+    "--target-version",
+    type=click.Choice([v.name.lower() for v in TargetVersion]),
+    callback=target_version_option_callback,
+    multiple=True,
+    help=(
+        "Python versions that should be supported by Black's output. [default: "
+        "per-file auto-detection]"
+    ),
+)
+@click.option(
+    "--py36",
+    is_flag=True,
+    help=(
+        "Allow using Python 3.6-only syntax on all input files.  This will put "
+        "trailing commas in function signatures and calls also after *args and "
+        "**kwargs. Deprecated; use --target-version instead. "
+        "[default: per-file auto-detection]"
+    ),
+)
+@click.option(
+    "--pyi",
+    is_flag=True,
+    help=(
+        "Format all input files like typing stubs regardless of file extension "
+        "(useful when piping source on standard input)."
+    ),
+)
+@click.option(
+    "-S",
+    "--skip-string-normalization",
+    is_flag=True,
+    help="Don't normalize string quotes or prefixes.",
+)
+@click.option(
+    "--check",
+    is_flag=True,
+    help=(
+        "Don't write the files back, just return the status.  Return code 0 "
+        "means nothing would change.  Return code 1 means some files would be "
+        "reformatted.  Return code 123 means there was an internal error."
+    ),
+)
+@click.option(
+    "--diff",
+    is_flag=True,
+    help="Don't write the files back, just output a diff for each file on stdout.",
+)
+@click.option(
+    "--fast/--safe",
+    is_flag=True,
+    help="If --fast given, skip temporary sanity checks. [default: --safe]",
+)
+@click.option(
+    "--include",
+    type=str,
+    default=DEFAULT_INCLUDES,
+    help=(
+        "A regular expression that matches files and directories that should be "
+        "included on recursive searches.  An empty value means all files are "
+        "included regardless of the name.  Use forward slashes for directories on "
+        "all platforms (Windows, too).  Exclusions are calculated first, inclusions "
+        "later."
+    ),
     show_default=True,
 )
 @click.option(
-    '--fast/--safe',
+    "--exclude",
+    type=str,
+    default=DEFAULT_EXCLUDES,
+    help=(
+        "A regular expression that matches files and directories that should be "
+        "excluded on recursive searches.  An empty value means no paths are excluded. "
+        "Use forward slashes for directories on all platforms (Windows, too).  "
+        "Exclusions are calculated first, inclusions later."
+    ),
+    show_default=True,
+)
+@click.option(
+    "-q",
+    "--quiet",
+    is_flag=True,
+    help=(
+        "Don't emit non-error messages to stderr. Errors are still emitted; "
+        "silence those with 2>/dev/null."
+    ),
+)
+@click.option(
+    "-v",
+    "--verbose",
     is_flag=True,
-    help='If --fast given, skip temporary sanity checks. [default: --safe]',
+    help=(
+        "Also emit messages to stderr about files that were not changed or were "
+        "ignored due to --exclude=."
+    ),
 )
 @click.version_option(version=__version__)
 @click.argument(
-    'src',
+    "src",
     nargs=-1,
-    type=click.Path(exists=True, file_okay=True, dir_okay=True, readable=True),
+    type=click.Path(
+        exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
+    ),
+    is_eager=True,
+)
+@click.option(
+    "--config",
+    type=click.Path(
+        exists=False, file_okay=True, dir_okay=False, readable=True, allow_dash=False
+    ),
+    is_eager=True,
+    callback=read_pyproject_toml,
+    help="Read configuration from PATH.",
 )
 @click.pass_context
-def main(ctx: click.Context, line_length: int, fast: bool, src: List[str]) -> None:
+def main(
+    ctx: click.Context,
+    code: Optional[str],
+    line_length: int,
+    target_version: List[TargetVersion],
+    check: bool,
+    diff: bool,
+    fast: bool,
+    pyi: bool,
+    py36: bool,
+    skip_string_normalization: bool,
+    quiet: bool,
+    verbose: bool,
+    include: str,
+    exclude: str,
+    src: Tuple[str, ...],
+    config: Optional[str],
+) -> None:
     """The uncompromising code formatter."""
     """The uncompromising code formatter."""
-    sources: List[Path] = []
+    write_back = WriteBack.from_configuration(check=check, diff=diff)
+    if target_version:
+        if py36:
+            err("Cannot use both --target-version and --py36")
+            ctx.exit(2)
+        else:
+            versions = set(target_version)
+    elif py36:
+        err(
+            "--py36 is deprecated and will be removed in a future version. "
+            "Use --target-version py36 instead."
+        )
+        versions = PY36_VERSIONS
+    else:
+        # We'll autodetect later.
+        versions = set()
+    mode = Mode(
+        target_versions=versions,
+        line_length=line_length,
+        is_pyi=pyi,
+        string_normalization=not skip_string_normalization,
+    )
+    if config and verbose:
+        out(f"Using configuration from {config}.", bold=False, fg="blue")
+    if code is not None:
+        print(format_str(code, mode=mode))
+        ctx.exit(0)
+    try:
+        include_regex = re_compile_maybe_verbose(include)
+    except re.error:
+        err(f"Invalid regular expression for include given: {include!r}")
+        ctx.exit(2)
+    try:
+        exclude_regex = re_compile_maybe_verbose(exclude)
+    except re.error:
+        err(f"Invalid regular expression for exclude given: {exclude!r}")
+        ctx.exit(2)
+    report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose)
+    root = find_project_root(src)
+    sources: Set[Path] = set()
+    path_empty(src, quiet, verbose, ctx)
     for s in src:
         p = Path(s)
         if p.is_dir():
-            sources.extend(gen_python_files_in_dir(p))
-        elif p.is_file():
+            sources.update(
+                gen_python_files_in_dir(
+                    p, root, include_regex, exclude_regex, report, get_gitignore(root)
+                )
+            )
+        elif p.is_file() or s == "-":
             # if a file was explicitly given, we don't care about its extension
-            sources.append(p)
+            sources.add(p)
         else:
-            err(f'invalid path: {s}')
+            err(f"invalid path: {s}")
     if len(sources) == 0:
+        if verbose or not quiet:
+            out("No Python files are present to be formatted. Nothing to do 😴")
         ctx.exit(0)
-    elif len(sources) == 1:
-        p = sources[0]
-        report = Report()
-        try:
-            changed = format_file_in_place(p, line_length=line_length, fast=fast)
-            report.done(p, changed)
-        except Exception as exc:
-            report.failed(p, str(exc))
-        ctx.exit(report.return_code)
+
+    if len(sources) == 1:
+        reformat_one(
+            src=sources.pop(),
+            fast=fast,
+            write_back=write_back,
+            mode=mode,
+            report=report,
+        )
     else:
-        loop = asyncio.get_event_loop()
-        executor = ProcessPoolExecutor(max_workers=os.cpu_count())
-        return_code = 1
-        try:
-            return_code = loop.run_until_complete(
-                schedule_formatting(sources, line_length, fast, loop, executor)
+        reformat_many(
+            sources=sources, fast=fast, write_back=write_back, mode=mode, report=report
+        )
+
+    if verbose or not quiet:
+        out("Oh no! 💥 💔 💥" if report.return_code else "All done! ✨ 🍰 ✨")
+        click.secho(str(report), err=True)
+    ctx.exit(report.return_code)
+
+
+def path_empty(
+    src: Tuple[str, ...], quiet: bool, verbose: bool, ctx: click.Context
+) -> None:
+    """
+    Exit if there is no `src` provided for formatting
+    """
+    if not src:
+        if verbose or not quiet:
+            out("No Path provided. Nothing to do 😴")
+            ctx.exit(0)
+
+
+def reformat_one(
+    src: Path, fast: bool, write_back: WriteBack, mode: Mode, report: "Report"
+) -> None:
+    """Reformat a single file under `src` without spawning child processes.
+
+    `fast`, `write_back`, and `mode` options are passed to
+    :func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
+    """
+    try:
+        changed = Changed.NO
+        if not src.is_file() and str(src) == "-":
+            if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode):
+                changed = Changed.YES
+        else:
+            cache: Cache = {}
+            if write_back != WriteBack.DIFF:
+                cache = read_cache(mode)
+                res_src = src.resolve()
+                if res_src in cache and cache[res_src] == get_cache_info(res_src):
+                    changed = Changed.CACHED
+            if changed is not Changed.CACHED and format_file_in_place(
+                src, fast=fast, write_back=write_back, mode=mode
+            ):
+                changed = Changed.YES
+            if (write_back is WriteBack.YES and changed is not Changed.CACHED) or (
+                write_back is WriteBack.CHECK and changed is Changed.NO
+            ):
+                write_cache(cache, [src], mode)
+        report.done(src, changed)
+    except Exception as exc:
+        report.failed(src, str(exc))
+
+
+def reformat_many(
+    sources: Set[Path], fast: bool, write_back: WriteBack, mode: Mode, report: "Report"
+) -> None:
+    """Reformat multiple files using a ProcessPoolExecutor."""
+    loop = asyncio.get_event_loop()
+    worker_count = os.cpu_count()
+    if sys.platform == "win32":
+        # Work around https://bugs.python.org/issue26903
+        worker_count = min(worker_count, 61)
+    executor = ProcessPoolExecutor(max_workers=worker_count)
+    try:
+        loop.run_until_complete(
+            schedule_formatting(
+                sources=sources,
+                fast=fast,
+                write_back=write_back,
+                mode=mode,
+                report=report,
+                loop=loop,
+                executor=executor,
             )
-        finally:
-            loop.close()
-            ctx.exit(return_code)
+        )
+    finally:
+        shutdown(loop)
+        executor.shutdown()
 
 
 async def schedule_formatting(
-    sources: List[Path],
-    line_length: int,
+    sources: Set[Path],
     fast: bool,
-    loop: BaseEventLoop,
+    write_back: WriteBack,
+    mode: Mode,
+    report: "Report",
+    loop: asyncio.AbstractEventLoop,
     executor: Executor,
-) -> int:
+) -> None:
+    """Run formatting of `sources` in parallel using the provided `executor`.
+
+    (Use ProcessPoolExecutors for actual parallelism.)
+
+    `write_back`, `fast`, and `mode` options are passed to
+    :func:`format_file_in_place`.
+    """
+    cache: Cache = {}
+    if write_back != WriteBack.DIFF:
+        cache = read_cache(mode)
+        sources, cached = filter_cached(cache, sources)
+        for src in sorted(cached):
+            report.done(src, Changed.CACHED)
+    if not sources:
+        return
+
+    cancelled = []
+    sources_to_cache = []
+    lock = None
+    if write_back == WriteBack.DIFF:
+        # For diff output, we need locks to ensure we don't interleave output
+        # from different processes.
+        manager = Manager()
+        lock = manager.Lock()
     tasks = {
-        src: loop.run_in_executor(
-            executor, format_file_in_place, src, line_length, fast
-        )
-        for src in sources
+        asyncio.ensure_future(
+            loop.run_in_executor(
+                executor, format_file_in_place, src, fast, mode, write_back, lock
+            )
+        ): src
+        for src in sorted(sources)
     }
-    await asyncio.wait(tasks.values())
-    cancelled = []
-    report = Report()
-    for src, task in tasks.items():
-        if not task.done():
-            report.failed(src, 'timed out, cancelling')
-            task.cancel()
-            cancelled.append(task)
-        elif task.exception():
-            report.failed(src, str(task.exception()))
-        else:
-            report.done(src, task.result())
+    pending: Iterable["asyncio.Future[bool]"] = tasks.keys()
+    try:
+        loop.add_signal_handler(signal.SIGINT, cancel, pending)
+        loop.add_signal_handler(signal.SIGTERM, cancel, pending)
+    except NotImplementedError:
+        # There are no good alternatives for these on Windows.
+        pass
+    while pending:
+        done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
+        for task in done:
+            src = tasks.pop(task)
+            if task.cancelled():
+                cancelled.append(task)
+            elif task.exception():
+                report.failed(src, str(task.exception()))
+            else:
+                changed = Changed.YES if task.result() else Changed.NO
+                # If the file was written back or was successfully checked as
+                # well-formatted, store this information in the cache.
+                if write_back is WriteBack.YES or (
+                    write_back is WriteBack.CHECK and changed is Changed.NO
+                ):
+                    sources_to_cache.append(src)
+                report.done(src, changed)
     if cancelled:
-        await asyncio.wait(cancelled, timeout=2)
-    out('All done! ✨ 🍰 ✨')
-    click.echo(str(report))
-    return report.return_code
+        await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
+    if sources_to_cache:
+        write_cache(cache, sources_to_cache, mode)
+
 
 
+def format_file_in_place(
+    src: Path,
+    fast: bool,
+    mode: Mode,
+    write_back: WriteBack = WriteBack.NO,
+    lock: Any = None,  # multiprocessing.Manager().Lock() is some crazy proxy
+) -> bool:
+    """Format file under `src` path. Return True if changed.
+
+    If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted
+    code to the file.
+    `mode` and `fast` options are passed to :func:`format_file_contents`.
+    """
+    if src.suffix == ".pyi":
+        mode = replace(mode, is_pyi=True)
 
 
-def format_file_in_place(src: Path, line_length: int, fast: bool) -> bool:
-    """Format the file and rewrite if changed. Return True if changed."""
+    then = datetime.utcfromtimestamp(src.stat().st_mtime)
+    with open(src, "rb") as buf:
+        src_contents, encoding, newline = decode_bytes(buf.read())
     try:
-        contents, encoding = format_file(src, line_length=line_length, fast=fast)
+        dst_contents = format_file_contents(src_contents, fast=fast, mode=mode)
     except NothingChanged:
         return False
 
-    with open(src, "w", encoding=encoding) as f:
-        f.write(contents)
+    if write_back == WriteBack.YES:
+        with open(src, "w", encoding=encoding, newline=newline) as f:
+            f.write(dst_contents)
+    elif write_back == WriteBack.DIFF:
+        now = datetime.utcnow()
+        src_name = f"{src}\t{then} +0000"
+        dst_name = f"{src}\t{now} +0000"
+        diff_contents = diff(src_contents, dst_contents, src_name, dst_name)
+
+        with lock or nullcontext():
+            f = io.TextIOWrapper(
+                sys.stdout.buffer,
+                encoding=encoding,
+                newline=newline,
+                write_through=True,
+            )
+            f.write(diff_contents)
+            f.detach()
+
     return True
 
 
-def format_file(
-    src: Path, line_length: int, fast: bool
-) -> Tuple[FileContent, Encoding]:
-    """Reformats a file and returns its contents and encoding."""
-    with tokenize.open(src) as src_buffer:
-        src_contents = src_buffer.read()
-    if src_contents.strip() == '':
-        raise NothingChanged(src)
+def format_stdin_to_stdout(
+    fast: bool, *, write_back: WriteBack = WriteBack.NO, mode: Mode
+) -> bool:
+    """Format file on stdin. Return True if changed.
+
+    If `write_back` is YES, write reformatted code back to stdout. If it is DIFF,
+    write a diff to stdout. The `mode` argument is passed to
+    :func:`format_file_contents`.
+    """
+    then = datetime.utcnow()
+    src, encoding, newline = decode_bytes(sys.stdin.buffer.read())
+    dst = src
+    try:
+        dst = format_file_contents(src, fast=fast, mode=mode)
+        return True
+
+    except NothingChanged:
+        return False
+
+    finally:
+        f = io.TextIOWrapper(
+            sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True
+        )
+        if write_back == WriteBack.YES:
+            f.write(dst)
+        elif write_back == WriteBack.DIFF:
+            now = datetime.utcnow()
+            src_name = f"STDIN\t{then} +0000"
+            dst_name = f"STDOUT\t{now} +0000"
+            f.write(diff(src, dst, src_name, dst_name))
+        f.detach()
+
+
+def format_file_contents(src_contents: str, *, fast: bool, mode: Mode) -> FileContent:
+    """Reformat contents a file and return new contents.
+
+    If `fast` is False, additionally confirm that the reformatted code is
+    valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
+    `mode` is passed to :func:`format_str`.
+    """
+    if src_contents.strip() == "":
+        raise NothingChanged
 
-    dst_contents = format_str(src_contents, line_length=line_length)
+    dst_contents = format_str(src_contents, mode=mode)
     if src_contents == dst_contents:
-        raise NothingChanged(src)
+        raise NothingChanged
 
     if not fast:
         assert_equivalent(src_contents, dst_contents)
-        assert_stable(src_contents, dst_contents, line_length=line_length)
-    return dst_contents, src_buffer.encoding
+        assert_stable(src_contents, dst_contents, mode=mode)
+    return dst_contents
 
 
-def format_str(src_contents: str, line_length: int) -> FileContent:
-    """Reformats a string and returns new contents."""
-    src_node = lib2to3_parse(src_contents)
-    dst_contents = ""
-    comments: List[Line] = []
-    lines = LineGenerator()
-    elt = EmptyLineTracker()
+def format_str(src_contents: str, *, mode: Mode) -> FileContent:
+    """Reformat a string and return new contents.
+
+    `mode` determines formatting options, such as how many characters per line are
+    allowed.  Example:
+
+    >>> import black
+    >>> print(black.format_str("def f(arg:str='')->None:...", mode=Mode()))
+    def f(arg: str = "") -> None:
+        ...
+
+    A more complex example:
+    >>> print(
+    ...   black.format_str(
+    ...     "def f(arg:str='')->None: hey",
+    ...     mode=black.Mode(
+    ...       target_versions={black.TargetVersion.PY36},
+    ...       line_length=10,
+    ...       string_normalization=False,
+    ...       is_pyi=False,
+    ...     ),
+    ...   ),
+    ... )
+    def f(
+        arg: str = '',
+    ) -> None:
+        hey
+
+    """
+    src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions)
+    dst_contents = []
+    future_imports = get_future_imports(src_node)
+    if mode.target_versions:
+        versions = mode.target_versions
+    else:
+        versions = detect_target_versions(src_node)
+    normalize_fmt_off(src_node)
+    lines = LineGenerator(
+        remove_u_prefix="unicode_literals" in future_imports
+        or supports_feature(versions, Feature.UNICODE_LITERALS),
+        is_pyi=mode.is_pyi,
+        normalize_strings=mode.string_normalization,
+    )
+    elt = EmptyLineTracker(is_pyi=mode.is_pyi)
     empty_line = Line()
     after = 0
+    split_line_features = {
+        feature
+        for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF}
+        if supports_feature(versions, feature)
+    }
     for current_line in lines.visit(src_node):
-        for _ in range(after):
-            dst_contents += str(empty_line)
+        dst_contents.append(str(empty_line) * after)
         before, after = elt.maybe_empty_lines(current_line)
-        for _ in range(before):
-            dst_contents += str(empty_line)
-        if not current_line.is_comment:
-            for comment in comments:
-                dst_contents += str(comment)
-            comments = []
-            for line in split_line(current_line, line_length=line_length):
-                dst_contents += str(line)
-        else:
-            comments.append(current_line)
-    for comment in comments:
-        dst_contents += str(comment)
-    return dst_contents
+        dst_contents.append(str(empty_line) * before)
+        for line in split_line(
+            current_line, line_length=mode.line_length, features=split_line_features
+        ):
+            dst_contents.append(str(line))
+    return "".join(dst_contents)
+
+
+def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]:
+    """Return a tuple of (decoded_contents, encoding, newline).
+
+    `newline` is either CRLF or LF but `decoded_contents` is decoded with
+    universal newlines (i.e. only contains LF).
+    """
+    srcbuf = io.BytesIO(src)
+    encoding, lines = tokenize.detect_encoding(srcbuf.readline)
+    if not lines:
+        return "", encoding, "\n"
+
+    newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n"
+    srcbuf.seek(0)
+    with io.TextIOWrapper(srcbuf, encoding) as tiow:
+        return tiow.read(), encoding, newline
+
+
+def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]:
+    if not target_versions:
+        # No target_version specified, so try all grammars.
+        return [
+            # Python 3.7+
+            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords,
+            # Python 3.0-3.6
+            pygram.python_grammar_no_print_statement_no_exec_statement,
+            # Python 2.7 with future print_function import
+            pygram.python_grammar_no_print_statement,
+            # Python 2.7
+            pygram.python_grammar,
+        ]
+
+    if all(version.is_python2() for version in target_versions):
+        # Python 2-only code, so try Python 2 grammars.
+        return [
+            # Python 2.7 with future print_function import
+            pygram.python_grammar_no_print_statement,
+            # Python 2.7
+            pygram.python_grammar,
+        ]
+
+    # Python 3-compatible code, so only try Python 3 grammar.
+    grammars = []
+    # If we have to parse both, try to parse async as a keyword first
+    if not supports_feature(target_versions, Feature.ASYNC_IDENTIFIERS):
+        # Python 3.7+
+        grammars.append(
+            pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords
+        )
+    if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
+        # Python 3.0-3.6
+        grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)
+    # At least one of the above branches must have been taken, because every Python
+    # version has exactly one of the two 'ASYNC_*' flags
+    return grammars
 
 
-def lib2to3_parse(src_txt: str) -> Node:
+def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
     """Given a string with source, return the lib2to3 Node."""
     """Given a string with source, return the lib2to3 Node."""
-    grammar = pygram.python_grammar_no_print_statement
-    drv = driver.Driver(grammar, pytree.convert)
-    if src_txt[-1] != '\n':
-        nl = '\r\n' if '\r\n' in src_txt[:1024] else '\n'
-        src_txt += nl
-    try:
-        result = drv.parse_string(src_txt, True)
-    except ParseError as pe:
-        lineno, column = pe.context[1]
-        lines = src_txt.splitlines()
+    if src_txt[-1:] != "\n":
+        src_txt += "\n"
+
+    for grammar in get_grammars(set(target_versions)):
+        drv = driver.Driver(grammar, pytree.convert)
         try:
-            faulty_line = lines[lineno - 1]
-        except IndexError:
-            faulty_line = "<line number missing in source>"
-        raise ValueError(f"Cannot parse: {lineno}:{column}: {faulty_line}") from None
+            result = drv.parse_string(src_txt, True)
+            break
+
+        except ParseError as pe:
+            lineno, column = pe.context[1]
+            lines = src_txt.splitlines()
+            try:
+                faulty_line = lines[lineno - 1]
+            except IndexError:
+                faulty_line = "<line number missing in source>"
+            exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}")
+    else:
+        raise exc from None
 
     if isinstance(result, Leaf):
         result = Node(syms.file_input, [result])
@@ -223,20 +891,38 @@ def lib2to3_unparse(node: Node) -> str:
     return code
 
 
-T = TypeVar('T')
+T = TypeVar("T")
 
 
 class Visitor(Generic[T]):
-    """Basic lib2to3 visitor that yields things on visiting."""
+    """Basic lib2to3 visitor that yields things of type `T` on `visit()`."""
 
     def visit(self, node: LN) -> Iterator[T]:
+        """Main method to visit `node` and its children.
+
+        It tries to find a `visit_*()` method for the given `node.type`, like
+        `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects.
+        If no dedicated `visit_*()` method is found, chooses `visit_default()`
+        instead.
+
+        Then yields objects of type `T` from the selected visitor.
+        """
         if node.type < 256:
             name = token.tok_name[node.type]
         else:
-            name = type_repr(node.type)
-        yield from getattr(self, f'visit_{name}', self.visit_default)(node)
+            name = str(type_repr(node.type))
+        # We explicitly branch on whether a visitor exists (instead of
+        # using self.visit_default as the default arg to getattr) in order
+        # to save needing to create a bound method object and so mypyc can
+        # generate a native call to visit_default.
+        visitf = getattr(self, f"visit_{name}", None)
+        if visitf:
+            yield from visitf(node)
+        else:
+            yield from self.visit_default(node)
 
     def visit_default(self, node: LN) -> Iterator[T]:
+        """Default `visit_*()` implementation. Recurses to children of `node`."""
         if isinstance(node, Node):
             for child in node.children:
                 yield from self.visit(child)
@@ -244,33 +930,42 @@ class Visitor(Generic[T]):
 
 @dataclass
 class DebugVisitor(Visitor[T]):
-    tree_depth: int = attrib(default=0)
+    tree_depth: int = 0
 
     def visit_default(self, node: LN) -> Iterator[T]:
-        indent = ' ' * (2 * self.tree_depth)
+        indent = " " * (2 * self.tree_depth)
         if isinstance(node, Node):
             _type = type_repr(node.type)
-            out(f'{indent}{_type}', fg='yellow')
+            out(f"{indent}{_type}", fg="yellow")
             self.tree_depth += 1
             for child in node.children:
                 yield from self.visit(child)
 
             self.tree_depth -= 1
-            out(f'{indent}/{_type}', fg='yellow', bold=False)
+            out(f"{indent}/{_type}", fg="yellow", bold=False)
         else:
             _type = token.tok_name.get(node.type, str(node.type))
-            out(f'{indent}{_type}', fg='blue', nl=False)
+            out(f"{indent}{_type}", fg="blue", nl=False)
             if node.prefix:
                 # We don't have to handle prefixes for `Node` objects since
                 # that delegates to the first child anyway.
-                out(f' {node.prefix!r}', fg='green', bold=False, nl=False)
-            out(f' {node.value!r}', fg='blue', bold=False)
+                out(f" {node.prefix!r}", fg="green", bold=False, nl=False)
+            out(f" {node.value!r}", fg="blue", bold=False)
+
+    @classmethod
+    def show(cls, code: Union[str, Leaf, Node]) -> None:
+        """Pretty-print the lib2to3 AST of a given string of `code`.
+
+        Convenience method for debugging.
+        """
+        v: DebugVisitor[None] = DebugVisitor()
+        if isinstance(code, str):
+            code = lib2to3_parse(code)
+        list(v.visit(code))
 
 
-KEYWORDS = set(keyword.kwlist)
-WHITESPACE = {token.DEDENT, token.INDENT, token.NEWLINE}
-FLOW_CONTROL = {'return', 'raise', 'break', 'continue'}
-STATEMENT = {
+WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE}
+STATEMENT: Final = {
     syms.if_stmt,
     syms.while_stmt,
     syms.for_stmt,
@@ -280,9 +975,10 @@ STATEMENT = {
     syms.funcdef,
     syms.classdef,
 }
-STANDALONE_COMMENT = 153
-LOGIC_OPERATORS = {'and', 'or'}
-COMPARATORS = {
+STANDALONE_COMMENT: Final = 153
+token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT"
+LOGIC_OPERATORS: Final = {"and", "or"}
+COMPARATORS: Final = {
     token.LESS,
     token.GREATER,
     token.EQEQUAL,
@@ -290,131 +986,382 @@ COMPARATORS = {
     token.LESSEQUAL,
     token.GREATEREQUAL,
 }
-MATH_OPERATORS = {
+MATH_OPERATORS: Final = {
+    token.VBAR,
+    token.CIRCUMFLEX,
+    token.AMPER,
+    token.LEFTSHIFT,
+    token.RIGHTSHIFT,
     token.PLUS,
     token.MINUS,
     token.STAR,
     token.SLASH,
-    token.VBAR,
-    token.AMPER,
+    token.DOUBLESLASH,
     token.PERCENT,
-    token.CIRCUMFLEX,
-    token.LEFTSHIFT,
-    token.RIGHTSHIFT,
+    token.AT,
+    token.TILDE,
     token.DOUBLESTAR,
-    token.DOUBLESLASH,
 }
-COMPREHENSION_PRIORITY = 20
-COMMA_PRIORITY = 10
-LOGIC_PRIORITY = 5
-STRING_PRIORITY = 4
-COMPARATOR_PRIORITY = 3
-MATH_PRIORITY = 1
+STARS: Final = {token.STAR, token.DOUBLESTAR}
+VARARGS_SPECIALS: Final = STARS | {token.SLASH}
+VARARGS_PARENTS: Final = {
+    syms.arglist,
+    syms.argument,  # double star in arglist
+    syms.trailer,  # single argument to call
+    syms.typedargslist,
+    syms.varargslist,  # lambdas
+}
+UNPACKING_PARENTS: Final = {
+    syms.atom,  # single element of a list or set literal
+    syms.dictsetmaker,
+    syms.listmaker,
+    syms.testlist_gexp,
+    syms.testlist_star_expr,
+}
+TEST_DESCENDANTS: Final = {
+    syms.test,
+    syms.lambdef,
+    syms.or_test,
+    syms.and_test,
+    syms.not_test,
+    syms.comparison,
+    syms.star_expr,
+    syms.expr,
+    syms.xor_expr,
+    syms.and_expr,
+    syms.shift_expr,
+    syms.arith_expr,
+    syms.trailer,
+    syms.term,
+    syms.power,
+}
+ASSIGNMENTS: Final = {
+    "=",
+    "+=",
+    "-=",
+    "*=",
+    "@=",
+    "/=",
+    "%=",
+    "&=",
+    "|=",
+    "^=",
+    "<<=",
+    ">>=",
+    "**=",
+    "//=",
+}
+COMPREHENSION_PRIORITY: Final = 20
+COMMA_PRIORITY: Final = 18
+TERNARY_PRIORITY: Final = 16
+LOGIC_PRIORITY: Final = 14
+STRING_PRIORITY: Final = 12
+COMPARATOR_PRIORITY: Final = 10
+MATH_PRIORITIES: Final = {
+    token.VBAR: 9,
+    token.CIRCUMFLEX: 8,
+    token.AMPER: 7,
+    token.LEFTSHIFT: 6,
+    token.RIGHTSHIFT: 6,
+    token.PLUS: 5,
+    token.MINUS: 5,
+    token.STAR: 4,
+    token.SLASH: 4,
+    token.DOUBLESLASH: 4,
+    token.PERCENT: 4,
+    token.AT: 4,
+    token.TILDE: 3,
+    token.DOUBLESTAR: 2,
+}
+DOT_PRIORITY: Final = 1
 
 
 @dataclass
 class BracketTracker:
-    depth: int = attrib(default=0)
-    bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = attrib(default=Factory(dict))
-    delimiters: Dict[LeafID, Priority] = attrib(default=Factory(dict))
-    previous: Optional[Leaf] = attrib(default=None)
+    """Keeps track of brackets on a line."""
+
+    depth: int = 0
+    bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
+    delimiters: Dict[LeafID, Priority] = field(default_factory=dict)
+    previous: Optional[Leaf] = None
+    _for_loop_depths: List[int] = field(default_factory=list)
+    _lambda_argument_depths: List[int] = field(default_factory=list)
 
     def mark(self, leaf: Leaf) -> None:
+        """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
+
+        All leaves receive an int `bracket_depth` field that stores how deep
+        within brackets a given leaf is. 0 means there are no enclosing brackets
+        that started on this line.
+
+        If a leaf is itself a closing bracket, it receives an `opening_bracket`
+        field that it forms a pair with. This is a one-directional link to
+        avoid reference cycles.
+
+        If a leaf is a delimiter (a token on which Black can split the line if
+        needed) and it's on depth 0, its `id()` is stored in the tracker's
+        `delimiters` field.
+        """
         if leaf.type == token.COMMENT:
             return
 
+        self.maybe_decrement_after_for_loop_variable(leaf)
+        self.maybe_decrement_after_lambda_arguments(leaf)
         if leaf.type in CLOSING_BRACKETS:
             self.depth -= 1
             opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
-            leaf.opening_bracket = opening_bracket  # type: ignore
-        leaf.bracket_depth = self.depth  # type: ignore
+            leaf.opening_bracket = opening_bracket
+        leaf.bracket_depth = self.depth
         if self.depth == 0:
-            delim = is_delimiter(leaf)
-            if delim:
-                self.delimiters[id(leaf)] = delim
-            elif self.previous is not None:
-                if leaf.type == token.STRING and self.previous.type == token.STRING:
-                    self.delimiters[id(self.previous)] = STRING_PRIORITY
-                elif (
-                    leaf.type == token.NAME and
-                    leaf.value == 'for' and
-                    leaf.parent and
-                    leaf.parent.type in {syms.comp_for, syms.old_comp_for}
-                ):
-                    self.delimiters[id(self.previous)] = COMPREHENSION_PRIORITY
-                elif (
-                    leaf.type == token.NAME and
-                    leaf.value == 'if' and
-                    leaf.parent and
-                    leaf.parent.type in {syms.comp_if, syms.old_comp_if}
-                ):
-                    self.delimiters[id(self.previous)] = COMPREHENSION_PRIORITY
+            delim = is_split_before_delimiter(leaf, self.previous)
+            if delim and self.previous is not None:
+                self.delimiters[id(self.previous)] = delim
+            else:
+                delim = is_split_after_delimiter(leaf, self.previous)
+                if delim:
+                    self.delimiters[id(leaf)] = delim
         if leaf.type in OPENING_BRACKETS:
             self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
             self.depth += 1
         self.previous = leaf
+        self.maybe_increment_lambda_arguments(leaf)
+        self.maybe_increment_for_loop_variable(leaf)
 
     def any_open_brackets(self) -> bool:
 
-        """Returns True if there is an yet unmatched open bracket on the line."""
+        """Return True if there is an yet unmatched open bracket on the line."""
         return bool(self.bracket_match)
 
 
-    def max_priority(self, exclude: Iterable[LeafID] = ()) -> int:
-        """Returns the highest priority of a delimiter found on the line.
+    def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
+        """Return the highest priority of a delimiter found on the line.
 
 
-        Values are consistent with what `is_delimiter()` returns.
+        Values are consistent with what `is_split_*_delimiter()` return.
+        Raises ValueError on no delimiters.
         """
         return max(v for k, v in self.delimiters.items() if k not in exclude)
 
         """
         return max(v for k, v in self.delimiters.items() if k not in exclude)
 
+    def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
+        """Return the number of delimiters with the given `priority`.
+
+        If no `priority` is passed, defaults to max priority on the line.
+        """
+        if not self.delimiters:
+            return 0
+
+        priority = priority or self.max_delimiter_priority()
+        return sum(1 for p in self.delimiters.values() if p == priority)
+
+    def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
+        """In a for loop, or comprehension, the variables are often unpacks.
+
+        To avoid splitting on the comma in this situation, increase the depth of
+        tokens between `for` and `in`.
+        """
+        if leaf.type == token.NAME and leaf.value == "for":
+            self.depth += 1
+            self._for_loop_depths.append(self.depth)
+            return True
+
+        return False
+
+    def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
+        """See `maybe_increment_for_loop_variable` above for explanation."""
+        if (
+            self._for_loop_depths
+            and self._for_loop_depths[-1] == self.depth
+            and leaf.type == token.NAME
+            and leaf.value == "in"
+        ):
+            self.depth -= 1
+            self._for_loop_depths.pop()
+            return True
+
+        return False
+
+    def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
+        """In a lambda expression, there might be more than one argument.
+
+        To avoid splitting on the comma in this situation, increase the depth of
+        tokens between `lambda` and `:`.
+        """
+        if leaf.type == token.NAME and leaf.value == "lambda":
+            self.depth += 1
+            self._lambda_argument_depths.append(self.depth)
+            return True
+
+        return False
+
+    def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
+        """See `maybe_increment_lambda_arguments` above for explanation."""
+        if (
+            self._lambda_argument_depths
+            and self._lambda_argument_depths[-1] == self.depth
+            and leaf.type == token.COLON
+        ):
+            self.depth -= 1
+            self._lambda_argument_depths.pop()
+            return True
+
+        return False
+
+    def get_open_lsqb(self) -> Optional[Leaf]:
+        """Return the most recent opening square bracket (if any)."""
+        return self.bracket_match.get((self.depth - 1, token.RSQB))
+
 
 @dataclass
 class Line:
 
-    depth: int = attrib(default=0)
-    leaves: List[Leaf] = attrib(default=Factory(list))
-    comments: Dict[LeafID, Leaf] = attrib(default=Factory(dict))
-    bracket_tracker: BracketTracker = attrib(default=Factory(BracketTracker))
-    inside_brackets: bool = attrib(default=False)
+    """Holds leaves and comments. Can be printed with `str(line)`."""
+
+    depth: int = 0
+    leaves: List[Leaf] = field(default_factory=list)
+    # keys ordered like `leaves`
+    comments: Dict[LeafID, List[Leaf]] = field(default_factory=dict)
+    bracket_tracker: BracketTracker = field(default_factory=BracketTracker)
+    inside_brackets: bool = False
+    should_explode: bool = False
 
     def append(self, leaf: Leaf, preformatted: bool = False) -> None:
 
-        has_value = leaf.value.strip()
+        """Add a new `leaf` to the end of the line.
+
+        Unless `preformatted` is True, the `leaf` will receive a new consistent
+        whitespace prefix and metadata applied by :class:`BracketTracker`.
+        Trailing commas are maybe removed, unpacked for loop variables are
+        demoted from being delimiters.
+
+        Inline comments are put aside.
+        """
+        has_value = leaf.type in BRACKETS or bool(leaf.value.strip())
         if not has_value:
             return
 
+        if token.COLON == leaf.type and self.is_class_paren_empty:
+            del self.leaves[-2:]
         if self.leaves and not preformatted:
             # Note: at this point leaf.prefix should be empty except for
             # imports, for which we only preserve newlines.
-            leaf.prefix += whitespace(leaf)
+            leaf.prefix += whitespace(
+                leaf, complex_subscript=self.is_complex_subscript(leaf)
+            )
         if self.inside_brackets or not preformatted:
             self.bracket_tracker.mark(leaf)
             self.maybe_remove_trailing_comma(leaf)
-            if self.maybe_adapt_standalone_comment(leaf):
-                return
-
         if not self.append_comment(leaf):
             self.leaves.append(leaf)
 
+    def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
+        """Like :func:`append()` but disallow invalid standalone comment structure.
+
+        Raises ValueError when any `leaf` is appended after a standalone comment
+        or when a standalone comment is not the first leaf on the line.
+        """
+        if self.bracket_tracker.depth == 0:
+            if self.is_comment:
+                raise ValueError("cannot append to standalone comments")
+
+            if self.leaves and leaf.type == STANDALONE_COMMENT:
+                raise ValueError(
+                    "cannot append standalone comments to a populated line"
+                )
+
+        self.append(leaf, preformatted=preformatted)
+
     @property
     def is_comment(self) -> bool:
     @property
     def is_comment(self) -> bool:
-        return bool(self) and self.leaves[0].type == STANDALONE_COMMENT
+        """Is this line a standalone comment?"""
+        return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
 
     @property
     def is_decorator(self) -> bool:
 
     @property
     def is_decorator(self) -> bool:
+        """Is this line a decorator?"""
         return bool(self) and self.leaves[0].type == token.AT
 
     @property
     def is_import(self) -> bool:
         return bool(self) and self.leaves[0].type == token.AT
 
     @property
     def is_import(self) -> bool:
+        """Is this an import line?"""
         return bool(self) and is_import(self.leaves[0])
 
     @property
     def is_class(self) -> bool:
         return bool(self) and is_import(self.leaves[0])
 
     @property
     def is_class(self) -> bool:
+        """Is this line a class definition?"""
         return (
         return (
-            bool(self) and
-            self.leaves[0].type == token.NAME and
-            self.leaves[0].value == 'class'
+            bool(self)
+            and self.leaves[0].type == token.NAME
+            and self.leaves[0].value == "class"
         )
 
         )
 
+    @property
+    def is_stub_class(self) -> bool:
+        """Is this line a class definition with a body consisting only of "..."?"""
+        return self.is_class and self.leaves[-3:] == [
+            Leaf(token.DOT, ".") for _ in range(3)
+        ]
+
+    @property
+    def is_collection_with_optional_trailing_comma(self) -> bool:
+        """Is this line a collection literal with a trailing comma that's optional?
+
+        Note that the trailing comma in a 1-tuple is not optional.
+        """
+        if not self.leaves or len(self.leaves) < 4:
+            return False
+
+        # Look for and address a trailing colon.
+        if self.leaves[-1].type == token.COLON:
+            closer = self.leaves[-2]
+            close_index = -2
+        else:
+            closer = self.leaves[-1]
+            close_index = -1
+        if closer.type not in CLOSING_BRACKETS or self.inside_brackets:
+            return False
+
+        if closer.type == token.RPAR:
+            # Tuples require an extra check, because if there's only
+            # one element in the tuple removing the comma unmakes the
+            # tuple.
+            #
+            # We also check for parens before looking for the trailing
+            # comma because in some cases (eg assigning a dict
+            # literal) the literal gets wrapped in temporary parens
+            # during parsing. This case is covered by the
+            # collections.py test data.
+            opener = closer.opening_bracket
+            for _open_index, leaf in enumerate(self.leaves):
+                if leaf is opener:
+                    break
+
+            else:
+                # Couldn't find the matching opening paren, play it safe.
+                return False
+
+            commas = 0
+            comma_depth = self.leaves[close_index - 1].bracket_depth
+            for leaf in self.leaves[_open_index + 1 : close_index]:
+                if leaf.bracket_depth == comma_depth and leaf.type == token.COMMA:
+                    commas += 1
+            if commas > 1:
+                # We haven't looked yet for the trailing comma because
+                # we might also have caught noop parens.
+                return self.leaves[close_index - 1].type == token.COMMA
+
+            elif commas == 1:
+                return False  # it's either a one-tuple or didn't have a trailing comma
+
+            if self.leaves[close_index - 1].type in CLOSING_BRACKETS:
+                close_index -= 1
+                closer = self.leaves[close_index]
+                if closer.type == token.RPAR:
+                    # TODO: this is a gut feeling. Will we ever see this?
+                    return False
+
+        if self.leaves[close_index - 1].type != token.COMMA:
+            return False
+
+        return True
+
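    # Illustrative sketch (assumption, based on the docstring and checks above):
    # trailing commas that are optional versus one that is not.
    #
    #     x = [1, 2, 3,]    # optional: dropping the comma keeps the meaning
    #     y = (1,)          # not optional: the comma is what makes the 1-tuple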
     @property
     def is_def(self) -> bool:
-        """Also returns True for async defs."""
+        """Is this a function definition? (Also returns True for async defs.)"""
         try:
             first_leaf = self.leaves[0]
         except IndexError:
@@ -424,131 +1371,237 @@ class Line:
             second_leaf: Optional[Leaf] = self.leaves[1]
         except IndexError:
             second_leaf = None
-        return (
-            (first_leaf.type == token.NAME and first_leaf.value == 'def') or
-            (
-                first_leaf.type == token.NAME and
-                first_leaf.value == 'async' and
-                second_leaf is not None and
-                second_leaf.type == token.NAME and
-                second_leaf.value == 'def'
-            )
+        return (first_leaf.type == token.NAME and first_leaf.value == "def") or (
+            first_leaf.type == token.ASYNC
+            and second_leaf is not None
+            and second_leaf.type == token.NAME
+            and second_leaf.value == "def"
         )
 
     @property
-    def is_flow_control(self) -> bool:
+    def is_class_paren_empty(self) -> bool:
+        """Is this a class with no base classes but using parentheses?
+
+        Those are unnecessary and should be removed.
+        """
         return (
-            bool(self) and
-            self.leaves[0].type == token.NAME and
-            self.leaves[0].value in FLOW_CONTROL
+            bool(self)
+            and len(self.leaves) == 4
+            and self.is_class
+            and self.leaves[2].type == token.LPAR
+            and self.leaves[2].value == "("
+            and self.leaves[3].type == token.RPAR
+            and self.leaves[3].value == ")"
         )
 
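    # Illustrative match for is_class_paren_empty (assumption): a header such as
    #
    #     class Foo():
    #
    # carries an empty "()" that serves no purpose and can be removed, leaving
    # "class Foo:".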
     @property
-    def is_yield(self) -> bool:
+    def is_triple_quoted_string(self) -> bool:
+        """Is the line a triple quoted string?"""
         return (
-            bool(self) and
-            self.leaves[0].type == token.NAME and
-            self.leaves[0].value == 'yield'
+            bool(self)
+            and self.leaves[0].type == token.STRING
+            and self.leaves[0].value.startswith(('"""', "'''"))
         )
 
-    def maybe_remove_trailing_comma(self, closing: Leaf) -> bool:
-        if not (
-            self.leaves and
-            self.leaves[-1].type == token.COMMA and
-            closing.type in CLOSING_BRACKETS
-        ):
-            return False
-
-        if closing.type == token.RSQB or closing.type == token.RBRACE:
-            self.leaves.pop()
-            return True
+    def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
+        """If so, needs to be split before emitting."""
+        for leaf in self.leaves:
+            if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit:
+                return True
 
 
-        # For parens let's check if it's safe to remove the comma.  If the
-        # trailing one is the only one, we might mistakenly change a tuple
-        # into a different type by removing the comma.
-        depth = closing.bracket_depth + 1  # type: ignore
-        commas = 0
-        opening = closing.opening_bracket  # type: ignore
-        for _opening_index, leaf in enumerate(self.leaves):
-            if leaf is opening:
-                break
+        return False
 
 
-        else:
+    def contains_uncollapsable_type_comments(self) -> bool:
+        ignored_ids = set()
+        try:
+            last_leaf = self.leaves[-1]
+            ignored_ids.add(id(last_leaf))
+            if last_leaf.type == token.COMMA or (
+                last_leaf.type == token.RPAR and not last_leaf.value
+            ):
+                # When trailing commas or optional parens are inserted by Black for
+                # consistency, comments after the previous last element are not moved
+                # (they don't have to, rendering will still be correct).  So we ignore
+                # trailing commas and invisible parens.
+                last_leaf = self.leaves[-2]
+                ignored_ids.add(id(last_leaf))
+        except IndexError:
             return False
 
-        for leaf in self.leaves[_opening_index + 1:]:
-            if leaf is closing:
-                break
+        # A type comment is uncollapsable if it is attached to a leaf
+        # that isn't at the end of the line (since that could cause it
+        # to get associated to a different argument) or if there are
+        # comments before it (since that could cause it to get hidden
+        # behind a comment).
+        comment_seen = False
+        for leaf_id, comments in self.comments.items():
+            for comment in comments:
+                if is_type_comment(comment):
+                    if comment_seen or (
+                        not is_type_comment(comment, " ignore")
+                        and leaf_id not in ignored_ids
+                    ):
+                        return True
 
 
-            bracket_depth = leaf.bracket_depth  # type: ignore
-            if bracket_depth == depth and leaf.type == token.COMMA:
-                commas += 1
-        if commas > 1:
-            self.leaves.pop()
-            return True
+                comment_seen = True
 
         return False
 
-    def maybe_adapt_standalone_comment(self, comment: Leaf) -> bool:
-        """Hack a standalone comment to act as a trailing comment for line splitting.
+    def contains_unsplittable_type_ignore(self) -> bool:
+        if not self.leaves:
+            return False
 
 
-        If this line has brackets and a standalone `comment`, we need to adapt
-        it to be able to still reformat the line.
+        # If a 'type: ignore' is attached to the end of a line, we
+        # can't split the line, because we can't know which of the
+        # subexpressions the ignore was meant to apply to.
+        #
+        # We only want this to apply to actual physical lines from the
+        # original source, though: we don't want the presence of a
+        # 'type: ignore' at the end of a multiline expression to
+        # justify pushing it all onto one line. Thus we
+        # (unfortunately) need to check the actual source lines and
+        # only report an unsplittable 'type: ignore' if this line was
+        # one line in the original code.
+
+        # Grab the first and last line numbers, skipping generated leaves
+        first_line = next((l.lineno for l in self.leaves if l.lineno != 0), 0)
+        last_line = next((l.lineno for l in reversed(self.leaves) if l.lineno != 0), 0)
+
+        if first_line == last_line:
+            # We look at the last two leaves since a comma or an
+            # invisible paren could have been added at the end of the
+            # line.
+            for node in self.leaves[-2:]:
+                for comment in self.comments.get(id(node), []):
+                    if is_type_comment(comment, " ignore"):
+                        return True
 
 
-        This is not perfect, the line to which the standalone comment gets
-        appended will appear "too long" when splitting.
-        """
+        return False
+
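    # Illustrative sketch (assumption, not from the patch): a physical line like
    #
    #     result = call(arg_one, arg_two)  # type: ignore
    #
    # is reported as unsplittable above, because after a split it would no
    # longer be clear which subexpression the ignore was meant to cover.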
+    def contains_multiline_strings(self) -> bool:
+        return any(is_multiline_string(leaf) for leaf in self.leaves)
+
+    def maybe_remove_trailing_comma(self, closing: Leaf) -> bool:
+        """Remove trailing comma if there is one and it's safe."""
+        if not (self.leaves and self.leaves[-1].type == token.COMMA):
+            return False
+
+        # We remove trailing commas only in the case of importing a
+        # single name from a module.
         if not (
-            comment.type == STANDALONE_COMMENT and
-            self.bracket_tracker.any_open_brackets()
+            self.leaves
+            and self.is_import
+            and len(self.leaves) > 4
+            and self.leaves[-1].type == token.COMMA
+            and closing.type in CLOSING_BRACKETS
+            and self.leaves[-4].type == token.NAME
+            and (
+                # regular `from foo import bar,`
+                self.leaves[-4].value == "import"
+                # `from foo import (bar as baz,)`
+                or (
+                    len(self.leaves) > 6
+                    and self.leaves[-6].value == "import"
+                    and self.leaves[-3].value == "as"
+                )
+                # `from foo import bar as baz,`
+                or (
+                    len(self.leaves) > 5
+                    and self.leaves[-5].value == "import"
+                    and self.leaves[-3].value == "as"
+                )
+            )
+            and closing.type == token.RPAR
         ):
             return False
 
-        comment.type = token.COMMENT
-        comment.prefix = '\n' + '    ' * (self.depth + 1)
-        return self.append_comment(comment)
+        self.remove_trailing_comma()
+        return True
 
     def append_comment(self, comment: Leaf) -> bool:
+        """Add an inline or standalone comment to the line."""
+        if (
+            comment.type == STANDALONE_COMMENT
+            and self.bracket_tracker.any_open_brackets()
+        ):
+            comment.prefix = ""
+            return False
+
         if comment.type != token.COMMENT:
             return False
 
-        try:
-            after = id(self.last_non_delimiter())
-        except LookupError:
+        if not self.leaves:
             comment.type = STANDALONE_COMMENT
-            comment.prefix = ''
+            comment.prefix = ""
             return False
 
-        else:
-            if after in self.comments:
-                self.comments[after].value += str(comment)
-            else:
-                self.comments[after] = comment
-            return True
+        last_leaf = self.leaves[-1]
+        if (
+            last_leaf.type == token.RPAR
+            and not last_leaf.value
+            and last_leaf.parent
+            and len(list(last_leaf.parent.leaves())) <= 3
+            and not is_type_comment(comment)
+        ):
+            # Comments on optional parens wrapping a single leaf should belong to
+            # the wrapped node except if it's a type comment. Pinning the comment like
+            # this avoids unstable formatting caused by comment migration.
+            if len(self.leaves) < 2:
+                comment.type = STANDALONE_COMMENT
+                comment.prefix = ""
+                return False
+
+            last_leaf = self.leaves[-2]
+        self.comments.setdefault(id(last_leaf), []).append(comment)
+        return True
+
+    def comments_after(self, leaf: Leaf) -> List[Leaf]:
+        """Generate comments that should appear directly after `leaf`."""
+        return self.comments.get(id(leaf), [])
+
+    def remove_trailing_comma(self) -> None:
+        """Remove the trailing comma and move the comments attached to it."""
+        trailing_comma = self.leaves.pop()
+        trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
+        self.comments.setdefault(id(self.leaves[-1]), []).extend(
+            trailing_comma_comments
+        )
 
 
-    def last_non_delimiter(self) -> Leaf:
-        for i in range(len(self.leaves)):
-            last = self.leaves[-i - 1]
-            if not is_delimiter(last):
-                return last
+    def is_complex_subscript(self, leaf: Leaf) -> bool:
+        """Return True iff `leaf` is part of a slice with non-trivial exprs."""
+        open_lsqb = self.bracket_tracker.get_open_lsqb()
+        if open_lsqb is None:
+            return False
+
+        subscript_start = open_lsqb.next_sibling
 
 
-        raise LookupError("No non-delimiters found")
+        if isinstance(subscript_start, Node):
+            if subscript_start.type == syms.listmaker:
+                return False
+
+            if subscript_start.type == syms.subscriptlist:
+                subscript_start = child_towards(subscript_start, leaf)
+        return subscript_start is not None and any(
+            n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
+        )
 
     def __str__(self) -> str:
+        """Render the line."""
         if not self:
-            return '\n'
+            return "\n"
 
 
-        indent = '    ' * self.depth
+        indent = "    " * self.depth
         leaves = iter(self.leaves)
         first = next(leaves)
-        res = f'{first.prefix}{indent}{first.value}'
+        res = f"{first.prefix}{indent}{first.value}"
         for leaf in leaves:
             res += str(leaf)
-        for comment in self.comments.values():
+        for comment in itertools.chain.from_iterable(self.comments.values()):
             res += str(comment)
-        return res + '\n'
+        return res + "\n"
 
     def __bool__(self) -> bool:
+        """Return True if the line has leaves or comments."""
         return bool(self.leaves or self.comments)
 
 
@@ -557,68 +1610,117 @@ class EmptyLineTracker:
     """Provides a stateful method that returns the number of potential extra
     empty lines needed before and after the currently processed line.
 
-    Note: this tracker works on lines that haven't been split yet.
+    Note: this tracker works on lines that haven't been split yet.  It assumes
+    the prefix of the first leaf consists of optional newlines.  Those newlines
+    are consumed by `maybe_empty_lines()` and included in the computation.
     """
     """
-    previous_line: Optional[Line] = attrib(default=None)
-    previous_after: int = attrib(default=0)
-    previous_defs: List[int] = attrib(default=Factory(list))
+
+    is_pyi: bool = False
+    previous_line: Optional[Line] = None
+    previous_after: int = 0
+    previous_defs: List[int] = field(default_factory=list)
 
     def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
-        """Returns the number of extra empty lines before and after the `current_line`.
+        """Return the number of extra empty lines before and after the `current_line`.
 
 
-        This is for separating `def`, `async def` and `class` with extra empty lines
-        (two on module-level), as well as providing an extra empty line after flow
-        control keywords to make them more prominent.
+        This is for separating `def`, `async def` and `class` with extra empty
+        lines (two on module-level).
         """
         before, after = self._maybe_empty_lines(current_line)
         """
         before, after = self._maybe_empty_lines(current_line)
+        before = (
+            # Black should not insert empty lines at the beginning
+            # of the file
+            0
+            if self.previous_line is None
+            else before - self.previous_after
+        )
         self.previous_after = after
         self.previous_line = current_line
         return before, after
 
     def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
         self.previous_after = after
         self.previous_line = current_line
         return before, after
 
     def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
-        before = 0
+        max_allowed = 1
+        if current_line.depth == 0:
+            max_allowed = 1 if self.is_pyi else 2
+        if current_line.leaves:
+            # Consume the first leaf's extra newlines.
+            first_leaf = current_line.leaves[0]
+            before = first_leaf.prefix.count("\n")
+            before = min(before, max_allowed)
+            first_leaf.prefix = ""
+        else:
+            before = 0
         depth = current_line.depth
         while self.previous_defs and self.previous_defs[-1] >= depth:
             self.previous_defs.pop()
-            before = (1 if depth else 2) - self.previous_after
-        is_decorator = current_line.is_decorator
-        if is_decorator or current_line.is_def or current_line.is_class:
-            if not is_decorator:
-                self.previous_defs.append(depth)
-            if self.previous_line is None:
-                # Don't insert empty lines before the first line in the file.
-                return 0, 0
-
-            if self.previous_line and self.previous_line.is_decorator:
-                # Don't insert empty lines between decorators.
-                return 0, 0
-
-            newlines = 2
-            if current_line.depth:
-                newlines -= 1
-            newlines -= self.previous_after
-            return newlines, 0
-
-        if current_line.is_flow_control:
-            return before, 1
+            if self.is_pyi:
+                before = 0 if depth else 1
+            else:
+                before = 1 if depth else 2
+        if current_line.is_decorator or current_line.is_def or current_line.is_class:
+            return self._maybe_empty_lines_for_class_or_def(current_line, before)
 
         if (
-            self.previous_line and
-            self.previous_line.is_import and
-            not current_line.is_import and
-            depth == self.previous_line.depth
+            self.previous_line
+            and self.previous_line.is_import
+            and not current_line.is_import
+            and depth == self.previous_line.depth
         ):
             return (before or 1), 0
 
         if (
-            self.previous_line and
-            self.previous_line.is_yield and
-            (not current_line.is_yield or depth != self.previous_line.depth)
+            self.previous_line
+            and self.previous_line.is_class
+            and current_line.is_triple_quoted_string
         ):
         ):
-            return (before or 1), 0
+            return before, 1
 
         return before, 0
 
+    def _maybe_empty_lines_for_class_or_def(
+        self, current_line: Line, before: int
+    ) -> Tuple[int, int]:
+        if not current_line.is_decorator:
+            self.previous_defs.append(current_line.depth)
+        if self.previous_line is None:
+            # Don't insert empty lines before the first line in the file.
+            return 0, 0
+
+        if self.previous_line.is_decorator:
+            return 0, 0
+
+        if self.previous_line.depth < current_line.depth and (
+            self.previous_line.is_class or self.previous_line.is_def
+        ):
+            return 0, 0
+
+        if (
+            self.previous_line.is_comment
+            and self.previous_line.depth == current_line.depth
+            and before == 0
+        ):
+            return 0, 0
+
+        if self.is_pyi:
+            if self.previous_line.depth > current_line.depth:
+                newlines = 1
+            elif current_line.is_class or self.previous_line.is_class:
+                if current_line.is_stub_class and self.previous_line.is_stub_class:
+                    # No blank line between classes with an empty body
+                    newlines = 0
+                else:
+                    newlines = 1
+            elif current_line.is_def and not self.previous_line.is_def:
+                # Blank line between a block of functions and a block of non-functions
+                newlines = 1
+            else:
+                newlines = 0
+        else:
+            newlines = 2
+        if current_line.depth and newlines:
+            newlines -= 1
+        return newlines, 0
+
 
 @dataclass
 class LineGenerator(Visitor[Line]):
@@ -627,8 +1729,11 @@ class LineGenerator(Visitor[Line]):
     Note: destroys the tree it's visiting by mutating prefixes of its leaves
     in ways that will no longer stringify to valid Python code on the tree.
     """
-    current_line: Line = attrib(default=Factory(Line))
-    standalone_comments: List[Leaf] = attrib(default=Factory(list))
+
+    is_pyi: bool = False
+    normalize_strings: bool = True
+    current_line: Line = field(default_factory=Line)
+    remove_u_prefix: bool = False
 
     def line(self, indent: int = 0) -> Iterator[Line]:
         """Generate a line.
@@ -647,9 +1752,11 @@ class LineGenerator(Visitor[Line]):
         yield complete_line
 
     def visit_default(self, node: LN) -> Iterator[Line]:
+        """Default `visit_*()` implementation. Recurses to children of `node`."""
         if isinstance(node, Leaf):
+            any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
             for comment in generate_comments(node):
-                if self.current_line.bracket_tracker.any_open_brackets():
+                if any_open_brackets:
                     # any comment within brackets is subject to splitting
                     self.current_line.append(comment)
                 elif comment.type == token.COMMENT:
@@ -658,67 +1765,95 @@ class LineGenerator(Visitor[Line]):
                     yield from self.line()
 
                 else:
-                    # regular standalone comment, to be processed later (see
-                    # docstring in `generate_comments()`
-                    self.standalone_comments.append(comment)
-            normalize_prefix(node)
-            if node.type not in WHITESPACE:
-                for comment in self.standalone_comments:
+                    # regular standalone comment
                     yield from self.line()
 
                     self.current_line.append(comment)
                     yield from self.line()
 
-                self.standalone_comments = []
+            normalize_prefix(node, inside_brackets=any_open_brackets)
+            if self.normalize_strings and node.type == token.STRING:
+                normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix)
+                normalize_string_quotes(node)
+            if node.type == token.NUMBER:
+                normalize_numeric_literal(node)
+            if node.type not in WHITESPACE:
                 self.current_line.append(node)
         yield from super().visit_default(node)
 
-    def visit_suite(self, node: Node) -> Iterator[Line]:
-        """Body of a statement after a colon."""
-        children = iter(node.children)
-        # Process newline before indenting.  It might contain an inline
-        # comment that should go right after the colon.
-        newline = next(children)
-        yield from self.visit(newline)
+    def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
+        """Increase indentation level, maybe yield a line."""
+        # In blib2to3 INDENT never holds comments.
         yield from self.line(+1)
         yield from self.line(+1)
+        yield from self.visit_default(node)
 
 
-        for child in children:
-            yield from self.visit(child)
+    def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
+        """Decrease indentation level, maybe yield a line."""
+        # The current line might still wait for trailing comments.  At DEDENT time
+        # there won't be any (they would be prefixes on the preceding NEWLINE).
+        # Emit the line then.
+        yield from self.line()
+
+        # While DEDENT has no value, its prefix may contain standalone comments
+        # that belong to the current indentation level.  Get 'em.
+        yield from self.visit_default(node)
 
 
+        # Finally, emit the dedent.
         yield from self.line(-1)
 
-    def visit_stmt(self, node: Node, keywords: Set[str]) -> Iterator[Line]:
+    def visit_stmt(
+        self, node: Node, keywords: Set[str], parens: Set[str]
+    ) -> Iterator[Line]:
         """Visit a statement.
 
         """Visit a statement.
 
-        The relevant Python language keywords for this statement are NAME leaves
-        within it.
+        This implementation is shared for `if`, `while`, `for`, `try`, `except`,
+        `def`, `with`, `class`, `assert` and assignments.
+
+        The relevant Python language `keywords` for a given statement will be
+        NAME leaves within it. This method puts those on a separate line.
+
+        `parens` holds a set of string leaf values immediately after which
+        invisible parens should be put.
         """
         """
+        normalize_invisible_parens(node, parens_after=parens)
         for child in node.children:
             if child.type == token.NAME and child.value in keywords:  # type: ignore
                 yield from self.line()
 
             yield from self.visit(child)
 
         for child in node.children:
             if child.type == token.NAME and child.value in keywords:  # type: ignore
                 yield from self.line()
 
             yield from self.visit(child)
 
+    def visit_suite(self, node: Node) -> Iterator[Line]:
+        """Visit a suite."""
+        if self.is_pyi and is_stub_suite(node):
+            yield from self.visit(node.children[2])
+        else:
+            yield from self.visit_default(node)
+
     def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
-        """A statement without nested statements."""
+        """Visit a statement without nested statements."""
         is_suite_like = node.parent and node.parent.type in STATEMENT
         if is_suite_like:
-            yield from self.line(+1)
-            yield from self.visit_default(node)
-            yield from self.line(-1)
+            if self.is_pyi and is_stub_body(node):
+                yield from self.visit_default(node)
+            else:
+                yield from self.line(+1)
+                yield from self.visit_default(node)
+                yield from self.line(-1)
 
         else:
-            yield from self.line()
+            if not self.is_pyi or not node.parent or not is_stub_suite(node.parent):
+                yield from self.line()
             yield from self.visit_default(node)
 
     def visit_async_stmt(self, node: Node) -> Iterator[Line]:
+        """Visit `async def`, `async for`, `async with`."""
         yield from self.line()
 
         children = iter(node.children)
         for child in children:
             yield from self.visit(child)
 
-            if child.type == token.NAME and child.value == 'async':  # type: ignore
+            if child.type == token.ASYNC:
                 break
 
         internal_stmt = next(children)
@@ -726,82 +1861,173 @@ class LineGenerator(Visitor[Line]):
             yield from self.visit(child)
 
     def visit_decorators(self, node: Node) -> Iterator[Line]:
+        """Visit decorators."""
         for child in node.children:
             yield from self.line()
             yield from self.visit(child)
 
     def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
+        """Remove a semicolon and put the other statement on a separate line."""
         yield from self.line()
 
     def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
+        """End of file. Process outstanding comments and end with a newline."""
         yield from self.visit_default(leaf)
         yield from self.line()
 
-    def __attrs_post_init__(self) -> None:
+    def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
+        if not self.current_line.bracket_tracker.any_open_brackets():
+            yield from self.line()
+        yield from self.visit_default(leaf)
+
+    def visit_factor(self, node: Node) -> Iterator[Line]:
+        """Force parentheses between a unary op and a binary power:
+
+        -2 ** 8 -> -(2 ** 8)
+        """
+        _operator, operand = node.children
+        if (
+            operand.type == syms.power
+            and len(operand.children) == 3
+            and operand.children[1].type == token.DOUBLESTAR
+        ):
+            lpar = Leaf(token.LPAR, "(")
+            rpar = Leaf(token.RPAR, ")")
+            index = operand.remove() or 0
+            node.insert_child(index, Node(syms.atom, [lpar, operand, rpar]))
+        yield from self.visit_default(node)
+
+    def __post_init__(self) -> None:
         """You are in a twisty little maze of passages."""
         v = self.visit_stmt
-        self.visit_if_stmt = partial(v, keywords={'if', 'else', 'elif'})
-        self.visit_while_stmt = partial(v, keywords={'while', 'else'})
-        self.visit_for_stmt = partial(v, keywords={'for', 'else'})
-        self.visit_try_stmt = partial(v, keywords={'try', 'except', 'else', 'finally'})
-        self.visit_except_clause = partial(v, keywords={'except'})
-        self.visit_funcdef = partial(v, keywords={'def'})
-        self.visit_with_stmt = partial(v, keywords={'with'})
-        self.visit_classdef = partial(v, keywords={'class'})
+        Ø: Set[str] = set()
+        self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
+        self.visit_if_stmt = partial(
+            v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
+        )
+        self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
+        self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
+        self.visit_try_stmt = partial(
+            v, keywords={"try", "except", "else", "finally"}, parens=Ø
+        )
+        self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
+        self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
+        self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
+        self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
+        self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
+        self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
+        self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
+        self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
         self.visit_async_funcdef = self.visit_async_stmt
         self.visit_decorated = self.visit_decorators
 
 
 
 
+IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist}
 BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE}
 OPENING_BRACKETS = set(BRACKET.keys())
 CLOSING_BRACKETS = set(BRACKET.values())
 BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
+ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT}
 
 
 
 
-def whitespace(leaf: Leaf) -> str:
-    """Return whitespace prefix if needed for the given `leaf`."""
-    NO = ''
-    SPACE = ' '
-    DOUBLESPACE = '  '
+def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str:  # noqa: C901
+    """Return whitespace prefix if needed for the given `leaf`.
+
+    `complex_subscript` signals whether the given leaf is part of a subscription
+    which has non-trivial arguments, like arithmetic expressions or function calls.
+    """
+    NO = ""
+    SPACE = " "
+    DOUBLESPACE = "  "
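    # Illustrative effect of `complex_subscript` (assumption, based on the
    # docstring above): slices with non-trivial expressions get spaces around
    # the colon, simple ones do not.
    #
    #     ham[lower + offset : upper + offset]   # complex_subscript is True
    #     ham[1:9]                               # complex_subscript is False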
     t = leaf.type
     p = leaf.parent
-    if t == token.COLON:
+    v = leaf.value
+    if t in ALWAYS_NO_SPACE:
         return NO
 
-    if t == token.COMMA:
-        return NO
+    if t == token.COMMENT:
+        return DOUBLESPACE
 
 
-    if t == token.RPAR:
+    assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
+    if t == token.COLON and p.type not in {
+        syms.subscript,
+        syms.subscriptlist,
+        syms.sliceop,
+    }:
         return NO
 
         return NO
 
-    if t == token.COMMENT:
-        return DOUBLESPACE
+    prev = leaf.prev_sibling
+    if not prev:
+        prevp = preceding_leaf(p)
+        if not prevp or prevp.type in OPENING_BRACKETS:
+            return NO
+
+        if t == token.COLON:
+            if prevp.type == token.COLON:
+                return NO
+
+            elif prevp.type != token.COMMA and not complex_subscript:
+                return NO
+
+            return SPACE
 
 
-    if t == STANDALONE_COMMENT:
+        if prevp.type == token.EQUAL:
+            if prevp.parent:
+                if prevp.parent.type in {
+                    syms.arglist,
+                    syms.argument,
+                    syms.parameters,
+                    syms.varargslist,
+                }:
+                    return NO
+
+                elif prevp.parent.type == syms.typedargslist:
+                    # A bit hacky: if the equal sign has whitespace, it means we
+                    # previously found it's a typed argument.  So, we're using
+                    # that, too.
+                    return prevp.prefix
+
+        elif prevp.type in VARARGS_SPECIALS:
+            if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS):
+                return NO
+
+        elif prevp.type == token.COLON:
+            if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
+                return SPACE if complex_subscript else NO
+
+        elif (
+            prevp.parent
+            and prevp.parent.type == syms.factor
+            and prevp.type in MATH_OPERATORS
+        ):
+            return NO
+
+        elif (
+            prevp.type == token.RIGHTSHIFT
+            and prevp.parent
+            and prevp.parent.type == syms.shift_expr
+            and prevp.prev_sibling
+            and prevp.prev_sibling.type == token.NAME
+            and prevp.prev_sibling.value == "print"  # type: ignore
+        ):
+            # Python 2 print chevron
+            return NO
+
+    elif prev.type in OPENING_BRACKETS:
         return NO
 
-    assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
     if p.type in {syms.parameters, syms.arglist}:
         # untyped function signatures or calls
-        if t == token.RPAR:
-            return NO
-
-        prev = leaf.prev_sibling
         if not prev or prev.type != token.COMMA:
             return NO
 
         if not prev or prev.type != token.COMMA:
             return NO
 
-    if p.type == syms.varargslist:
+    elif p.type == syms.varargslist:
         # lambdas
         # lambdas
-        if t == token.RPAR:
-            return NO
-
-        prev = leaf.prev_sibling
         if prev and prev.type != token.COMMA:
             return NO
 
     elif p.type == syms.typedargslist:
         # typed function signatures
         if prev and prev.type != token.COMMA:
             return NO
 
     elif p.type == syms.typedargslist:
         # typed function signatures
-        prev = leaf.prev_sibling
         if not prev:
             return NO
 
         if not prev:
             return NO
 
@@ -819,7 +2045,6 @@ def whitespace(leaf: Leaf) -> str:
 
     elif p.type == syms.tname:
         # type names
 
     elif p.type == syms.tname:
         # type names
-        prev = leaf.prev_sibling
         if not prev:
             prevp = preceding_leaf(p)
             if not prevp or prevp.type != token.COMMA:
         if not prev:
             prevp = preceding_leaf(p)
             if not prevp or prevp.type != token.COMMA:
@@ -830,7 +2055,6 @@ def whitespace(leaf: Leaf) -> str:
         if t == token.LPAR or t == token.RPAR:
             return NO
 
         if t == token.LPAR or t == token.RPAR:
             return NO
 
-        prev = leaf.prev_sibling
         if not prev:
             if t == token.DOT:
                 prevp = preceding_leaf(p)
         if not prev:
             if t == token.DOT:
                 prevp = preceding_leaf(p)
@@ -848,13 +2072,12 @@ def whitespace(leaf: Leaf) -> str:
         if t == token.EQUAL:
             return NO
 
         if t == token.EQUAL:
             return NO
 
-        prev = leaf.prev_sibling
         if not prev:
             prevp = preceding_leaf(p)
             if not prevp or prevp.type == token.LPAR:
                 return NO
 
         if not prev:
             prevp = preceding_leaf(p)
             if not prevp or prevp.type == token.LPAR:
                 return NO
 
-        elif prev.type == token.EQUAL or prev.type == token.DOUBLESTAR:
+        elif prev.type in {token.EQUAL} | VARARGS_SPECIALS:
             return NO
 
     elif p.type == syms.decorator:
             return NO
 
     elif p.type == syms.decorator:
@@ -862,113 +2085,44 @@ def whitespace(leaf: Leaf) -> str:
         return NO
 
     elif p.type == syms.dotted_name:
         return NO
 
     elif p.type == syms.dotted_name:
-        prev = leaf.prev_sibling
         if prev:
             return NO
 
         prevp = preceding_leaf(p)
         if prev:
             return NO
 
         prevp = preceding_leaf(p)
-        if not prevp or prevp.type == token.AT:
+        if not prevp or prevp.type == token.AT or prevp.type == token.DOT:
             return NO
 
     elif p.type == syms.classdef:
         if t == token.LPAR:
             return NO
 
             return NO
 
     elif p.type == syms.classdef:
         if t == token.LPAR:
             return NO
 
-        prev = leaf.prev_sibling
         if prev and prev.type == token.LPAR:
             return NO
 
         if prev and prev.type == token.LPAR:
             return NO
 
-    elif p.type == syms.subscript:
+    elif p.type in {syms.subscript, syms.sliceop}:
         # indexing
         # indexing
-        if t == token.COLON:
-            return NO
-
-        prev = leaf.prev_sibling
-        if not prev or prev.type == token.COLON:
-            return NO
-
-    elif p.type in {
-        syms.test,
-        syms.not_test,
-        syms.xor_expr,
-        syms.or_test,
-        syms.and_test,
-        syms.arith_expr,
-        syms.shift_expr,
-        syms.yield_expr,
-        syms.term,
-        syms.power,
-        syms.comparison,
-    }:
-        # various arithmetic and logic expressions
-        prev = leaf.prev_sibling
         if not prev:
         if not prev:
-            prevp = preceding_leaf(p)
-            if not prevp or prevp.type in OPENING_BRACKETS:
-                return NO
-
-            if prevp.type == token.EQUAL:
-                if prevp.parent and prevp.parent.type in {
-                    syms.varargslist, syms.parameters, syms.arglist, syms.argument
-                }:
-                    return NO
+            assert p.parent is not None, "subscripts are always parented"
+            if p.parent.type == syms.subscriptlist:
+                return SPACE
 
 
-        return SPACE
-
-    elif p.type == syms.atom:
-        if t in CLOSING_BRACKETS:
             return NO
 
-        prev = leaf.prev_sibling
-        if not prev:
-            prevp = preceding_leaf(p)
-            if not prevp:
-                return NO
-
-            if prevp.type in OPENING_BRACKETS:
-                return NO
-
-            if prevp.type == token.EQUAL:
-                if prevp.parent and prevp.parent.type in {
-                    syms.varargslist, syms.parameters, syms.arglist, syms.argument
-                }:
-                    return NO
-
-            if prevp.type == token.DOUBLESTAR:
-                if prevp.parent and prevp.parent.type in {
-                    syms.varargslist, syms.parameters, syms.arglist, syms.dictsetmaker
-                }:
-                    return NO
-
-        elif prev.type in OPENING_BRACKETS:
+        elif not complex_subscript:
             return NO
 
             return NO
 
-        elif t == token.DOT:
+    elif p.type == syms.atom:
+        if prev and t == token.DOT:
             # dots, but not the first one.
             return NO
 
-    elif (
-        p.type == syms.listmaker or
-        p.type == syms.testlist_gexp or
-        p.type == syms.subscriptlist
-    ):
-        # list interior, including unpacking
-        prev = leaf.prev_sibling
-        if not prev:
-            return NO
-
     elif p.type == syms.dictsetmaker:
-        # dict and set interior, including unpacking
-        prev = leaf.prev_sibling
-        if not prev:
-            return NO
-
-        if prev.type == token.DOUBLESTAR:
+        # dict unpacking
+        if prev and prev.type == token.DOUBLESTAR:
             return NO
 
-    elif p.type == syms.factor or p.type == syms.star_expr:
+    elif p.type in {syms.factor, syms.star_expr}:
         # unary ops
-        prev = leaf.prev_sibling
         if not prev:
             prevp = preceding_leaf(p)
             if not prevp or prevp.type in OPENING_BRACKETS:
@@ -977,20 +2131,28 @@ def whitespace(leaf: Leaf) -> str:
             prevp_parent = prevp.parent
             assert prevp_parent is not None
             if prevp.type == token.COLON and prevp_parent.type in {
-                syms.subscript, syms.sliceop
+                syms.subscript,
+                syms.sliceop,
             }:
                 return NO
 
             elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
                 return NO
 
-        elif t == token.NAME or t == token.NUMBER:
+        elif t in {token.NAME, token.NUMBER, token.STRING}:
             return NO
 
-    elif p.type == syms.import_from and t == token.NAME:
-        prev = leaf.prev_sibling
-        if prev and prev.type == token.DOT:
-            return NO
+    elif p.type == syms.import_from:
+        if t == token.DOT:
+            if prev and prev.type == token.DOT:
+                return NO
+
+        elif t == token.NAME:
+            if v == "import":
+                return SPACE
+
+            if prev and prev.type == token.DOT:
+                return NO
 
     elif p.type == syms.sliceop:
         return NO
@@ -999,7 +2161,7 @@ def whitespace(leaf: Leaf) -> str:
 
 
 def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
-    """Returns the first leaf that precedes `node`, if any."""
+    """Return the first leaf that precedes `node`, if any."""
     while node:
         res = node.prev_sibling
         if res:
@@ -1016,32 +2178,155 @@ def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
     return None
 
 
-def is_delimiter(leaf: Leaf) -> int:
-    """Returns the priority of the `leaf` delimiter. Returns 0 if not delimiter.
+def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]:
+    """Return the child of `ancestor` that contains `descendant`."""
+    node: Optional[LN] = descendant
+    while node and node.parent != ancestor:
+        node = node.parent
+    return node
+
+
+def container_of(leaf: Leaf) -> LN:
+    """Return `leaf` or one of its ancestors that is the topmost container of it.
+
+    By "container" we mean a node where `leaf` is the very first child.
+    """
+    same_prefix = leaf.prefix
+    container: LN = leaf
+    while container:
+        parent = container.parent
+        if parent is None:
+            break
+
+        if parent.children[0].prefix != same_prefix:
+            break
+
+        if parent.type == syms.file_input:
+            break
+
+        if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS:
+            break
+
+        container = parent
+    return container
+
+
+def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
+    """Return the priority of the `leaf` delimiter, given a line break after it.
+
+    The delimiter priorities returned here are from those delimiters that would
+    cause a line break after themselves.
 
     Higher numbers are higher priority.
     """
     if leaf.type == token.COMMA:
         return COMMA_PRIORITY
 
-    if leaf.type == token.NAME and leaf.value in LOGIC_OPERATORS:
-        return LOGIC_PRIORITY
+    return 0
+
+
+def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
+    """Return the priority of the `leaf` delimiter, given a line break before it.
+
+    The delimiter priorities returned here are from those delimiters that would
+    cause a line break before themselves.
+
+    Higher numbers are higher priority.
+    """
+    if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
+        # * and ** might also be MATH_OPERATORS but in this case they are not.
+        # Don't treat them as a delimiter.
+        return 0
+
+    if (
+        leaf.type == token.DOT
+        and leaf.parent
+        and leaf.parent.type not in {syms.import_from, syms.dotted_name}
+        and (previous is None or previous.type in CLOSING_BRACKETS)
+    ):
+        return DOT_PRIORITY
+
+    if (
+        leaf.type in MATH_OPERATORS
+        and leaf.parent
+        and leaf.parent.type not in {syms.factor, syms.star_expr}
+    ):
+        return MATH_PRIORITIES[leaf.type]
 
     if leaf.type in COMPARATORS:
         return COMPARATOR_PRIORITY
 
     if (
 
     if leaf.type in COMPARATORS:
         return COMPARATOR_PRIORITY
 
     if (
-        leaf.type in MATH_OPERATORS and
-        leaf.parent and
-        leaf.parent.type not in {syms.factor, syms.star_expr}
+        leaf.type == token.STRING
+        and previous is not None
+        and previous.type == token.STRING
+    ):
+        return STRING_PRIORITY
+
+    if leaf.type not in {token.NAME, token.ASYNC}:
+        return 0
+
+    if (
+        leaf.value == "for"
+        and leaf.parent
+        and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
+        or leaf.type == token.ASYNC
+    ):
+        if (
+            not isinstance(leaf.prev_sibling, Leaf)
+            or leaf.prev_sibling.value != "async"
+        ):
+            return COMPREHENSION_PRIORITY
+
+    if (
+        leaf.value == "if"
+        and leaf.parent
+        and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
+    ):
+        return COMPREHENSION_PRIORITY
+
+    if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test:
+        return TERNARY_PRIORITY
+
+    if leaf.value == "is":
+        return COMPARATOR_PRIORITY
+
+    if (
+        leaf.value == "in"
+        and leaf.parent
+        and leaf.parent.type in {syms.comp_op, syms.comparison}
+        and not (
+            previous is not None
+            and previous.type == token.NAME
+            and previous.value == "not"
+        )
+    ):
+        return COMPARATOR_PRIORITY
+
+    if (
+        leaf.value == "not"
+        and leaf.parent
+        and leaf.parent.type == syms.comp_op
+        and not (
+            previous is not None
+            and previous.type == token.NAME
+            and previous.value == "is"
+        )
     ):
     ):
-        return MATH_PRIORITY
+        return COMPARATOR_PRIORITY
+
+    if leaf.value in LOGIC_OPERATORS and leaf.parent:
+        return LOGIC_PRIORITY
 
     return 0
 
 
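# Illustrative sketch (assumption): in a chained call such as
#
#     result = obj.filter(x).map(y)
#
# the "." that follows the closing ")" satisfies the DOT_PRIORITY check above,
# so a long chain may be broken before that dot, while the "." right after the
# name "obj" gets no delimiter priority.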
-def generate_comments(leaf: Leaf) -> Iterator[Leaf]:
-    """Cleans the prefix of the `leaf` and generates comments from it, if any.
+FMT_OFF = {"# fmt: off", "# fmt:off", "# yapf: disable"}
+FMT_ON = {"# fmt: on", "# fmt:on", "# yapf: enable"}
+
+
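# Illustrative use of the markers above (assumption): formatting is suspended
# for the region between the "off" and "on" comments, e.g.
#
#     # fmt: off
#     matrix = [
#         1, 0,
#         0, 1,
#     ]
#     # fmt: on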
+def generate_comments(leaf: LN) -> Iterator[Leaf]:
+    """Clean the prefix of the `leaf` and generate comments from it, if any.
 
     Comments in lib2to3 are shoved into the whitespace prefix.  This happens
     in `pgen2/driver.py:Driver.parse_tokens()`.  This was a brilliant implementation
 
     Comments in lib2to3 are shoved into the whitespace prefix.  This happens
     in `pgen2/driver.py:Driver.parse_tokens()`.  This was a brilliant implementation
@@ -1059,67 +2344,156 @@ def generate_comments(leaf: Leaf) -> Iterator[Leaf]:
     Inline comments are emitted as regular token.COMMENT leaves.  Standalone
     are emitted with a fake STANDALONE_COMMENT token identifier.
     """
-    if not leaf.prefix:
-        return
+    for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
+        yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines)
 
 
-    if '#' not in leaf.prefix:
-        return
 
 
-    before_comment, content = leaf.prefix.split('#', 1)
-    content = content.rstrip()
-    if content and (content[0] not in {' ', '!', '#'}):
-        content = ' ' + content
-    is_standalone_comment = (
-        '\n' in before_comment or '\n' in content or leaf.type == token.DEDENT
-    )
-    if not is_standalone_comment:
-        # simple trailing comment
-        yield Leaf(token.COMMENT, value='#' + content)
-        return
+@dataclass
+class ProtoComment:
+    """Describes a piece of syntax that is a comment.
+
+    It's not a :class:`blib2to3.pytree.Leaf` so that:
 
 
-    for line in ('#' + content).split('\n'):
+    * it can be cached (`Leaf` objects should not be reused more than once as
+      they store their lineno, column, prefix, and parent information);
+    * `newlines` and `consumed` fields are kept separate from the `value`. This
+      simplifies handling of special marker comments like ``# fmt: off/on``.
+    """
+
+    type: int  # token.COMMENT or STANDALONE_COMMENT
+    value: str  # content of the comment
+    newlines: int  # how many newlines before the comment
+    consumed: int  # how many characters of the original leaf's prefix did we consume
+
+
+@lru_cache(maxsize=4096)
+def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
+    """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
+    result: List[ProtoComment] = []
+    if not prefix or "#" not in prefix:
+        return result
+
+    consumed = 0
+    nlines = 0
+    ignored_lines = 0
+    for index, line in enumerate(prefix.split("\n")):
+        consumed += len(line) + 1  # adding the length of the split '\n'
         line = line.lstrip()
         line = line.lstrip()
-        if not line.startswith('#'):
+        if not line:
+            nlines += 1
+        if not line.startswith("#"):
+            # Escaped newlines outside of a comment are not really newlines at
+            # all. We treat a single-line comment following an escaped newline
+            # as a simple trailing comment.
+            if line.endswith("\\"):
+                ignored_lines += 1
             continue
 
             continue
 
-        yield Leaf(STANDALONE_COMMENT, line)
+        if index == ignored_lines and not is_endmarker:
+            comment_type = token.COMMENT  # simple trailing comment
+        else:
+            comment_type = STANDALONE_COMMENT
+        comment = make_comment(line)
+        result.append(
+            ProtoComment(
+                type=comment_type, value=comment, newlines=nlines, consumed=consumed
+            )
+        )
+        nlines = 0
+    return result
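
Illustration (not part of the patch): a minimal sketch of what list_comments() returns, assuming black.py is importable as `black` and that the prefix below precedes some leaf.

    from black import list_comments

    protos = list_comments("  # foo\n\n    # bar\n", is_endmarker=False)
    # "# foo" sits on the leaf's own line: a simple trailing comment, no blank lines before it.
    assert (protos[0].value, protos[0].newlines) == ("# foo", 0)
    # "# bar" follows a blank line: a standalone comment with one newline recorded before it.
    assert (protos[1].value, protos[1].newlines) == ("# bar", 1)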
+
+
+def make_comment(content: str) -> str:
+    """Return a consistently formatted comment from the given `content` string.
+
+    All comments (except for "##", "#!", "#:", '#'", "#%%") should have a single
+    space between the hash sign and the content.
+
+    If `content` didn't start with a hash sign, one is provided.
+    """
+    content = content.rstrip()
+    if not content:
+        return "#"
+
+    if content[0] == "#":
+        content = content[1:]
+    if content and content[0] not in " !:#'%":
+        content = " " + content
+    return "#" + content
 
 
 
 
-def split_line(line: Line, line_length: int, inner: bool = False) -> Iterator[Line]:
-    """Splits a `line` into potentially many lines.
+def split_line(
+    line: Line,
+    line_length: int,
+    inner: bool = False,
+    features: Collection[Feature] = (),
+) -> Iterator[Line]:
+    """Split a `line` into potentially many lines.
 
     They should fit in the allotted `line_length` but might not be able to.
     `inner` signifies that there were a pair of brackets somewhere around the
     current `line`, possibly transitively. This means we can fallback to splitting
     by delimiters if the LHS/RHS don't yield any results.
+
+    `features` are syntactical features that may be used in the output.
     """
     """
-    line_str = str(line).strip('\n')
-    if len(line_str) <= line_length and '\n' not in line_str:
+    if line.is_comment:
+        yield line
+        return
+
+    line_str = str(line).strip("\n")
+
+    if (
+        not line.contains_uncollapsable_type_comments()
+        and not line.should_explode
+        and not line.is_collection_with_optional_trailing_comma
+        and (
+            is_line_short_enough(line, line_length=line_length, line_str=line_str)
+            or line.contains_unsplittable_type_ignore()
+        )
+    ):
         yield line
         return
 
+    split_funcs: List[SplitFunc]
     if line.is_def:
         split_funcs = [left_hand_split]
-    elif line.inside_brackets:
-        split_funcs = [delimiter_split]
-        if '\n' not in line_str:
-            # Only attempt RHS if we don't have multiline strings or comments
-            # on this line.
-            split_funcs.append(right_hand_split)
     else:
-        split_funcs = [right_hand_split]
+
+        def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]:
+            for omit in generate_trailers_to_omit(line, line_length):
+                lines = list(right_hand_split(line, line_length, features, omit=omit))
+                if is_line_short_enough(lines[0], line_length=line_length):
+                    yield from lines
+                    return
+
+            # All splits failed, best effort split with no omits.
+            # This mostly happens to multiline strings that are by definition
+            # reported as not fitting a single line.
+            # line_length=1 here was historically a bug that somehow became a feature.
+            # See #762 and #781 for the full story.
+            yield from right_hand_split(line, line_length=1, features=features)
+
+        if line.inside_brackets:
+            split_funcs = [delimiter_split, standalone_comment_split, rhs]
+        else:
+            split_funcs = [rhs]
     for split_func in split_funcs:
         # We are accumulating lines in `result` because we might want to abort
         # mission and return the original line in the end, or attempt a different
         # split altogether.
         result: List[Line] = []
         try:
-            for l in split_func(line):
-                if str(l).strip('\n') == line_str:
+            for l in split_func(line, features):
+                if str(l).strip("\n") == line_str:
                     raise CannotSplit("Split function returned an unchanged result")
 
                     raise CannotSplit("Split function returned an unchanged result")
 
-                result.extend(split_line(l, line_length=line_length, inner=True))
-        except CannotSplit as cs:
+                result.extend(
+                    split_line(
+                        l, line_length=line_length, inner=True, features=features
+                    )
+                )
+        except CannotSplit:
             continue
 
         else:
@@ -1130,97 +2504,146 @@ def split_line(line: Line, line_length: int, inner: bool = False) -> Iterator[Li
         yield line
 
 
-def left_hand_split(line: Line) -> Iterator[Line]:
+def left_hand_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
     """Split line into many lines, starting with the first matching bracket pair.
 
     Note: this usually looks weird, only use this for function definitions.
     """Split line into many lines, starting with the first matching bracket pair.
 
     Note: this usually looks weird, only use this for function definitions.
-    Prefer RHS otherwise.
+    Prefer RHS otherwise.  This is why this function is not symmetrical with
+    :func:`right_hand_split` which also handles optional parentheses.
     """
     """
-    head = Line(depth=line.depth)
-    body = Line(depth=line.depth + 1, inside_brackets=True)
-    tail = Line(depth=line.depth)
     tail_leaves: List[Leaf] = []
     body_leaves: List[Leaf] = []
     head_leaves: List[Leaf] = []
     current_leaves = head_leaves
-    matching_bracket = None
+    matching_bracket: Optional[Leaf] = None
     for leaf in line.leaves:
         if (
-            current_leaves is body_leaves and
-            leaf.type in CLOSING_BRACKETS and
-            leaf.opening_bracket is matching_bracket  # type: ignore
+            current_leaves is body_leaves
+            and leaf.type in CLOSING_BRACKETS
+            and leaf.opening_bracket is matching_bracket
         ):
-            current_leaves = tail_leaves
+            current_leaves = tail_leaves if body_leaves else head_leaves
         current_leaves.append(leaf)
         if current_leaves is head_leaves:
             if leaf.type in OPENING_BRACKETS:
                 matching_bracket = leaf
                 current_leaves = body_leaves
-    # Since body is a new indent level, remove spurious leading whitespace.
-    if body_leaves:
-        normalize_prefix(body_leaves[0])
-    # Build the new lines.
-    for result, leaves in (
-        (head, head_leaves), (body, body_leaves), (tail, tail_leaves)
-    ):
-        for leaf in leaves:
-            result.append(leaf, preformatted=True)
-            comment_after = line.comments.get(id(leaf))
-            if comment_after:
-                result.append(comment_after, preformatted=True)
-    # Check if the split succeeded.
-    tail_len = len(str(tail))
-    if not body:
-        if tail_len == 0:
-            raise CannotSplit("Splitting brackets produced the same line")
-
-        elif tail_len < 3:
-            raise CannotSplit(
-                f"Splitting brackets on an empty body to save "
-                f"{tail_len} characters is not worth it"
-            )
+    if not matching_bracket:
+        raise CannotSplit("No brackets found")
 
 
+    head = bracket_split_build_line(head_leaves, line, matching_bracket)
+    body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True)
+    tail = bracket_split_build_line(tail_leaves, line, matching_bracket)
+    bracket_split_succeeded_or_raise(head, body, tail)
     for result in (head, body, tail):
         if result:
             yield result
 
 
-def right_hand_split(line: Line) -> Iterator[Line]:
-    """Split line into many lines, starting with the last matching bracket pair."""
-    head = Line(depth=line.depth)
-    body = Line(depth=line.depth + 1, inside_brackets=True)
-    tail = Line(depth=line.depth)
-    tail_leaves: List[Leaf] = []
+def right_hand_split(
+    line: Line,
+    line_length: int,
+    features: Collection[Feature] = (),
+    omit: Collection[LeafID] = (),
+) -> Iterator[Line]:
+    """Split line into many lines, starting with the last matching bracket pair.
+
+    If the split was by optional parentheses, attempt splitting without them, too.
+    `omit` is a collection of closing bracket IDs that shouldn't be considered for
+    this split.
+
+    Note: running this function modifies `bracket_depth` on the leaves of `line`.
+    """
+    tail_leaves: List[Leaf] = []
     body_leaves: List[Leaf] = []
     head_leaves: List[Leaf] = []
     current_leaves = tail_leaves
-    opening_bracket = None
+    opening_bracket: Optional[Leaf] = None
+    closing_bracket: Optional[Leaf] = None
     for leaf in reversed(line.leaves):
         if current_leaves is body_leaves:
             if leaf is opening_bracket:
-                current_leaves = head_leaves
+                current_leaves = head_leaves if body_leaves else tail_leaves
         current_leaves.append(leaf)
         if current_leaves is tail_leaves:
-            if leaf.type in CLOSING_BRACKETS:
-                opening_bracket = leaf.opening_bracket  # type: ignore
+            if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
+                opening_bracket = leaf.opening_bracket
+                closing_bracket = leaf
                 current_leaves = body_leaves
+    if not (opening_bracket and closing_bracket and head_leaves):
+        # If there is no opening or closing_bracket that means the split failed and
+        # all content is in the tail.  Otherwise, if `head_leaves` are empty, it means
+        # the matching `opening_bracket` wasn't available on `line` anymore.
+        raise CannotSplit("No brackets found")
+
     tail_leaves.reverse()
     body_leaves.reverse()
     head_leaves.reverse()
-    # Since body is a new indent level, remove spurious leading whitespace.
-    if body_leaves:
-        normalize_prefix(body_leaves[0])
-    # Build the new lines.
-    for result, leaves in (
-        (head, head_leaves), (body, body_leaves), (tail, tail_leaves)
+    head = bracket_split_build_line(head_leaves, line, opening_bracket)
+    body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True)
+    tail = bracket_split_build_line(tail_leaves, line, opening_bracket)
+    bracket_split_succeeded_or_raise(head, body, tail)
+    if (
+        # the body shouldn't be exploded
+        not body.should_explode
+        # the opening bracket is an optional paren
+        and opening_bracket.type == token.LPAR
+        and not opening_bracket.value
+        # the closing bracket is an optional paren
+        and closing_bracket.type == token.RPAR
+        and not closing_bracket.value
+        # it's not an import (optional parens are the only thing we can split on
+        # in this case; attempting a split without them is a waste of time)
+        and not line.is_import
+        # there are no standalone comments in the body
+        and not body.contains_standalone_comments(0)
+        # and we can actually remove the parens
+        and can_omit_invisible_parens(body, line_length)
     ):
-        for leaf in leaves:
-            result.append(leaf, preformatted=True)
-            comment_after = line.comments.get(id(leaf))
-            if comment_after:
-                result.append(comment_after, preformatted=True)
-    # Check if the split succeeded.
-    tail_len = len(str(tail).strip('\n'))
+        omit = {id(closing_bracket), *omit}
+        try:
+            yield from right_hand_split(line, line_length, features=features, omit=omit)
+            return
+
+        except CannotSplit:
+            if not (
+                can_be_split(body)
+                or is_line_short_enough(body, line_length=line_length)
+            ):
+                raise CannotSplit(
+                    "Splitting failed, body is still too long and can't be split."
+                )
+
+            elif head.contains_multiline_strings() or tail.contains_multiline_strings():
+                raise CannotSplit(
+                    "The current optional pair of parentheses is bound to fail to "
+                    "satisfy the splitting algorithm because the head or the tail "
+                    "contains multiline strings which by definition never fit one "
+                    "line."
+                )
+
+    ensure_visible(opening_bracket)
+    ensure_visible(closing_bracket)
+    for result in (head, body, tail):
+        if result:
+            yield result
+
+
+def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
+    """Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
+
+    Do nothing otherwise.
+
+    A left- or right-hand split is based on a pair of brackets. Content before
+    (and including) the opening bracket is left on one line, content inside the
+    brackets is put on a separate line, and finally content starting with and
+    following the closing bracket is put on a separate line.
+
+    Those are called `head`, `body`, and `tail`, respectively. If the split
+    produced the same line (all content in `head`) or ended up with an empty `body`
+    and the `tail` is just the closing bracket, then it's considered failed.
+    """
+    tail_len = len(str(tail).strip())
     if not body:
         if tail_len == 0:
             raise CannotSplit("Splitting brackets produced the same line")
@@ -1231,159 +2654,1126 @@ def right_hand_split(line: Line) -> Iterator[Line]:
                 f"{tail_len} characters is not worth it"
             )
 
                 f"{tail_len} characters is not worth it"
             )
 
-    for result in (head, body, tail):
-        if result:
-            yield result
 
 
+def bracket_split_build_line(
+    leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False
+) -> Line:
+    """Return a new line with given `leaves` and respective comments from `original`.
 
 
-def delimiter_split(line: Line) -> Iterator[Line]:
+    If `is_body` is True, the result line is one-indented inside brackets and as such
+    has its first leaf's prefix normalized and a trailing comma added when expected.
+    """
+    result = Line(depth=original.depth)
+    if is_body:
+        result.inside_brackets = True
+        result.depth += 1
+        if leaves:
+            # Since body is a new indent level, remove spurious leading whitespace.
+            normalize_prefix(leaves[0], inside_brackets=True)
+            # Ensure a trailing comma for imports and standalone function arguments, but
+            # be careful not to add one after any comments or within type annotations.
+            no_commas = (
+                original.is_def
+                and opening_bracket.value == "("
+                and not any(l.type == token.COMMA for l in leaves)
+            )
+
+            if original.is_import or no_commas:
+                for i in range(len(leaves) - 1, -1, -1):
+                    if leaves[i].type == STANDALONE_COMMENT:
+                        continue
+
+                    if leaves[i].type != token.COMMA:
+                        leaves.insert(i + 1, Leaf(token.COMMA, ","))
+                    break
+
+    # Populate the line
+    for leaf in leaves:
+        result.append(leaf, preformatted=True)
+        for comment_after in original.comments_after(leaf):
+            result.append(comment_after, preformatted=True)
+    if is_body:
+        result.should_explode = should_explode(result, opening_bracket)
+    return result
+
+
+def dont_increase_indentation(split_func: SplitFunc) -> SplitFunc:
+    """Normalize prefix of the first leaf in every line returned by `split_func`.
+
+    This is a decorator over relevant split functions.
+    """
+
+    @wraps(split_func)
+    def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
+        for l in split_func(line, features):
+            normalize_prefix(l.leaves[0], inside_brackets=True)
+            yield l
+
+    return split_wrapper
+
+
+@dont_increase_indentation
+def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
     """Split according to delimiters of the highest priority.
 
     """Split according to delimiters of the highest priority.
 
-    This kind of split doesn't increase indentation.
+    If the appropriate Features are given, the split will add trailing commas
+    also in function signatures and calls that contain `*` and `**`.
     """
     try:
         last_leaf = line.leaves[-1]
     except IndexError:
         raise CannotSplit("Line empty")
 
     """
     try:
         last_leaf = line.leaves[-1]
     except IndexError:
         raise CannotSplit("Line empty")
 
-    delimiters = line.bracket_tracker.delimiters
+    bt = line.bracket_tracker
     try:
-        delimiter_priority = line.bracket_tracker.max_priority(exclude={id(last_leaf)})
+        delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
     except ValueError:
         raise CannotSplit("No delimiters found")
 
+    if delimiter_priority == DOT_PRIORITY:
+        if bt.delimiter_count_with_priority(delimiter_priority) == 1:
+            raise CannotSplit("Splitting a single attribute from its owner looks wrong")
+
     current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
+    lowest_depth = sys.maxsize
+    trailing_comma_safe = True
+
+    def append_to_line(leaf: Leaf) -> Iterator[Line]:
+        """Append `leaf` to current line or to new line if appending impossible."""
+        nonlocal current_line
+        try:
+            current_line.append_safe(leaf, preformatted=True)
+        except ValueError:
+            yield current_line
+
+            current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
+            current_line.append(leaf)
+
     for leaf in line.leaves:
-        current_line.append(leaf, preformatted=True)
-        comment_after = line.comments.get(id(leaf))
-        if comment_after:
-            current_line.append(comment_after, preformatted=True)
-        leaf_priority = delimiters.get(id(leaf))
+        yield from append_to_line(leaf)
+
+        for comment_after in line.comments_after(leaf):
+            yield from append_to_line(comment_after)
+
+        lowest_depth = min(lowest_depth, leaf.bracket_depth)
+        if leaf.bracket_depth == lowest_depth:
+            if is_vararg(leaf, within={syms.typedargslist}):
+                trailing_comma_safe = (
+                    trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features
+                )
+            elif is_vararg(leaf, within={syms.arglist, syms.argument}):
+                trailing_comma_safe = (
+                    trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features
+                )
+
+        leaf_priority = bt.delimiters.get(id(leaf))
         if leaf_priority == delimiter_priority:
-            normalize_prefix(current_line.leaves[0])
             yield current_line
 
             current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
     if current_line:
         if (
-            delimiter_priority == COMMA_PRIORITY and
-            current_line.leaves[-1].type != token.COMMA
+            trailing_comma_safe
+            and delimiter_priority == COMMA_PRIORITY
+            and current_line.leaves[-1].type != token.COMMA
+            and current_line.leaves[-1].type != STANDALONE_COMMENT
         ):
-            current_line.append(Leaf(token.COMMA, ','))
-        normalize_prefix(current_line.leaves[0])
+            current_line.append(Leaf(token.COMMA, ","))
+        yield current_line
+
+
+@dont_increase_indentation
+def standalone_comment_split(
+    line: Line, features: Collection[Feature] = ()
+) -> Iterator[Line]:
+    """Split standalone comments from the rest of the line."""
+    if not line.contains_standalone_comments(0):
+        raise CannotSplit("Line does not have any standalone comments")
+
+    current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
+
+    def append_to_line(leaf: Leaf) -> Iterator[Line]:
+        """Append `leaf` to current line or to new line if appending impossible."""
+        nonlocal current_line
+        try:
+            current_line.append_safe(leaf, preformatted=True)
+        except ValueError:
+            yield current_line
+
+            current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
+            current_line.append(leaf)
+
+    for leaf in line.leaves:
+        yield from append_to_line(leaf)
+
+        for comment_after in line.comments_after(leaf):
+            yield from append_to_line(comment_after)
+
+    if current_line:
         yield current_line
 
 
 def is_import(leaf: Leaf) -> bool:
-    """Returns True if the given leaf starts an import statement."""
+    """Return True if the given leaf starts an import statement."""
     p = leaf.parent
     t = leaf.type
     v = leaf.value
     return bool(
-        t == token.NAME and
-        (
-            (v == 'import' and p and p.type == syms.import_name) or
-            (v == 'from' and p and p.type == syms.import_from)
+        t == token.NAME
+        and (
+            (v == "import" and p and p.type == syms.import_name)
+            or (v == "from" and p and p.type == syms.import_from)
         )
     )
 
 
-def normalize_prefix(leaf: Leaf) -> None:
-    """Leave existing extra newlines for imports.  Remove everything else."""
-    if is_import(leaf):
-        spl = leaf.prefix.split('#', 1)
-        nl_count = spl[0].count('\n')
-        if len(spl) > 1:
-            # Skip one newline since it was for a standalone comment.
-            nl_count -= 1
-        leaf.prefix = '\n' * nl_count
+def is_type_comment(leaf: Leaf, suffix: str = "") -> bool:
+    """Return True if the given leaf is a special comment.
+    Only returns true for type comments for now."""
+    t = leaf.type
+    v = leaf.value
+    return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix)
+
+
+def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
+    """Leave existing extra newlines if not `inside_brackets`. Remove everything
+    else.
+
+    Note: don't use backslashes for formatting or you'll lose your voting rights.
+    """
+    if not inside_brackets:
+        spl = leaf.prefix.split("#")
+        if "\\" not in spl[0]:
+            nl_count = spl[-1].count("\n")
+            if len(spl) > 1:
+                nl_count -= 1
+            leaf.prefix = "\n" * nl_count
+            return
+
+    leaf.prefix = ""
+
+
+def normalize_string_prefix(leaf: Leaf, remove_u_prefix: bool = False) -> None:
+    """Make all string prefixes lowercase.
+
+    If remove_u_prefix is given, also removes any u prefix from the string.
+
+    Note: Mutates its argument.
+    """
+    match = re.match(r"^([furbFURB]*)(.*)$", leaf.value, re.DOTALL)
+    assert match is not None, f"failed to match string {leaf.value!r}"
+    orig_prefix = match.group(1)
+    new_prefix = orig_prefix.replace("F", "f").replace("B", "b").replace("U", "u")
+    if remove_u_prefix:
+        new_prefix = new_prefix.replace("u", "")
+    leaf.value = f"{new_prefix}{match.group(2)}"
+
+
+def normalize_string_quotes(leaf: Leaf) -> None:
+    """Prefer double quotes but only if it doesn't cause more escaping.
+
+    Adds or removes backslashes as appropriate. Doesn't parse and fix
+    strings nested in f-strings (yet).
+
+    Note: Mutates its argument.
+    """
+    value = leaf.value.lstrip("furbFURB")
+    if value[:3] == '"""':
         return
 
-    leaf.prefix = ''
+    elif value[:3] == "'''":
+        orig_quote = "'''"
+        new_quote = '"""'
+    elif value[0] == '"':
+        orig_quote = '"'
+        new_quote = "'"
+    else:
+        orig_quote = "'"
+        new_quote = '"'
+    first_quote_pos = leaf.value.find(orig_quote)
+    if first_quote_pos == -1:
+        return  # There's an internal error
+
+    prefix = leaf.value[:first_quote_pos]
+    unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
+    escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
+    escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}")
+    body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)]
+    if "r" in prefix.casefold():
+        if unescaped_new_quote.search(body):
+            # There's at least one unescaped new_quote in this raw string
+            # so converting is impossible
+            return
+
+        # Do not introduce or remove backslashes in raw strings
+        new_body = body
+    else:
+        # remove unnecessary escapes
+        new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
+        if body != new_body:
+            # Consider the string without unnecessary escapes as the original
+            body = new_body
+            leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}"
+        new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
+        new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)
+    if "f" in prefix.casefold():
+        matches = re.findall(
+            r"""
+            (?:[^{]|^)\{  # start of the string or a non-{ followed by a single {
+                ([^{].*?)  # contents of the brackets except if begins with {{
+            \}(?:[^}]|$)  # A } followed by end of the string or a non-}
+            """,
+            new_body,
+            re.VERBOSE,
+        )
+        for m in matches:
+            if "\\" in str(m):
+                # Do not introduce backslashes in interpolated expressions
+                return
+
+    if new_quote == '"""' and new_body[-1:] == '"':
+        # edge case:
+        new_body = new_body[:-1] + '\\"'
+    orig_escape_count = body.count("\\")
+    new_escape_count = new_body.count("\\")
+    if new_escape_count > orig_escape_count:
+        return  # Do not introduce more escaping
 
 
+    if new_escape_count == orig_escape_count and orig_quote == '"':
+        return  # Prefer double quotes
 
 
-PYTHON_EXTENSIONS = {'.py'}
-BLACKLISTED_DIRECTORIES = {
-    'build', 'buck-out', 'dist', '_build', '.git', '.hg', '.mypy_cache', '.tox', '.venv'
-}
+    leaf.value = f"{prefix}{new_quote}{new_body}{new_quote}"
+
+
+def normalize_numeric_literal(leaf: Leaf) -> None:
+    """Normalizes numeric (float, int, and complex) literals.
+
+    All letters used in the representation are normalized to lowercase (except
+    in Python 2 long literals).
+    """
+    text = leaf.value.lower()
+    if text.startswith(("0o", "0b")):
+        # Leave octal and binary literals alone.
+        pass
+    elif text.startswith("0x"):
+        # Change hex literals to upper case.
+        before, after = text[:2], text[2:]
+        text = f"{before}{after.upper()}"
+    elif "e" in text:
+        before, after = text.split("e")
+        sign = ""
+        if after.startswith("-"):
+            after = after[1:]
+            sign = "-"
+        elif after.startswith("+"):
+            after = after[1:]
+        before = format_float_or_int_string(before)
+        text = f"{before}e{sign}{after}"
+    elif text.endswith(("j", "l")):
+        number = text[:-1]
+        suffix = text[-1]
+        # Capitalize in "2L" because "l" looks too similar to "1".
+        if suffix == "l":
+            suffix = "L"
+        text = f"{format_float_or_int_string(number)}{suffix}"
+    else:
+        text = format_float_or_int_string(text)
+    leaf.value = text
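
Illustration (not part of the patch), assuming black.py is importable as `black`:

    from blib2to3.pgen2 import token
    from blib2to3.pytree import Leaf
    from black import normalize_numeric_literal

    # hex digits upper-cased, exponent marker lower-cased, complex suffix lower-cased,
    # and a missing fractional part filled in.
    for src, expected in [("0XAB", "0xAB"), ("1E5", "1e5"), ("10J", "10j"), ("7.E2", "7.0e2")]:
        leaf = Leaf(token.NUMBER, src)
        normalize_numeric_literal(leaf)
        assert leaf.value == expected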
+
+
+def format_float_or_int_string(text: str) -> str:
+    """Formats a float string like "1.0"."""
+    if "." not in text:
+        return text
+
+    before, after = text.split(".")
+    return f"{before or 0}.{after or 0}"
+
+
+def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
+    """Make existing optional parentheses invisible or create new ones.
+
+    `parens_after` is a set of string leaf values immediately after which parens
+    should be put.
+
+    Standardizes on visible parentheses for single-element tuples, and keeps
+    existing visible parentheses for other tuples and generator expressions.
+    """
+    for pc in list_comments(node.prefix, is_endmarker=False):
+        if pc.value in FMT_OFF:
+            # This `node` has a prefix with `# fmt: off`, don't mess with parens.
+            return
+    check_lpar = False
+    for index, child in enumerate(list(node.children)):
+        # Add parentheses around long tuple unpacking in assignments.
+        if (
+            index == 0
+            and isinstance(child, Node)
+            and child.type == syms.testlist_star_expr
+        ):
+            check_lpar = True
+
+        if check_lpar:
+            if is_walrus_assignment(child):
+                continue
+
+            if child.type == syms.atom:
+                if maybe_make_parens_invisible_in_atom(child, parent=node):
+                    wrap_in_parentheses(node, child, visible=False)
+            elif is_one_tuple(child):
+                wrap_in_parentheses(node, child, visible=True)
+            elif node.type == syms.import_from:
+                # "import from" nodes store parentheses directly as part of
+                # the statement
+                if child.type == token.LPAR:
+                    # make parentheses invisible
+                    child.value = ""  # type: ignore
+                    node.children[-1].value = ""  # type: ignore
+                elif child.type != token.STAR:
+                    # insert invisible parentheses
+                    node.insert_child(index, Leaf(token.LPAR, ""))
+                    node.append_child(Leaf(token.RPAR, ""))
+                break
+
+            elif not (isinstance(child, Leaf) and is_multiline_string(child)):
+                wrap_in_parentheses(node, child, visible=False)
+
+        check_lpar = isinstance(child, Leaf) and child.value in parens_after
+
+
+def normalize_fmt_off(node: Node) -> None:
+    """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
+    try_again = True
+    while try_again:
+        try_again = convert_one_fmt_off_pair(node)
+
+
+def convert_one_fmt_off_pair(node: Node) -> bool:
+    """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
+
+    Returns True if a pair was converted.
+    """
+    for leaf in node.leaves():
+        previous_consumed = 0
+        for comment in list_comments(leaf.prefix, is_endmarker=False):
+            if comment.value in FMT_OFF:
+                # We only want standalone comments. If there's no previous leaf or
+                # the previous leaf is indentation, it's a standalone comment in
+                # disguise.
+                if comment.type != STANDALONE_COMMENT:
+                    prev = preceding_leaf(leaf)
+                    if prev and prev.type not in WHITESPACE:
+                        continue
+
+                ignored_nodes = list(generate_ignored_nodes(leaf))
+                if not ignored_nodes:
+                    continue
+
+                first = ignored_nodes[0]  # Can be a container node with the `leaf`.
+                parent = first.parent
+                prefix = first.prefix
+                first.prefix = prefix[comment.consumed :]
+                hidden_value = (
+                    comment.value + "\n" + "".join(str(n) for n in ignored_nodes)
+                )
+                if hidden_value.endswith("\n"):
+                    # That happens when one of the `ignored_nodes` ended with a NEWLINE
+                    # leaf (possibly followed by a DEDENT).
+                    hidden_value = hidden_value[:-1]
+                first_idx: Optional[int] = None
+                for ignored in ignored_nodes:
+                    index = ignored.remove()
+                    if first_idx is None:
+                        first_idx = index
+                assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
+                assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
+                parent.insert_child(
+                    first_idx,
+                    Leaf(
+                        STANDALONE_COMMENT,
+                        hidden_value,
+                        prefix=prefix[:previous_consumed] + "\n" * comment.newlines,
+                    ),
+                )
+                return True
+
+            previous_consumed = comment.consumed
+
+    return False
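
Sketch (not part of the patch) of the effect: for source such as

    # fmt: off
    x = [1,  2]
    # fmt: on

the `# fmt: off` marker and everything up to (but not including) the node carrying
`# fmt: on` in its prefix are re-attached to the tree as a single STANDALONE_COMMENT
leaf holding the original text, so later formatting passes reproduce that region verbatim.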
+
+
+def generate_ignored_nodes(leaf: Leaf) -> Iterator[LN]:
+    """Starting from the container of `leaf`, generate all leaves until `# fmt: on`.
+
+    Stops at the end of the block.
+    """
+    container: Optional[LN] = container_of(leaf)
+    while container is not None and container.type != token.ENDMARKER:
+        is_fmt_on = False
+        for comment in list_comments(container.prefix, is_endmarker=False):
+            if comment.value in FMT_ON:
+                is_fmt_on = True
+            elif comment.value in FMT_OFF:
+                is_fmt_on = False
+        if is_fmt_on:
+            return
+
+        yield container
+
+        container = container.next_sibling
+
+
+def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool:
+    """If it's safe, make the parens in the atom `node` invisible, recursively.
+    Additionally, remove repeated, adjacent invisible parens from the atom `node`
+    as they are redundant.
+
+    Returns whether the node should itself be wrapped in invisible parentheses.
+
+    """
+    if (
+        node.type != syms.atom
+        or is_empty_tuple(node)
+        or is_one_tuple(node)
+        or (is_yield(node) and parent.type != syms.expr_stmt)
+        or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
+    ):
+        return False
+
+    first = node.children[0]
+    last = node.children[-1]
+    if first.type == token.LPAR and last.type == token.RPAR:
+        middle = node.children[1]
+        # make parentheses invisible
+        first.value = ""  # type: ignore
+        last.value = ""  # type: ignore
+        maybe_make_parens_invisible_in_atom(middle, parent=parent)
+
+        if is_atom_with_invisible_parens(middle):
+            # Strip the invisible parens from `middle` by replacing
+            # it with the child in-between the invisible parens
+            middle.replace(middle.children[1])
+
+        return False
+
+    return True
+
+
+def is_atom_with_invisible_parens(node: LN) -> bool:
+    """Given a `LN`, determines whether it's an atom `node` with invisible
+    parens. Useful in dedupe-ing and normalizing parens.
+    """
+    if isinstance(node, Leaf) or node.type != syms.atom:
+        return False
+
+    first, last = node.children[0], node.children[-1]
+    return (
+        isinstance(first, Leaf)
+        and first.type == token.LPAR
+        and first.value == ""
+        and isinstance(last, Leaf)
+        and last.type == token.RPAR
+        and last.value == ""
+    )
+
+
+def is_empty_tuple(node: LN) -> bool:
+    """Return True if `node` holds an empty tuple."""
+    return (
+        node.type == syms.atom
+        and len(node.children) == 2
+        and node.children[0].type == token.LPAR
+        and node.children[1].type == token.RPAR
+    )
+
+
+def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]:
+    """Returns `wrapped` if `node` is of the shape ( wrapped ).
+
+    Parenthesis can be optional. Returns None otherwise"""
+    if len(node.children) != 3:
+        return None
+
+    lpar, wrapped, rpar = node.children
+    if not (lpar.type == token.LPAR and rpar.type == token.RPAR):
+        return None
+
+    return wrapped
+
+
+def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None:
+    """Wrap `child` in parentheses.
+
+    This replaces `child` with an atom holding the parentheses and the old
+    child.  That requires moving the prefix.
+
+    If `visible` is False, the leaves will be valueless (and thus invisible).
+    """
+    lpar = Leaf(token.LPAR, "(" if visible else "")
+    rpar = Leaf(token.RPAR, ")" if visible else "")
+    prefix = child.prefix
+    child.prefix = ""
+    index = child.remove() or 0
+    new_child = Node(syms.atom, [lpar, child, rpar])
+    new_child.prefix = prefix
+    parent.insert_child(index, new_child)
+
+
+def is_one_tuple(node: LN) -> bool:
+    """Return True if `node` holds a tuple with one element, with or without parens."""
+    if node.type == syms.atom:
+        gexp = unwrap_singleton_parenthesis(node)
+        if gexp is None or gexp.type != syms.testlist_gexp:
+            return False
+
+        return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA
+
+    return (
+        node.type in IMPLICIT_TUPLE
+        and len(node.children) == 2
+        and node.children[1].type == token.COMMA
+    )
+
+
+def is_walrus_assignment(node: LN) -> bool:
+    """Return True iff `node` is of the shape ( test := test )"""
+    inner = unwrap_singleton_parenthesis(node)
+    return inner is not None and inner.type == syms.namedexpr_test
+
+
+def is_yield(node: LN) -> bool:
+    """Return True if `node` holds a `yield` or `yield from` expression."""
+    if node.type == syms.yield_expr:
+        return True
+
+    if node.type == token.NAME and node.value == "yield":  # type: ignore
+        return True
+
+    if node.type != syms.atom:
+        return False
+
+    if len(node.children) != 3:
+        return False
+
+    lpar, expr, rpar = node.children
+    if lpar.type == token.LPAR and rpar.type == token.RPAR:
+        return is_yield(expr)
+
+    return False
+
+
+def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
+    """Return True if `leaf` is a star or double star in a vararg or kwarg.
+
+    If `within` includes VARARGS_PARENTS, this applies to function signatures.
+    If `within` includes UNPACKING_PARENTS, it applies to right hand-side
+    extended iterable unpacking (PEP 3132) and additional unpacking
+    generalizations (PEP 448).
+    """
+    if leaf.type not in VARARGS_SPECIALS or not leaf.parent:
+        return False
+
+    p = leaf.parent
+    if p.type == syms.star_expr:
+        # Star expressions are also used as assignment targets in extended
+        # iterable unpacking (PEP 3132).  See what its parent is instead.
+        if not p.parent:
+            return False
+
+        p = p.parent
+
+    return p.type in within
+
+
+def is_multiline_string(leaf: Leaf) -> bool:
+    """Return True if `leaf` is a multiline string that actually spans many lines."""
+    value = leaf.value.lstrip("furbFURB")
+    return value[:3] in {'"""', "'''"} and "\n" in value
+
+
+def is_stub_suite(node: Node) -> bool:
+    """Return True if `node` is a suite with a stub body."""
+    if (
+        len(node.children) != 4
+        or node.children[0].type != token.NEWLINE
+        or node.children[1].type != token.INDENT
+        or node.children[3].type != token.DEDENT
+    ):
+        return False
+
+    return is_stub_body(node.children[2])
+
+
+def is_stub_body(node: LN) -> bool:
+    """Return True if `node` is a simple statement containing an ellipsis."""
+    if not isinstance(node, Node) or node.type != syms.simple_stmt:
+        return False
+
+    if len(node.children) != 2:
+        return False
+
+    child = node.children[0]
+    return (
+        child.type == syms.atom
+        and len(child.children) == 3
+        and all(leaf == Leaf(token.DOT, ".") for leaf in child.children)
+    )
+
+
+def max_delimiter_priority_in_atom(node: LN) -> Priority:
+    """Return maximum delimiter priority inside `node`.
+
+    This is specific to atoms with contents contained in a pair of parentheses.
+    If `node` isn't an atom or there are no enclosing parentheses, returns 0.
+    """
+    if node.type != syms.atom:
+        return 0
+
+    first = node.children[0]
+    last = node.children[-1]
+    if not (first.type == token.LPAR and last.type == token.RPAR):
+        return 0
+
+    bt = BracketTracker()
+    for c in node.children[1:-1]:
+        if isinstance(c, Leaf):
+            bt.mark(c)
+        else:
+            for leaf in c.leaves():
+                bt.mark(leaf)
+    try:
+        return bt.max_delimiter_priority()
+
+    except ValueError:
+        return 0
+
+
+def ensure_visible(leaf: Leaf) -> None:
+    """Make sure parentheses are visible.
+
+    They could be invisible as part of some statements (see
+    :func:`normalize_invisible_parens` and :func:`visit_import_from`).
+    """
+    if leaf.type == token.LPAR:
+        leaf.value = "("
+    elif leaf.type == token.RPAR:
+        leaf.value = ")"
+
+
+def should_explode(line: Line, opening_bracket: Leaf) -> bool:
+    """Should `line` immediately be split with `delimiter_split()` after RHS?"""
+
+    if not (
+        opening_bracket.parent
+        and opening_bracket.parent.type in {syms.atom, syms.import_from}
+        and opening_bracket.value in "[{("
+    ):
+        return False
+
+    try:
+        last_leaf = line.leaves[-1]
+        exclude = {id(last_leaf)} if last_leaf.type == token.COMMA else set()
+        max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
+    except (IndexError, ValueError):
+        return False
+
+    return max_priority == COMMA_PRIORITY
+
+
+def get_features_used(node: Node) -> Set[Feature]:
+    """Return a set of (relatively) new Python features used in this file.
+
+    Currently looking for:
+    - f-strings;
+    - underscores in numeric literals;
+    - trailing commas after * or ** in function signatures and calls;
+    - positional only arguments in function signatures and lambdas;
+    """
+    features: Set[Feature] = set()
+    for n in node.pre_order():
+        if n.type == token.STRING:
+            value_head = n.value[:2]  # type: ignore
+            if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}:
+                features.add(Feature.F_STRINGS)
+
+        elif n.type == token.NUMBER:
+            if "_" in n.value:  # type: ignore
+                features.add(Feature.NUMERIC_UNDERSCORES)
+
+        elif n.type == token.SLASH:
+            if n.parent and n.parent.type in {syms.typedargslist, syms.arglist}:
+                features.add(Feature.POS_ONLY_ARGUMENTS)
+
+        elif n.type == token.COLONEQUAL:
+            features.add(Feature.ASSIGNMENT_EXPRESSIONS)
+
+        elif (
+            n.type in {syms.typedargslist, syms.arglist}
+            and n.children
+            and n.children[-1].type == token.COMMA
+        ):
+            if n.type == syms.typedargslist:
+                feature = Feature.TRAILING_COMMA_IN_DEF
+            else:
+                feature = Feature.TRAILING_COMMA_IN_CALL
+
+            for ch in n.children:
+                if ch.type in STARS:
+                    features.add(feature)
+
+                if ch.type == syms.argument:
+                    for argch in ch.children:
+                        if argch.type in STARS:
+                            features.add(feature)
+
+    return features
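
Sketch (not part of the patch) of how feature detection can be exercised, assuming black.py is importable as `black` and that its lib2to3_parse() helper is available to build the tree:

    from black import Feature, get_features_used, lib2to3_parse

    node = lib2to3_parse('total = 1_000\nprint(f"{total}")\n')
    features = get_features_used(node)
    assert Feature.NUMERIC_UNDERSCORES in features   # 1_000
    assert Feature.F_STRINGS in features             # f"{total}"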
+
+
+def detect_target_versions(node: Node) -> Set[TargetVersion]:
+    """Detect the version to target based on the nodes used."""
+    features = get_features_used(node)
+    return {
+        version for version in TargetVersion if features <= VERSION_TO_FEATURES[version]
+    }
+
+
+def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
+    """Generate sets of closing bracket IDs that should be omitted in a RHS.
+
+    Brackets can be omitted if the entire trailer up to and including
+    a preceding closing bracket fits in one line.
+
+    Yielded sets are cumulative (contain results of previous yields, too).  First
+    set is empty.
+    """
+
+    omit: Set[LeafID] = set()
+    yield omit
+
+    length = 4 * line.depth
+    opening_bracket: Optional[Leaf] = None
+    closing_bracket: Optional[Leaf] = None
+    inner_brackets: Set[LeafID] = set()
+    for index, leaf, leaf_length in enumerate_with_length(line, reversed=True):
+        length += leaf_length
+        if length > line_length:
+            break
+
+        has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
+        if leaf.type == STANDALONE_COMMENT or has_inline_comment:
+            break
+
+        if opening_bracket:
+            if leaf is opening_bracket:
+                opening_bracket = None
+            elif leaf.type in CLOSING_BRACKETS:
+                inner_brackets.add(id(leaf))
+        elif leaf.type in CLOSING_BRACKETS:
+            if index > 0 and line.leaves[index - 1].type in OPENING_BRACKETS:
+                # Empty brackets would fail a split so treat them as "inner"
+                # brackets (e.g. only add them to the `omit` set if another
+                # pair of brackets was good enough.
+                inner_brackets.add(id(leaf))
+                continue
+
+            if closing_bracket:
+                omit.add(id(closing_bracket))
+                omit.update(inner_brackets)
+                inner_brackets.clear()
+                yield omit
+
+            if leaf.value:
+                opening_bracket = leaf.opening_bracket
+                closing_bracket = leaf
+
+
+def get_future_imports(node: Node) -> Set[str]:
+    """Return a set of __future__ imports in the file."""
+    imports: Set[str] = set()
+
+    def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
+        for child in children:
+            if isinstance(child, Leaf):
+                if child.type == token.NAME:
+                    yield child.value
+
+            elif child.type == syms.import_as_name:
+                orig_name = child.children[0]
+                assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports"
+                assert orig_name.type == token.NAME, "Invalid syntax parsing imports"
+                yield orig_name.value
+
+            elif child.type == syms.import_as_names:
+                yield from get_imports_from_children(child.children)
+
+            else:
+                raise AssertionError("Invalid syntax parsing imports")
+
+    for child in node.children:
+        if child.type != syms.simple_stmt:
+            break
+
+        first_child = child.children[0]
+        if isinstance(first_child, Leaf):
+            # Continue looking if we see a docstring; otherwise stop.
+            if (
+                len(child.children) == 2
+                and first_child.type == token.STRING
+                and child.children[1].type == token.NEWLINE
+            ):
+                continue
+
+            break
+
+        elif first_child.type == syms.import_from:
+            module_name = first_child.children[1]
+            if not isinstance(module_name, Leaf) or module_name.value != "__future__":
+                break
 
 
+            imports |= set(get_imports_from_children(first_child.children[3:]))
+        else:
+            break
+
+    return imports
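
Sketch (not part of the patch), again assuming black's lib2to3_parse() helper is available:

    from black import get_future_imports, lib2to3_parse

    src = '"""docstring"""\nfrom __future__ import annotations, division\nimport os\n'
    # Scanning stops at the first statement that is neither a docstring nor a
    # __future__ import, so only the two names below are collected.
    assert get_future_imports(lib2to3_parse(src)) == {"annotations", "division"}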
+
+
+@lru_cache()
+def get_gitignore(root: Path) -> PathSpec:
+    """ Return a PathSpec matching gitignore content if present."""
+    gitignore = root / ".gitignore"
+    lines: List[str] = []
+    if gitignore.is_file():
+        with gitignore.open() as gf:
+            lines = gf.readlines()
+    return PathSpec.from_lines("gitwildmatch", lines)
+
+
+def gen_python_files_in_dir(
+    path: Path,
+    root: Path,
+    include: Pattern[str],
+    exclude: Pattern[str],
+    report: "Report",
+    gitignore: PathSpec,
+) -> Iterator[Path]:
+    """Generate all files under `path` whose paths are not excluded by the
+    `exclude` regex, but are included by the `include` regex.
 
 
-def gen_python_files_in_dir(path: Path) -> Iterator[Path]:
+    Symbolic links pointing outside of the `root` directory are ignored.
+
+    `report` is where output about exclusions goes.
+    """
+    assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
     for child in path.iterdir():
-        if child.is_dir():
-            if child.name in BLACKLISTED_DIRECTORIES:
+        # First ignore files matching .gitignore
+        if gitignore.match_file(child.as_posix()):
+            report.path_ignored(child, "matches the .gitignore file content")
+            continue
+
+        # Then ignore with `exclude` option.
+        try:
+            normalized_path = "/" + child.resolve().relative_to(root).as_posix()
+        except OSError as e:
+            report.path_ignored(child, f"cannot be read because {e}")
+            continue
+
+        except ValueError:
+            if child.is_symlink():
+                report.path_ignored(
+                    child, f"is a symbolic link that points outside {root}"
+                )
                 continue
 
-            yield from gen_python_files_in_dir(child)
+            raise
+
+        if child.is_dir():
+            normalized_path += "/"
+
+        exclude_match = exclude.search(normalized_path)
+        if exclude_match and exclude_match.group(0):
+            report.path_ignored(child, "matches the --exclude regular expression")
+            continue
+
+        if child.is_dir():
+            yield from gen_python_files_in_dir(
+                child, root, include, exclude, report, gitignore
+            )
+
+        elif child.is_file():
+            include_match = include.search(normalized_path)
+            if include_match:
+                yield child
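
A hypothetical call (not part of the patch), with regexes mirroring the DEFAULT_INCLUDES and DEFAULT_EXCLUDES constants; .gitignore matches are skipped first, then --exclude matches, and surviving files are yielded only if they match --include:

    import regex as re
    from pathlib import Path
    from black import Report, gen_python_files_in_dir, get_gitignore

    root = Path(".").resolve()
    files = list(
        gen_python_files_in_dir(
            root, root, re.compile(r"\.pyi?$"), re.compile(r"/(\.git|build)/"),
            Report(), get_gitignore(root),
        )
    )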
+
+
+@lru_cache()
+def find_project_root(srcs: Iterable[str]) -> Path:
+    """Return a directory containing .git, .hg, or pyproject.toml.
+
+    That directory can be one of the directories passed in `srcs` or their
+    common parent.
 
 
-        elif child.suffix in PYTHON_EXTENSIONS:
-            yield child
+    If no directory in the tree contains a marker that would specify it's the
+    project root, the root of the file system is returned.
+    """
+    if not srcs:
+        return Path("/").resolve()
+
+    common_base = min(Path(src).resolve() for src in srcs)
+    if common_base.is_dir():
+        # Append a fake file so `parents` below returns `common_base_dir`, too.
+        common_base /= "fake-file"
+    for directory in common_base.parents:
+        if (directory / ".git").exists():
+            return directory
+
+        if (directory / ".hg").is_dir():
+            return directory
+
+        if (directory / "pyproject.toml").is_file():
+            return directory
+
+    return directory
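
Sketch (not part of the patch), assuming black.py is importable as `black`:

    import tempfile
    from pathlib import Path
    from black import find_project_root

    with tempfile.TemporaryDirectory() as tmp:
        repo = Path(tmp, "repo")
        (repo / "src").mkdir(parents=True)
        (repo / ".git").mkdir()                 # marker that makes `repo` the project root
        (repo / "src" / "a.py").touch()
        root = find_project_root((str(repo / "src" / "a.py"),))
        assert root == repo.resolve()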
 
 
 @dataclass
 class Report:
-    """Provides a reformatting counter."""
-    change_count: int = attrib(default=0)
-    same_count: int = attrib(default=0)
-    failure_count: int = attrib(default=0)
+    """Provides a reformatting counter. Can be rendered with `str(report)`."""
+
+    check: bool = False
+    diff: bool = False
+    quiet: bool = False
+    verbose: bool = False
+    change_count: int = 0
+    same_count: int = 0
+    failure_count: int = 0
 
 
-    def done(self, src: Path, changed: bool) -> None:
+    def done(self, src: Path, changed: Changed) -> None:
         """Increment the counter for successful reformatting. Write out a message."""
         """Increment the counter for successful reformatting. Write out a message."""
-        if changed:
-            out(f'reformatted {src}')
+        if changed is Changed.YES:
+            reformatted = "would reformat" if self.check or self.diff else "reformatted"
+            if self.verbose or not self.quiet:
+                out(f"{reformatted} {src}")
             self.change_count += 1
         else:
-            out(f'{src} already well formatted, good job.', bold=False)
+            if self.verbose:
+                if changed is Changed.NO:
+                    msg = f"{src} already well formatted, good job."
+                else:
+                    msg = f"{src} wasn't modified on disk since last run."
+                out(msg, bold=False)
             self.same_count += 1
 
     def failed(self, src: Path, message: str) -> None:
         """Increment the counter for failed reformatting. Write out a message."""
-        err(f'error: cannot format {src}: {message}')
+        err(f"error: cannot format {src}: {message}")
         self.failure_count += 1
 
+    def path_ignored(self, path: Path, message: str) -> None:
+        if self.verbose:
+            out(f"{path} ignored: {message}", bold=False)
+
     @property
     def return_code(self) -> int:
-        """Which return code should the app use considering the current state."""
-        return 1 if self.failure_count else 0
+        """Return the exit code that the app should use.
+
+        This considers the current state of changed files and failures:
+        - if there were any failures, return 123;
+        - if any files were changed and --check is being used, return 1;
+        - otherwise return 0.
+        """
+        # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with
+        # 126 we have special return codes reserved by the shell.
+        if self.failure_count:
+            return 123
+
+        elif self.change_count and self.check:
+            return 1
+
+        return 0
 
     def __str__(self) -> str:
-        """A color report of the current state.
+        """Render a color report of the current state.
 
         Use `click.unstyle` to remove colors.
         """
+        if self.check or self.diff:
+            reformatted = "would be reformatted"
+            unchanged = "would be left unchanged"
+            failed = "would fail to reformat"
+        else:
+            reformatted = "reformatted"
+            unchanged = "left unchanged"
+            failed = "failed to reformat"
         report = []
         if self.change_count:
-            s = 's' if self.change_count > 1 else ''
+            s = "s" if self.change_count > 1 else ""
             report.append(
-                click.style(f'{self.change_count} file{s} reformatted', bold=True)
+                click.style(f"{self.change_count} file{s} {reformatted}", bold=True)
             )
         if self.same_count:
-            s = 's' if self.same_count > 1 else ''
-            report.append(f'{self.same_count} file{s} left unchanged')
+            s = "s" if self.same_count > 1 else ""
+            report.append(f"{self.same_count} file{s} {unchanged}")
         if self.failure_count:
-            s = 's' if self.failure_count > 1 else ''
+            s = "s" if self.failure_count > 1 else ""
             report.append(
-                click.style(
-                    f'{self.failure_count} file{s} failed to reformat', fg='red'
-                )
+                click.style(f"{self.failure_count} file{s} {failed}", fg="red")
             )
-        return ', '.join(report) + '.'
+        return ", ".join(report) + "."
 
 
-def assert_equivalent(src: str, dst: str) -> None:
-    """Raises AssertionError if `src` and `dst` aren't equivalent.
+def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]:
+    filename = "<unknown>"
+    if sys.version_info >= (3, 8):
+        # TODO: support Python 4+ ;)
+        for minor_version in range(sys.version_info[1], 4, -1):
+            try:
+                return ast.parse(src, filename, feature_version=(3, minor_version))
+            except SyntaxError:
+                continue
+    else:
+        for feature_version in (7, 6):
+            try:
+                return ast3.parse(src, filename, feature_version=feature_version)
+            except SyntaxError:
+                continue
 
-    This is a temporary sanity check until Black becomes stable.
-    """
+    return ast27.parse(src)
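
# Hedged sketch of the fallback order above (assumes the typed_ast packages are
# installed, as this module requires): the newest grammar that accepts the
# source wins, and Python 2-only syntax ends up in ast27.parse().
tree = parse_ast("f(x, *args)")      # accepted by a Python 3 grammar
legacy = parse_ast("print 'hello'")  # SyntaxError on every 3.x attempt, ast27 succeeds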
+
+
+def _fixup_ast_constants(
+    node: Union[ast.AST, ast3.AST, ast27.AST]
+) -> Union[ast.AST, ast3.AST, ast27.AST]:
+    """Map ast nodes deprecated in 3.8 to Constant."""
+    if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)):
+        return ast.Constant(value=node.s)
+
+    if isinstance(node, (ast.Num, ast3.Num, ast27.Num)):
+        return ast.Constant(value=node.n)
+
+    if isinstance(node, (ast.NameConstant, ast3.NameConstant)):
+        return ast.Constant(value=node.value)
+
+    return node
 
-    import ast
-    import traceback
 
-    def _v(node: ast.AST, depth: int = 0) -> Iterator[str]:
+def assert_equivalent(src: str, dst: str) -> None:
+    """Raise AssertionError if `src` and `dst` aren't equivalent."""
+
+    def _v(node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0) -> Iterator[str]:
         """Simple visitor generating strings to compare ASTs by content."""
+
+        node = _fixup_ast_constants(node)
+
         yield f"{'  ' * depth}{node.__class__.__name__}("
 
-        for field in sorted(node._fields):
+        for field in sorted(node._fields):  # noqa: F402
+            # TypeIgnore has only one field 'lineno' which breaks this comparison
+            type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore)
+            if sys.version_info >= (3, 8):
+                type_ignore_classes += (ast.TypeIgnore,)
+            if isinstance(node, type_ignore_classes):
+                break
+
             try:
                 value = getattr(node, field)
             except AttributeError:
@@ -1393,10 +3783,20 @@ def assert_equivalent(src: str, dst: str) -> None:
 
             if isinstance(value, list):
                 for item in value:
-                    if isinstance(item, ast.AST):
+                    # Ignore nested tuples within del statements, because we may insert
+                    # parentheses and they change the AST.
+                    if (
+                        field == "targets"
+                        and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete))
+                        and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple))
+                    ):
+                        for item in item.elts:
+                            yield from _v(item, depth + 2)
+
+                    elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)):
                         yield from _v(item, depth + 2)
 
-            elif isinstance(value, ast.AST):
+            elif isinstance(value, (ast.AST, ast3.AST, ast27.AST)):
                 yield from _v(value, depth + 2)
 
             else:
@@ -1405,74 +3805,390 @@ def assert_equivalent(src: str, dst: str) -> None:
         yield f"{'  ' * depth})  # /{node.__class__.__name__}"
 
     try:
-        src_ast = ast.parse(src)
+        src_ast = parse_ast(src)
     except Exception as exc:
-        raise AssertionError(f"cannot parse source: {exc}") from None
+        raise AssertionError(
+            f"cannot use --safe with this file; failed to parse source file.  "
+            f"AST error message: {exc}"
+        )
 
     try:
-        dst_ast = ast.parse(dst)
+        dst_ast = parse_ast(dst)
     except Exception as exc:
-        log = dump_to_file(''.join(traceback.format_tb(exc.__traceback__)), dst)
+        log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
         raise AssertionError(
             f"INTERNAL ERROR: Black produced invalid code: {exc}. "
-            f"Please report a bug on https://github.com/ambv/black/issues.  "
-            f"This invalid output might be helpful: {log}",
+            f"Please report a bug on https://github.com/psf/black/issues.  "
+            f"This invalid output might be helpful: {log}"
         ) from None
 
-    src_ast_str = '\n'.join(_v(src_ast))
-    dst_ast_str = '\n'.join(_v(dst_ast))
+    src_ast_str = "\n".join(_v(src_ast))
+    dst_ast_str = "\n".join(_v(dst_ast))
     if src_ast_str != dst_ast_str:
-        log = dump_to_file(diff(src_ast_str, dst_ast_str, 'src', 'dst'))
+        log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
         raise AssertionError(
             f"INTERNAL ERROR: Black produced code that is not equivalent to "
             f"the source.  "
-            f"Please report a bug on https://github.com/ambv/black/issues.  "
-            f"This diff might be helpful: {log}",
+            f"Please report a bug on https://github.com/psf/black/issues.  "
+            f"This diff might be helpful: {log}"
         ) from None
 
 
-def assert_stable(src: str, dst: str, line_length: int) -> None:
-    """Raises AssertionError if `dst` reformats differently the second time.
-
-    This is a temporary sanity check until Black becomes stable.
-    """
-    newdst = format_str(dst, line_length=line_length)
+def assert_stable(src: str, dst: str, mode: Mode) -> None:
+    """Raise AssertionError if `dst` reformats differently the second time."""
+    newdst = format_str(dst, mode=mode)
     if dst != newdst:
         log = dump_to_file(
-            diff(src, dst, 'source', 'first pass'),
-            diff(dst, newdst, 'first pass', 'second pass'),
+            diff(src, dst, "source", "first pass"),
+            diff(dst, newdst, "first pass", "second pass"),
         )
         raise AssertionError(
             f"INTERNAL ERROR: Black produced different code on the second pass "
             f"of the formatter.  "
-            f"Please report a bug on https://github.com/ambv/black/issues.  "
-            f"This diff might be helpful: {log}",
+            f"Please report a bug on https://github.com/psf/black/issues.  "
+            f"This diff might be helpful: {log}"
         ) from None
 
 
+@mypyc_attr(patchable=True)
 def dump_to_file(*output: str) -> str:
-    """Dumps `output` to a temporary file. Returns path to the file."""
-    import tempfile
-
+    """Dump `output` to a temporary file. Return path to the file."""
     with tempfile.NamedTemporaryFile(
-        mode='w', prefix='blk_', suffix='.log', delete=False
+        mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
     ) as f:
         for lines in output:
             f.write(lines)
-            f.write('\n')
+            if lines and lines[-1] != "\n":
+                f.write("\n")
     return f.name
 
 
+@contextmanager
+def nullcontext() -> Iterator[None]:
+    """Return an empty context manager.
+
+    To be used like `nullcontext` in Python 3.7.
+    """
+    yield
+
+
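# Hedged usage sketch (`lock` is illustrative, not from the patch): pick a real
# context manager or the no-op shim above and treat both uniformly.
lock = None
with lock if lock is not None else nullcontext():
    pass  # the body runs either way; nothing is acquired in the None case
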
 def diff(a: str, b: str, a_name: str, b_name: str) -> str:
-    """Returns a udiff string between strings `a` and `b`."""
+    """Return a unified diff string between strings `a` and `b`."""
     import difflib
 
-    a_lines = [line + '\n' for line in a.split('\n')]
-    b_lines = [line + '\n' for line in b.split('\n')]
-    return ''.join(
+    a_lines = [line + "\n" for line in a.splitlines()]
+    b_lines = [line + "\n" for line in b.splitlines()]
+    return "".join(
         difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5)
     )
 
 
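# Hedged sketch: this unified diff is what Black's --diff mode prints.
print(diff("x = 1\n", "x = 2\n", "before", "after"))
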
-if __name__ == '__main__':
+def cancel(tasks: Iterable["asyncio.Task[Any]"]) -> None:
+    """asyncio signal handler that cancels all `tasks` and reports to stderr."""
+    err("Aborted!")
+    for task in tasks:
+        task.cancel()
+
+
+def shutdown(loop: asyncio.AbstractEventLoop) -> None:
+    """Cancel all pending tasks on `loop`, wait for them, and close the loop."""
+    try:
+        if sys.version_info[:2] >= (3, 7):
+            all_tasks = asyncio.all_tasks
+        else:
+            all_tasks = asyncio.Task.all_tasks
+        # This part is borrowed from asyncio/runners.py in Python 3.7b2.
+        to_cancel = [task for task in all_tasks(loop) if not task.done()]
+        if not to_cancel:
+            return
+
+        for task in to_cancel:
+            task.cancel()
+        loop.run_until_complete(
+            asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)
+        )
+    finally:
+        # `concurrent.futures.Future` objects cannot be cancelled once they
+        # are already running. There might be some when the `shutdown()` happened.
+        # Silence their logger's spew about the event loop being closed.
+        cf_logger = logging.getLogger("concurrent.futures")
+        cf_logger.setLevel(logging.CRITICAL)
+        loop.close()
+
+
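# Hedged sketch (no Black internals involved): drain and close a loop the same
# way the parallel formatting code path does on exit.
loop = asyncio.new_event_loop()
try:
    loop.run_until_complete(asyncio.sleep(0))
finally:
    shutdown(loop)
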
+def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
+    """Replace `regex` with `replacement` twice on `original`.
+
+    This is used by string normalization to perform replaces on
+    overlapping matches.
+    """
+    return regex.sub(replacement, regex.sub(replacement, original))
+
+
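# Hedged, self-contained illustration (the pattern and strings are made up, not
# taken from Black): a single pass skips the second, overlapping match.
pat = re.compile(r"(^|[^,]),")
assert pat.sub(r"\1;", "a,,b") == "a;,b"         # the second comma is missed
assert sub_twice(pat, r"\1;", "a,,b") == "a;;b"  # the second pass catches it
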
+def re_compile_maybe_verbose(regex: str) -> Pattern[str]:
+    """Compile a regular expression string in `regex`.
+
+    If it contains newlines, use verbose mode.
+    """
+    if "\n" in regex:
+        regex = "(?x)" + regex
+    compiled: Pattern[str] = re.compile(regex)
+    return compiled
+
+
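# Hedged sketch: a multi-line regex (e.g. an exclude written as a multi-line
# string in pyproject.toml) gets "(?x)" prepended, so whitespace and comments
# inside it are ignored.
pattern = re_compile_maybe_verbose(r"""
    \.mypy_cache   # comments are allowed in verbose mode
  | \.eggs
""")
assert pattern.search("/repo/.mypy_cache/x.json") is not None
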
+def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
+    """Like `reversed(enumerate(sequence))` if that were possible."""
+    index = len(sequence) - 1
+    for element in reversed(sequence):
+        yield (index, element)
+        index -= 1
+
+
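# Self-contained check of the helper above, mirroring its docstring:
assert list(enumerate_reversed("abc")) == [(2, "c"), (1, "b"), (0, "a")]
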
+def enumerate_with_length(
+    line: Line, reversed: bool = False
+) -> Iterator[Tuple[Index, Leaf, int]]:
+    """Return an enumeration of leaves with their length.
+
+    Stops prematurely on multiline strings and standalone comments.
+    """
+    op = cast(
+        Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
+        enumerate_reversed if reversed else enumerate,
+    )
+    for index, leaf in op(line.leaves):
+        length = len(leaf.prefix) + len(leaf.value)
+        if "\n" in leaf.value:
+            return  # Multiline strings, we can't continue.
+
+        for comment in line.comments_after(leaf):
+            length += len(comment.value)
+
+        yield index, leaf, length
+
+
+def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool:
+    """Return True if `line` is no longer than `line_length`.
+
+    Uses the provided `line_str` rendering, if any, otherwise computes a new one.
+    """
+    if not line_str:
+        line_str = str(line).strip("\n")
+    return (
+        len(line_str) <= line_length
+        and "\n" not in line_str  # multiline strings
+        and not line.contains_standalone_comments()
+    )
+
+
+def can_be_split(line: Line) -> bool:
+    """Return False if the line cannot be split *for sure*.
+
+    This is not an exhaustive search but a cheap heuristic that we can use to
+    avoid some unfortunate formattings (mostly around wrapping unsplittable code
+    in unnecessary parentheses).
+    """
+    leaves = line.leaves
+    if len(leaves) < 2:
+        return False
+
+    if leaves[0].type == token.STRING and leaves[1].type == token.DOT:
+        call_count = 0
+        dot_count = 0
+        next = leaves[-1]
+        for leaf in leaves[-2::-1]:
+            if leaf.type in OPENING_BRACKETS:
+                if next.type not in CLOSING_BRACKETS:
+                    return False
+
+                call_count += 1
+            elif leaf.type == token.DOT:
+                dot_count += 1
+            elif leaf.type == token.NAME:
+                if not (next.type == token.DOT or next.type in OPENING_BRACKETS):
+                    return False
+
+            elif leaf.type not in CLOSING_BRACKETS:
+                return False
+
+            if dot_count > 1 and call_count > 1:
+                return False
+
+    return True
+
+
+def can_omit_invisible_parens(line: Line, line_length: int) -> bool:
+    """Does `line` have a shape safe to reformat without optional parens around it?
+
+    Returns True for only a subset of potentially nice looking formattings but
+    the point is to not return false positives that end up producing lines that
+    are too long.
+    """
+    bt = line.bracket_tracker
+    if not bt.delimiters:
+        # Without delimiters the optional parentheses are useless.
+        return True
+
+    max_priority = bt.max_delimiter_priority()
+    if bt.delimiter_count_with_priority(max_priority) > 1:
+        # With more than one delimiter of a kind the optional parentheses read better.
+        return False
+
+    if max_priority == DOT_PRIORITY:
+        # A single stranded method call doesn't require optional parentheses.
+        return True
+
+    assert len(line.leaves) >= 2, "Stranded delimiter"
+
+    first = line.leaves[0]
+    second = line.leaves[1]
+    penultimate = line.leaves[-2]
+    last = line.leaves[-1]
+
+    # With a single delimiter, omit if the expression starts or ends with
+    # a bracket.
+    if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:
+        remainder = False
+        length = 4 * line.depth
+        for _index, leaf, leaf_length in enumerate_with_length(line):
+            if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:
+                remainder = True
+            if remainder:
+                length += leaf_length
+                if length > line_length:
+                    break
+
+                if leaf.type in OPENING_BRACKETS:
+                    # There are brackets we can further split on.
+                    remainder = False
+
+        else:
+            # checked the entire string and line length wasn't exceeded
+            if len(line.leaves) == _index + 1:
+                return True
+
+        # Note: we are not returning False here because a line might have *both*
+        # a leading opening bracket and a trailing closing bracket.  If the
+        # opening bracket doesn't match our rule, maybe the closing will.
+
+    if (
+        last.type == token.RPAR
+        or last.type == token.RBRACE
+        or (
+            # don't use indexing for omitting optional parentheses;
+            # it looks weird
+            last.type == token.RSQB
+            and last.parent
+            and last.parent.type != syms.trailer
+        )
+    ):
+        if penultimate.type in OPENING_BRACKETS:
+            # Empty brackets don't help.
+            return False
+
+        if is_multiline_string(first):
+            # Additional wrapping of a multiline string in this situation is
+            # unnecessary.
+            return True
+
+        length = 4 * line.depth
+        seen_other_brackets = False
+        for _index, leaf, leaf_length in enumerate_with_length(line):
+            length += leaf_length
+            if leaf is last.opening_bracket:
+                if seen_other_brackets or length <= line_length:
+                    return True
+
+            elif leaf.type in OPENING_BRACKETS:
+                # There are brackets we can further split on.
+                seen_other_brackets = True
+
+    return False
+
+
+def get_cache_file(mode: Mode) -> Path:
+    return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
+
+
+def read_cache(mode: Mode) -> Cache:
+    """Read the cache if it exists and is well formed.
+
+    If it is not well formed, the call to write_cache later should resolve the issue.
+    """
+    cache_file = get_cache_file(mode)
+    if not cache_file.exists():
+        return {}
+
+    with cache_file.open("rb") as fobj:
+        try:
+            cache: Cache = pickle.load(fobj)
+        except (pickle.UnpicklingError, ValueError):
+            return {}
+
+    return cache
+
+
+def get_cache_info(path: Path) -> CacheInfo:
+    """Return the information used to check if a file is already formatted or not."""
+    stat = path.stat()
+    return stat.st_mtime, stat.st_size
+
+
+def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
+    """Split an iterable of paths in `sources` into two sets.
+
+    The first contains paths of files that were modified on disk or are not in the
+    cache. The other contains paths to non-modified files.
+    """
+    todo, done = set(), set()
+    for src in sources:
+        src = src.resolve()
+        if cache.get(src) != get_cache_info(src):
+            todo.add(src)
+        else:
+            done.add(src)
+    return todo, done
+
+
+def write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None:
+    """Update the cache file."""
+    cache_file = get_cache_file(mode)
+    try:
+        CACHE_DIR.mkdir(parents=True, exist_ok=True)
+        new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}}
+        with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:
+            pickle.dump(new_cache, f, protocol=4)
+        os.replace(f.name, cache_file)
+    except OSError:
+        pass
+
+
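# Hedged sketch of the cache round trip above ("example.py" is illustrative and
# must exist on disk, since get_cache_info() calls Path.stat()):
mode = Mode()                                  # all formatting options left at defaults
sources = [Path("example.py")]
write_cache({}, sources, mode)                 # record the current (mtime, size)
todo, done = filter_cached(read_cache(mode), sources)
assert Path("example.py").resolve() in done    # unchanged since the cache was written
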
+def patch_click() -> None:
+    """Make Click not crash.
+
+    On certain misconfigured environments, Python 3 selects the ASCII encoding as the
+    default which restricts paths that it can access during the lifetime of the
+    application.  Click refuses to work in this scenario by raising a RuntimeError.
+
+    In case of Black the likelihood that non-ASCII characters are going to be used in
+    file paths is minimal since it's Python source code.  Moreover, this crash was
+    spurious on Python 3.7 thanks to PEP 538 and PEP 540.
+    """
+    try:
+        from click import core
+        from click import _unicodefun  # type: ignore
+    except ModuleNotFoundError:
+        return
+
+    for module in (core, _unicodefun):
+        if hasattr(module, "_verify_python3_env"):
+            module._verify_python3_env = lambda: None
+
+
+def patched_main() -> None:
+    freeze_support()
+    patch_click()
     main()
+
+
+if __name__ == "__main__":
+    patched_main()