Fix tests on windows (#159)
[etc/vim.git] / black.py
index dab3f004ff3289711dc26006b533844ad2268bac..c77166a2a6c454486c564aa8ad0a3065e3d0edde 100644 (file)
--- a/black.py
+++ b/black.py
@@ -1,18 +1,39 @@
 #!/usr/bin/env python3
 
 import asyncio
+import pickle
 from asyncio.base_events import BaseEventLoop
 from concurrent.futures import Executor, ProcessPoolExecutor
-from functools import partial
+from enum import Enum
+from functools import partial, wraps
 import keyword
+import logging
+from multiprocessing import Manager
 import os
 from pathlib import Path
+import re
 import tokenize
+import signal
 import sys
 from typing import (
-    Dict, Generic, Iterable, Iterator, List, Optional, Set, Tuple, Type, TypeVar, Union
+    Any,
+    Callable,
+    Collection,
+    Dict,
+    Generic,
+    Iterable,
+    Iterator,
+    List,
+    Optional,
+    Pattern,
+    Set,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
 )
 
+from appdirs import user_cache_dir
 from attr import dataclass, Factory
 import click
 
@@ -22,7 +43,7 @@ from blib2to3 import pygram, pytree
 from blib2to3.pgen2 import driver, token
 from blib2to3.pgen2.parse import ParseError
 
-__version__ = "18.3a4"
+__version__ = "18.4a2"
 DEFAULT_LINE_LENGTH = 88
 # types
 syms = pygram.python_symbols
@@ -32,9 +53,15 @@ Depth = int
 NodeType = int
 LeafID = int
 Priority = int
+Index = int
 LN = Union[Leaf, Node]
+SplitFunc = Callable[["Line", bool], Iterator["Line"]]
+Timestamp = float
+FileSize = int
+CacheInfo = Tuple[Timestamp, FileSize]
+Cache = Dict[Path, CacheInfo]
 out = partial(click.secho, bold=True, err=True)
-err = partial(click.secho, fg='red', err=True)
+err = partial(click.secho, fg="red", err=True)
 
 
 class NothingChanged(UserWarning):
@@ -77,32 +104,58 @@ class FormatOff(FormatError):
     """Found a comment like `# fmt: off` in the file."""
 
 
     """Found a comment like `# fmt: off` in the file."""
 
 
+class WriteBack(Enum):
+    NO = 0
+    YES = 1
+    DIFF = 2
+
+
+class Changed(Enum):
+    NO = 0
+    CACHED = 1
+    YES = 2
+
+
 @click.command()
 @click.option(
-    '-l',
-    '--line-length',
+    "-l",
+    "--line-length",
     type=int,
     default=DEFAULT_LINE_LENGTH,
-    help='How many character per line to allow.',
+    help="How many character per line to allow.",
     show_default=True,
 )
 @click.option(
-    '--check',
+    "--check",
     is_flag=True,
     help=(
-        "Don't write back the files, just return the status.  Return code 0 "
+        "Don't write the files back, just return the status.  Return code 0 "
         "means nothing would change.  Return code 1 means some files would be "
         "reformatted.  Return code 123 means there was an internal error."
     ),
 )
 @click.option(
         "means nothing would change.  Return code 1 means some files would be "
         "reformatted.  Return code 123 means there was an internal error."
     ),
 )
 @click.option(
-    '--fast/--safe',
+    "--diff",
+    is_flag=True,
+    help="Don't write the files back, just output a diff for each file on stdout.",
+)
+@click.option(
+    "--fast/--safe",
+    is_flag=True,
+    help="If --fast given, skip temporary sanity checks. [default: --safe]",
+)
+@click.option(
+    "-q",
+    "--quiet",
     is_flag=True,
-    help='If --fast given, skip temporary sanity checks. [default: --safe]',
+    help=(
+        "Don't emit non-error messages to stderr. Errors are still emitted, "
+        "silence those with 2>/dev/null."
+    ),
 )
 @click.version_option(version=__version__)
 @click.argument(
-    'src',
+    "src",
     nargs=-1,
     type=click.Path(
         exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
@@ -110,7 +163,13 @@ class FormatOff(FormatError):
 )
 @click.pass_context
 def main(
-    ctx: click.Context, line_length: int, check: bool, fast: bool, src: List[str]
+    ctx: click.Context,
+    line_length: int,
+    check: bool,
+    diff: bool,
+    fast: bool,
+    quiet: bool,
+    src: List[str],
 ) -> None:
     """The uncompromising code formatter."""
     sources: List[Path] = []
@@ -121,28 +180,25 @@ def main(
         elif p.is_file():
             # if a file was explicitly given, we don't care about its extension
             sources.append(p)
-        elif s == '-':
-            sources.append(Path('-'))
+        elif s == "-":
+            sources.append(Path("-"))
         else:
-            err(f'invalid path: {s}')
+            err(f"invalid path: {s}")
+
+    if check and not diff:
+        write_back = WriteBack.NO
+    elif diff:
+        write_back = WriteBack.DIFF
+    else:
+        write_back = WriteBack.YES
     if len(sources) == 0:
         ctx.exit(0)
+        return
+
     elif len(sources) == 1:
-        p = sources[0]
-        report = Report(check=check)
-        try:
-            if not p.is_file() and str(p) == '-':
-                changed = format_stdin_to_stdout(
-                    line_length=line_length, fast=fast, write_back=not check
-                )
-            else:
-                changed = format_file_in_place(
-                    p, line_length=line_length, fast=fast, write_back=not check
-                )
-            report.done(p, changed)
-        except Exception as exc:
-            report.failed(p, str(exc))
-        ctx.exit(report.return_code)
+        return_code = reformat_one(
+            sources[0], line_length, fast, quiet, write_back, check
+        )
     else:
         loop = asyncio.get_event_loop()
         executor = ProcessPoolExecutor(max_workers=os.cpu_count())
@@ -150,21 +206,66 @@ def main(
         try:
             return_code = loop.run_until_complete(
                 schedule_formatting(
-                    sources, line_length, not check, fast, loop, executor
+                    sources, line_length, write_back, fast, quiet, loop, executor, check
                 )
             )
         finally:
-            loop.close()
-            ctx.exit(return_code)
+            shutdown(loop)
+    ctx.exit(return_code)
+
+
+def reformat_one(
+    src: Path,
+    line_length: int,
+    fast: bool,
+    quiet: bool,
+    write_back: WriteBack,
+    check: bool,
+) -> int:
+    """Reformat a single file under `src` without spawning child processes.
+
+    If `quiet` is True, non-error messages are not output. `line_length`,
+    `write_back`, and `fast` options are passed to :func:`format_file_in_place`.
+    """
+    report = Report(check=check, quiet=quiet)
+    try:
+        changed = Changed.NO
+        if not src.is_file() and str(src) == "-":
+            if format_stdin_to_stdout(
+                line_length=line_length, fast=fast, write_back=write_back
+            ):
+                changed = Changed.YES
+        else:
+            cache: Cache = {}
+            if write_back != WriteBack.DIFF:
+                cache = read_cache()
+                src = src.resolve()
+                if src in cache and cache[src] == get_cache_info(src):
+                    changed = Changed.CACHED
+            if (
+                changed is not Changed.CACHED
+                and format_file_in_place(
+                    src, line_length=line_length, fast=fast, write_back=write_back
+                )
+            ):
+                changed = Changed.YES
+            if write_back != WriteBack.DIFF and changed is not Changed.NO:
+                write_cache(cache, [src])
+        report.done(src, changed)
+    except Exception as exc:
+        report.failed(src, str(exc))
+    return report.return_code
 
 
 async def schedule_formatting(
     sources: List[Path],
     line_length: int,
-    write_back: bool,
+    write_back: WriteBack,
     fast: bool,
+    quiet: bool,
     loop: BaseEventLoop,
     executor: Executor,
+    check: bool,
 ) -> int:
     """Run formatting of `sources` in parallel using the provided `executor`.
 
@@ -173,73 +274,125 @@ async def schedule_formatting(
     `line_length`, `write_back`, and `fast` options are passed to
     :func:`format_file_in_place`.
     """
-    tasks = {
-        src: loop.run_in_executor(
-            executor, format_file_in_place, src, line_length, fast, write_back
-        )
-        for src in sources
-    }
-    await asyncio.wait(tasks.values())
+    report = Report(check=check, quiet=quiet)
+    cache: Cache = {}
+    if write_back != WriteBack.DIFF:
+        cache = read_cache()
+        sources, cached = filter_cached(cache, sources)
+        for src in cached:
+            report.done(src, Changed.CACHED)
     cancelled = []
-    report = Report(check=not write_back)
-    for src, task in tasks.items():
-        if not task.done():
-            report.failed(src, 'timed out, cancelling')
-            task.cancel()
-            cancelled.append(task)
-        elif task.exception():
-            report.failed(src, str(task.exception()))
-        else:
-            report.done(src, task.result())
+    formatted = []
+    if sources:
+        lock = None
+        if write_back == WriteBack.DIFF:
+            # For diff output, we need locks to ensure we don't interleave output
+            # from different processes.
+            manager = Manager()
+            lock = manager.Lock()
+        tasks = {
+            src: loop.run_in_executor(
+                executor, format_file_in_place, src, line_length, fast, write_back, lock
+            )
+            for src in sources
+        }
+        _task_values = list(tasks.values())
+        try:
+            loop.add_signal_handler(signal.SIGINT, cancel, _task_values)
+            loop.add_signal_handler(signal.SIGTERM, cancel, _task_values)
+        except NotImplementedError:
+            # There are no good alternatives for these on Windows
+            pass
+        await asyncio.wait(_task_values)
+        for src, task in tasks.items():
+            if not task.done():
+                report.failed(src, "timed out, cancelling")
+                task.cancel()
+                cancelled.append(task)
+            elif task.cancelled():
+                cancelled.append(task)
+            elif task.exception():
+                report.failed(src, str(task.exception()))
+            else:
+                formatted.append(src)
+                report.done(src, Changed.YES if task.result() else Changed.NO)
+
     if cancelled:
-        await asyncio.wait(cancelled, timeout=2)
-    out('All done! ✨ 🍰 ✨')
-    click.echo(str(report))
+        await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
+    elif not quiet:
+        out("All done! ✨ 🍰 ✨")
+    if not quiet:
+        click.echo(str(report))
+
+    if write_back != WriteBack.DIFF and formatted:
+        write_cache(cache, formatted)
+
     return report.return_code
 
 
 def format_file_in_place(
-    src: Path, line_length: int, fast: bool, write_back: bool = False
+    src: Path,
+    line_length: int,
+    fast: bool,
+    write_back: WriteBack = WriteBack.NO,
+    lock: Any = None,  # multiprocessing.Manager().Lock() is some crazy proxy
 ) -> bool:
     """Format file under `src` path. Return True if changed.
 
     If `write_back` is True, write reformatted code back to stdout.
     `line_length` and `fast` options are passed to :func:`format_file_contents`.
     """
+
     with tokenize.open(src) as src_buffer:
         src_contents = src_buffer.read()
     try:
-        contents = format_file_contents(
+        dst_contents = format_file_contents(
             src_contents, line_length=line_length, fast=fast
         )
     except NothingChanged:
         return False
 
-    if write_back:
+    if write_back == write_back.YES:
         with open(src, "w", encoding=src_buffer.encoding) as f:
-            f.write(contents)
+            f.write(dst_contents)
+    elif write_back == write_back.DIFF:
+        src_name = f"{src.name}  (original)"
+        dst_name = f"{src.name}  (formatted)"
+        diff_contents = diff(src_contents, dst_contents, src_name, dst_name)
+        if lock:
+            lock.acquire()
+        try:
+            sys.stdout.write(diff_contents)
+        finally:
+            if lock:
+                lock.release()
     return True
 
 
 def format_stdin_to_stdout(
-    line_length: int, fast: bool, write_back: bool = False
+    line_length: int, fast: bool, write_back: WriteBack = WriteBack.NO
 ) -> bool:
     """Format file on stdin. Return True if changed.
 
     If `write_back` is True, write reformatted code back to stdout.
     `line_length` and `fast` arguments are passed to :func:`format_file_contents`.
     """
-    contents = sys.stdin.read()
+    src = sys.stdin.read()
+    dst = src
     try:
-        contents = format_file_contents(contents, line_length=line_length, fast=fast)
+        dst = format_file_contents(src, line_length=line_length, fast=fast)
         return True
 
     except NothingChanged:
         return False
 
     finally:
-        if write_back:
-            sys.stdout.write(contents)
+        if write_back == WriteBack.YES:
+            sys.stdout.write(dst)
+        elif write_back == WriteBack.DIFF:
+            src_name = "<stdin>  (original)"
+            dst_name = "<stdin>  (formatted)"
+            sys.stdout.write(diff(src, dst, src_name, dst_name))
 
 
 def format_file_contents(
@@ -251,7 +404,7 @@ def format_file_contents(
     valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
     `line_length` is passed to :func:`format_str`.
     """
-    if src_contents.strip() == '':
+    if src_contents.strip() == "":
         raise NothingChanged
 
     dst_contents = format_str(src_contents, line_length=line_length)
@@ -298,8 +451,8 @@ GRAMMARS = [
 def lib2to3_parse(src_txt: str) -> Node:
     """Given a string with source, return the lib2to3 Node."""
     grammar = pygram.python_grammar_no_print_statement
-    if src_txt[-1] != '\n':
-        nl = '\r\n' if '\r\n' in src_txt[:1024] else '\n'
+    if src_txt[-1] != "\n":
+        nl = "\r\n" if "\r\n" in src_txt[:1024] else "\n"
         src_txt += nl
     for grammar in GRAMMARS:
         drv = driver.Driver(grammar, pytree.convert)
@@ -329,7 +482,7 @@ def lib2to3_unparse(node: Node) -> str:
     return code
 
 
-T = TypeVar('T')
+T = TypeVar("T")
 
 
 class Visitor(Generic[T]):
@@ -349,7 +502,7 @@ class Visitor(Generic[T]):
             name = token.tok_name[node.type]
         else:
             name = type_repr(node.type)
-        yield from getattr(self, f'visit_{name}', self.visit_default)(node)
+        yield from getattr(self, f"visit_{name}", self.visit_default)(node)
 
     def visit_default(self, node: LN) -> Iterator[T]:
         """Default `visit_*()` implementation. Recurses to children of `node`."""
@@ -363,24 +516,24 @@ class DebugVisitor(Visitor[T]):
     tree_depth: int = 0
 
     def visit_default(self, node: LN) -> Iterator[T]:
-        indent = ' ' * (2 * self.tree_depth)
+        indent = " " * (2 * self.tree_depth)
         if isinstance(node, Node):
             _type = type_repr(node.type)
-            out(f'{indent}{_type}', fg='yellow')
+            out(f"{indent}{_type}", fg="yellow")
             self.tree_depth += 1
             for child in node.children:
                 yield from self.visit(child)
 
             self.tree_depth -= 1
-            out(f'{indent}/{_type}', fg='yellow', bold=False)
+            out(f"{indent}/{_type}", fg="yellow", bold=False)
         else:
             _type = token.tok_name.get(node.type, str(node.type))
-            out(f'{indent}{_type}', fg='blue', nl=False)
+            out(f"{indent}{_type}", fg="blue", nl=False)
             if node.prefix:
                 # We don't have to handle prefixes for `Node` objects since
                 # that delegates to the first child anyway.
-                out(f' {node.prefix!r}', fg='green', bold=False, nl=False)
-            out(f' {node.value!r}', fg='blue', bold=False)
+                out(f" {node.prefix!r}", fg="green", bold=False, nl=False)
+            out(f" {node.value!r}", fg="blue", bold=False)
 
     @classmethod
     def show(cls, code: str) -> None:
@@ -394,7 +547,7 @@ class DebugVisitor(Visitor[T]):
 
 KEYWORDS = set(keyword.kwlist)
 WHITESPACE = {token.DEDENT, token.INDENT, token.NEWLINE}
-FLOW_CONTROL = {'return', 'raise', 'break', 'continue'}
+FLOW_CONTROL = {"return", "raise", "break", "continue"}
 STATEMENT = {
     syms.if_stmt,
     syms.while_stmt,
@@ -406,7 +559,7 @@ STATEMENT = {
     syms.classdef,
 }
 STANDALONE_COMMENT = 153
-LOGIC_OPERATORS = {'and', 'or'}
+LOGIC_OPERATORS = {"and", "or"}
 COMPARATORS = {
     token.LESS,
     token.GREATER,
@@ -430,6 +583,20 @@ MATH_OPERATORS = {
     token.DOUBLESTAR,
     token.DOUBLESLASH,
 }
+STARS = {token.STAR, token.DOUBLESTAR}
+VARARGS_PARENTS = {
+    syms.arglist,
+    syms.argument,  # double star in arglist
+    syms.trailer,  # single argument to call
+    syms.typedargslist,
+    syms.varargslist,  # lambdas
+}
+UNPACKING_PARENTS = {
+    syms.atom,  # single element of a list or set literal
+    syms.dictsetmaker,
+    syms.listmaker,
+    syms.testlist_gexp,
+}
 COMPREHENSION_PRIORITY = 20
 COMMA_PRIORITY = 10
 LOGIC_PRIORITY = 5
@@ -446,6 +613,8 @@ class BracketTracker:
     bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = Factory(dict)
     delimiters: Dict[LeafID, Priority] = Factory(dict)
     previous: Optional[Leaf] = None
+    _for_loop_variable: bool = False
+    _lambda_arguments: bool = False
 
     def mark(self, leaf: Leaf) -> None:
         """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
@@ -465,42 +634,27 @@ class BracketTracker:
         if leaf.type == token.COMMENT:
             return
 
+        self.maybe_decrement_after_for_loop_variable(leaf)
+        self.maybe_decrement_after_lambda_arguments(leaf)
         if leaf.type in CLOSING_BRACKETS:
             self.depth -= 1
             opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
             leaf.opening_bracket = opening_bracket
         leaf.bracket_depth = self.depth
         if self.depth == 0:
-            delim = is_delimiter(leaf)
-            if delim:
-                self.delimiters[id(leaf)] = delim
-            elif self.previous is not None:
-                if leaf.type == token.STRING and self.previous.type == token.STRING:
-                    self.delimiters[id(self.previous)] = STRING_PRIORITY
-                elif (
-                    leaf.type == token.NAME
-                    and leaf.value == 'for'
-                    and leaf.parent
-                    and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
-                ):
-                    self.delimiters[id(self.previous)] = COMPREHENSION_PRIORITY
-                elif (
-                    leaf.type == token.NAME
-                    and leaf.value == 'if'
-                    and leaf.parent
-                    and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
-                ):
-                    self.delimiters[id(self.previous)] = COMPREHENSION_PRIORITY
-                elif (
-                    leaf.type == token.NAME
-                    and leaf.value in LOGIC_OPERATORS
-                    and leaf.parent
-                ):
-                    self.delimiters[id(self.previous)] = LOGIC_PRIORITY
+            delim = is_split_before_delimiter(leaf, self.previous)
+            if delim and self.previous is not None:
+                self.delimiters[id(self.previous)] = delim
+            else:
+                delim = is_split_after_delimiter(leaf, self.previous)
+                if delim:
+                    self.delimiters[id(leaf)] = delim
         if leaf.type in OPENING_BRACKETS:
             self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
             self.depth += 1
         self.previous = leaf
+        self.maybe_increment_lambda_arguments(leaf)
+        self.maybe_increment_for_loop_variable(leaf)
 
     def any_open_brackets(self) -> bool:
         """Return True if there is an yet unmatched open bracket on the line."""
@@ -509,10 +663,55 @@ class BracketTracker:
     def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> int:
         """Return the highest priority of a delimiter found on the line.
 
-        Values are consistent with what `is_delimiter()` returns.
+        Values are consistent with what `is_split_*_delimiter()` return.
+        Raises ValueError on no delimiters.
         """
         return max(v for k, v in self.delimiters.items() if k not in exclude)
 
         """
         return max(v for k, v in self.delimiters.items() if k not in exclude)
 
+    def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
+        """In a for loop, or comprehension, the variables are often unpacks.
+
+        To avoid splitting on the comma in this situation, increase the depth of
+        tokens between `for` and `in`.
+        """
+        if leaf.type == token.NAME and leaf.value == "for":
+            self.depth += 1
+            self._for_loop_variable = True
+            return True
+
+        return False
+
+    def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
+        """See `maybe_increment_for_loop_variable` above for explanation."""
+        if self._for_loop_variable and leaf.type == token.NAME and leaf.value == "in":
+            self.depth -= 1
+            self._for_loop_variable = False
+            return True
+
+        return False
+
+    def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
+        """In a lambda expression, there might be more than one argument.
+
+        To avoid splitting on the comma in this situation, increase the depth of
+        tokens between `lambda` and `:`.
+        """
+        if leaf.type == token.NAME and leaf.value == "lambda":
+            self.depth += 1
+            self._lambda_arguments = True
+            return True
+
+        return False
+
+    def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
+        """See `maybe_increment_lambda_arguments` above for explanation."""
+        if self._lambda_arguments and leaf.type == token.COLON:
+            self.depth -= 1
+            self._lambda_arguments = False
+            return True
+
+        return False
+
 
 @dataclass
 class Line:
@@ -520,11 +719,9 @@ class Line:
 
     depth: int = 0
     leaves: List[Leaf] = Factory(list)
-    comments: Dict[LeafID, Leaf] = Factory(dict)
+    comments: List[Tuple[Index, Leaf]] = Factory(list)
     bracket_tracker: BracketTracker = Factory(BracketTracker)
     inside_brackets: bool = False
-    has_for: bool = False
-    _for_loop_variable: bool = False
 
     def append(self, leaf: Leaf, preformatted: bool = False) -> None:
         """Add a new `leaf` to the end of the line.
@@ -536,7 +733,7 @@ class Line:
 
         Inline comments are put aside.
         """
-        has_value = leaf.value.strip()
+        has_value = leaf.type in BRACKETS or bool(leaf.value.strip())
         if not has_value:
             return
 
@@ -545,20 +742,33 @@ class Line:
             # imports, for which we only preserve newlines.
             leaf.prefix += whitespace(leaf)
         if self.inside_brackets or not preformatted:
-            self.maybe_decrement_after_for_loop_variable(leaf)
             self.bracket_tracker.mark(leaf)
             self.maybe_remove_trailing_comma(leaf)
-            self.maybe_increment_for_loop_variable(leaf)
-            if self.maybe_adapt_standalone_comment(leaf):
-                return
 
         if not self.append_comment(leaf):
             self.leaves.append(leaf)
 
+    def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
+        """Like :func:`append()` but disallow invalid standalone comment structure.
+
+        Raises ValueError when any `leaf` is appended after a standalone comment
+        or when a standalone comment is not the first leaf on the line.
+        """
+        if self.bracket_tracker.depth == 0:
+            if self.is_comment:
+                raise ValueError("cannot append to standalone comments")
+
+            if self.leaves and leaf.type == STANDALONE_COMMENT:
+                raise ValueError(
+                    "cannot append standalone comments to a populated line"
+                )
+
+        self.append(leaf, preformatted=preformatted)
+
     @property
     def is_comment(self) -> bool:
         """Is this line a standalone comment?"""
-        return bool(self) and self.leaves[0].type == STANDALONE_COMMENT
+        return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
 
     @property
     def is_decorator(self) -> bool:
@@ -576,7 +786,7 @@ class Line:
         return (
             bool(self)
             and self.leaves[0].type == token.NAME
-            and self.leaves[0].value == 'class'
+            and self.leaves[0].value == "class"
         )
 
     @property
@@ -592,12 +802,12 @@ class Line:
         except IndexError:
             second_leaf = None
         return (
-            (first_leaf.type == token.NAME and first_leaf.value == 'def')
+            (first_leaf.type == token.NAME and first_leaf.value == "def")
             or (
                 first_leaf.type == token.ASYNC
                 and second_leaf is not None
                 and second_leaf.type == token.NAME
-                and second_leaf.value == 'def'
+                and second_leaf.value == "def"
             )
         )
 
@@ -619,9 +829,18 @@ class Line:
         return (
             bool(self)
             and self.leaves[0].type == token.NAME
-            and self.leaves[0].value == 'yield'
+            and self.leaves[0].value == "yield"
         )
 
+    def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
+        """If so, needs to be split before emitting."""
+        for leaf in self.leaves:
+            if leaf.type == STANDALONE_COMMENT:
+                if leaf.bracket_depth <= depth_limit:
+                    return True
+
+        return False
+
     def maybe_remove_trailing_comma(self, closing: Leaf) -> bool:
         """Remove trailing comma if there is one and it's safe."""
         if not (
@@ -632,13 +851,13 @@ class Line:
             return False
 
         if closing.type == token.RBRACE:
-            self.leaves.pop()
+            self.remove_trailing_comma()
             return True
 
         if closing.type == token.RSQB:
             comma = self.leaves[-1]
             if comma.parent and comma.parent.type == syms.listmaker:
-                self.leaves.pop()
+                self.remove_trailing_comma()
                 return True
 
         # For parens let's check if it's safe to remove the comma.  If the
@@ -666,95 +885,69 @@ class Line:
                     break
 
         if commas > 1:
-            self.leaves.pop()
-            return True
-
-        return False
-
-    def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
-        """In a for loop, or comprehension, the variables are often unpacks.
-
-        To avoid splitting on the comma in this situation, increase the depth of
-        tokens between `for` and `in`.
-        """
-        if leaf.type == token.NAME and leaf.value == 'for':
-            self.has_for = True
-            self.bracket_tracker.depth += 1
-            self._for_loop_variable = True
-            return True
-
-        return False
-
-    def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
-        """See `maybe_increment_for_loop_variable` above for explanation."""
-        if self._for_loop_variable and leaf.type == token.NAME and leaf.value == 'in':
-            self.bracket_tracker.depth -= 1
-            self._for_loop_variable = False
+            self.remove_trailing_comma()
             return True
 
         return False
 
-    def maybe_adapt_standalone_comment(self, comment: Leaf) -> bool:
-        """Hack a standalone comment to act as a trailing comment for line splitting.
-
-        If this line has brackets and a standalone `comment`, we need to adapt
-        it to be able to still reformat the line.
-
-        This is not perfect, the line to which the standalone comment gets
-        appended will appear "too long" when splitting.
-        """
-        if not (
+    def append_comment(self, comment: Leaf) -> bool:
+        """Add an inline or standalone comment to the line."""
+        if (
             comment.type == STANDALONE_COMMENT
             and self.bracket_tracker.any_open_brackets()
         ):
+            comment.prefix = ""
             return False
 
-        comment.type = token.COMMENT
-        comment.prefix = '\n' + '    ' * (self.depth + 1)
-        return self.append_comment(comment)
-
-    def append_comment(self, comment: Leaf) -> bool:
-        """Add an inline comment to the line."""
         if comment.type != token.COMMENT:
             return False
 
-        try:
-            after = id(self.last_non_delimiter())
-        except LookupError:
+        after = len(self.leaves) - 1
+        if after == -1:
             comment.type = STANDALONE_COMMENT
-            comment.prefix = ''
+            comment.prefix = ""
             return False
 
         else:
-            if after in self.comments:
-                self.comments[after].value += str(comment)
-            else:
-                self.comments[after] = comment
+            self.comments.append((after, comment))
             return True
 
-    def last_non_delimiter(self) -> Leaf:
-        """Return the last non-delimiter on the line. Raise LookupError otherwise."""
-        for i in range(len(self.leaves)):
-            last = self.leaves[-i - 1]
-            if not is_delimiter(last):
-                return last
+    def comments_after(self, leaf: Leaf) -> Iterator[Leaf]:
+        """Generate comments that should appear directly after `leaf`."""
+        for _leaf_index, _leaf in enumerate(self.leaves):
+            if leaf is _leaf:
+                break
+
+        else:
+            return
+
+        for index, comment_after in self.comments:
+            if _leaf_index == index:
+                yield comment_after
 
-        raise LookupError("No non-delimiters found")
+    def remove_trailing_comma(self) -> None:
+        """Remove the trailing comma and moves the comments attached to it."""
+        comma_index = len(self.leaves) - 1
+        for i in range(len(self.comments)):
+            comment_index, comment = self.comments[i]
+            if comment_index == comma_index:
+                self.comments[i] = (comma_index - 1, comment)
+        self.leaves.pop()
 
     def __str__(self) -> str:
         """Render the line."""
         if not self:
-            return '\n'
+            return "\n"
 
-        indent = '    ' * self.depth
+        indent = "    " * self.depth
         leaves = iter(self.leaves)
         first = next(leaves)
-        res = f'{first.prefix}{indent}{first.value}'
+        res = f"{first.prefix}{indent}{first.value}"
         for leaf in leaves:
             res += str(leaf)
-        for comment in self.comments.values():
+        for _, comment in self.comments:
             res += str(comment)
-        return res + '\n'
+        return res + "\n"
 
     def __bool__(self) -> bool:
         """Return True if the line has leaves or comments."""
@@ -790,9 +983,9 @@ class UnformattedLines(Line):
         `depth` is not used for indentation in this case.
         """
         if not self:
-            return '\n'
+            return "\n"
 
-        res = ''
+        res = ""
         for leaf in self.leaves:
             res += str(leaf)
         return res
@@ -809,10 +1002,6 @@ class UnformattedLines(Line):
         """Does nothing and returns False."""
         return False
 
         """Does nothing and returns False."""
         return False
 
-    def maybe_adapt_standalone_comment(self, comment: Leaf) -> bool:
-        """Does nothing and returns False."""
-        return False
-
 
 @dataclass
 class EmptyLineTracker:
@@ -850,9 +1039,9 @@ class EmptyLineTracker:
         if current_line.leaves:
             # Consume the first leaf's extra newlines.
             first_leaf = current_line.leaves[0]
-            before = first_leaf.prefix.count('\n')
+            before = first_leaf.prefix.count("\n")
             before = min(before, max_allowed)
-            first_leaf.prefix = ''
+            first_leaf.prefix = ""
         else:
             before = 0
         depth = current_line.depth
@@ -971,6 +1160,8 @@ class LineGenerator(Visitor[Line]):
 
             else:
                 normalize_prefix(node, inside_brackets=any_open_brackets)
+                if node.type == token.STRING:
+                    normalize_string_quotes(node)
                 if node.type not in WHITESPACE:
                     self.current_line.append(node)
         yield from super().visit_default(node)
@@ -986,15 +1177,22 @@ class LineGenerator(Visitor[Line]):
         # DEDENT has no value. Additionally, in blib2to3 it never holds comments.
         yield from self.line(-1)
 
-    def visit_stmt(self, node: Node, keywords: Set[str]) -> Iterator[Line]:
+    def visit_stmt(
+        self, node: Node, keywords: Set[str], parens: Set[str]
+    ) -> Iterator[Line]:
         """Visit a statement.
 
         This implementation is shared for `if`, `while`, `for`, `try`, `except`,
         """Visit a statement.
 
         This implementation is shared for `if`, `while`, `for`, `try`, `except`,
-        `def`, `with`, and `class`.
+        `def`, `with`, `class`, and `assert`.
+
+        The relevant Python language `keywords` for a given statement will be
+        NAME leaves within it. This methods puts those on a separate line.
 
-        The relevant Python language `keywords` for a given statement will be NAME
-        leaves within it. This methods puts those on a separate line.
+        `parens` holds pairs of nodes where invisible parentheses should be put.
+        Keys hold nodes after which opening parentheses should be put, values
+        hold nodes before which closing parentheses should be put.
         """
         """
+        normalize_invisible_parens(node, parens_after=parens)
         for child in node.children:
             if child.type == token.NAME and child.value in keywords:  # type: ignore
                 yield from self.line()
         for child in node.children:
             if child.type == token.NAME and child.value in keywords:  # type: ignore
                 yield from self.line()
@@ -1034,6 +1232,32 @@ class LineGenerator(Visitor[Line]):
             yield from self.line()
             yield from self.visit(child)
 
+    def visit_import_from(self, node: Node) -> Iterator[Line]:
+        """Visit import_from and maybe put invisible parentheses.
+
+        This is separate from `visit_stmt` because import statements don't
+        support arbitrary atoms and thus handling of parentheses is custom.
+        """
+        check_lpar = False
+        for index, child in enumerate(node.children):
+            if check_lpar:
+                if child.type == token.LPAR:
+                    # make parentheses invisible
+                    child.value = ""  # type: ignore
+                    node.children[-1].value = ""  # type: ignore
+                else:
+                    # insert invisible parentheses
+                    node.insert_child(index, Leaf(token.LPAR, ""))
+                    node.append_child(Leaf(token.RPAR, ""))
+                break
+
+            check_lpar = (
+                child.type == token.NAME and child.value == "import"  # type: ignore
+            )
+
+        for child in node.children:
+            yield from self.visit(child)
+
     def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
         """Remove a semicolon and put the other statement on a separate line."""
         yield from self.line()
@@ -1057,21 +1281,30 @@ class LineGenerator(Visitor[Line]):
                 yield from self.line()
                 yield from self.visit(node)
 
+            if node.type == token.ENDMARKER:
+                # somebody decided not to put a final `# fmt: on`
+                yield from self.line()
+
     def __attrs_post_init__(self) -> None:
         """You are in a twisty little maze of passages."""
         v = self.visit_stmt
-        self.visit_if_stmt = partial(v, keywords={'if', 'else', 'elif'})
-        self.visit_while_stmt = partial(v, keywords={'while', 'else'})
-        self.visit_for_stmt = partial(v, keywords={'for', 'else'})
-        self.visit_try_stmt = partial(v, keywords={'try', 'except', 'else', 'finally'})
-        self.visit_except_clause = partial(v, keywords={'except'})
-        self.visit_funcdef = partial(v, keywords={'def'})
-        self.visit_with_stmt = partial(v, keywords={'with'})
-        self.visit_classdef = partial(v, keywords={'class'})
+        Ø: Set[str] = set()
+        self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
+        self.visit_if_stmt = partial(v, keywords={"if", "else", "elif"}, parens={"if"})
+        self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
+        self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
+        self.visit_try_stmt = partial(
+            v, keywords={"try", "except", "else", "finally"}, parens=Ø
+        )
+        self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
+        self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
+        self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
+        self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
         self.visit_async_funcdef = self.visit_async_stmt
         self.visit_decorated = self.visit_decorators
 
 
+IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist}
 BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE}
 OPENING_BRACKETS = set(BRACKET.keys())
 CLOSING_BRACKETS = set(BRACKET.values())
@@ -1081,9 +1314,9 @@ ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT}
 
 def whitespace(leaf: Leaf) -> str:  # noqa C901
     """Return whitespace prefix if needed for the given `leaf`."""
-    NO = ''
-    SPACE = ' '
-    DOUBLESPACE = '  '
+    NO = ""
+    SPACE = " "
+    DOUBLESPACE = "  "
     t = leaf.type
     p = leaf.parent
     v = leaf.value
@@ -1119,15 +1352,8 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
                     # that, too.
                     return prevp.prefix
 
-        elif prevp.type == token.DOUBLESTAR:
-            if prevp.parent and prevp.parent.type in {
-                syms.arglist,
-                syms.argument,
-                syms.dictsetmaker,
-                syms.parameters,
-                syms.typedargslist,
-                syms.varargslist,
-            }:
+        elif prevp.type in STARS:
+            if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS):
                 return NO
 
         elif prevp.type == token.COLON:
@@ -1136,7 +1362,7 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
 
         elif (
             prevp.parent
-            and prevp.parent.type in {syms.factor, syms.star_expr}
+            and prevp.parent.type == syms.factor
             and prevp.type in MATH_OPERATORS
         ):
             return NO
@@ -1147,7 +1373,7 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
             and prevp.parent.type == syms.shift_expr
             and prevp.prev_sibling
             and prevp.prev_sibling.type == token.NAME
-            and prevp.prev_sibling.value == 'print'  # type: ignore
+            and prevp.prev_sibling.value == "print"  # type: ignore
         ):
             # Python 2 print chevron
             return NO
@@ -1157,17 +1383,11 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
 
     if p.type in {syms.parameters, syms.arglist}:
         # untyped function signatures or calls
-        if t == token.RPAR:
-            return NO
-
         if not prev or prev.type != token.COMMA:
             return NO
 
     elif p.type == syms.varargslist:
         # lambdas
-        if t == token.RPAR:
-            return NO
-
         if prev and prev.type != token.COMMA:
             return NO
 
@@ -1222,7 +1442,7 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
             if not prevp or prevp.type == token.LPAR:
                 return NO
 
-        elif prev.type == token.EQUAL or prev.type == token.DOUBLESTAR:
+        elif prev.type in {token.EQUAL} | STARS:
             return NO
 
     elif p.type == syms.decorator:
@@ -1261,21 +1481,9 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
             # dots, but not the first one.
             return NO
 
-    elif (
-        p.type == syms.listmaker
-        or p.type == syms.testlist_gexp
-        or p.type == syms.subscriptlist
-    ):
-        # list interior, including unpacking
-        if not prev:
-            return NO
-
     elif p.type == syms.dictsetmaker:
     elif p.type == syms.dictsetmaker:
-        # dict and set interior, including unpacking
-        if not prev:
-            return NO
-
-        if prev.type == token.DOUBLESTAR:
+        # dict unpacking
+        if prev and prev.type == token.DOUBLESTAR:
             return NO
 
     elif p.type in {syms.factor, syms.star_expr}:
@@ -1287,9 +1495,10 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
 
             prevp_parent = prevp.parent
             assert prevp_parent is not None
-            if prevp.type == token.COLON and prevp_parent.type in {
-                syms.subscript, syms.sliceop
-            }:
+            if (
+                prevp.type == token.COLON
+                and prevp_parent.type in {syms.subscript, syms.sliceop}
+            ):
                 return NO
 
             elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
@@ -1304,7 +1513,7 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
                 return NO
 
         elif t == token.NAME:
-            if v == 'import':
+            if v == "import":
                 return SPACE
 
             if prev and prev.type == token.DOT:
@@ -1334,16 +1543,32 @@ def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
     return None
 
 
-def is_delimiter(leaf: Leaf) -> int:
-    """Return the priority of the `leaf` delimiter. Return 0 if not delimiter.
+def is_split_after_delimiter(leaf: Leaf, previous: Leaf = None) -> int:
+    """Return the priority of the `leaf` delimiter, given a line break after it.
+
+    The delimiter priorities returned here are from those delimiters that would
+    cause a line break after themselves.
 
     Higher numbers are higher priority.
     """
     if leaf.type == token.COMMA:
         return COMMA_PRIORITY
 
-    if leaf.type in COMPARATORS:
-        return COMPARATOR_PRIORITY
+    return 0
+
+
+def is_split_before_delimiter(leaf: Leaf, previous: Leaf = None) -> int:
+    """Return the priority of the `leaf` delimiter, given a line before after it.
+
+    The delimiter priorities returned here are from those delimiters that would
+    cause a line break before themselves.
+
+    Higher numbers are higher priority.
+    """
+    if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
+        # * and ** might also be MATH_OPERATORS but in this case they are not.
+        # Don't treat them as a delimiter.
+        return 0
 
     if (
         leaf.type in MATH_OPERATORS
 
     ):
         return MATH_PRIORITY
 
     ):
         return MATH_PRIORITY
 
+    if leaf.type in COMPARATORS:
+        return COMPARATOR_PRIORITY
+
+    if (
+        leaf.type == token.STRING
+        and previous is not None
+        and previous.type == token.STRING
+    ):
+        return STRING_PRIORITY
+
+    if (
+        leaf.type == token.NAME
+        and leaf.value == "for"
+        and leaf.parent
+        and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
+    ):
+        return COMPREHENSION_PRIORITY
+
+    if (
+        leaf.type == token.NAME
+        and leaf.value == "if"
+        and leaf.parent
+        and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
+    ):
+        return COMPREHENSION_PRIORITY
+
+    if leaf.type == token.NAME and leaf.value in LOGIC_OPERATORS and leaf.parent:
+        return LOGIC_PRIORITY
+
     return 0
 
 
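The old is_delimiter() is split in two above: commas keep their priority for a break placed after them, while math operators, comparators, implicitly concatenated strings, comprehension `for`/`if` and logic operators now get their break before them. A rough standalone sketch of that distinction, using plain token strings and made-up priority values rather than black's Leaf objects and real constants:

# Illustrative only: plain token strings and invented priorities, not black's
# Leaf objects or its real *_PRIORITY constants.
COMMA_PRIORITY = 10
LOGIC_PRIORITY = 5
COMPARATOR_PRIORITY = 3
MATH_PRIORITY = 1

def split_after_priority(tok: str) -> int:
    """Delimiters that keep the line break after them (commas only)."""
    return COMMA_PRIORITY if tok == "," else 0

def split_before_priority(tok: str) -> int:
    """Delimiters that now get the line break before them."""
    if tok in {"and", "or"}:
        return LOGIC_PRIORITY
    if tok in {"==", "!=", "<", ">", "<=", ">=", "in", "is"}:
        return COMPARATOR_PRIORITY
    if tok in {"+", "-", "*", "/"}:
        return MATH_PRIORITY
    return 0

print(split_after_priority(","), split_before_priority("and"))  # 10 5
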
@@ -1378,17 +1632,17 @@ def generate_comments(leaf: Leaf) -> Iterator[Leaf]:
     if not p:
         return
 
-    if '#' not in p:
+    if "#" not in p:
         return
 
     consumed = 0
     nlines = 0
-    for index, line in enumerate(p.split('\n')):
+    for index, line in enumerate(p.split("\n")):
         consumed += len(line) + 1  # adding the length of the split '\n'
         line = line.lstrip()
         if not line:
             nlines += 1
-        if not line.startswith('#'):
+        if not line.startswith("#"):
             continue
 
         if index == 0 and leaf.type != token.ENDMARKER:
@@ -1396,13 +1650,18 @@ def generate_comments(leaf: Leaf) -> Iterator[Leaf]:
         else:
             comment_type = STANDALONE_COMMENT
         comment = make_comment(line)
-        yield Leaf(comment_type, comment, prefix='\n' * nlines)
+        yield Leaf(comment_type, comment, prefix="\n" * nlines)
 
-        if comment in {'# fmt: on', '# yapf: enable'}:
+        if comment in {"# fmt: on", "# yapf: enable"}:
             raise FormatOn(consumed)
 
-        if comment in {'# fmt: off', '# yapf: disable'}:
-            raise FormatOff(consumed)
+        if comment in {"# fmt: off", "# yapf: disable"}:
+            if comment_type == STANDALONE_COMMENT:
+                raise FormatOff(consumed)
+
+            prev = preceding_leaf(leaf)
+            if not prev or prev.type in WHITESPACE:  # standalone comment in disguise
+                raise FormatOff(consumed)
 
         nlines = 0
 
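The `# fmt: off` handling above is narrowed: a trailing `# fmt: off` only switches formatting off when it is effectively a standalone comment, i.e. nothing but whitespace precedes it on the line. A tiny illustration of that condition on raw text (is_standalone_like() is an invented helper for this note, not part of black):

def is_standalone_like(line: str, comment_start: int) -> bool:
    """True when only whitespace precedes the comment on its line."""
    return line[:comment_start].strip() == ""

print(is_standalone_like("    # fmt: off", 4))            # True  -> formatting off
print(is_standalone_like("x = [1, 2]  # fmt: off", 12))   # False -> ignored here
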
@@ -1417,13 +1676,13 @@ def make_comment(content: str) -> str:
     """
     content = content.rstrip()
     if not content:
-        return '#'
+        return "#"
 
-    if content[0] == '#':
+    if content[0] == "#":
+    if content[0] == "#":
         content = content[1:]
         content = content[1:]
-    if content and content[0] not in ' !:#':
-        content = ' ' + content
-    return '#' + content
+    if content and content[0] not in " !:#":
+        content = " " + content
+    return "#" + content
 
 
@@ -1439,23 +1698,24 @@ def split_line(
     If `py36` is True, splitting may generate syntax that is only compatible
     with Python 3.6 and later.
     """
-    if isinstance(line, UnformattedLines):
+    if isinstance(line, UnformattedLines) or line.is_comment:
         yield line
         return
 
-    line_str = str(line).strip('\n')
-    if len(line_str) <= line_length and '\n' not in line_str:
+    line_str = str(line).strip("\n")
+    if (
+        len(line_str) <= line_length
+        and "\n" not in line_str  # multiline strings
+        and not line.contains_standalone_comments()
+    ):
         yield line
         return
 
+    split_funcs: List[SplitFunc]
     if line.is_def:
         split_funcs = [left_hand_split]
     elif line.inside_brackets:
-        split_funcs = [delimiter_split]
-        if '\n' not in line_str:
-            # Only attempt RHS if we don't have multiline strings or comments
-            # on this line.
-            split_funcs.append(right_hand_split)
+        split_funcs = [delimiter_split, standalone_comment_split, right_hand_split]
     else:
         split_funcs = [right_hand_split]
     for split_func in split_funcs:
@@ -1464,8 +1724,8 @@ def split_line(
         # split altogether.
         result: List[Line] = []
         try:
-            for l in split_func(line, py36=py36):
-                if str(l).strip('\n') == line_str:
+            for l in split_func(line, py36):
+                if str(l).strip("\n") == line_str:
                     raise CannotSplit("Split function returned an unchanged result")
 
                 result.extend(
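With this change, a line inside brackets goes through delimiter_split, the new standalone_comment_split and right_hand_split in turn, and each splitter signals "not applicable" by raising CannotSplit. The cascade, reduced to a runnable toy on plain strings (the splitter names below are placeholders, not black's):

from typing import Callable, Iterator, List

class CannotSplit(Exception):
    """Raised by a splitter that does not apply to the given line."""

def try_split(line: str, split_funcs: List[Callable[[str], Iterator[str]]]) -> List[str]:
    # Mirrors the loop above: the first splitter that succeeds wins,
    # otherwise the line is kept as-is.
    for split_func in split_funcs:
        try:
            return list(split_func(line))
        except CannotSplit:
            continue
    return [line]

def refuse(line: str) -> Iterator[str]:
    raise CannotSplit("does not apply")
    yield  # unreachable; keeps this a generator, like black's split functions

def on_commas(line: str) -> Iterator[str]:
    for part in line.split(","):
        yield part.strip() + ","

print(try_split("a, b, c", [refuse, on_commas]))  # ['a,', 'b,', 'c,']
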
@@ -1512,13 +1772,10 @@ def left_hand_split(line: Line, py36: bool = False) -> Iterator[Line]:
     if body_leaves:
         normalize_prefix(body_leaves[0], inside_brackets=True)
     # Build the new lines.
-    for result, leaves in (
-        (head, head_leaves), (body, body_leaves), (tail, tail_leaves)
-    ):
+    for result, leaves in (head, head_leaves), (body, body_leaves), (tail, tail_leaves):
         for leaf in leaves:
             result.append(leaf, preformatted=True)
-            comment_after = line.comments.get(id(leaf))
-            if comment_after:
+            for comment_after in line.comments_after(leaf):
                 result.append(comment_after, preformatted=True)
     bracket_split_succeeded_or_raise(head, body, tail)
     for result in (head, body, tail):
@@ -1526,7 +1783,9 @@ def left_hand_split(line: Line, py36: bool = False) -> Iterator[Line]:
             yield result
 
 
-def right_hand_split(line: Line, py36: bool = False) -> Iterator[Line]:
+def right_hand_split(
+    line: Line, py36: bool = False, omit: Collection[LeafID] = ()
+) -> Iterator[Line]:
     """Split line into many lines, starting with the last matching bracket pair."""
     head = Line(depth=line.depth)
     body = Line(depth=line.depth + 1, inside_brackets=True)
@@ -1536,14 +1795,16 @@ def right_hand_split(line: Line, py36: bool = False) -> Iterator[Line]:
     head_leaves: List[Leaf] = []
     current_leaves = tail_leaves
     opening_bracket = None
+    closing_bracket = None
     for leaf in reversed(line.leaves):
         if current_leaves is body_leaves:
             if leaf is opening_bracket:
                 current_leaves = head_leaves if body_leaves else tail_leaves
         current_leaves.append(leaf)
         if current_leaves is tail_leaves:
-            if leaf.type in CLOSING_BRACKETS:
+            if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
                 opening_bracket = leaf.opening_bracket
+                closing_bracket = leaf
                 current_leaves = body_leaves
     tail_leaves.reverse()
     body_leaves.reverse()
@@ -1551,16 +1812,36 @@ def right_hand_split(line: Line, py36: bool = False) -> Iterator[Line]:
     # Since body is a new indent level, remove spurious leading whitespace.
     if body_leaves:
         normalize_prefix(body_leaves[0], inside_brackets=True)
+    elif not head_leaves:
+        # No `head` and no `body` means the split failed. `tail` has all content.
+        raise CannotSplit("No brackets found")
+
     # Build the new lines.
-    for result, leaves in (
-        (head, head_leaves), (body, body_leaves), (tail, tail_leaves)
-    ):
+    for result, leaves in (head, head_leaves), (body, body_leaves), (tail, tail_leaves):
         for leaf in leaves:
             result.append(leaf, preformatted=True)
-            comment_after = line.comments.get(id(leaf))
-            if comment_after:
+            for comment_after in line.comments_after(leaf):
                 result.append(comment_after, preformatted=True)
     bracket_split_succeeded_or_raise(head, body, tail)
+    assert opening_bracket and closing_bracket
+    if (
+        opening_bracket.type == token.LPAR
+        and not opening_bracket.value
+        and closing_bracket.type == token.RPAR
+        and not closing_bracket.value
+    ):
+        # These parens were optional. If there aren't any delimiters or standalone
+        # comments in the body, they were unnecessary and another split without
+        # them should be attempted.
+        if not (
+            body.bracket_tracker.delimiters or line.contains_standalone_comments(0)
+        ):
+            omit = {id(closing_bracket), *omit}
+            yield from right_hand_split(line, py36=py36, omit=omit)
+            return
+
+    ensure_visible(opening_bracket)
+    ensure_visible(closing_bracket)
     for result in (head, body, tail):
         if result:
             yield result
@@ -1592,10 +1873,25 @@ def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None
             )
 
 
+def dont_increase_indentation(split_func: SplitFunc) -> SplitFunc:
+    """Normalize prefix of the first leaf in every line returned by `split_func`.
+
+    This is a decorator over relevant split functions.
+    """
+
+    @wraps(split_func)
+    def split_wrapper(line: Line, py36: bool = False) -> Iterator[Line]:
+        for l in split_func(line, py36):
+            normalize_prefix(l.leaves[0], inside_brackets=True)
+            yield l
+
+    return split_wrapper
+
+
+@dont_increase_indentation
 def delimiter_split(line: Line, py36: bool = False) -> Iterator[Line]:
     """Split according to delimiters of the highest priority.
 
-    This kind of split doesn't increase indentation.
     If `py36` is True, the split will add trailing commas also in function
     signatures that contain `*` and `**`.
     """
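dont_increase_indentation() replaces the per-line normalize_prefix() calls that delimiter_split used to make with a decorator that post-processes every line the wrapped generator yields. The same wrapping pattern on a toy generator, with invented names:

from functools import wraps
from typing import Callable, Iterator

def strip_each(gen_func: Callable[..., Iterator[str]]) -> Callable[..., Iterator[str]]:
    """Decorate a generator function and normalize every item it yields."""
    @wraps(gen_func)
    def wrapper(*args: object, **kwargs: object) -> Iterator[str]:
        for item in gen_func(*args, **kwargs):
            yield item.strip()
    return wrapper

@strip_each
def chunks() -> Iterator[str]:
    yield "  head "
    yield "  body"

print(list(chunks()))  # ['head', 'body']
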
@@ -1615,32 +1911,72 @@ def delimiter_split(line: Line, py36: bool = False) -> Iterator[Line]:
     current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
     lowest_depth = sys.maxsize
     trailing_comma_safe = True
+
+    def append_to_line(leaf: Leaf) -> Iterator[Line]:
+        """Append `leaf` to current line or to new line if appending impossible."""
+        nonlocal current_line
+        try:
+            current_line.append_safe(leaf, preformatted=True)
+        except ValueError as ve:
+            yield current_line
+
+            current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
+            current_line.append(leaf)
+
     for leaf in line.leaves:
-        current_line.append(leaf, preformatted=True)
-        comment_after = line.comments.get(id(leaf))
-        if comment_after:
-            current_line.append(comment_after, preformatted=True)
+        yield from append_to_line(leaf)
+
+        for comment_after in line.comments_after(leaf):
+            yield from append_to_line(comment_after)
+
         lowest_depth = min(lowest_depth, leaf.bracket_depth)
         if (
             leaf.bracket_depth == lowest_depth
-            and leaf.type == token.STAR
-            or leaf.type == token.DOUBLESTAR
+            and is_vararg(leaf, within=VARARGS_PARENTS)
         ):
             trailing_comma_safe = trailing_comma_safe and py36
         leaf_priority = delimiters.get(id(leaf))
         if leaf_priority == delimiter_priority:
-            normalize_prefix(current_line.leaves[0], inside_brackets=True)
             yield current_line
 
             current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
     if current_line:
         if (
-            delimiter_priority == COMMA_PRIORITY
+            trailing_comma_safe
+            and delimiter_priority == COMMA_PRIORITY
             and current_line.leaves[-1].type != token.COMMA
-            and trailing_comma_safe
+            and current_line.leaves[-1].type != STANDALONE_COMMENT
         ):
-            current_line.append(Leaf(token.COMMA, ','))
-        normalize_prefix(current_line.leaves[0], inside_brackets=True)
+            current_line.append(Leaf(token.COMMA, ","))
+        yield current_line
+
+
+@dont_increase_indentation
+def standalone_comment_split(line: Line, py36: bool = False) -> Iterator[Line]:
+    """Split standalone comments from the rest of the line."""
+    if not line.contains_standalone_comments(0):
+        raise CannotSplit("Line does not have any standalone comments")
+
+    current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
+
+    def append_to_line(leaf: Leaf) -> Iterator[Line]:
+        """Append `leaf` to current line or to new line if appending impossible."""
+        nonlocal current_line
+        try:
+            current_line.append_safe(leaf, preformatted=True)
+        except ValueError as ve:
+            yield current_line
+
+            current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
+            current_line.append(leaf)
+
+    for leaf in line.leaves:
+        yield from append_to_line(leaf)
+
+        for comment_after in line.comments_after(leaf):
+            yield from append_to_line(comment_after)
+
+    if current_line:
         yield current_line
 
 
@@ -1652,8 +1988,8 @@ def is_import(leaf: Leaf) -> bool:
     return bool(
         t == token.NAME
         and (
-            (v == 'import' and p and p.type == syms.import_name)
-            or (v == 'from' and p and p.type == syms.import_from)
+            (v == "import" and p and p.type == syms.import_name)
+            or (v == "from" and p and p.type == syms.import_from)
         )
     )
 
@@ -1665,15 +2001,209 @@ def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
     Note: don't use backslashes for formatting or you'll lose your voting rights.
     """
     if not inside_brackets:
-        spl = leaf.prefix.split('#')
-        if '\\' not in spl[0]:
-            nl_count = spl[-1].count('\n')
+        spl = leaf.prefix.split("#")
+        if "\\" not in spl[0]:
+            nl_count = spl[-1].count("\n")
             if len(spl) > 1:
                 nl_count -= 1
-            leaf.prefix = '\n' * nl_count
+            leaf.prefix = "\n" * nl_count
             return
 
-    leaf.prefix = ''
+    leaf.prefix = ""
+
+
+def normalize_string_quotes(leaf: Leaf) -> None:
+    """Prefer double quotes but only if it doesn't cause more escaping.
+
+    Adds or removes backslashes as appropriate. Doesn't parse and fix
+    strings nested in f-strings (yet).
+
+    Note: Mutates its argument.
+    """
+    value = leaf.value.lstrip("furbFURB")
+    if value[:3] == '"""':
+        return
+
+    elif value[:3] == "'''":
+        orig_quote = "'''"
+        new_quote = '"""'
+    elif value[0] == '"':
+        orig_quote = '"'
+        new_quote = "'"
+    else:
+        orig_quote = "'"
+        new_quote = '"'
+    first_quote_pos = leaf.value.find(orig_quote)
+    if first_quote_pos == -1:
+        return  # There's an internal error
+
+    prefix = leaf.value[:first_quote_pos]
+    unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
+    escaped_new_quote = re.compile(rf"([^\\]|^)\\(\\\\)*{new_quote}")
+    escaped_orig_quote = re.compile(rf"([^\\]|^)\\(\\\\)*{orig_quote}")
+    body = leaf.value[first_quote_pos + len(orig_quote):-len(orig_quote)]
+    if "r" in prefix.casefold():
+        if unescaped_new_quote.search(body):
+            # There's at least one unescaped new_quote in this raw string
+            # so converting is impossible
+            return
+
+        # Do not introduce or remove backslashes in raw strings
+        new_body = body
+    else:
+        # remove unnecessary quotes
+        new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
+        if body != new_body:
+            # Consider the string without unnecessary quotes as the original
+            body = new_body
+            leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}"
+        new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
+        new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)
+    if new_quote == '"""' and new_body[-1] == '"':
+        # edge case: the body ends in a double quote; escape it so it is not
+        # absorbed into the closing triple quote
+        new_body = new_body[:-1] + '\\"'
+    orig_escape_count = body.count("\\")
+    new_escape_count = new_body.count("\\")
+    if new_escape_count > orig_escape_count:
+        return  # Do not introduce more escaping
+
+    if new_escape_count == orig_escape_count and orig_quote == '"':
+        return  # Prefer double quotes
+
+    leaf.value = f"{prefix}{new_quote}{new_body}{new_quote}"
+
+
+def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
+    """Make existing optional parentheses invisible or create new ones.
+
+    Standardizes on visible parentheses for single-element tuples, and keeps
+    existing visible parentheses for other tuples and generator expressions.
+    """
+    check_lpar = False
+    for child in list(node.children):
+        if check_lpar:
+            if child.type == syms.atom:
+                if not (
+                    is_empty_tuple(child)
+                    or is_one_tuple(child)
+                    or max_delimiter_priority_in_atom(child) >= COMMA_PRIORITY
+                ):
+                    first = child.children[0]
+                    last = child.children[-1]
+                    if first.type == token.LPAR and last.type == token.RPAR:
+                        # make parentheses invisible
+                        first.value = ""  # type: ignore
+                        last.value = ""  # type: ignore
+            elif is_one_tuple(child):
+                # wrap child in visible parentheses
+                lpar = Leaf(token.LPAR, "(")
+                rpar = Leaf(token.RPAR, ")")
+                index = child.remove() or 0
+                node.insert_child(index, Node(syms.atom, [lpar, child, rpar]))
+            else:
+                # wrap child in invisible parentheses
+                lpar = Leaf(token.LPAR, "")
+                rpar = Leaf(token.RPAR, "")
+                index = child.remove() or 0
+                node.insert_child(index, Node(syms.atom, [lpar, child, rpar]))
+
+        check_lpar = isinstance(child, Leaf) and child.value in parens_after
+
+
+def is_empty_tuple(node: LN) -> bool:
+    """Return True if `node` holds an empty tuple."""
+    return (
+        node.type == syms.atom
+        and len(node.children) == 2
+        and node.children[0].type == token.LPAR
+        and node.children[1].type == token.RPAR
+    )
+
+
+def is_one_tuple(node: LN) -> bool:
+    """Return True if `node` holds a tuple with one element, with or without parens."""
+    if node.type == syms.atom:
+        if len(node.children) != 3:
+            return False
+
+        lpar, gexp, rpar = node.children
+        if not (
+            lpar.type == token.LPAR
+            and gexp.type == syms.testlist_gexp
+            and rpar.type == token.RPAR
+        ):
+            return False
+
+        return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA
+
+    return (
+        node.type in IMPLICIT_TUPLE
+        and len(node.children) == 2
+        and node.children[1].type == token.COMMA
+    )
+
+
+def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
+    """Return True if `leaf` is a star or double star in a vararg or kwarg.
+
+    If `within` includes VARARGS_PARENTS, this applies to function signatures.
+    If `within` includes UNPACKING_PARENTS, it applies to right-hand side
+    extended iterable unpacking (PEP 3132) and additional unpacking
+    generalizations (PEP 448).
+    """
+    if leaf.type not in STARS or not leaf.parent:
+        return False
+
+    p = leaf.parent
+    if p.type == syms.star_expr:
+        # Star expressions are also used as assignment targets in extended
+        # iterable unpacking (PEP 3132).  See what its parent is instead.
+        if not p.parent:
+            return False
+
+        p = p.parent
+
+    return p.type in within
+
+
+def max_delimiter_priority_in_atom(node: LN) -> int:
+    """Return maximum delimiter priority inside `node`.
+
+    This is specific to atoms with contents contained in a pair of parentheses.
+    If `node` isn't an atom or there are no enclosing parentheses, returns 0.
+    """
+    if node.type != syms.atom:
+        return 0
+
+    first = node.children[0]
+    last = node.children[-1]
+    if not (first.type == token.LPAR and last.type == token.RPAR):
+        return 0
+
+    bt = BracketTracker()
+    for c in node.children[1:-1]:
+        if isinstance(c, Leaf):
+            bt.mark(c)
+        else:
+            for leaf in c.leaves():
+                bt.mark(leaf)
+    try:
+        return bt.max_delimiter_priority()
+
+    except ValueError:
+        return 0
+
+
+def ensure_visible(leaf: Leaf) -> None:
+    """Make sure parentheses are visible.
+
+    They could be invisible as part of some statements (see
+    :func:`normalize_invisible_parens` and :func:`visit_import_from`).
+    """
+    if leaf.type == token.LPAR:
+        leaf.value = "("
+    elif leaf.type == token.RPAR:
+        leaf.value = ")"
 
 
 def is_python36(node: Node) -> bool:
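normalize_string_quotes() and the one-tuple handling in normalize_invisible_parens()/is_one_tuple() are what drive the double-quote preference and the visible parentheses around single-element tuples. Assuming this black.py is importable as `black`, they can be exercised through format_str() (the same entry point assert_stable() uses below); the expected outputs in the comments are my reading of the code, not guaranteed results:

import black

# Double quotes are preferred unless switching would add escapes.
print(black.format_str("x = 'hello'\n", line_length=88), end="")   # x = "hello"
print(black.format_str("y = 'it\\'s'\n", line_length=88), end="")  # y = "it's"

# One-element tuples get visible parentheses.
print(black.format_str("t = 1,\n", line_length=88), end="")        # t = (1,)
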
@@ -1686,7 +2216,7 @@ def is_python36(node: Node) -> bool:
     for n in node.pre_order():
         if n.type == token.STRING:
             value_head = n.value[:2]  # type: ignore
-            if value_head in {'f"', 'F"', "f'", "F'", 'rf', 'fr', 'RF', 'FR'}:
+            if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}:
                 return True
 
         elif (
@@ -1695,15 +2225,15 @@ def is_python36(node: Node) -> bool:
             and n.children[-1].type == token.COMMA
         ):
             for ch in n.children:
-                if ch.type == token.STAR or ch.type == token.DOUBLESTAR:
+                if ch.type in STARS:
                     return True
 
     return False
 
 
-PYTHON_EXTENSIONS = {'.py'}
+PYTHON_EXTENSIONS = {".py"}
 BLACKLISTED_DIRECTORIES = {
-    'build', 'buck-out', 'dist', '_build', '.git', '.hg', '.mypy_cache', '.tox', '.venv'
+    "build", "buck-out", "dist", "_build", ".git", ".hg", ".mypy_cache", ".tox", ".venv"
 }
 
 
@@ -1726,23 +2256,30 @@ def gen_python_files_in_dir(path: Path) -> Iterator[Path]:
 class Report:
     """Provides a reformatting counter. Can be rendered with `str(report)`."""
     check: bool = False
+    quiet: bool = False
     change_count: int = 0
     same_count: int = 0
     failure_count: int = 0
 
-    def done(self, src: Path, changed: bool) -> None:
+    def done(self, src: Path, changed: Changed) -> None:
         """Increment the counter for successful reformatting. Write out a message."""
-        if changed:
-            reformatted = 'would reformat' if self.check else 'reformatted'
-            out(f'{reformatted} {src}')
+        if changed is Changed.YES:
+            reformatted = "would reformat" if self.check else "reformatted"
+            if not self.quiet:
+                out(f"{reformatted} {src}")
             self.change_count += 1
         else:
-            out(f'{src} already well formatted, good job.', bold=False)
+            if not self.quiet:
+                if changed is Changed.NO:
+                    msg = f"{src} already well formatted, good job."
+                else:
+                    msg = f"{src} wasn't modified on disk since last run."
+                out(msg, bold=False)
             self.same_count += 1
 
     def failed(self, src: Path, message: str) -> None:
         """Increment the counter for failed reformatting. Write out a message."""
-        err(f'error: cannot format {src}: {message}')
+        err(f"error: cannot format {src}: {message}")
         self.failure_count += 1
 
     @property
@@ -1779,19 +2316,19 @@ class Report:
             failed = "failed to reformat"
         report = []
         if self.change_count:
-            s = 's' if self.change_count > 1 else ''
+            s = "s" if self.change_count > 1 else ""
             report.append(
-                click.style(f'{self.change_count} file{s} {reformatted}', bold=True)
+                click.style(f"{self.change_count} file{s} {reformatted}", bold=True)
             )
         if self.same_count:
-            s = 's' if self.same_count > 1 else ''
-            report.append(f'{self.same_count} file{s} {unchanged}')
+            s = "s" if self.same_count > 1 else ""
+            report.append(f"{self.same_count} file{s} {unchanged}")
         if self.failure_count:
-            s = 's' if self.failure_count > 1 else ''
+            s = "s" if self.failure_count > 1 else ""
             report.append(
-                click.style(f'{self.failure_count} file{s} {failed}', fg='red')
+                click.style(f"{self.failure_count} file{s} {failed}", fg="red")
             )
-        return ', '.join(report) + '.'
+        return ", ".join(report) + "."
 
 
 def assert_equivalent(src: str, dst: str) -> None:
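Report gains a `quiet` flag and done() now takes the Changed enum, so a cache hit can be counted as unchanged without pretending the file was just reformatted. A hypothetical interactive check, again assuming this module is importable as `black`:

from pathlib import Path
import black

report = black.Report(check=True, quiet=False)
report.done(Path("example.py"), black.Changed.YES)     # prints: would reformat example.py
report.done(Path("cached.py"), black.Changed.CACHED)   # prints the "wasn't modified on disk" note, counts as unchanged
print(str(report))  # summary line with the reformatted / unchanged counts
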
@@ -1838,17 +2375,17 @@ def assert_equivalent(src: str, dst: str) -> None:
     try:
         dst_ast = ast.parse(dst)
     except Exception as exc:
-        log = dump_to_file(''.join(traceback.format_tb(exc.__traceback__)), dst)
+        log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
         raise AssertionError(
             f"INTERNAL ERROR: Black produced invalid code: {exc}. "
             f"Please report a bug on https://github.com/ambv/black/issues.  "
             f"This invalid output might be helpful: {log}"
         ) from None
 
-    src_ast_str = '\n'.join(_v(src_ast))
-    dst_ast_str = '\n'.join(_v(dst_ast))
+    src_ast_str = "\n".join(_v(src_ast))
+    dst_ast_str = "\n".join(_v(dst_ast))
     if src_ast_str != dst_ast_str:
-        log = dump_to_file(diff(src_ast_str, dst_ast_str, 'src', 'dst'))
+        log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
         raise AssertionError(
             f"INTERNAL ERROR: Black produced code that is not equivalent to "
             f"the source.  "
@@ -1862,8 +2399,8 @@ def assert_stable(src: str, dst: str, line_length: int) -> None:
     newdst = format_str(dst, line_length=line_length)
     if dst != newdst:
         log = dump_to_file(
-            diff(src, dst, 'source', 'first pass'),
-            diff(dst, newdst, 'first pass', 'second pass'),
+            diff(src, dst, "source", "first pass"),
+            diff(dst, newdst, "first pass", "second pass"),
         )
         raise AssertionError(
             f"INTERNAL ERROR: Black produced different code on the second pass "
@@ -1878,11 +2415,12 @@ def dump_to_file(*output: str) -> str:
     import tempfile
 
     with tempfile.NamedTemporaryFile(
-        mode='w', prefix='blk_', suffix='.log', delete=False
+        mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
     ) as f:
         for lines in output:
             f.write(lines)
-            f.write('\n')
+            if lines and lines[-1] != "\n":
+                f.write("\n")
     return f.name
 
 
@@ -1890,12 +2428,107 @@ def diff(a: str, b: str, a_name: str, b_name: str) -> str:
     """Return a unified diff string between strings `a` and `b`."""
     import difflib
 
-    a_lines = [line + '\n' for line in a.split('\n')]
-    b_lines = [line + '\n' for line in b.split('\n')]
-    return ''.join(
+    a_lines = [line + "\n" for line in a.split("\n")]
+    b_lines = [line + "\n" for line in b.split("\n")]
+    return "".join(
         difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5)
     )
 
 
-if __name__ == '__main__':
+def cancel(tasks: List[asyncio.Task]) -> None:
+    """asyncio signal handler that cancels all `tasks` and reports to stderr."""
+    err("Aborted!")
+    for task in tasks:
+        task.cancel()
+
+
+def shutdown(loop: BaseEventLoop) -> None:
+    """Cancel all pending tasks on `loop`, wait for them, and close the loop."""
+    try:
+        # This part is borrowed from asyncio/runners.py in Python 3.7b2.
+        to_cancel = [task for task in asyncio.Task.all_tasks(loop) if not task.done()]
+        if not to_cancel:
+            return
+
+        for task in to_cancel:
+            task.cancel()
+        loop.run_until_complete(
+            asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)
+        )
+    finally:
+        # `concurrent.futures.Future` objects cannot be cancelled once they
+        # are already running. There might be some when the `shutdown()` happened.
+        # Silence their logger's spew about the event loop being closed.
+        cf_logger = logging.getLogger("concurrent.futures")
+        cf_logger.setLevel(logging.CRITICAL)
+        loop.close()
+
+
+def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
+    """Replace `regex` with `replacement` twice on `original`.
+
+    This is used by string normalization to perform replaces on
+    overlapping matches.
+    """
+    return regex.sub(replacement, regex.sub(replacement, original))
+
+
+CACHE_DIR = Path(user_cache_dir("black", version=__version__))
+CACHE_FILE = CACHE_DIR / "cache.pickle"
+
+
+def read_cache() -> Cache:
+    """Read the cache if it exists and is well formed.
+
+    If it is not well formed, the call to write_cache later should resolve the issue.
+    """
+    if not CACHE_FILE.exists():
+        return {}
+
+    with CACHE_FILE.open("rb") as fobj:
+        try:
+            cache: Cache = pickle.load(fobj)
+        except pickle.UnpicklingError:
+            return {}
+
+    return cache
+
+
+def get_cache_info(path: Path) -> CacheInfo:
+    """Return the information used to check if a file is already formatted or not."""
+    stat = path.stat()
+    return stat.st_mtime, stat.st_size
+
+
+def filter_cached(
+    cache: Cache, sources: Iterable[Path]
+) -> Tuple[List[Path], List[Path]]:
+    """Split a list of paths into two.
+
+    The first list contains paths of files that were modified on disk or are not in the
+    cache. The other list contains paths to non-modified files.
+    """
+    todo, done = [], []
+    for src in sources:
+        src = src.resolve()
+        if cache.get(src) != get_cache_info(src):
+            todo.append(src)
+        else:
+            done.append(src)
+    return todo, done
+
+
+def write_cache(cache: Cache, sources: List[Path]) -> None:
+    """Update the cache file."""
+    try:
+        if not CACHE_DIR.exists():
+            CACHE_DIR.mkdir(parents=True)
+        new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}}
+        with CACHE_FILE.open("wb") as fobj:
+            pickle.dump(new_cache, fobj, protocol=pickle.HIGHEST_PROTOCOL)
+    except OSError:
+        pass
+
+
+if __name__ == "__main__":
     main()
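sub_twice() exists because the quote-normalization patterns overlap: the ([^\\]|^) prefix of one match consumes the character the next match needs, so a single re.sub() pass skips every second escaped quote. A small self-contained check (the pattern mirrors escaped_orig_quote for a single-quoted body and relies on Python 3.5+ replacing unmatched groups with an empty string, as normalize_string_quotes() itself does):

import re

escaped_quote = re.compile(r"([^\\]|^)\\(\\\\)*'")
body = r"\'\'"  # two consecutive escaped single quotes

once = escaped_quote.sub(r"\1\2'", body)
twice = escaped_quote.sub(r"\1\2'", once)
print(once)   # '\'  -- the second escaped quote is missed by a single pass
print(twice)  # ''   -- the second pass catches it
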
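The new cache maps resolved paths to an (mtime, size) pair: filter_cached() only re-queues files whose current stat no longer matches, and write_cache() refreshes the entries afterwards. A minimal round trip with a throwaway file, mirroring get_cache_info() without touching black's real pickle cache:

import tempfile
from pathlib import Path

def cache_info(path: Path):
    stat = path.stat()
    return stat.st_mtime, stat.st_size   # same key data as get_cache_info() above

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
    tmp.write("x = 1\n")
src = Path(tmp.name).resolve()

cache = {}                                  # unknown file -> would be reformatted
print(cache.get(src) != cache_info(src))    # True
cache[src] = cache_info(src)                # what write_cache() records afterwards
print(cache.get(src) != cache_info(src))    # False -> skipped on the next run
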