git.madduck.net Git - etc/vim.git/blobdiff - black.py

madduck's git repository

Every one of the projects in this repository is available at the canonical URL git://git.madduck.net/madduck/pub/<projectpath> — see each project's metadata for the exact URL.

All patches and comments are welcome. Please squash your changes into logical commits before submitting them via git-format-patch and git-send-email to patches@git.madduck.net. I would be especially grateful if you read the Git project's submission guidelines and adhere to them.

SSH access, as well as push access, can be arranged individually.

If you use my repositories frequently, consider adding the following snippet to ~/.gitconfig and using the third clone URL listed for each project:

[url "git://git.madduck.net/madduck/"]
  insteadOf = madduck:
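
With a url.<base>.insteadOf entry like this, git rewrites any URL that begins with the madduck: prefix to git://git.madduck.net/madduck/ before connecting. As a rough illustration (the project path below is just the placeholder from the canonical URL pattern above, not a specific clone URL):

  git clone madduck:pub/<projectpath>

git expands this to git://git.madduck.net/madduck/pub/<projectpath> and clones from there.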

Adding Jupyter Notebook magic command (#200)
index 58f7976aa60bc72b0ceb72b5a160a6107bf6637b..913fe8dfefaf496e57373e94d96b272caebf486d 100644
--- a/black.py
+++ b/black.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python3
-
 import asyncio
 import pickle
 from asyncio.base_events import BaseEventLoop
@@ -43,8 +41,9 @@ from blib2to3 import pygram, pytree
 from blib2to3.pgen2 import driver, token
 from blib2to3.pgen2.parse import ParseError
 
-__version__ = "18.4a2"
+__version__ = "18.4a6"
 DEFAULT_LINE_LENGTH = 88
+
 # types
 syms = pygram.python_symbols
 FileContent = str
@@ -88,11 +87,11 @@ class FormatError(Exception):
         self.consumed = consumed
 
     def trim_prefix(self, leaf: Leaf) -> None:
-        leaf.prefix = leaf.prefix[self.consumed:]
+        leaf.prefix = leaf.prefix[self.consumed :]
 
     def leaf_from_consumed(self, leaf: Leaf) -> Leaf:
         """Returns a new Leaf from the consumed part of the prefix."""
-        unformatted_prefix = leaf.prefix[:self.consumed]
+        unformatted_prefix = leaf.prefix[: self.consumed]
         return Leaf(token.NEWLINE, unformatted_prefix)
 
 
@@ -184,47 +183,46 @@ def main(
             sources.append(Path("-"))
         else:
             err(f"invalid path: {s}")
-    if check and diff:
-        exc = click.ClickException("Options --check and --diff are mutually exclusive")
-        exc.exit_code = 2
-        raise exc
 
-    if check:
+    if check and not diff:
         write_back = WriteBack.NO
     elif diff:
         write_back = WriteBack.DIFF
     else:
         write_back = WriteBack.YES
+    report = Report(check=check, quiet=quiet)
     if len(sources) == 0:
+        out("No paths given. Nothing to do 😴")
         ctx.exit(0)
         return
 
     elif len(sources) == 1:
-        return_code = reformat_one(sources[0], line_length, fast, quiet, write_back)
+        reformat_one(sources[0], line_length, fast, write_back, report)
     else:
         loop = asyncio.get_event_loop()
         executor = ProcessPoolExecutor(max_workers=os.cpu_count())
-        return_code = 1
         try:
-            return_code = loop.run_until_complete(
+            loop.run_until_complete(
                 schedule_formatting(
-                    sources, line_length, write_back, fast, quiet, loop, executor
+                    sources, line_length, fast, write_back, report, loop, executor
                 )
             )
         finally:
             shutdown(loop)
-    ctx.exit(return_code)
+        if not quiet:
+            out("All done! ✨ 🍰 ✨")
+            click.echo(str(report))
+    ctx.exit(report.return_code)
 
 
 def reformat_one(
-    src: Path, line_length: int, fast: bool, quiet: bool, write_back: WriteBack
-) -> int:
+    src: Path, line_length: int, fast: bool, write_back: WriteBack, report: "Report"
+) -> None:
     """Reformat a single file under `src` without spawning child processes.
 
     If `quiet` is True, non-error messages are not output. `line_length`,
     `write_back`, and `fast` options are passed to :func:`format_file_in_place`.
     """
-    report = Report(check=write_back is WriteBack.NO, quiet=quiet)
     try:
         changed = Changed.NO
         if not src.is_file() and str(src) == "-":
@@ -235,7 +233,7 @@ def reformat_one(
         else:
             cache: Cache = {}
             if write_back != WriteBack.DIFF:
-                cache = read_cache()
+                cache = read_cache(line_length)
                 src = src.resolve()
                 if src in cache and cache[src] == get_cache_info(src):
                     changed = Changed.CACHED
@@ -246,23 +244,22 @@ def reformat_one(
                 )
             ):
                 changed = Changed.YES
-            if write_back != WriteBack.DIFF and changed is not Changed.NO:
-                write_cache(cache, [src])
+            if write_back == WriteBack.YES and changed is not Changed.NO:
+                write_cache(cache, [src], line_length)
         report.done(src, changed)
     except Exception as exc:
         report.failed(src, str(exc))
-    return report.return_code
 
 
 async def schedule_formatting(
     sources: List[Path],
     line_length: int,
-    write_back: WriteBack,
     fast: bool,
-    quiet: bool,
+    write_back: WriteBack,
+    report: "Report",
     loop: BaseEventLoop,
     executor: Executor,
-) -> int:
+) -> None:
     """Run formatting of `sources` in parallel using the provided `executor`.
 
     (Use ProcessPoolExecutors for actual parallelism.)
@@ -270,10 +267,9 @@ async def schedule_formatting(
     `line_length`, `write_back`, and `fast` options are passed to
     :func:`format_file_in_place`.
     """
-    report = Report(check=write_back is WriteBack.NO, quiet=quiet)
     cache: Cache = {}
     if write_back != WriteBack.DIFF:
-        cache = read_cache()
+        cache = read_cache(line_length)
         sources, cached = filter_cached(cache, sources)
         for src in cached:
             report.done(src, Changed.CACHED)
@@ -315,15 +311,8 @@ async def schedule_formatting(
 
     if cancelled:
         await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
-    elif not quiet:
-        out("All done! ✨ 🍰 ✨")
-    if not quiet:
-        click.echo(str(report))
-
-    if write_back != WriteBack.DIFF and formatted:
-        write_cache(cache, formatted)
-
-    return report.return_code
+    if write_back == WriteBack.YES and formatted:
+        write_cache(cache, formatted, line_length)
 
 
 def format_file_in_place(
@@ -352,8 +341,8 @@ def format_file_in_place(
         with open(src, "w", encoding=src_buffer.encoding) as f:
             f.write(dst_contents)
     elif write_back == write_back.DIFF:
-        src_name = f"{src.name}  (original)"
-        dst_name = f"{src.name}  (formatted)"
+        src_name = f"{src}  (original)"
+        dst_name = f"{src}  (formatted)"
         diff_contents = diff(src_contents, dst_contents, src_name, dst_name)
         if lock:
             lock.acquire()
@@ -420,9 +409,10 @@ def format_str(src_contents: str, line_length: int) -> FileContent:
     """
     src_node = lib2to3_parse(src_contents)
     dst_contents = ""
-    lines = LineGenerator()
-    elt = EmptyLineTracker()
+    future_imports = get_future_imports(src_node)
     py36 = is_python36(src_node)
+    lines = LineGenerator(remove_u_prefix=py36 or "unicode_literals" in future_imports)
+    elt = EmptyLineTracker()
     empty_line = Line()
     after = 0
     for current_line in lines.visit(src_node):
@@ -439,7 +429,6 @@ def format_str(src_contents: str, line_length: int) -> FileContent:
 GRAMMARS = [
     pygram.python_grammar_no_print_statement_no_exec_statement,
     pygram.python_grammar_no_print_statement,
-    pygram.python_grammar_no_exec_statement,
     pygram.python_grammar,
 ]
 
@@ -565,19 +554,20 @@ COMPARATORS = {
     token.GREATEREQUAL,
 }
 MATH_OPERATORS = {
+    token.VBAR,
+    token.CIRCUMFLEX,
+    token.AMPER,
+    token.LEFTSHIFT,
+    token.RIGHTSHIFT,
     token.PLUS,
     token.MINUS,
     token.STAR,
     token.SLASH,
-    token.VBAR,
-    token.AMPER,
+    token.DOUBLESLASH,
     token.PERCENT,
-    token.CIRCUMFLEX,
+    token.AT,
     token.TILDE,
-    token.LEFTSHIFT,
-    token.RIGHTSHIFT,
     token.DOUBLESTAR,
-    token.DOUBLESLASH,
 }
 STARS = {token.STAR, token.DOUBLESTAR}
 VARARGS_PARENTS = {
@@ -593,12 +583,61 @@ UNPACKING_PARENTS = {
     syms.listmaker,
     syms.testlist_gexp,
 }
+TEST_DESCENDANTS = {
+    syms.test,
+    syms.lambdef,
+    syms.or_test,
+    syms.and_test,
+    syms.not_test,
+    syms.comparison,
+    syms.star_expr,
+    syms.expr,
+    syms.xor_expr,
+    syms.and_expr,
+    syms.shift_expr,
+    syms.arith_expr,
+    syms.trailer,
+    syms.term,
+    syms.power,
+}
+ASSIGNMENTS = {
+    "=",
+    "+=",
+    "-=",
+    "*=",
+    "@=",
+    "/=",
+    "%=",
+    "&=",
+    "|=",
+    "^=",
+    "<<=",
+    ">>=",
+    "**=",
+    "//=",
+}
 COMPREHENSION_PRIORITY = 20
-COMMA_PRIORITY = 10
-LOGIC_PRIORITY = 5
-STRING_PRIORITY = 4
-COMPARATOR_PRIORITY = 3
-MATH_PRIORITY = 1
+COMMA_PRIORITY = 18
+TERNARY_PRIORITY = 16
+LOGIC_PRIORITY = 14
+STRING_PRIORITY = 12
+COMPARATOR_PRIORITY = 10
+MATH_PRIORITIES = {
+    token.VBAR: 8,
+    token.CIRCUMFLEX: 7,
+    token.AMPER: 6,
+    token.LEFTSHIFT: 5,
+    token.RIGHTSHIFT: 5,
+    token.PLUS: 4,
+    token.MINUS: 4,
+    token.STAR: 3,
+    token.SLASH: 3,
+    token.DOUBLESLASH: 3,
+    token.PERCENT: 3,
+    token.AT: 3,
+    token.TILDE: 2,
+    token.DOUBLESTAR: 1,
+}
 
 
 @dataclass
@@ -609,8 +648,8 @@ class BracketTracker:
     bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = Factory(dict)
     delimiters: Dict[LeafID, Priority] = Factory(dict)
     previous: Optional[Leaf] = None
-    _for_loop_variable: bool = False
-    _lambda_arguments: bool = False
+    _for_loop_variable: int = 0
+    _lambda_arguments: int = 0
 
     def mark(self, leaf: Leaf) -> None:
         """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
@@ -672,7 +711,7 @@ class BracketTracker:
         """
         if leaf.type == token.NAME and leaf.value == "for":
             self.depth += 1
-            self._for_loop_variable = True
+            self._for_loop_variable += 1
             return True
 
         return False
@@ -681,7 +720,7 @@ class BracketTracker:
         """See `maybe_increment_for_loop_variable` above for explanation."""
         if self._for_loop_variable and leaf.type == token.NAME and leaf.value == "in":
             self.depth -= 1
-            self._for_loop_variable = False
+            self._for_loop_variable -= 1
             return True
 
         return False
@@ -694,7 +733,7 @@ class BracketTracker:
         """
         if leaf.type == token.NAME and leaf.value == "lambda":
             self.depth += 1
-            self._lambda_arguments = True
+            self._lambda_arguments += 1
             return True
 
         return False
@@ -703,11 +742,15 @@ class BracketTracker:
         """See `maybe_increment_lambda_arguments` above for explanation."""
         if self._lambda_arguments and leaf.type == token.COLON:
             self.depth -= 1
-            self._lambda_arguments = False
+            self._lambda_arguments -= 1
             return True
 
         return False
 
+    def get_open_lsqb(self) -> Optional[Leaf]:
+        """Return the most recent opening square bracket (if any)."""
+        return self.bracket_match.get((self.depth - 1, token.RSQB))
+
 
 @dataclass
 class Line:
@@ -733,14 +776,17 @@ class Line:
         if not has_value:
             return
 
+        if token.COLON == leaf.type and self.is_class_paren_empty:
+            del self.leaves[-2:]
         if self.leaves and not preformatted:
             # Note: at this point leaf.prefix should be empty except for
             # imports, for which we only preserve newlines.
-            leaf.prefix += whitespace(leaf)
+            leaf.prefix += whitespace(
+                leaf, complex_subscript=self.is_complex_subscript(leaf)
+            )
         if self.inside_brackets or not preformatted:
             self.bracket_tracker.mark(leaf)
             self.maybe_remove_trailing_comma(leaf)
-
         if not self.append_comment(leaf):
             self.leaves.append(leaf)
 
@@ -828,6 +874,22 @@ class Line:
             and self.leaves[0].value == "yield"
         )
 
+    @property
+    def is_class_paren_empty(self) -> bool:
+        """Is this a class with no base classes but using parentheses?
+
+        Those are unnecessary and should be removed.
+        """
+        return (
+            bool(self)
+            and len(self.leaves) == 4
+            and self.is_class
+            and self.leaves[2].type == token.LPAR
+            and self.leaves[2].value == "("
+            and self.leaves[3].type == token.RPAR
+            and self.leaves[3].value == ")"
+        )
+
     def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
         """If so, needs to be split before emitting."""
         for leaf in self.leaves:
@@ -856,9 +918,14 @@ class Line:
                 self.remove_trailing_comma()
                 return True
 
-        # For parens let's check if it's safe to remove the comma.  If the
-        # trailing one is the only one, we might mistakenly change a tuple
-        # into a different type by removing the comma.
+        # For parens let's check if it's safe to remove the comma.
+        # Imports are always safe.
+        if self.is_import:
+            self.remove_trailing_comma()
+            return True
+
+        # Otherwise, if the trailing one is the only one, we might mistakenly
+        # change a tuple into a different type by removing the comma.
         depth = closing.bracket_depth + 1
         commas = 0
         opening = closing.opening_bracket
@@ -869,7 +936,7 @@ class Line:
         else:
             return False
 
-        for leaf in self.leaves[_opening_index + 1:]:
+        for leaf in self.leaves[_opening_index + 1 :]:
             if leaf is closing:
                 break
 
@@ -930,6 +997,25 @@ class Line:
                 self.comments[i] = (comma_index - 1, comment)
         self.leaves.pop()
 
+    def is_complex_subscript(self, leaf: Leaf) -> bool:
+        """Return True iff `leaf` is part of a slice with non-trivial exprs."""
+        open_lsqb = (
+            leaf if leaf.type == token.LSQB else self.bracket_tracker.get_open_lsqb()
+        )
+        if open_lsqb is None:
+            return False
+
+        subscript_start = open_lsqb.next_sibling
+        if (
+            isinstance(subscript_start, Node)
+            and subscript_start.type == syms.subscriptlist
+        ):
+            subscript_start = child_towards(subscript_start, leaf)
+        return (
+            subscript_start is not None
+            and any(n.type in TEST_DESCENDANTS for n in subscript_start.pre_order())
+        )
+
     def __str__(self) -> str:
         """Render the line."""
         if not self:
@@ -1052,8 +1138,14 @@ class EmptyLineTracker:
                 # Don't insert empty lines before the first line in the file.
                 return 0, 0
 
-            if self.previous_line and self.previous_line.is_decorator:
-                # Don't insert empty lines between decorators.
+            if self.previous_line.is_decorator:
+                return 0, 0
+
+            if (
+                self.previous_line.is_comment
+                and self.previous_line.depth == current_line.depth
+                and before == 0
+            ):
                 return 0, 0
 
             newlines = 2
@@ -1061,9 +1153,6 @@ class EmptyLineTracker:
                 newlines -= 1
             return newlines, 0
 
-        if current_line.is_flow_control:
-            return before, 1
-
         if (
             self.previous_line
             and self.previous_line.is_import
@@ -1072,13 +1161,6 @@ class EmptyLineTracker:
         ):
             return (before or 1), 0
 
-        if (
-            self.previous_line
-            and self.previous_line.is_yield
-            and (not current_line.is_yield or depth != self.previous_line.depth)
-        ):
-            return (before or 1), 0
-
         return before, 0
 
 
@@ -1090,6 +1172,7 @@ class LineGenerator(Visitor[Line]):
     in ways that will no longer stringify to valid Python code on the tree.
     """
     current_line: Line = Factory(Line)
+    remove_u_prefix: bool = False
 
     def line(self, indent: int = 0, type: Type[Line] = Line) -> Iterator[Line]:
         """Generate a line.
@@ -1157,6 +1240,7 @@ class LineGenerator(Visitor[Line]):
             else:
                 normalize_prefix(node, inside_brackets=any_open_brackets)
                 if node.type == token.STRING:
+                    normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix)
                     normalize_string_quotes(node)
                 if node.type not in WHITESPACE:
                     self.current_line.append(node)
@@ -1170,7 +1254,16 @@ class LineGenerator(Visitor[Line]):
 
     def visit_DEDENT(self, node: Node) -> Iterator[Line]:
         """Decrease indentation level, maybe yield a line."""
-        # DEDENT has no value. Additionally, in blib2to3 it never holds comments.
+        # The current line might still wait for trailing comments.  At DEDENT time
+        # there won't be any (they would be prefixes on the preceding NEWLINE).
+        # Emit the line then.
+        yield from self.line()
+
+        # While DEDENT has no value, its prefix may contain standalone comments
+        # that belong to the current indentation level.  Get 'em.
+        yield from self.visit_default(node)
+
+        # Finally, emit the dedent.
         yield from self.line(-1)
 
     def visit_stmt(
@@ -1179,14 +1272,13 @@ class LineGenerator(Visitor[Line]):
         """Visit a statement.
 
         This implementation is shared for `if`, `while`, `for`, `try`, `except`,
-        `def`, `with`, `class`, and `assert`.
+        `def`, `with`, `class`, `assert` and assignments.
 
         The relevant Python language `keywords` for a given statement will be
         NAME leaves within it. This method puts those on a separate line.
 
-        `parens` holds pairs of nodes where invisible parentheses should be put.
-        Keys hold nodes after which opening parentheses should be put, values
-        hold nodes before which closing parentheses should be put.
+        `parens` holds a set of string leaf values immediately after which
+        invisible parens should be put.
         """
         normalize_invisible_parens(node, parens_after=parens)
         for child in node.children:
@@ -1286,7 +1378,9 @@ class LineGenerator(Visitor[Line]):
         v = self.visit_stmt
         Ø: Set[str] = set()
         self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
-        self.visit_if_stmt = partial(v, keywords={"if", "else", "elif"}, parens={"if"})
+        self.visit_if_stmt = partial(
+            v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
+        )
         self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
         self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
         self.visit_try_stmt = partial(
@@ -1296,6 +1390,8 @@ class LineGenerator(Visitor[Line]):
         self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
         self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
         self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
+        self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
+        self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
         self.visit_async_funcdef = self.visit_async_stmt
         self.visit_decorated = self.visit_decorators
 
@@ -1308,8 +1404,12 @@ BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
 ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT}
 
 
-def whitespace(leaf: Leaf) -> str:  # noqa C901
-    """Return whitespace prefix if needed for the given `leaf`."""
+def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str:  # noqa C901
+    """Return whitespace prefix if needed for the given `leaf`.
+
+    `complex_subscript` signals whether the given leaf is part of a subscription
+    which has non-trivial arguments, like arithmetic expressions or function calls.
+    """
     NO = ""
     SPACE = " "
     DOUBLESPACE = "  "
@@ -1323,7 +1423,10 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
         return DOUBLESPACE
 
     assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
-    if t == token.COLON and p.type not in {syms.subscript, syms.subscriptlist}:
+    if (
+        t == token.COLON
+        and p.type not in {syms.subscript, syms.subscriptlist, syms.sliceop}
+    ):
         return NO
 
     prev = leaf.prev_sibling
@@ -1333,7 +1436,13 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
             return NO
 
         if t == token.COLON:
-            return SPACE if prevp.type == token.COMMA else NO
+            if prevp.type == token.COLON:
+                return NO
+
+            elif prevp.type != token.COMMA and not complex_subscript:
+                return NO
+
+            return SPACE
 
         if prevp.type == token.EQUAL:
             if prevp.parent:
@@ -1354,7 +1463,7 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
 
         elif prevp.type == token.COLON:
             if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
-                return NO
+                return SPACE if complex_subscript else NO
 
         elif (
             prevp.parent
 
         elif (
             prevp.parent
@@ -1460,7 +1569,7 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
         if prev and prev.type == token.LPAR:
             return NO
 
-    elif p.type == syms.subscript:
+    elif p.type in {syms.subscript, syms.sliceop}:
         # indexing
         if not prev:
             assert p.parent is not None, "subscripts are always parented"
@@ -1469,7 +1578,7 @@ def whitespace(leaf: Leaf) -> str:  # noqa C901
 
             return NO
 
-        else:
+        elif not complex_subscript:
             return NO
 
     elif p.type == syms.atom:
@@ -1539,6 +1648,14 @@ def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
     return None
 
 
+def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]:
+    """Return the child of `ancestor` that contains `descendant`."""
+    node: Optional[LN] = descendant
+    while node and node.parent != ancestor:
+        node = node.parent
+    return node
+
+
 def is_split_after_delimiter(leaf: Leaf, previous: Leaf = None) -> int:
     """Return the priority of the `leaf` delimiter, given a line break after it.
 
@@ -1571,7 +1688,7 @@ def is_split_before_delimiter(leaf: Leaf, previous: Leaf = None) -> int:
         and leaf.parent
         and leaf.parent.type not in {syms.factor, syms.star_expr}
     ):
-        return MATH_PRIORITY
+        return MATH_PRIORITIES[leaf.type]
 
     if leaf.type in COMPARATORS:
         return COMPARATOR_PRIORITY
@@ -1599,6 +1716,14 @@ def is_split_before_delimiter(leaf: Leaf, previous: Leaf = None) -> int:
     ):
         return COMPREHENSION_PRIORITY
 
+    if (
+        leaf.type == token.NAME
+        and leaf.value in {"if", "else"}
+        and leaf.parent
+        and leaf.parent.type == syms.test
+    ):
+        return TERNARY_PRIORITY
+
     if leaf.type == token.NAME and leaf.value in LOGIC_OPERATORS and leaf.parent:
         return LOGIC_PRIORITY
 
@@ -1710,6 +1835,8 @@ def split_line(
     split_funcs: List[SplitFunc]
     if line.is_def:
         split_funcs = [left_hand_split]
+    elif line.is_import:
+        split_funcs = [explode_split]
     elif line.inside_brackets:
         split_funcs = [delimiter_split, standalone_comment_split, right_hand_split]
     else:
@@ -1742,7 +1869,8 @@ def left_hand_split(line: Line, py36: bool = False) -> Iterator[Line]:
     """Split line into many lines, starting with the first matching bracket pair.
 
     Note: this usually looks weird, only use this for function definitions.
-    Prefer RHS otherwise.
+    Prefer RHS otherwise.  This is why this function is not symmetrical with
+    :func:`right_hand_split` which also handles optional parentheses.
     """
     head = Line(depth=line.depth)
     body = Line(depth=line.depth + 1, inside_brackets=True)
@@ -1782,7 +1910,10 @@ def left_hand_split(line: Line, py36: bool = False) -> Iterator[Line]:
 def right_hand_split(
     line: Line, py36: bool = False, omit: Collection[LeafID] = ()
 ) -> Iterator[Line]:
-    """Split line into many lines, starting with the last matching bracket pair."""
+    """Split line into many lines, starting with the last matching bracket pair.
+
+    If the split was by optional parentheses, attempt splitting without them, too.
+    """
     head = Line(depth=line.depth)
     body = Line(depth=line.depth + 1, inside_brackets=True)
     tail = Line(depth=line.depth)
@@ -1821,20 +1952,25 @@ def right_hand_split(
     bracket_split_succeeded_or_raise(head, body, tail)
     assert opening_bracket and closing_bracket
     if (
+        # the opening bracket is an optional paren
         opening_bracket.type == token.LPAR
         and not opening_bracket.value
+        # the closing bracket is an optional paren
         and closing_bracket.type == token.RPAR
         and not closing_bracket.value
+        # there are no delimiters or standalone comments in the body
+        and not body.bracket_tracker.delimiters
+        and not line.contains_standalone_comments(0)
+        # and it's not an import (optional parens are the only thing we can split
+        # on in this case; attempting a split without them is a waste of time)
+        and not line.is_import
     ):
-        # These parens were optional. If there aren't any delimiters or standalone
-        # comments in the body, they were unnecessary and another split without
-        # them should be attempted.
-        if not (
-            body.bracket_tracker.delimiters or line.contains_standalone_comments(0)
-        ):
-            omit = {id(closing_bracket), *omit}
+        omit = {id(closing_bracket), *omit}
+        try:
             yield from right_hand_split(line, py36=py36, omit=omit)
             return
+        except CannotSplit:
+            pass
 
     ensure_visible(opening_bracket)
     ensure_visible(closing_bracket)
@@ -1976,6 +2112,26 @@ def standalone_comment_split(line: Line, py36: bool = False) -> Iterator[Line]:
         yield current_line
 
 
+def explode_split(
+    line: Line, py36: bool = False, omit: Collection[LeafID] = ()
+) -> Iterator[Line]:
+    """Split by rightmost bracket and immediately split contents by a delimiter."""
+    new_lines = list(right_hand_split(line, py36, omit))
+    if len(new_lines) != 3:
+        yield from new_lines
+        return
+
+    yield new_lines[0]
+
+    try:
+        yield from delimiter_split(new_lines[1], py36)
+
+    except CannotSplit:
+        yield new_lines[1]
+
+    yield new_lines[2]
+
+
 def is_import(leaf: Leaf) -> bool:
     """Return True if the given leaf starts an import statement."""
     p = leaf.parent
@@ -2008,6 +2164,22 @@ def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
     leaf.prefix = ""
 
 
+def normalize_string_prefix(leaf: Leaf, remove_u_prefix: bool = False) -> None:
+    """Make all string prefixes lowercase.
+
+    If remove_u_prefix is given, also removes any u prefix from the string.
+
+    Note: Mutates its argument.
+    """
+    match = re.match(r"^([furbFURB]*)(.*)$", leaf.value, re.DOTALL)
+    assert match is not None, f"failed to match string {leaf.value!r}"
+    orig_prefix = match.group(1)
+    new_prefix = orig_prefix.lower()
+    if remove_u_prefix:
+        new_prefix = new_prefix.replace("u", "")
+    leaf.value = f"{new_prefix}{match.group(2)}"
+
+
 def normalize_string_quotes(leaf: Leaf) -> None:
     """Prefer double quotes but only if it doesn't cause more escaping.
 
@@ -2037,7 +2209,7 @@ def normalize_string_quotes(leaf: Leaf) -> None:
     unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
     escaped_new_quote = re.compile(rf"([^\\]|^)\\(\\\\)*{new_quote}")
     escaped_orig_quote = re.compile(rf"([^\\]|^)\\(\\\\)*{orig_quote}")
-    body = leaf.value[first_quote_pos + len(orig_quote):-len(orig_quote)]
+    body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)]
     if "r" in prefix.casefold():
         if unescaped_new_quote.search(body):
             # There's at least one unescaped new_quote in this raw string
@@ -2072,6 +2244,9 @@ def normalize_string_quotes(leaf: Leaf) -> None:
 def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
     """Make existing optional parentheses invisible or create new ones.
 
+    `parens_after` is a set of string leaf values immediately after which parens
+    should be put.
+
     Standardizes on visible parentheses for single-element tuples, and keeps
     existing visible parentheses for other tuples and generator expressions.
     """
@@ -2079,17 +2254,7 @@ def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
     for child in list(node.children):
         if check_lpar:
             if child.type == syms.atom:
-                if not (
-                    is_empty_tuple(child)
-                    or is_one_tuple(child)
-                    or max_delimiter_priority_in_atom(child) >= COMMA_PRIORITY
-                ):
-                    first = child.children[0]
-                    last = child.children[-1]
-                    if first.type == token.LPAR and last.type == token.RPAR:
-                        # make parentheses invisible
-                        first.value = ""  # type: ignore
-                        last.value = ""  # type: ignore
+                maybe_make_parens_invisible_in_atom(child)
             elif is_one_tuple(child):
                 # wrap child in visible parentheses
                 lpar = Leaf(token.LPAR, "(")
@@ -2106,6 +2271,30 @@ def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None:
         check_lpar = isinstance(child, Leaf) and child.value in parens_after
 
 
+def maybe_make_parens_invisible_in_atom(node: LN) -> bool:
+    """If it's safe, make the parens in the atom `node` invisible, recursively."""
+    if (
+        node.type != syms.atom
+        or is_empty_tuple(node)
+        or is_one_tuple(node)
+        or is_yield(node)
+        or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
+    ):
+        return False
+
+    first = node.children[0]
+    last = node.children[-1]
+    if first.type == token.LPAR and last.type == token.RPAR:
+        # make parentheses invisible
+        first.value = ""  # type: ignore
+        last.value = ""  # type: ignore
+        if len(node.children) > 1:
+            maybe_make_parens_invisible_in_atom(node.children[1])
+        return True
+
+    return False
+
+
 def is_empty_tuple(node: LN) -> bool:
     """Return True if `node` holds an empty tuple."""
     return (
@@ -2139,12 +2328,33 @@ def is_one_tuple(node: LN) -> bool:
     )
 
 
+def is_yield(node: LN) -> bool:
+    """Return True if `node` holds a `yield` or `yield from` expression."""
+    if node.type == syms.yield_expr:
+        return True
+
+    if node.type == token.NAME and node.value == "yield":  # type: ignore
+        return True
+
+    if node.type != syms.atom:
+        return False
+
+    if len(node.children) != 3:
+        return False
+
+    lpar, expr, rpar = node.children
+    if lpar.type == token.LPAR and rpar.type == token.RPAR:
+        return is_yield(expr)
+
+    return False
+
+
 def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool:
     """Return True if `leaf` is a star or double star in a vararg or kwarg.
 
     If `within` includes VARARGS_PARENTS, this applies to function signatures.
-    If `within` includes COLLECTION_LIBERALS_PARENTS, it applies to right
-    hand-side extended iterable unpacking (PEP 3132) and additional unpacking
+    If `within` includes UNPACKING_PARENTS, it applies to right hand-side
+    extended iterable unpacking (PEP 3132) and additional unpacking
     generalizations (PEP 448).
     """
     if leaf.type not in STARS or not leaf.parent:
@@ -2207,7 +2417,7 @@ def is_python36(node: Node) -> bool:
 
     Currently looking for:
     - f-strings; and
-    - trailing commas after * or ** in function signatures.
+    - trailing commas after * or ** in function signatures and calls.
     """
     for n in node.pre_order():
         if n.type == token.STRING:
@@ -2216,7 +2426,7 @@ def is_python36(node: Node) -> bool:
                 return True
 
         elif (
-            n.type == syms.typedargslist
+            n.type in {syms.typedargslist, syms.arglist}
             and n.children
             and n.children[-1].type == token.COMMA
         ):
@@ -2224,9 +2434,49 @@ def is_python36(node: Node) -> bool:
                 if ch.type in STARS:
                     return True
 
+                if ch.type == syms.argument:
+                    for argch in ch.children:
+                        if argch.type in STARS:
+                            return True
+
     return False
 
 
+def get_future_imports(node: Node) -> Set[str]:
+    """Return a set of __future__ imports in the file."""
+    imports = set()
+    for child in node.children:
+        if child.type != syms.simple_stmt:
+            break
+        first_child = child.children[0]
+        if isinstance(first_child, Leaf):
+            # Continue looking if we see a docstring; otherwise stop.
+            if (
+                len(child.children) == 2
+                and first_child.type == token.STRING
+                and child.children[1].type == token.NEWLINE
+            ):
+                continue
+            else:
+                break
+        elif first_child.type == syms.import_from:
+            module_name = first_child.children[1]
+            if not isinstance(module_name, Leaf) or module_name.value != "__future__":
+                break
+            for import_from_child in first_child.children[3:]:
+                if isinstance(import_from_child, Leaf):
+                    if import_from_child.type == token.NAME:
+                        imports.add(import_from_child.value)
+                else:
+                    assert import_from_child.type == syms.import_as_names
+                    for leaf in import_from_child.children:
+                        if isinstance(leaf, Leaf) and leaf.type == token.NAME:
+                            imports.add(leaf.value)
+        else:
+            break
+    return imports
+
+
 PYTHON_EXTENSIONS = {".py"}
 BLACKLISTED_DIRECTORIES = {
     "build", "buck-out", "dist", "_build", ".git", ".hg", ".mypy_cache", ".tox", ".venv"
@@ -2470,18 +2720,22 @@ def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
 
 
 CACHE_DIR = Path(user_cache_dir("black", version=__version__))
-CACHE_FILE = CACHE_DIR / "cache.pickle"
 
 
-def read_cache() -> Cache:
+def get_cache_file(line_length: int) -> Path:
+    return CACHE_DIR / f"cache.{line_length}.pickle"
+
+
+def read_cache(line_length: int) -> Cache:
     """Read the cache if it exists and is well formed.
 
     If it is not well formed, the call to write_cache later should resolve the issue.
     """
-    if not CACHE_FILE.exists():
+    cache_file = get_cache_file(line_length)
+    if not cache_file.exists():
         return {}
 
-    with CACHE_FILE.open("rb") as fobj:
+    with cache_file.open("rb") as fobj:
         try:
             cache: Cache = pickle.load(fobj)
         except pickle.UnpicklingError:
@@ -2514,13 +2768,14 @@ def filter_cached(
     return todo, done
 
 
-def write_cache(cache: Cache, sources: List[Path]) -> None:
+def write_cache(cache: Cache, sources: List[Path], line_length: int) -> None:
     """Update the cache file."""
+    cache_file = get_cache_file(line_length)
     try:
         if not CACHE_DIR.exists():
             CACHE_DIR.mkdir(parents=True)
         new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}}
-        with CACHE_FILE.open("wb") as fobj:
+        with cache_file.open("wb") as fobj:
             pickle.dump(new_cache, fobj, protocol=pickle.HIGHEST_PROTOCOL)
     except OSError:
         pass