+def cancel(tasks: Iterable[asyncio.Task]) -> None:
+ """asyncio signal handler that cancels all `tasks` and reports to stderr."""
+ err("Aborted!")
+ for task in tasks:
+ task.cancel()
+
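+# A minimal sketch of how `cancel` might be registered (illustrative only;
+# assumes `signal` is imported and `loop` / `tasks` exist in the caller;
+# `add_signal_handler` is a Unix-only API):
+#
+#     loop.add_signal_handler(signal.SIGINT, cancel, tasks)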
+
+def shutdown(loop: BaseEventLoop) -> None:
+ """Cancel all pending tasks on `loop`, wait for them, and close the loop."""
+ try:
+ # This part is borrowed from asyncio/runners.py in Python 3.7b2.
+ to_cancel = [task for task in asyncio.Task.all_tasks(loop) if not task.done()]
+ if not to_cancel:
+ return
+
+ for task in to_cancel:
+ task.cancel()
+ loop.run_until_complete(
+ asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)
+ )
+ finally:
+        # `concurrent.futures.Future` objects cannot be cancelled once they
+        # are already running. There might be some still running when
+        # `shutdown()` is called. Silence their logger's spew about the
+        # event loop being closed.
+ cf_logger = logging.getLogger("concurrent.futures")
+ cf_logger.setLevel(logging.CRITICAL)
+ loop.close()
+
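+# A sketch of pairing `shutdown` with `run_until_complete` (illustrative;
+# `main()` is a hypothetical coroutine):
+#
+#     loop = asyncio.get_event_loop()
+#     try:
+#         loop.run_until_complete(main())
+#     finally:
+#         shutdown(loop)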
+
+def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
+ """Replace `regex` with `replacement` twice on `original`.
+
+ This is used by string normalization to perform replaces on
+ overlapping matches.
+ """
+ return regex.sub(replacement, regex.sub(replacement, original))
+
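+# Why two passes: `re.sub` replaces only non-overlapping matches. Illustrative
+# values: with `regex = re.compile("xx")` and replacement `"x"`, a single pass
+# turns "xxx" into "xx" (the trailing "x" overlapped the first match), while
+# `sub_twice` reduces it all the way to "x".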
+
+def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]:
+ """Like `reversed(enumerate(sequence))` if that were possible."""
+ index = len(sequence) - 1
+ for element in reversed(sequence):
+ yield (index, element)
+ index -= 1
+
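+# Example (illustrative): `list(enumerate_reversed("abc"))` yields
+# [(2, 'c'), (1, 'b'), (0, 'a')]; the indices stay aligned with the original
+# positions, unlike `enumerate(reversed("abc"))`, which would number the
+# reversed items from 0.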
+
+def enumerate_with_length(
+ line: Line, reversed: bool = False
+) -> Iterator[Tuple[Index, Leaf, int]]:
+ """Return an enumeration of leaves with their length.
+
+ Stops prematurely on multiline strings and standalone comments.
+ """
+ op = cast(
+ Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]],
+ enumerate_reversed if reversed else enumerate,
+ )
+ for index, leaf in op(line.leaves):
+ length = len(leaf.prefix) + len(leaf.value)
+ if "\n" in leaf.value:
+ return # Multiline strings, we can't continue.
+
+ comment: Optional[Leaf]
+ for comment in line.comments_after(leaf, index):
+ length += len(comment.value)
+
+ yield index, leaf, length
+
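+# Note: the length reported for each leaf includes its whitespace prefix, its
+# value, and any comments that follow it, so summing the reported lengths
+# approximates the rendered line length without building the full string.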
+
+def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool:
+ """Return True if `line` is no longer than `line_length`.
+
+ Uses the provided `line_str` rendering, if any, otherwise computes a new one.
+ """
+ if not line_str:
+ line_str = str(line).strip("\n")
+ return (
+ len(line_str) <= line_length
+ and "\n" not in line_str # multiline strings
+ and not line.contains_standalone_comments()
+ )
+
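+# Usage sketch (illustrative; 88 is assumed here as the length limit): callers
+# that already rendered the line can pass it along to avoid a second `str()`:
+#
+#     line_str = str(line).strip("\n")
+#     if is_line_short_enough(line, line_length=88, line_str=line_str):
+#         ...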
+
+def can_be_split(line: Line) -> bool:
+ """Return False if the line cannot be split *for sure*.
+
+ This is not an exhaustive search but a cheap heuristic that we can use to
+ avoid some unfortunate formattings (mostly around wrapping unsplittable code
+ in unnecessary parentheses).
+ """
+ leaves = line.leaves
+ if len(leaves) < 2:
+ return False
+
+ if leaves[0].type == token.STRING and leaves[1].type == token.DOT:
+ call_count = 0
+ dot_count = 0
+ next = leaves[-1]
+ for leaf in leaves[-2::-1]:
+ if leaf.type in OPENING_BRACKETS:
+ if next.type not in CLOSING_BRACKETS:
+ return False
+
+ call_count += 1
+ elif leaf.type == token.DOT:
+ dot_count += 1
+ elif leaf.type == token.NAME:
+ if not (next.type == token.DOT or next.type in OPENING_BRACKETS):
+ return False
+
+ elif leaf.type not in CLOSING_BRACKETS:
+ return False
+
+            if dot_count > 1 and call_count > 1:
+                return False
+
+            # Advance the walk: `next` is the leaf to the right of `leaf`.
+            next = leaf
+
+ return True
+
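+# Informal reading of the heuristic above: for a line that starts with a
+# string attribute access, the reverse walk counts dots and calls; once there
+# is more than one of each, the line is declared unsplittable and wrapping it
+# in optional parentheses is avoided. A chain shaped like
+# "...".format(a).encode(), for instance, ends up returning False.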
+
+def can_omit_invisible_parens(line: Line, line_length: int) -> bool:
+ """Does `line` have a shape safe to reformat without optional parens around it?
+
+    Returns True only for a subset of potentially nice-looking formattings,
+    but the point is to avoid false positives that end up producing lines
+    that are too long.
+ """
+ bt = line.bracket_tracker
+ if not bt.delimiters:
+ # Without delimiters the optional parentheses are useless.
+ return True
+
+ max_priority = bt.max_delimiter_priority()
+ if bt.delimiter_count_with_priority(max_priority) > 1:
+ # With more than one delimiter of a kind the optional parentheses read better.
+ return False
+
+ if max_priority == DOT_PRIORITY:
+ # A single stranded method call doesn't require optional parentheses.
+ return True
+
+ assert len(line.leaves) >= 2, "Stranded delimiter"
+
+ first = line.leaves[0]
+ second = line.leaves[1]
+ penultimate = line.leaves[-2]
+ last = line.leaves[-1]
+
+ # With a single delimiter, omit if the expression starts or ends with
+ # a bracket.
+ if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:
+ remainder = False
+ length = 4 * line.depth
+ for _index, leaf, leaf_length in enumerate_with_length(line):
+ if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:
+ remainder = True
+ if remainder:
+ length += leaf_length
+ if length > line_length:
+ break
+
+ if leaf.type in OPENING_BRACKETS:
+ # There are brackets we can further split on.
+ remainder = False
+
+ else:
+            # We reached the end of the line without exceeding the length limit.
+ if len(line.leaves) == _index + 1:
+ return True
+
+ # Note: we are not returning False here because a line might have *both*
+ # a leading opening bracket and a trailing closing bracket. If the
+ # opening bracket doesn't match our rule, maybe the closing will.
+
+ if (
+ last.type == token.RPAR
+ or last.type == token.RBRACE
+ or (
+ # don't use indexing for omitting optional parentheses;
+ # it looks weird
+ last.type == token.RSQB
+ and last.parent
+ and last.parent.type != syms.trailer
+ )
+ ):
+ if penultimate.type in OPENING_BRACKETS:
+ # Empty brackets don't help.
+ return False
+
+ if is_multiline_string(first):
+ # Additional wrapping of a multiline string in this situation is
+ # unnecessary.
+ return True
+
+ length = 4 * line.depth
+ seen_other_brackets = False
+ for _index, leaf, leaf_length in enumerate_with_length(line):
+ length += leaf_length
+ if leaf is last.opening_bracket:
+ if seen_other_brackets or length <= line_length:
+ return True
+
+ elif leaf.type in OPENING_BRACKETS:
+ # There are brackets we can further split on.
+ seen_other_brackets = True
+
+ return False
+
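+# Illustrative shapes for the rules above: with a single delimiter, the
+# invisible parens may be omitted when the line starts with a bracket, e.g.
+#
+#     [1, 2, 3] + other_things
+#
+# or ends with one, e.g.
+#
+#     result = some_call(arg1, arg2)
+#
+# provided the bracketed part fits within the line length budget.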
+
+def get_cache_file(line_length: int, mode: FileMode) -> Path:
+    """Return the path of the cache file for the given `line_length` and `mode`."""
+    pyi = bool(mode & FileMode.PYI)
+ py36 = bool(mode & FileMode.PYTHON36)
+ return (
+ CACHE_DIR
+ / f"cache.{line_length}{'.pyi' if pyi else ''}{'.py36' if py36 else ''}.pickle"
+ )
+
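+# Example, derived from the f-string above: `line_length=88` with both the
+# PYI and PYTHON36 flags set resolves to CACHE_DIR / "cache.88.pyi.py36.pickle".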
+
+def read_cache(line_length: int, mode: FileMode) -> Cache:
+ """Read the cache if it exists and is well formed.
+
+ If it is not well formed, the call to write_cache later should resolve the issue.
+ """
+ cache_file = get_cache_file(line_length, mode)
+ if not cache_file.exists():
+ return {}
+
+ with cache_file.open("rb") as fobj:
+ try:
+ cache: Cache = pickle.load(fobj)
+ except pickle.UnpicklingError:
+ return {}
+
+ return cache
+
+
+def get_cache_info(path: Path) -> CacheInfo:
+ """Return the information used to check if a file is already formatted or not."""
+ stat = path.stat()
+ return stat.st_mtime, stat.st_size
+
+
+def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
+ """Split an iterable of paths in `sources` into two sets.
+
+ The first contains paths of files that modified on disk or are not in the
+ cache. The other contains paths to non-modified files.
+ """
+ todo, done = set(), set()
+ for src in sources:
+ src = src.resolve()
+ if cache.get(src) != get_cache_info(src):
+ todo.add(src)
+ else:
+ done.add(src)
+ return todo, done
+
+
+def write_cache(
+ cache: Cache, sources: Iterable[Path], line_length: int, mode: FileMode
+) -> None:
+ """Update the cache file."""
+ cache_file = get_cache_file(line_length, mode)
+ try:
+ if not CACHE_DIR.exists():
+ CACHE_DIR.mkdir(parents=True)
+ new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}}
+ with cache_file.open("wb") as fobj:
+ pickle.dump(new_cache, fobj, protocol=pickle.HIGHEST_PROTOCOL)
+ except OSError:
+ pass
+
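+# A sketch of the full cache round trip (illustrative; `sources` stands in for
+# an iterable of Paths and `mode` for a FileMode):
+#
+#     cache = read_cache(88, mode)
+#     todo, done = filter_cached(cache, sources)
+#     ...  # reformat the files in `todo`
+#     write_cache(cache, todo, 88, mode)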
+
+if __name__ == "__main__":