src/black/linegen.py (blob at commit 2f50257a9305b0203ee662e4d5d3187bbe20741d)
1 """
2 Generating lines of code.
3 """
4 import sys
5 from dataclasses import dataclass
6 from enum import Enum, auto
7 from functools import partial, wraps
8 from typing import Collection, Iterator, List, Optional, Set, Union, cast
9
10 from black.brackets import (
11     COMMA_PRIORITY,
12     DOT_PRIORITY,
13     get_leaves_inside_matching_brackets,
14     max_delimiter_priority_in_atom,
15 )
16 from black.comments import FMT_OFF, generate_comments, list_comments
17 from black.lines import (
18     Line,
19     append_leaves,
20     can_be_split,
21     can_omit_invisible_parens,
22     is_line_short_enough,
23     line_to_string,
24 )
25 from black.mode import Feature, Mode, Preview
26 from black.nodes import (
27     ASSIGNMENTS,
28     BRACKETS,
29     CLOSING_BRACKETS,
30     OPENING_BRACKETS,
31     RARROW,
32     STANDALONE_COMMENT,
33     STATEMENT,
34     WHITESPACE,
35     Visitor,
36     ensure_visible,
37     is_arith_like,
38     is_atom_with_invisible_parens,
39     is_docstring,
40     is_empty_tuple,
41     is_lpar_token,
42     is_multiline_string,
43     is_name_token,
44     is_one_sequence_between,
45     is_one_tuple,
46     is_rpar_token,
47     is_stub_body,
48     is_stub_suite,
49     is_tuple_containing_walrus,
50     is_vararg,
51     is_walrus_assignment,
52     is_yield,
53     syms,
54     wrap_in_parentheses,
55 )
56 from black.numerics import normalize_numeric_literal
57 from black.strings import (
58     fix_docstring,
59     get_string_prefix,
60     normalize_string_prefix,
61     normalize_string_quotes,
62 )
63 from black.trans import (
64     CannotTransform,
65     StringMerger,
66     StringParenStripper,
67     StringParenWrapper,
68     StringSplitter,
69     Transformer,
70     hug_power_op,
71 )
72 from blib2to3.pgen2 import token
73 from blib2to3.pytree import Leaf, Node
74
75 # types
76 LeafID = int
77 LN = Union[Leaf, Node]
78
79
80 class CannotSplit(CannotTransform):
81     """A readable split that fits the allotted line length is impossible."""
82
83
84 # This isn't a dataclass because @dataclass + Generic breaks mypyc.
85 # See also https://github.com/mypyc/mypyc/issues/827.
86 class LineGenerator(Visitor[Line]):
87     """Generates reformatted Line objects.  Empty lines are not emitted.
88
89     Note: destroys the tree it's visiting by mutating prefixes of its leaves
90     such that the tree will no longer stringify to valid Python code.
91     """
92
93     def __init__(self, mode: Mode, features: Collection[Feature]) -> None:
94         self.mode = mode
95         self.features = features
96         self.current_line: Line
97         self.__post_init__()
98
99     def line(self, indent: int = 0) -> Iterator[Line]:
100         """Generate a line.
101
102         If the line is empty, only emit if it makes sense.
103         If the line is too long, split it first and then generate.
104
105         If any lines were generated, set up a new current_line.
106         """
107         if not self.current_line:
108             self.current_line.depth += indent
109             return  # Line is empty, don't emit. Creating a new one is unnecessary.
110
111         complete_line = self.current_line
112         self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent)
113         yield complete_line
114
115     def visit_default(self, node: LN) -> Iterator[Line]:
116         """Default `visit_*()` implementation. Recurses to children of `node`."""
117         if isinstance(node, Leaf):
118             any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
119             for comment in generate_comments(node, preview=self.mode.preview):
120                 if any_open_brackets:
121                     # any comment within brackets is subject to splitting
122                     self.current_line.append(comment)
123                 elif comment.type == token.COMMENT:
124                     # regular trailing comment
125                     self.current_line.append(comment)
126                     yield from self.line()
127
128                 else:
129                     # regular standalone comment
130                     yield from self.line()
131
132                     self.current_line.append(comment)
133                     yield from self.line()
134
135             normalize_prefix(node, inside_brackets=any_open_brackets)
136             if self.mode.string_normalization and node.type == token.STRING:
137                 node.value = normalize_string_prefix(node.value)
138                 node.value = normalize_string_quotes(node.value)
139             if node.type == token.NUMBER:
140                 normalize_numeric_literal(node)
141             if node.type not in WHITESPACE:
142                 self.current_line.append(node)
143         yield from super().visit_default(node)
144
145     def visit_test(self, node: Node) -> Iterator[Line]:
146         """Visit an `x if y else z` test"""
147
148         if Preview.parenthesize_conditional_expressions in self.mode:
149             already_parenthesized = (
150                 node.prev_sibling and node.prev_sibling.type == token.LPAR
151             )
152
153             if not already_parenthesized:
154                 lpar = Leaf(token.LPAR, "")
155                 rpar = Leaf(token.RPAR, "")
156                 node.insert_child(0, lpar)
157                 node.append_child(rpar)
158
159         yield from self.visit_default(node)
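    # Illustrative effect (hypothetical names): with the preview flag checked
    # above enabled, a conditional expression that has to be split can break
    # inside the parentheses added here, e.g.
    #
    #     value = (
    #         computed_result if some_condition else fallback_result
    #     )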
160
161     def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
162         """Increase indentation level, maybe yield a line."""
163         # In blib2to3 INDENT never holds comments.
164         yield from self.line(+1)
165         yield from self.visit_default(node)
166
167     def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
168         """Decrease indentation level, maybe yield a line."""
169         # The current line might still wait for trailing comments.  At DEDENT time
170         # there won't be any (they would be prefixes on the preceding NEWLINE).
171         # Emit the line then.
172         yield from self.line()
173
174         # While DEDENT has no value, its prefix may contain standalone comments
175         # that belong to the current indentation level.  Get 'em.
176         yield from self.visit_default(node)
177
178         # Finally, emit the dedent.
179         yield from self.line(-1)
180
181     def visit_stmt(
182         self, node: Node, keywords: Set[str], parens: Set[str]
183     ) -> Iterator[Line]:
184         """Visit a statement.
185
186         This implementation is shared for `if`, `while`, `for`, `try`, `except`,
187         `def`, `with`, `class`, `assert`, and assignments.
188
189         The relevant Python language `keywords` for a given statement will be
190         NAME leaves within it. This method puts those on a separate line.
191
192         `parens` holds a set of string leaf values immediately after which
193         invisible parens should be put.
194         """
195         normalize_invisible_parens(
196             node, parens_after=parens, mode=self.mode, features=self.features
197         )
198         for child in node.children:
199             if is_name_token(child) and child.value in keywords:
200                 yield from self.line()
201
202             yield from self.visit(child)
203
204     def visit_dictsetmaker(self, node: Node) -> Iterator[Line]:
205         if Preview.wrap_long_dict_values_in_parens in self.mode:
206             for i, child in enumerate(node.children):
207                 if i == 0:
208                     continue
209                 if node.children[i - 1].type == token.COLON:
210                     if child.type == syms.atom and child.children[0].type == token.LPAR:
211                         if maybe_make_parens_invisible_in_atom(
212                             child,
213                             parent=node,
214                             remove_brackets_around_comma=False,
215                         ):
216                             wrap_in_parentheses(node, child, visible=False)
217                     else:
218                         wrap_in_parentheses(node, child, visible=False)
219         yield from self.visit_default(node)
220
221     def visit_funcdef(self, node: Node) -> Iterator[Line]:
222         """Visit function definition."""
223         if Preview.annotation_parens not in self.mode:
224             yield from self.visit_stmt(node, keywords={"def"}, parens=set())
225         else:
226             yield from self.line()
227
228             # Remove redundant brackets around return type annotation.
229             is_return_annotation = False
230             for child in node.children:
231                 if child.type == token.RARROW:
232                     is_return_annotation = True
233                 elif is_return_annotation:
234                     if child.type == syms.atom and child.children[0].type == token.LPAR:
235                         if maybe_make_parens_invisible_in_atom(
236                             child,
237                             parent=node,
238                             remove_brackets_around_comma=False,
239                         ):
240                             wrap_in_parentheses(node, child, visible=False)
241                     else:
242                         wrap_in_parentheses(node, child, visible=False)
243                     is_return_annotation = False
244
245             for child in node.children:
246                 yield from self.visit(child)
247
248     def visit_match_case(self, node: Node) -> Iterator[Line]:
249         """Visit either a match or case statement."""
250         normalize_invisible_parens(
251             node, parens_after=set(), mode=self.mode, features=self.features
252         )
253
254         yield from self.line()
255         for child in node.children:
256             yield from self.visit(child)
257
258     def visit_suite(self, node: Node) -> Iterator[Line]:
259         """Visit a suite."""
260         if self.mode.is_pyi and is_stub_suite(node):
261             yield from self.visit(node.children[2])
262         else:
263             yield from self.visit_default(node)
264
265     def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
266         """Visit a statement without nested statements."""
267         prev_type: Optional[int] = None
268         for child in node.children:
269             if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child):
270                 wrap_in_parentheses(node, child, visible=False)
271             prev_type = child.type
272
273         is_suite_like = node.parent and node.parent.type in STATEMENT
274         if is_suite_like:
275             if self.mode.is_pyi and is_stub_body(node):
276                 yield from self.visit_default(node)
277             else:
278                 yield from self.line(+1)
279                 yield from self.visit_default(node)
280                 yield from self.line(-1)
281
282         else:
283             if (
284                 not self.mode.is_pyi
285                 or not node.parent
286                 or not is_stub_suite(node.parent)
287             ):
288                 yield from self.line()
289             yield from self.visit_default(node)
290
291     def visit_async_stmt(self, node: Node) -> Iterator[Line]:
292         """Visit `async def`, `async for`, `async with`."""
293         yield from self.line()
294
295         children = iter(node.children)
296         for child in children:
297             yield from self.visit(child)
298
299             if child.type == token.ASYNC or child.type == STANDALONE_COMMENT:
300                 # STANDALONE_COMMENT happens when `# fmt: skip` is applied on the async
301                 # line.
302                 break
303
304         internal_stmt = next(children)
305         for child in internal_stmt.children:
306             yield from self.visit(child)
307
308     def visit_decorators(self, node: Node) -> Iterator[Line]:
309         """Visit decorators."""
310         for child in node.children:
311             yield from self.line()
312             yield from self.visit(child)
313
314     def visit_power(self, node: Node) -> Iterator[Line]:
315         for idx, leaf in enumerate(node.children[:-1]):
316             next_leaf = node.children[idx + 1]
317
318             if not isinstance(leaf, Leaf):
319                 continue
320
321             value = leaf.value.lower()
322             if (
323                 leaf.type == token.NUMBER
324                 and next_leaf.type == syms.trailer
325                 # Ensure that we are in an attribute trailer
326                 and next_leaf.children[0].type == token.DOT
327                 # It shouldn't wrap hexadecimal, binary and octal literals
328                 and not value.startswith(("0x", "0b", "0o"))
329                 # It shouldn't wrap complex literals
330                 and "j" not in value
331             ):
332                 wrap_in_parentheses(node, leaf)
333
334         if Preview.remove_redundant_parens in self.mode:
335             remove_await_parens(node)
336
337         yield from self.visit_default(node)
338
339     def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
340         """Remove a semicolon and put the other statement on a separate line."""
341         yield from self.line()
342
343     def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
344         """End of file. Process outstanding comments and end with a newline."""
345         yield from self.visit_default(leaf)
346         yield from self.line()
347
348     def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
349         if not self.current_line.bracket_tracker.any_open_brackets():
350             yield from self.line()
351         yield from self.visit_default(leaf)
352
353     def visit_factor(self, node: Node) -> Iterator[Line]:
354         """Force parentheses between a unary op and a binary power:
355
356         -2 ** 8 -> -(2 ** 8)
357         """
358         _operator, operand = node.children
359         if (
360             operand.type == syms.power
361             and len(operand.children) == 3
362             and operand.children[1].type == token.DOUBLESTAR
363         ):
364             lpar = Leaf(token.LPAR, "(")
365             rpar = Leaf(token.RPAR, ")")
366             index = operand.remove() or 0
367             node.insert_child(index, Node(syms.atom, [lpar, operand, rpar]))
368         yield from self.visit_default(node)
369
370     def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
371         if is_docstring(leaf) and "\\\n" not in leaf.value:
372             # We're ignoring docstrings with backslash newline escapes because changing
373             # indentation of those changes the AST representation of the code.
374             if Preview.normalize_docstring_quotes_and_prefixes_properly in self.mode:
375                 # There was a bug where --skip-string-normalization wouldn't stop us
376                 # from normalizing docstring prefixes. To maintain stability, we can
377                 # only address this buggy behaviour while the preview style is enabled.
378                 if self.mode.string_normalization:
379                     docstring = normalize_string_prefix(leaf.value)
380                     # visit_default() does handle string normalization for us, but
381                     # since this method acts differently depending on quote style (ex.
382                     # see padding logic below), there's a possibility for unstable
383                     # formatting as visit_default() is called *after*. To avoid a
384                     # situation where this function formats a docstring differently on
385                     # the second pass, normalize it early.
386                     docstring = normalize_string_quotes(docstring)
387                 else:
388                     docstring = leaf.value
389             else:
390                 # ... otherwise, we'll keep the buggy behaviour >.<
391                 docstring = normalize_string_prefix(leaf.value)
392             prefix = get_string_prefix(docstring)
393             docstring = docstring[len(prefix) :]  # Remove the prefix
394             quote_char = docstring[0]
395             # A natural way to remove the outer quotes is to do:
396             #   docstring = docstring.strip(quote_char)
397             # but that breaks on """""x""" (which is '""x').
398             # So we actually need to remove the first character and the next two
399             # characters but only if they are the same as the first.
400             quote_len = 1 if docstring[1] != quote_char else 3
401             docstring = docstring[quote_len:-quote_len]
402             docstring_started_empty = not docstring
403             indent = " " * 4 * self.current_line.depth
404
405             if is_multiline_string(leaf):
406                 docstring = fix_docstring(docstring, indent)
407             else:
408                 docstring = docstring.strip()
409
410             has_trailing_backslash = False
411             if docstring:
412                 # Add some padding if the docstring starts / ends with a quote mark.
413                 if docstring[0] == quote_char:
414                     docstring = " " + docstring
415                 if docstring[-1] == quote_char:
416                     docstring += " "
417                 if docstring[-1] == "\\":
418                     backslash_count = len(docstring) - len(docstring.rstrip("\\"))
419                     if backslash_count % 2:
420                         # Odd number of trailing backslashes, add some padding to
421                         # avoid escaping the closing string quote.
422                         docstring += " "
423                         has_trailing_backslash = True
424             elif not docstring_started_empty:
425                 docstring = " "
426
427             # We could enforce triple quotes at this point.
428             quote = quote_char * quote_len
429
430             # It's invalid to put closing single-character quotes on a new line.
431             if Preview.long_docstring_quotes_on_newline in self.mode and quote_len == 3:
432                 # We need to find the length of the last line of the docstring
433                 # to find if we can add the closing quotes to the line without
434                 # exceeding the maximum line length.
435                 # If docstring is one line, we don't put the closing quotes on a
436                 # separate line because it looks ugly (#3320).
437                 lines = docstring.splitlines()
438                 last_line_length = len(lines[-1]) if docstring else 0
439
440                 # If adding closing quotes would cause the last line to exceed
441                 # the maximum line length then put a line break before the
442                 # closing quotes
443                 if (
444                     len(lines) > 1
445                     and last_line_length + quote_len > self.mode.line_length
446                     and len(indent) + quote_len <= self.mode.line_length
447                     and not has_trailing_backslash
448                 ):
449                     leaf.value = prefix + quote + docstring + "\n" + indent + quote
450                 else:
451                     leaf.value = prefix + quote + docstring + quote
452             else:
453                 leaf.value = prefix + quote + docstring + quote
454
455         yield from self.visit_default(leaf)
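    # Roughly, the docstring handling above trims, re-indents, and (with string
    # normalization on) re-quotes docstrings. Illustrative input and output
    # (hypothetical function, default settings):
    #
    #     def f():
    #         '''   Compute a thing.
    #               Second line.
    #               '''
    #
    # becomes
    #
    #     def f():
    #         """Compute a thing.
    #         Second line.
    #         """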
456
457     def __post_init__(self) -> None:
458         """You are in a twisty little maze of passages."""
459         self.current_line = Line(mode=self.mode)
460
461         v = self.visit_stmt
462         Ø: Set[str] = set()
463         self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
464         self.visit_if_stmt = partial(
465             v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
466         )
467         self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
468         self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
469         self.visit_try_stmt = partial(
470             v, keywords={"try", "except", "else", "finally"}, parens=Ø
471         )
472         if self.mode.preview:
473             self.visit_except_clause = partial(
474                 v, keywords={"except"}, parens={"except"}
475             )
476             self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"})
477         else:
478             self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
479             self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
480         self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
481         self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
482         self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
483         self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
484         self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
485         self.visit_async_funcdef = self.visit_async_stmt
486         self.visit_decorated = self.visit_decorators
487
488         # PEP 634
489         self.visit_match_stmt = self.visit_match_case
490         self.visit_case_block = self.visit_match_case
491
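# Sketch of how this visitor is typically driven (simplified, with illustrative
# variable names; empty-line handling between emitted lines is omitted):
#
#     generator = LineGenerator(mode=mode, features=features)
#     for current_line in generator.visit(source_tree):
#         for formatted in transform_line(current_line, mode=mode, features=features):
#             output.append(str(formatted))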
492
493 def transform_line(
494     line: Line, mode: Mode, features: Collection[Feature] = ()
495 ) -> Iterator[Line]:
496     """Transform a `line`, potentially splitting it into many lines.
497
498     They should fit in the allotted `line_length` but might not be able to.
499
500     `features` are syntactical features that may be used in the output.
501     """
502     if line.is_comment:
503         yield line
504         return
505
506     line_str = line_to_string(line)
507
508     ll = mode.line_length
509     sn = mode.string_normalization
510     string_merge = StringMerger(ll, sn)
511     string_paren_strip = StringParenStripper(ll, sn)
512     string_split = StringSplitter(ll, sn)
513     string_paren_wrap = StringParenWrapper(ll, sn)
514
515     transformers: List[Transformer]
516     if (
517         not line.contains_uncollapsable_type_comments()
518         and not line.should_split_rhs
519         and not line.magic_trailing_comma
520         and (
521             is_line_short_enough(line, line_length=mode.line_length, line_str=line_str)
522             or line.contains_unsplittable_type_ignore()
523         )
524         and not (line.inside_brackets and line.contains_standalone_comments())
525     ):
526         # Only apply basic string preprocessing, since lines shouldn't be split here.
527         if Preview.string_processing in mode:
528             transformers = [string_merge, string_paren_strip]
529         else:
530             transformers = []
531     elif line.is_def:
532         transformers = [left_hand_split]
533     else:
534
535         def _rhs(
536             self: object, line: Line, features: Collection[Feature]
537         ) -> Iterator[Line]:
538             """Wraps calls to `right_hand_split`.
539
540             The calls increasingly `omit` right-hand trailers (bracket pairs with
541             content), meaning the trailers get glued together to split on another
542             bracket pair instead.
543             """
544             for omit in generate_trailers_to_omit(line, mode.line_length):
545                 lines = list(
546                     right_hand_split(line, mode.line_length, features, omit=omit)
547                 )
548                 # Note: this check is only able to figure out if the first line of the
549                 # *current* transformation fits in the line length.  This is true only
550                 # for simple cases.  All others require running more transforms via
551                 # `transform_line()`.  This check doesn't know if those would succeed.
552                 if is_line_short_enough(lines[0], line_length=mode.line_length):
553                     yield from lines
554                     return
555
556             # All splits failed, best effort split with no omits.
557             # This mostly happens to multiline strings that are by definition
558             # reported as not fitting a single line, as well as lines that contain
559             # trailing commas (those have to be exploded).
560             yield from right_hand_split(
561                 line, line_length=mode.line_length, features=features
562             )
563
564         # HACK: nested functions (like _rhs) compiled by mypyc don't retain their
565         # __name__ attribute which is needed in `run_transformer` further down.
566         # Unfortunately a nested class breaks mypyc too. So a class must be created
567         # via type ... https://github.com/mypyc/mypyc/issues/884
568         rhs = type("rhs", (), {"__call__": _rhs})()
569
570         if Preview.string_processing in mode:
571             if line.inside_brackets:
572                 transformers = [
573                     string_merge,
574                     string_paren_strip,
575                     string_split,
576                     delimiter_split,
577                     standalone_comment_split,
578                     string_paren_wrap,
579                     rhs,
580                 ]
581             else:
582                 transformers = [
583                     string_merge,
584                     string_paren_strip,
585                     string_split,
586                     string_paren_wrap,
587                     rhs,
588                 ]
589         else:
590             if line.inside_brackets:
591                 transformers = [delimiter_split, standalone_comment_split, rhs]
592             else:
593                 transformers = [rhs]
594     # It's always safe to attempt hugging of power operations and pretty much every line
595     # could match.
596     transformers.append(hug_power_op)
597
598     for transform in transformers:
599         # We are accumulating lines in `result` because we might want to abort
600         # mission and return the original line in the end, or attempt a different
601         # split altogether.
602         try:
603             result = run_transformer(line, transform, mode, features, line_str=line_str)
604         except CannotTransform:
605             continue
606         else:
607             yield from result
608             break
609
610     else:
611         yield line
612
613
614 class _BracketSplitComponent(Enum):
615     head = auto()
616     body = auto()
617     tail = auto()
618
619
620 def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]:
621     """Split line into many lines, starting with the first matching bracket pair.
622
623     Note: this usually looks weird; only use this for function definitions.
624     Prefer RHS otherwise.  This is why this function is not symmetrical with
625     :func:`right_hand_split` which also handles optional parentheses.
626     """
627     tail_leaves: List[Leaf] = []
628     body_leaves: List[Leaf] = []
629     head_leaves: List[Leaf] = []
630     current_leaves = head_leaves
631     matching_bracket: Optional[Leaf] = None
632     for leaf in line.leaves:
633         if (
634             current_leaves is body_leaves
635             and leaf.type in CLOSING_BRACKETS
636             and leaf.opening_bracket is matching_bracket
637             and isinstance(matching_bracket, Leaf)
638         ):
639             ensure_visible(leaf)
640             ensure_visible(matching_bracket)
641             current_leaves = tail_leaves if body_leaves else head_leaves
642         current_leaves.append(leaf)
643         if current_leaves is head_leaves:
644             if leaf.type in OPENING_BRACKETS:
645                 matching_bracket = leaf
646                 current_leaves = body_leaves
647     if not matching_bracket:
648         raise CannotSplit("No brackets found")
649
650     head = bracket_split_build_line(
651         head_leaves, line, matching_bracket, component=_BracketSplitComponent.head
652     )
653     body = bracket_split_build_line(
654         body_leaves, line, matching_bracket, component=_BracketSplitComponent.body
655     )
656     tail = bracket_split_build_line(
657         tail_leaves, line, matching_bracket, component=_BracketSplitComponent.tail
658     )
659     bracket_split_succeeded_or_raise(head, body, tail)
660     for result in (head, body, tail):
661         if result:
662             yield result
663
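# Illustrative effect of a left-hand split on a `def` line that exceeds the
# configured line length (hypothetical names):
#
#     def process_records(records, transformer, error_handler, logger) -> None:
#
# becomes
#
#     def process_records(
#         records, transformer, error_handler, logger
#     ) -> None: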
664
665 @dataclass
666 class _RHSResult:
667     """Intermediate split result from a right hand split."""
668
669     head: Line
670     body: Line
671     tail: Line
672     opening_bracket: Leaf
673     closing_bracket: Leaf
674
675
676 def right_hand_split(
677     line: Line,
678     line_length: int,
679     features: Collection[Feature] = (),
680     omit: Collection[LeafID] = (),
681 ) -> Iterator[Line]:
682     """Split line into many lines, starting with the last matching bracket pair.
683
684     If the split was by optional parentheses, attempt splitting without them, too.
685     `omit` is a collection of closing bracket IDs that shouldn't be considered for
686     this split.
687
688     Note: running this function modifies `bracket_depth` on the leaves of `line`.
689     """
690     rhs_result = _first_right_hand_split(line, omit=omit)
691     yield from _maybe_split_omitting_optional_parens(
692         rhs_result, line, line_length, features=features, omit=omit
693     )
694
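# Illustrative effect (hypothetical names): an over-long assignment such as
#
#     result = compute_statistics(samples, weights, normalize=True)
#
# typically ends up split around its last bracket pair:
#
#     result = compute_statistics(
#         samples, weights, normalize=True
#     )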
695
696 def _first_right_hand_split(
697     line: Line,
698     omit: Collection[LeafID] = (),
699 ) -> _RHSResult:
700     """Split the line into head, body, tail starting with the last bracket pair.
701
702     Note: this function should not have side effects. It's relied upon by
703     _maybe_split_omitting_optional_parens to get an opinion whether to prefer
704     splitting on the right side of an assignment statement.
705     """
706     tail_leaves: List[Leaf] = []
707     body_leaves: List[Leaf] = []
708     head_leaves: List[Leaf] = []
709     current_leaves = tail_leaves
710     opening_bracket: Optional[Leaf] = None
711     closing_bracket: Optional[Leaf] = None
712     for leaf in reversed(line.leaves):
713         if current_leaves is body_leaves:
714             if leaf is opening_bracket:
715                 current_leaves = head_leaves if body_leaves else tail_leaves
716         current_leaves.append(leaf)
717         if current_leaves is tail_leaves:
718             if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
719                 opening_bracket = leaf.opening_bracket
720                 closing_bracket = leaf
721                 current_leaves = body_leaves
722     if not (opening_bracket and closing_bracket and head_leaves):
723         # If there is no opening or closing_bracket that means the split failed and
724         # all content is in the tail.  Otherwise, if `head_leaves` are empty, it means
725         # the matching `opening_bracket` wasn't available on `line` anymore.
726         raise CannotSplit("No brackets found")
727
728     tail_leaves.reverse()
729     body_leaves.reverse()
730     head_leaves.reverse()
731     head = bracket_split_build_line(
732         head_leaves, line, opening_bracket, component=_BracketSplitComponent.head
733     )
734     body = bracket_split_build_line(
735         body_leaves, line, opening_bracket, component=_BracketSplitComponent.body
736     )
737     tail = bracket_split_build_line(
738         tail_leaves, line, opening_bracket, component=_BracketSplitComponent.tail
739     )
740     bracket_split_succeeded_or_raise(head, body, tail)
741     return _RHSResult(head, body, tail, opening_bracket, closing_bracket)
742
743
744 def _maybe_split_omitting_optional_parens(
745     rhs: _RHSResult,
746     line: Line,
747     line_length: int,
748     features: Collection[Feature] = (),
749     omit: Collection[LeafID] = (),
750 ) -> Iterator[Line]:
751     if (
752         Feature.FORCE_OPTIONAL_PARENTHESES not in features
753         # the opening bracket is an optional paren
754         and rhs.opening_bracket.type == token.LPAR
755         and not rhs.opening_bracket.value
756         # the closing bracket is an optional paren
757         and rhs.closing_bracket.type == token.RPAR
758         and not rhs.closing_bracket.value
759         # it's not an import (optional parens are the only thing we can split on
760         # in this case; attempting a split without them is a waste of time)
761         and not line.is_import
762         # there are no standalone comments in the body
763         and not rhs.body.contains_standalone_comments(0)
764         # and we can actually remove the parens
765         and can_omit_invisible_parens(rhs.body, line_length)
766     ):
767         omit = {id(rhs.closing_bracket), *omit}
768         try:
769             # The _RHSResult Omitting Optional Parens.
770             rhs_oop = _first_right_hand_split(line, omit=omit)
771             if not (
772                 Preview.prefer_splitting_right_hand_side_of_assignments in line.mode
773                 # the split is right after `=`
774                 and len(rhs.head.leaves) >= 2
775                 and rhs.head.leaves[-2].type == token.EQUAL
776                 # the left side of assignment contains brackets
777                 and any(leaf.type in BRACKETS for leaf in rhs.head.leaves[:-1])
778                 # the left side of assignment is short enough (the -1 is for the ending
779                 # optional paren)
780                 and is_line_short_enough(rhs.head, line_length=line_length - 1)
781                 # the left side of assignment won't explode further because of magic
782                 # trailing comma
783                 and rhs.head.magic_trailing_comma is None
784                 # the split by omitting optional parens isn't preferred for some other
785                 # reason
786                 and not _prefer_split_rhs_oop(rhs_oop, line_length=line_length)
787             ):
788                 yield from _maybe_split_omitting_optional_parens(
789                     rhs_oop, line, line_length, features=features, omit=omit
790                 )
791                 return
792
793         except CannotSplit as e:
794             if not (
795                 can_be_split(rhs.body)
796                 or is_line_short_enough(rhs.body, line_length=line_length)
797             ):
798                 raise CannotSplit(
799                     "Splitting failed, body is still too long and can't be split."
800                 ) from e
801
802             elif (
803                 rhs.head.contains_multiline_strings()
804                 or rhs.tail.contains_multiline_strings()
805             ):
806                 raise CannotSplit(
807                     "The current optional pair of parentheses is bound to fail to"
808                     " satisfy the splitting algorithm because the head or the tail"
809                     " contains multiline strings which by definition never fit one"
810                     " line."
811                 ) from e
812
813     ensure_visible(rhs.opening_bracket)
814     ensure_visible(rhs.closing_bracket)
815     for result in (rhs.head, rhs.body, rhs.tail):
816         if result:
817             yield result
818
819
820 def _prefer_split_rhs_oop(rhs_oop: _RHSResult, line_length: int) -> bool:
821     """
822     Returns whether we should prefer the result from a split omitting optional parens.
823     """
824     has_closing_bracket_after_assign = False
825     for leaf in reversed(rhs_oop.head.leaves):
826         if leaf.type == token.EQUAL:
827             break
828         if leaf.type in CLOSING_BRACKETS:
829             has_closing_bracket_after_assign = True
830             break
831     return (
832         # contains matching brackets after the `=` (done by checking there is a
833         # closing bracket)
834         has_closing_bracket_after_assign
835         or (
836             # the split is actually from inside the optional parens (done by checking
837             # the first line still contains the `=`)
838             any(leaf.type == token.EQUAL for leaf in rhs_oop.head.leaves)
839             # the first line is short enough
840             and is_line_short_enough(rhs_oop.head, line_length=line_length)
841         )
842         # contains unsplittable type ignore
843         or rhs_oop.head.contains_unsplittable_type_ignore()
844         or rhs_oop.body.contains_unsplittable_type_ignore()
845         or rhs_oop.tail.contains_unsplittable_type_ignore()
846     )
847
848
849 def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
850     """Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
851
852     Do nothing otherwise.
853
854     A left- or right-hand split is based on a pair of brackets. Content before
855     (and including) the opening bracket is left on one line, content inside the
856     brackets is put on a separate line, and finally content starting with and
857     following the closing bracket is put on a separate line.
858
859     Those are called `head`, `body`, and `tail`, respectively. If the split
860     produced the same line (all content in `head`) or ended up with an empty `body`
861     and the `tail` is just the closing bracket, then it's considered failed.
862     """
863     tail_len = len(str(tail).strip())
864     if not body:
865         if tail_len == 0:
866             raise CannotSplit("Splitting brackets produced the same line")
867
868         elif tail_len < 3:
869             raise CannotSplit(
870                 f"Splitting brackets on an empty body to save {tail_len} characters is"
871                 " not worth it"
872             )
873
874
875 def bracket_split_build_line(
876     leaves: List[Leaf],
877     original: Line,
878     opening_bracket: Leaf,
879     *,
880     component: _BracketSplitComponent,
881 ) -> Line:
882     """Return a new line with given `leaves` and respective comments from `original`.
883
884     If it's the head component, brackets will be tracked so trailing commas are
885     respected.
886
887     If it's the body component, the result line is one-indented inside brackets and as
888     such has its first leaf's prefix normalized and a trailing comma added when
889     expected.
890     """
891     result = Line(mode=original.mode, depth=original.depth)
892     if component is _BracketSplitComponent.body:
893         result.inside_brackets = True
894         result.depth += 1
895         if leaves:
896             # Since body is a new indent level, remove spurious leading whitespace.
897             normalize_prefix(leaves[0], inside_brackets=True)
898             # Ensure a trailing comma for imports and standalone function arguments, but
899             # be careful not to add one after any comments or within type annotations.
900             no_commas = (
901                 original.is_def
902                 and opening_bracket.value == "("
903                 and not any(leaf.type == token.COMMA for leaf in leaves)
904                 # In particular, don't add one within a parenthesized return annotation.
905                 # Unfortunately the indicator we're in a return annotation (RARROW) may
906                 # be defined directly in the parent node, the parent of the parent ...
907                 # and so on depending on how complex the return annotation is.
908                 # This isn't perfect and there are some false negatives, but they are in
909                 # contexts where a comma is actually fine.
910                 and not any(
911                     node.prev_sibling.type == RARROW
912                     for node in (
913                         leaves[0].parent,
914                         getattr(leaves[0].parent, "parent", None),
915                     )
916                     if isinstance(node, Node) and isinstance(node.prev_sibling, Leaf)
917                 )
918             )
919
920             if original.is_import or no_commas:
921                 for i in range(len(leaves) - 1, -1, -1):
922                     if leaves[i].type == STANDALONE_COMMENT:
923                         continue
924
925                     if leaves[i].type != token.COMMA:
926                         new_comma = Leaf(token.COMMA, ",")
927                         leaves.insert(i + 1, new_comma)
928                     break
929
930     leaves_to_track: Set[LeafID] = set()
931     if (
932         Preview.handle_trailing_commas_in_head in original.mode
933         and component is _BracketSplitComponent.head
934     ):
935         leaves_to_track = get_leaves_inside_matching_brackets(leaves)
936     # Populate the line
937     for leaf in leaves:
938         result.append(
939             leaf,
940             preformatted=True,
941             track_bracket=id(leaf) in leaves_to_track,
942         )
943         for comment_after in original.comments_after(leaf):
944             result.append(comment_after, preformatted=True)
945     if component is _BracketSplitComponent.body and should_split_line(
946         result, opening_bracket
947     ):
948         result.should_split_rhs = True
949     return result
950
951
952 def dont_increase_indentation(split_func: Transformer) -> Transformer:
953     """Normalize prefix of the first leaf in every line returned by `split_func`.
954
955     This is a decorator over relevant split functions.
956     """
957
958     @wraps(split_func)
959     def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
960         for split_line in split_func(line, features):
961             normalize_prefix(split_line.leaves[0], inside_brackets=True)
962             yield split_line
963
964     return split_wrapper
965
966
967 @dont_increase_indentation
968 def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
969     """Split according to delimiters of the highest priority.
970
971     If the appropriate Features are given, the split will add trailing commas
972     also in function signatures and calls that contain `*` and `**`.
973     """
974     try:
975         last_leaf = line.leaves[-1]
976     except IndexError:
977         raise CannotSplit("Line empty") from None
978
979     bt = line.bracket_tracker
980     try:
981         delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
982     except ValueError:
983         raise CannotSplit("No delimiters found") from None
984
985     if delimiter_priority == DOT_PRIORITY:
986         if bt.delimiter_count_with_priority(delimiter_priority) == 1:
987             raise CannotSplit("Splitting a single attribute from its owner looks wrong")
988
989     current_line = Line(
990         mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
991     )
992     lowest_depth = sys.maxsize
993     trailing_comma_safe = True
994
995     def append_to_line(leaf: Leaf) -> Iterator[Line]:
996         """Append `leaf` to current line or to new line if appending impossible."""
997         nonlocal current_line
998         try:
999             current_line.append_safe(leaf, preformatted=True)
1000         except ValueError:
1001             yield current_line
1002
1003             current_line = Line(
1004                 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
1005             )
1006             current_line.append(leaf)
1007
1008     for leaf in line.leaves:
1009         yield from append_to_line(leaf)
1010
1011         for comment_after in line.comments_after(leaf):
1012             yield from append_to_line(comment_after)
1013
1014         lowest_depth = min(lowest_depth, leaf.bracket_depth)
1015         if leaf.bracket_depth == lowest_depth:
1016             if is_vararg(leaf, within={syms.typedargslist}):
1017                 trailing_comma_safe = (
1018                     trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features
1019                 )
1020             elif is_vararg(leaf, within={syms.arglist, syms.argument}):
1021                 trailing_comma_safe = (
1022                     trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features
1023                 )
1024
1025         leaf_priority = bt.delimiters.get(id(leaf))
1026         if leaf_priority == delimiter_priority:
1027             yield current_line
1028
1029             current_line = Line(
1030                 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
1031             )
1032     if current_line:
1033         if (
1034             trailing_comma_safe
1035             and delimiter_priority == COMMA_PRIORITY
1036             and current_line.leaves[-1].type != token.COMMA
1037             and current_line.leaves[-1].type != STANDALONE_COMMENT
1038         ):
1039             new_comma = Leaf(token.COMMA, ",")
1040             current_line.append(new_comma)
1041         yield current_line
1042
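# Illustrative effect (hypothetical names): when the body of a bracket pair is
# still too long for one line, the comma-priority split explodes it one element
# per line and, when safe, adds a trailing comma:
#
#     totals = aggregate(
#         first_quarter_results, second_quarter_results, third_quarter_results
#     )
#
# becomes
#
#     totals = aggregate(
#         first_quarter_results,
#         second_quarter_results,
#         third_quarter_results,
#     )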
1043
1044 @dont_increase_indentation
1045 def standalone_comment_split(
1046     line: Line, features: Collection[Feature] = ()
1047 ) -> Iterator[Line]:
1048     """Split standalone comments from the rest of the line."""
1049     if not line.contains_standalone_comments(0):
1050         raise CannotSplit("Line does not have any standalone comments")
1051
1052     current_line = Line(
1053         mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
1054     )
1055
1056     def append_to_line(leaf: Leaf) -> Iterator[Line]:
1057         """Append `leaf` to current line or to new line if appending impossible."""
1058         nonlocal current_line
1059         try:
1060             current_line.append_safe(leaf, preformatted=True)
1061         except ValueError:
1062             yield current_line
1063
1064             current_line = Line(
1065                 line.mode, depth=line.depth, inside_brackets=line.inside_brackets
1066             )
1067             current_line.append(leaf)
1068
1069     for leaf in line.leaves:
1070         yield from append_to_line(leaf)
1071
1072         for comment_after in line.comments_after(leaf):
1073             yield from append_to_line(comment_after)
1074
1075     if current_line:
1076         yield current_line
1077
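# Illustrative effect (hypothetical names): a standalone comment inside brackets
# keeps the line from collapsing, so the elements end up on their own lines:
#
#     items = [
#         # explanatory comment
#         first_item,
#         second_item,
#     ]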
1078
1079 def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
1080     """Leave existing extra newlines if not `inside_brackets`. Remove everything
1081     else.
1082
1083     Note: don't use backslashes for formatting or you'll lose your voting rights.
1084     """
1085     if not inside_brackets:
1086         spl = leaf.prefix.split("#")
1087         if "\\" not in spl[0]:
1088             nl_count = spl[-1].count("\n")
1089             if len(spl) > 1:
1090                 nl_count -= 1
1091             leaf.prefix = "\n" * nl_count
1092             return
1093
1094     leaf.prefix = ""
1095
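# For example, outside brackets a prefix of "\n\n\n    " becomes "\n\n\n" (extra
# newlines kept, trailing indentation dropped), while inside brackets it becomes "".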
1096
1097 def normalize_invisible_parens(
1098     node: Node, parens_after: Set[str], *, mode: Mode, features: Collection[Feature]
1099 ) -> None:
1100     """Make existing optional parentheses invisible or create new ones.
1101
1102     `parens_after` is a set of string leaf values immediately after which parens
1103     should be put.
1104
1105     Standardizes on visible parentheses for single-element tuples, and keeps
1106     existing visible parentheses for other tuples and generator expressions.
1107     """
1108     for pc in list_comments(node.prefix, is_endmarker=False, preview=mode.preview):
1109         if pc.value in FMT_OFF:
1110             # This `node` has a prefix with `# fmt: off`, don't mess with parens.
1111             return
1112
1113     # The multiple context managers grammar has a different pattern, thus this is
1114     # separate from the for-loop below. This possibly wraps them in invisible parens,
1115     # which are later removed in remove_with_parens when needed.
1116     if node.type == syms.with_stmt:
1117         _maybe_wrap_cms_in_parens(node, mode, features)
1118
1119     check_lpar = False
1120     for index, child in enumerate(list(node.children)):
1121         # Fixes a bug where invisible parens are not properly stripped from
1122         # assignment statements that contain type annotations.
1123         if isinstance(child, Node) and child.type == syms.annassign:
1124             normalize_invisible_parens(
1125                 child, parens_after=parens_after, mode=mode, features=features
1126             )
1127
1128         # Add parentheses around long tuple unpacking in assignments.
1129         if (
1130             index == 0
1131             and isinstance(child, Node)
1132             and child.type == syms.testlist_star_expr
1133         ):
1134             check_lpar = True
1135
1136         if check_lpar:
1137             if (
1138                 mode.preview
1139                 and child.type == syms.atom
1140                 and node.type == syms.for_stmt
1141                 and isinstance(child.prev_sibling, Leaf)
1142                 and child.prev_sibling.type == token.NAME
1143                 and child.prev_sibling.value == "for"
1144             ):
1145                 if maybe_make_parens_invisible_in_atom(
1146                     child,
1147                     parent=node,
1148                     remove_brackets_around_comma=True,
1149                 ):
1150                     wrap_in_parentheses(node, child, visible=False)
1151             elif (
1152                 mode.preview and isinstance(child, Node) and node.type == syms.with_stmt
1153             ):
1154                 remove_with_parens(child, node)
1155             elif child.type == syms.atom:
1156                 if maybe_make_parens_invisible_in_atom(
1157                     child,
1158                     parent=node,
1159                 ):
1160                     wrap_in_parentheses(node, child, visible=False)
1161             elif is_one_tuple(child):
1162                 wrap_in_parentheses(node, child, visible=True)
1163             elif node.type == syms.import_from:
1164                 _normalize_import_from(node, child, index)
1165                 break
1166             elif (
1167                 index == 1
1168                 and child.type == token.STAR
1169                 and node.type == syms.except_clause
1170             ):
1171                 # In except* (PEP 654), the star is actually part of
1172                 # the keyword. So we need to skip the insertion of
1173                 # invisible parentheses to work more precisely.
1174                 continue
1175
1176             elif not (isinstance(child, Leaf) and is_multiline_string(child)):
1177                 wrap_in_parentheses(node, child, visible=False)
1178
1179         comma_check = child.type == token.COMMA if mode.preview else False
1180
1181         check_lpar = isinstance(child, Leaf) and (
1182             child.value in parens_after or comma_check
1183         )
1184
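# Net effect on simple statements, for example:
#
#     return (some_value)   ->  return some_value
#     x = 1,                ->  x = (1,)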
1185
1186 def _normalize_import_from(parent: Node, child: LN, index: int) -> None:
1187     # "import from" nodes store parentheses directly as part of
1188     # the statement
1189     if is_lpar_token(child):
1190         assert is_rpar_token(parent.children[-1])
1191         # make parentheses invisible
1192         child.value = ""
1193         parent.children[-1].value = ""
1194     elif child.type != token.STAR:
1195         # insert invisible parentheses
1196         parent.insert_child(index, Leaf(token.LPAR, ""))
1197         parent.append_child(Leaf(token.RPAR, ""))
1198
1199
1200 def remove_await_parens(node: Node) -> None:
1201     if node.children[0].type == token.AWAIT and len(node.children) > 1:
1202         if (
1203             node.children[1].type == syms.atom
1204             and node.children[1].children[0].type == token.LPAR
1205         ):
1206             if maybe_make_parens_invisible_in_atom(
1207                 node.children[1],
1208                 parent=node,
1209                 remove_brackets_around_comma=True,
1210             ):
1211                 wrap_in_parentheses(node, node.children[1], visible=False)
1212
1213             # Since await is an expression we shouldn't remove
1214             # brackets in cases where this would change
1215             # the AST due to operator precedence.
1216             # Therefore we only aim to remove brackets around
1217             # power nodes that aren't also await expressions themselves.
1218             # https://peps.python.org/pep-0492/#updated-operator-precedence-table
1219             # N.B. We've still removed any redundant nested brackets though :)
1220             opening_bracket = cast(Leaf, node.children[1].children[0])
1221             closing_bracket = cast(Leaf, node.children[1].children[-1])
1222             bracket_contents = cast(Node, node.children[1].children[1])
1223             if bracket_contents.type != syms.power:
1224                 ensure_visible(opening_bracket)
1225                 ensure_visible(closing_bracket)
1226             elif (
1227                 bracket_contents.type == syms.power
1228                 and bracket_contents.children[0].type == token.AWAIT
1229             ):
1230                 ensure_visible(opening_bracket)
1231                 ensure_visible(closing_bracket)
1232                 # If we are in a nested await then recurse down.
1233                 remove_await_parens(bracket_contents)
1234
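# Illustrative effect (hypothetical names, with Preview.remove_redundant_parens
# enabled by the caller in visit_power):
#
#     await (fetch_data())      ->  await fetch_data()
#     await (first + second)    ->  await (first + second)   # kept: precedence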
1235
1236 def _maybe_wrap_cms_in_parens(
1237     node: Node, mode: Mode, features: Collection[Feature]
1238 ) -> None:
1239     """When enabled and safe, wrap the multiple context managers in invisible parens.
1240
1241     It is only safe when `features` contain Feature.PARENTHESIZED_CONTEXT_MANAGERS.
1242     """
    if (
        Feature.PARENTHESIZED_CONTEXT_MANAGERS not in features
        or Preview.wrap_multiple_context_managers_in_parens not in mode
        or len(node.children) <= 2
        # If it's an atom, it's already wrapped in parens.
        or node.children[1].type == syms.atom
    ):
        return
    colon_index: Optional[int] = None
    for i in range(2, len(node.children)):
        if node.children[i].type == token.COLON:
            colon_index = i
            break
    if colon_index is not None:
        lpar = Leaf(token.LPAR, "")
        rpar = Leaf(token.RPAR, "")
        context_managers = node.children[1:colon_index]
        for child in context_managers:
            child.remove()
        # After wrapping, the with_stmt will look like this:
        #   with_stmt
        #     NAME 'with'
        #     atom
        #       LPAR ''
        #       testlist_gexp
        #         ... <-- context_managers
        #       /testlist_gexp
        #       RPAR ''
        #     /atom
        #     COLON ':'
        new_child = Node(
            syms.atom, [lpar, Node(syms.testlist_gexp, context_managers), rpar]
        )
        node.insert_child(1, new_child)


def remove_with_parens(node: Node, parent: Node) -> None:
    """Recursively hide optional parens in `with` statements."""
    # Removing all unnecessary parentheses in with statements in one pass is a tad
    # complex as different variations of bracketed statements result in pretty
    # different parse trees:
    #
    # with (open("file")) as f:                       # this is an asexpr_test
    #     ...
    #
    # with (open("file") as f):                       # this is an atom containing an
    #     ...                                         # asexpr_test
    #
    # with (open("file")) as f, (open("file")) as f:  # this is asexpr_test, COMMA,
    #     ...                                         # asexpr_test
    #
    # with (open("file") as f, open("file") as f):    # an atom containing a
    #     ...                                         # testlist_gexp which then
    #                                                 # contains multiple asexpr_test(s)
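    # The net effect on the redundant forms above is to drop the optional parens,
    # e.g. (reusing the first example):
    #   with (open("file")) as f: ...   ->   with open("file") as f: ...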
    if node.type == syms.atom:
        if maybe_make_parens_invisible_in_atom(
            node,
            parent=parent,
            remove_brackets_around_comma=True,
        ):
            wrap_in_parentheses(parent, node, visible=False)
        if isinstance(node.children[1], Node):
            remove_with_parens(node.children[1], node)
    elif node.type == syms.testlist_gexp:
        for child in node.children:
            if isinstance(child, Node):
                remove_with_parens(child, node)
    elif node.type == syms.asexpr_test and not any(
        leaf.type == token.COLONEQUAL for leaf in node.leaves()
    ):
        if maybe_make_parens_invisible_in_atom(
            node.children[0],
            parent=node,
            remove_brackets_around_comma=True,
        ):
            wrap_in_parentheses(node, node.children[0], visible=False)


def maybe_make_parens_invisible_in_atom(
    node: LN,
    parent: LN,
    remove_brackets_around_comma: bool = False,
) -> bool:
    """If it's safe, make the parens in the atom `node` invisible, recursively.
    Additionally, remove repeated, adjacent invisible parens from the atom `node`
    as they are redundant.

    Returns whether the node should itself be wrapped in invisible parentheses.
    """
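    # Rough examples (hypothetical atoms; the precise rules are the checks below):
    #   (a + b)   in  x = (a + b)      -> parens made invisible
    #   ()  /  (a,)                    -> kept (empty / one-element tuples)
    #   (x := 0)  in  return (x := 0)  -> kept (walrus in a return statement)
    #   (a, b)                         -> kept unless remove_brackets_around_comma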
    if (
        node.type != syms.atom
        or is_empty_tuple(node)
        or is_one_tuple(node)
        or (is_yield(node) and parent.type != syms.expr_stmt)
        or (
            # This condition tries to prevent removing non-optional brackets
            # around a tuple; however, it can be a bit overzealous, so we provide
            # an option to skip this check for `for` and `with` statements.
            not remove_brackets_around_comma
            and max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
        )
        or is_tuple_containing_walrus(node)
    ):
        return False

    if is_walrus_assignment(node):
        if parent.type in [
            syms.annassign,
            syms.expr_stmt,
            syms.assert_stmt,
            syms.return_stmt,
            syms.except_clause,
            syms.funcdef,
            syms.with_stmt,
            # these ones aren't useful to end users, but they do please fuzzers
            syms.for_stmt,
            syms.del_stmt,
        ]:
            return False

    first = node.children[0]
    last = node.children[-1]
    if is_lpar_token(first) and is_rpar_token(last):
        middle = node.children[1]
        # make parentheses invisible
        first.value = ""
        last.value = ""
        maybe_make_parens_invisible_in_atom(
            middle,
            parent=parent,
            remove_brackets_around_comma=remove_brackets_around_comma,
        )

        if is_atom_with_invisible_parens(middle):
            # Strip the invisible parens from `middle` by replacing it
            # with the child in between the invisible parens.
            middle.replace(middle.children[1])

        return False

    return True


def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
    """Should `line` be immediately split with `delimiter_split()` after RHS?"""

    if not (opening_bracket.parent and opening_bracket.value in "[{("):
        return False

    # We're essentially checking if the body is delimited by commas and there's more
    # than one of them (we exclude the trailing comma, so if the delimiter priority
    # is still commas, that means there is at least one more).
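    # For instance (hypothetical literal): when formatting `foo = [1, 2, 3,]`,
    # the bracketed body `1, 2, 3,` ends in a (magic) trailing comma and is still
    # comma-delimited after excluding it, so this returns True and the list gets
    # exploded one element per line.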
    exclude = set()
    trailing_comma = False
    try:
        last_leaf = line.leaves[-1]
        if last_leaf.type == token.COMMA:
            trailing_comma = True
            exclude.add(id(last_leaf))
        max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
    except (IndexError, ValueError):
        return False

    return max_priority == COMMA_PRIORITY and (
        (line.mode.magic_trailing_comma and trailing_comma)
        # always explode imports
        or opening_bracket.parent.type in {syms.atom, syms.import_from}
    )


def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
    """Generate sets of closing bracket IDs that should be omitted in an RHS.

    Brackets can be omitted if the entire trailer up to and including
    a preceding closing bracket fits in one line.

    Yielded sets are cumulative (they contain results of previous yields, too).
    The first set is empty, unless the line should explode, in which case bracket
    pairs up to the one that needs to explode are omitted.
    """
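    # Sketch of the intent (hypothetical chained call): for something like
    #   result = config.load(path).section("x").value
    # the yielded sets let the caller try right-hand splits that keep trailing
    # trailers attached before falling back to splitting at every bracket pair.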

    omit: Set[LeafID] = set()
    if not line.magic_trailing_comma:
        yield omit

    length = 4 * line.depth
    opening_bracket: Optional[Leaf] = None
    closing_bracket: Optional[Leaf] = None
    inner_brackets: Set[LeafID] = set()
    for index, leaf, leaf_length in line.enumerate_with_length(reversed=True):
        length += leaf_length
        if length > line_length:
            break

        has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
        if leaf.type == STANDALONE_COMMENT or has_inline_comment:
            break

        if opening_bracket:
            if leaf is opening_bracket:
                opening_bracket = None
            elif leaf.type in CLOSING_BRACKETS:
                prev = line.leaves[index - 1] if index > 0 else None
                if (
                    prev
                    and prev.type == token.COMMA
                    and leaf.opening_bracket is not None
                    and not is_one_sequence_between(
                        leaf.opening_bracket, leaf, line.leaves
                    )
                ):
                    # Never omit bracket pairs with trailing commas.
                    # We need to explode on those.
                    break

                inner_brackets.add(id(leaf))
        elif leaf.type in CLOSING_BRACKETS:
            prev = line.leaves[index - 1] if index > 0 else None
            if prev and prev.type in OPENING_BRACKETS:
                # Empty brackets would fail a split, so treat them as "inner"
                # brackets (i.e. only add them to the `omit` set if another
                # pair of brackets was good enough).
                inner_brackets.add(id(leaf))
                continue

            if closing_bracket:
                omit.add(id(closing_bracket))
                omit.update(inner_brackets)
                inner_brackets.clear()
                yield omit

            if (
                prev
                and prev.type == token.COMMA
                and leaf.opening_bracket is not None
                and not is_one_sequence_between(leaf.opening_bracket, leaf, line.leaves)
            ):
                # Never omit bracket pairs with trailing commas.
                # We need to explode on those.
                break

            if leaf.value:
                opening_bracket = leaf.opening_bracket
                closing_bracket = leaf


def run_transformer(
    line: Line,
    transform: Transformer,
    mode: Mode,
    features: Collection[Feature],
    *,
    line_str: str = "",
) -> List[Line]:
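    # Roughly: apply `transform` to `line` and recursively format every produced
    # line. For a right-hand split whose first result is still too long, retry
    # with Feature.FORCE_OPTIONAL_PARENTHESES and keep that `second_opinion`
    # only when every resulting line then fits within the line length.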
    if not line_str:
        line_str = line_to_string(line)
    result: List[Line] = []
    for transformed_line in transform(line, features):
        if str(transformed_line).strip("\n") == line_str:
            raise CannotTransform("Line transformer returned an unchanged result")

        result.extend(transform_line(transformed_line, mode=mode, features=features))

    features_set = set(features)
    if (
        Feature.FORCE_OPTIONAL_PARENTHESES in features_set
        or transform.__class__.__name__ != "rhs"
        or not line.bracket_tracker.invisible
        or any(bracket.value for bracket in line.bracket_tracker.invisible)
        or line.contains_multiline_strings()
        or result[0].contains_uncollapsable_type_comments()
        or result[0].contains_unsplittable_type_ignore()
        or is_line_short_enough(result[0], line_length=mode.line_length)
        # If any leaves have no parents (which _can_ occur since
        # `transform(line)` potentially destroys the line's underlying node
        # structure), then we can't proceed. Doing so would cause the below
        # call to `append_leaves()` to fail.
        or any(leaf.parent is None for leaf in line.leaves)
    ):
        return result

    line_copy = line.clone()
    append_leaves(line_copy, line, line.leaves)
    features_fop = features_set | {Feature.FORCE_OPTIONAL_PARENTHESES}
    second_opinion = run_transformer(
        line_copy, transform, mode, features_fop, line_str=line_str
    )
    if all(
        is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion
    ):
        result = second_opinion
    return result