X-Git-Url: https://git.madduck.net/etc/vim.git/blobdiff_plain/159984a7351bfc4789bc0fc85b5f408112efca85..6310a405f6663948f7e0b9411cb54e5db2b712a6:/src/black/linegen.py?ds=sidebyside diff --git a/src/black/linegen.py b/src/black/linegen.py index 2e75bc9..507e860 100644 --- a/src/black/linegen.py +++ b/src/black/linegen.py @@ -2,7 +2,7 @@ Generating lines of code. """ import sys -from dataclasses import dataclass +from dataclasses import replace from enum import Enum, auto from functools import partial, wraps from typing import Collection, Iterator, List, Optional, Set, Union, cast @@ -16,6 +16,7 @@ from black.brackets import ( from black.comments import FMT_OFF, generate_comments, list_comments from black.lines import ( Line, + RHSResult, append_leaves, can_be_split, can_omit_invisible_parens, @@ -35,6 +36,7 @@ from black.nodes import ( Visitor, ensure_visible, is_arith_like, + is_async_stmt_or_funcdef, is_atom_with_invisible_parens, is_docstring, is_empty_tuple, @@ -46,6 +48,8 @@ from black.nodes import ( is_rpar_token, is_stub_body, is_stub_suite, + is_tuple_containing_walrus, + is_type_ignore_comment_string, is_vararg, is_walrus_assignment, is_yield, @@ -58,6 +62,7 @@ from black.strings import ( get_string_prefix, normalize_string_prefix, normalize_string_quotes, + normalize_unicode_escape_sequences, ) from black.trans import ( CannotTransform, @@ -89,8 +94,9 @@ class LineGenerator(Visitor[Line]): in ways that will no longer stringify to valid Python code on the tree. """ - def __init__(self, mode: Mode) -> None: + def __init__(self, mode: Mode, features: Collection[Feature]) -> None: self.mode = mode + self.features = features self.current_line: Line self.__post_init__() @@ -106,6 +112,17 @@ class LineGenerator(Visitor[Line]): self.current_line.depth += indent return # Line is empty, don't emit. Creating a new one unnecessary. + if ( + Preview.improved_async_statements_handling in self.mode + and len(self.current_line.leaves) == 1 + and is_async_stmt_or_funcdef(self.current_line.leaves[0]) + ): + # Special case for async def/for/with statements. `visit_async_stmt` + # adds an `ASYNC` leaf then visits the child def/for/with statement + # nodes. Line yields from those nodes shouldn't treat the former + # `ASYNC` leaf as a complete line. + return + complete_line = self.current_line self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent) yield complete_line @@ -114,7 +131,7 @@ class LineGenerator(Visitor[Line]): """Default `visit_*()` implementation. 
Recurses to children of `node`.""" if isinstance(node, Leaf): any_open_brackets = self.current_line.bracket_tracker.any_open_brackets() - for comment in generate_comments(node, preview=self.mode.preview): + for comment in generate_comments(node): if any_open_brackets: # any comment within brackets is subject to splitting self.current_line.append(comment) @@ -140,6 +157,22 @@ class LineGenerator(Visitor[Line]): self.current_line.append(node) yield from super().visit_default(node) + def visit_test(self, node: Node) -> Iterator[Line]: + """Visit an `x if y else z` test""" + + if Preview.parenthesize_conditional_expressions in self.mode: + already_parenthesized = ( + node.prev_sibling and node.prev_sibling.type == token.LPAR + ) + + if not already_parenthesized: + lpar = Leaf(token.LPAR, "") + rpar = Leaf(token.RPAR, "") + node.insert_child(0, lpar) + node.append_child(rpar) + + yield from self.visit_default(node) + def visit_INDENT(self, node: Leaf) -> Iterator[Line]: """Increase indentation level, maybe yield a line.""" # In blib2to3 INDENT never holds comments. @@ -174,13 +207,27 @@ class LineGenerator(Visitor[Line]): `parens` holds a set of string leaf values immediately after which invisible parens should be put. """ - normalize_invisible_parens(node, parens_after=parens, preview=self.mode.preview) + normalize_invisible_parens( + node, parens_after=parens, mode=self.mode, features=self.features + ) for child in node.children: if is_name_token(child) and child.value in keywords: yield from self.line() yield from self.visit(child) + def visit_typeparams(self, node: Node) -> Iterator[Line]: + yield from self.visit_default(node) + node.children[0].prefix = "" + + def visit_typevartuple(self, node: Node) -> Iterator[Line]: + yield from self.visit_default(node) + node.children[1].prefix = "" + + def visit_paramspec(self, node: Node) -> Iterator[Line]: + yield from self.visit_default(node) + node.children[1].prefix = "" + def visit_dictsetmaker(self, node: Node) -> Iterator[Line]: if Preview.wrap_long_dict_values_in_parens in self.mode: for i, child in enumerate(node.children): @@ -200,34 +247,33 @@ class LineGenerator(Visitor[Line]): def visit_funcdef(self, node: Node) -> Iterator[Line]: """Visit function definition.""" - if Preview.annotation_parens not in self.mode: - yield from self.visit_stmt(node, keywords={"def"}, parens=set()) - else: - yield from self.line() + yield from self.line() - # Remove redundant brackets around return type annotation. - is_return_annotation = False - for child in node.children: - if child.type == token.RARROW: - is_return_annotation = True - elif is_return_annotation: - if child.type == syms.atom and child.children[0].type == token.LPAR: - if maybe_make_parens_invisible_in_atom( - child, - parent=node, - remove_brackets_around_comma=False, - ): - wrap_in_parentheses(node, child, visible=False) - else: + # Remove redundant brackets around return type annotation. 
+ is_return_annotation = False + for child in node.children: + if child.type == token.RARROW: + is_return_annotation = True + elif is_return_annotation: + if child.type == syms.atom and child.children[0].type == token.LPAR: + if maybe_make_parens_invisible_in_atom( + child, + parent=node, + remove_brackets_around_comma=False, + ): wrap_in_parentheses(node, child, visible=False) - is_return_annotation = False + else: + wrap_in_parentheses(node, child, visible=False) + is_return_annotation = False - for child in node.children: - yield from self.visit(child) + for child in node.children: + yield from self.visit(child) def visit_match_case(self, node: Node) -> Iterator[Line]: """Visit either a match or case statement.""" - normalize_invisible_parens(node, parens_after=set(), preview=self.mode.preview) + normalize_invisible_parens( + node, parens_after=set(), mode=self.mode, features=self.features + ) yield from self.line() for child in node.children: @@ -235,7 +281,9 @@ class LineGenerator(Visitor[Line]): def visit_suite(self, node: Node) -> Iterator[Line]: """Visit a suite.""" - if self.mode.is_pyi and is_stub_suite(node): + if ( + self.mode.is_pyi or Preview.dummy_implementations in self.mode + ) and is_stub_suite(node): yield from self.visit(node.children[2]) else: yield from self.visit_default(node) @@ -250,7 +298,9 @@ class LineGenerator(Visitor[Line]): is_suite_like = node.parent and node.parent.type in STATEMENT if is_suite_like: - if self.mode.is_pyi and is_stub_body(node): + if ( + self.mode.is_pyi or Preview.dummy_implementations in self.mode + ) and is_stub_body(node): yield from self.visit_default(node) else: yield from self.line(+1) @@ -259,7 +309,7 @@ class LineGenerator(Visitor[Line]): else: if ( - not self.mode.is_pyi + not (self.mode.is_pyi or Preview.dummy_implementations in self.mode) or not node.parent or not is_stub_suite(node.parent) ): @@ -280,8 +330,11 @@ class LineGenerator(Visitor[Line]): break internal_stmt = next(children) - for child in internal_stmt.children: - yield from self.visit(child) + if Preview.improved_async_statements_handling in self.mode: + yield from self.visit(internal_stmt) + else: + for child in internal_stmt.children: + yield from self.visit(child) def visit_decorators(self, node: Node) -> Iterator[Line]: """Visit decorators.""" @@ -309,8 +362,7 @@ class LineGenerator(Visitor[Line]): ): wrap_in_parentheses(node, leaf) - if Preview.remove_redundant_parens in self.mode: - remove_await_parens(node) + remove_await_parens(node) yield from self.visit_default(node) @@ -346,27 +398,23 @@ class LineGenerator(Visitor[Line]): yield from self.visit_default(node) def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: + if Preview.hex_codes_in_unicode_sequences in self.mode: + normalize_unicode_escape_sequences(leaf) + if is_docstring(leaf) and "\\\n" not in leaf.value: # We're ignoring docstrings with backslash newline escapes because changing # indentation of those changes the AST representation of the code. - if Preview.normalize_docstring_quotes_and_prefixes_properly in self.mode: - # There was a bug where --skip-string-normalization wouldn't stop us - # from normalizing docstring prefixes. To maintain stability, we can - # only address this buggy behaviour while the preview style is enabled. - if self.mode.string_normalization: - docstring = normalize_string_prefix(leaf.value) - # visit_default() does handle string normalization for us, but - # since this method acts differently depending on quote style (ex. 
- # see padding logic below), there's a possibility for unstable - # formatting as visit_default() is called *after*. To avoid a - # situation where this function formats a docstring differently on - # the second pass, normalize it early. - docstring = normalize_string_quotes(docstring) - else: - docstring = leaf.value - else: - # ... otherwise, we'll keep the buggy behaviour >.< + if self.mode.string_normalization: docstring = normalize_string_prefix(leaf.value) + # visit_default() does handle string normalization for us, but + # since this method acts differently depending on quote style (ex. + # see padding logic below), there's a possibility for unstable + # formatting as visit_default() is called *after*. To avoid a + # situation where this function formats a docstring differently on + # the second pass, normalize it early. + docstring = normalize_string_quotes(docstring) + else: + docstring = leaf.value prefix = get_string_prefix(docstring) docstring = docstring[len(prefix) :] # Remove the prefix quote_char = docstring[0] @@ -385,6 +433,7 @@ class LineGenerator(Visitor[Line]): else: docstring = docstring.strip() + has_trailing_backslash = False if docstring: # Add some padding if the docstring starts / ends with a quote mark. if docstring[0] == quote_char: @@ -397,6 +446,7 @@ class LineGenerator(Visitor[Line]): # Odd number of tailing backslashes, add some padding to # avoid escaping the closing string quote. docstring += " " + has_trailing_backslash = True elif not docstring_started_empty: docstring = " " @@ -404,7 +454,7 @@ class LineGenerator(Visitor[Line]): quote = quote_char * quote_len # It's invalid to put closing single-character quotes on a new line. - if Preview.long_docstring_quotes_on_newline in self.mode and quote_len == 3: + if self.mode and quote_len == 3: # We need to find the length of the last line of the docstring # to find if we can add the closing quotes to the line without # exceeding the maximum line length. 
@@ -419,6 +469,8 @@ class LineGenerator(Visitor[Line]): if ( len(lines) > 1 and last_line_length + quote_len > self.mode.line_length + and len(indent) + quote_len <= self.mode.line_length + and not has_trailing_backslash ): leaf.value = prefix + quote + docstring + "\n" + indent + quote else: @@ -443,14 +495,8 @@ class LineGenerator(Visitor[Line]): self.visit_try_stmt = partial( v, keywords={"try", "except", "else", "finally"}, parens=Ø ) - if self.mode.preview: - self.visit_except_clause = partial( - v, keywords={"except"}, parens={"except"} - ) - self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"}) - else: - self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø) - self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø) + self.visit_except_clause = partial(v, keywords={"except"}, parens={"except"}) + self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"}) self.visit_classdef = partial(v, keywords={"class"}, parens=Ø) self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS) self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"}) @@ -492,7 +538,7 @@ def transform_line( and not line.should_split_rhs and not line.magic_trailing_comma and ( - is_line_short_enough(line, line_length=mode.line_length, line_str=line_str) + is_line_short_enough(line, mode=mode, line_str=line_str) or line.contains_unsplittable_type_ignore() ) and not (line.inside_brackets and line.contains_standalone_comments()) @@ -507,7 +553,7 @@ def transform_line( else: def _rhs( - self: object, line: Line, features: Collection[Feature] + self: object, line: Line, features: Collection[Feature], mode: Mode ) -> Iterator[Line]: """Wraps calls to `right_hand_split`. @@ -516,14 +562,12 @@ def transform_line( bracket pair instead. """ for omit in generate_trailers_to_omit(line, mode.line_length): - lines = list( - right_hand_split(line, mode.line_length, features, omit=omit) - ) + lines = list(right_hand_split(line, mode, features, omit=omit)) # Note: this check is only able to figure out if the first line of the # *current* transformation fits in the line length. This is true only # for simple cases. All others require running more transforms via # `transform_line()`. This check doesn't know if those would succeed. - if is_line_short_enough(lines[0], line_length=mode.line_length): + if is_line_short_enough(lines[0], mode=mode): yield from lines return @@ -531,9 +575,7 @@ def transform_line( # This mostly happens to multiline strings that are by definition # reported as not fitting a single line, as well as lines that contain # trailing commas (those have to be exploded). - yield from right_hand_split( - line, line_length=mode.line_length, features=features - ) + yield from right_hand_split(line, mode, features=features) # HACK: nested functions (like _rhs) compiled by mypyc don't retain their # __name__ attribute which is needed in `run_transformer` further down. @@ -591,7 +633,9 @@ class _BracketSplitComponent(Enum): tail = auto() -def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator[Line]: +def left_hand_split( + line: Line, _features: Collection[Feature], mode: Mode +) -> Iterator[Line]: """Split line into many lines, starting with the first matching bracket pair. Note: this usually looks weird, only use this for function definitions. 
@@ -636,20 +680,9 @@ def left_hand_split(line: Line, _features: Collection[Feature] = ()) -> Iterator yield result -@dataclass -class _RHSResult: - """Intermediate split result from a right hand split.""" - - head: Line - body: Line - tail: Line - opening_bracket: Leaf - closing_bracket: Leaf - - def right_hand_split( line: Line, - line_length: int, + mode: Mode, features: Collection[Feature] = (), omit: Collection[LeafID] = (), ) -> Iterator[Line]: @@ -663,14 +696,14 @@ def right_hand_split( """ rhs_result = _first_right_hand_split(line, omit=omit) yield from _maybe_split_omitting_optional_parens( - rhs_result, line, line_length, features=features, omit=omit + rhs_result, line, mode, features=features, omit=omit ) def _first_right_hand_split( line: Line, omit: Collection[LeafID] = (), -) -> _RHSResult: +) -> RHSResult: """Split the line into head, body, tail starting with the last bracket pair. Note: this function should not have side effects. It's relied upon by @@ -712,13 +745,13 @@ def _first_right_hand_split( tail_leaves, line, opening_bracket, component=_BracketSplitComponent.tail ) bracket_split_succeeded_or_raise(head, body, tail) - return _RHSResult(head, body, tail, opening_bracket, closing_bracket) + return RHSResult(head, body, tail, opening_bracket, closing_bracket) def _maybe_split_omitting_optional_parens( - rhs: _RHSResult, + rhs: RHSResult, line: Line, - line_length: int, + mode: Mode, features: Collection[Feature] = (), omit: Collection[LeafID] = (), ) -> Iterator[Line]: @@ -736,38 +769,39 @@ def _maybe_split_omitting_optional_parens( # there are no standalone comments in the body and not rhs.body.contains_standalone_comments(0) # and we can actually remove the parens - and can_omit_invisible_parens(rhs.body, line_length) + and can_omit_invisible_parens(rhs, mode.line_length) ): omit = {id(rhs.closing_bracket), *omit} try: - # The _RHSResult Omitting Optional Parens. + # The RHSResult Omitting Optional Parens. rhs_oop = _first_right_hand_split(line, omit=omit) if not ( Preview.prefer_splitting_right_hand_side_of_assignments in line.mode # the split is right after `=` and len(rhs.head.leaves) >= 2 and rhs.head.leaves[-2].type == token.EQUAL - # the left side of assignement contains brackets + # the left side of assignment contains brackets and any(leaf.type in BRACKETS for leaf in rhs.head.leaves[:-1]) # the left side of assignment is short enough (the -1 is for the ending # optional paren) - and is_line_short_enough(rhs.head, line_length=line_length - 1) + and is_line_short_enough( + rhs.head, mode=replace(mode, line_length=mode.line_length - 1) + ) # the left side of assignment won't explode further because of magic # trailing comma and rhs.head.magic_trailing_comma is None # the split by omitting optional parens isn't preferred by some other # reason - and not _prefer_split_rhs_oop(rhs_oop, line_length=line_length) + and not _prefer_split_rhs_oop(rhs_oop, mode) ): yield from _maybe_split_omitting_optional_parens( - rhs_oop, line, line_length, features=features, omit=omit + rhs_oop, line, mode, features=features, omit=omit ) return except CannotSplit as e: if not ( - can_be_split(rhs.body) - or is_line_short_enough(rhs.body, line_length=line_length) + can_be_split(rhs.body) or is_line_short_enough(rhs.body, mode=mode) ): raise CannotSplit( "Splitting failed, body is still too long and can't be split." 
@@ -791,7 +825,7 @@ def _maybe_split_omitting_optional_parens( yield result -def _prefer_split_rhs_oop(rhs_oop: _RHSResult, line_length: int) -> bool: +def _prefer_split_rhs_oop(rhs_oop: RHSResult, mode: Mode) -> bool: """ Returns whether we should prefer the result from a split omitting optional parens. """ @@ -811,7 +845,7 @@ def _prefer_split_rhs_oop(rhs_oop: _RHSResult, line_length: int) -> bool: # the first line still contains the `=`) any(leaf.type == token.EQUAL for leaf in rhs_oop.head.leaves) # the first line is short enough - and is_line_short_enough(rhs_oop.head, line_length=line_length) + and is_line_short_enough(rhs_oop.head, mode=mode) ) # contains unsplittable type ignore or rhs_oop.head.contains_unsplittable_type_ignore() @@ -889,6 +923,13 @@ def bracket_split_build_line( ) if isinstance(node, Node) and isinstance(node.prev_sibling, Leaf) ) + # Except the false negatives above for PEP 604 unions where we + # can't add the comma. + and not ( + leaves[0].parent + and leaves[0].parent.next_sibling + and leaves[0].parent.next_sibling.type == token.VBAR + ) ) if original.is_import or no_commas: @@ -902,10 +943,7 @@ def bracket_split_build_line( break leaves_to_track: Set[LeafID] = set() - if ( - Preview.handle_trailing_commas_in_head in original.mode - and component is _BracketSplitComponent.head - ): + if component is _BracketSplitComponent.head: leaves_to_track = get_leaves_inside_matching_brackets(leaves) # Populate the line for leaf in leaves: @@ -930,16 +968,39 @@ def dont_increase_indentation(split_func: Transformer) -> Transformer: """ @wraps(split_func) - def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: - for split_line in split_func(line, features): + def split_wrapper( + line: Line, features: Collection[Feature], mode: Mode + ) -> Iterator[Line]: + for split_line in split_func(line, features, mode): normalize_prefix(split_line.leaves[0], inside_brackets=True) yield split_line return split_wrapper +def _get_last_non_comment_leaf(line: Line) -> Optional[int]: + for leaf_idx in range(len(line.leaves) - 1, 0, -1): + if line.leaves[leaf_idx].type != STANDALONE_COMMENT: + return leaf_idx + return None + + +def _safe_add_trailing_comma(safe: bool, delimiter_priority: int, line: Line) -> Line: + if ( + safe + and delimiter_priority == COMMA_PRIORITY + and line.leaves[-1].type != token.COMMA + and line.leaves[-1].type != STANDALONE_COMMENT + ): + new_comma = Leaf(token.COMMA, ",") + line.append(new_comma) + return line + + @dont_increase_indentation -def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: +def delimiter_split( + line: Line, features: Collection[Feature], mode: Mode +) -> Iterator[Line]: """Split according to delimiters of the highest priority. 
If the appropriate Features are given, the split will add trailing commas @@ -979,7 +1040,8 @@ def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[ ) current_line.append(leaf) - for leaf in line.leaves: + last_non_comment_leaf = _get_last_non_comment_leaf(line) + for leaf_idx, leaf in enumerate(line.leaves): yield from append_to_line(leaf) for comment_after in line.comments_after(leaf): @@ -996,6 +1058,15 @@ def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[ trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features ) + if ( + Preview.add_trailing_comma_consistently in mode + and last_leaf.type == STANDALONE_COMMENT + and leaf_idx == last_non_comment_leaf + ): + current_line = _safe_add_trailing_comma( + trailing_comma_safe, delimiter_priority, current_line + ) + leaf_priority = bt.delimiters.get(id(leaf)) if leaf_priority == delimiter_priority: yield current_line @@ -1004,20 +1075,15 @@ def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[ mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets ) if current_line: - if ( - trailing_comma_safe - and delimiter_priority == COMMA_PRIORITY - and current_line.leaves[-1].type != token.COMMA - and current_line.leaves[-1].type != STANDALONE_COMMENT - ): - new_comma = Leaf(token.COMMA, ",") - current_line.append(new_comma) + current_line = _safe_add_trailing_comma( + trailing_comma_safe, delimiter_priority, current_line + ) yield current_line @dont_increase_indentation def standalone_comment_split( - line: Line, features: Collection[Feature] = () + line: Line, features: Collection[Feature], mode: Mode ) -> Iterator[Line]: """Split standalone comments from the rest of the line.""" if not line.contains_standalone_comments(0): @@ -1069,7 +1135,7 @@ def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None: def normalize_invisible_parens( - node: Node, parens_after: Set[str], *, preview: bool + node: Node, parens_after: Set[str], *, mode: Mode, features: Collection[Feature] ) -> None: """Make existing optional parentheses invisible or create new ones. @@ -1079,17 +1145,24 @@ def normalize_invisible_parens( Standardizes on visible parentheses for single-element tuples, and keeps existing visible parentheses for other tuples and generator expressions. """ - for pc in list_comments(node.prefix, is_endmarker=False, preview=preview): + for pc in list_comments(node.prefix, is_endmarker=False): if pc.value in FMT_OFF: # This `node` has a prefix with `# fmt: off`, don't mess with parens. return + + # The multiple context managers grammar has a different pattern, thus this is + # separate from the for-loop below. This possibly wraps them in invisible parens, + # and later will be removed in remove_with_parens when needed. + if node.type == syms.with_stmt: + _maybe_wrap_cms_in_parens(node, mode, features) + check_lpar = False for index, child in enumerate(list(node.children)): # Fixes a bug where invisible parens are not properly stripped from # assignment statements that contain type annotations. if isinstance(child, Node) and child.type == syms.annassign: normalize_invisible_parens( - child, parens_after=parens_after, preview=preview + child, parens_after=parens_after, mode=mode, features=features ) # Add parentheses around long tuple unpacking in assignments. 
@@ -1102,8 +1175,7 @@ def normalize_invisible_parens( if check_lpar: if ( - preview - and child.type == syms.atom + child.type == syms.atom and node.type == syms.for_stmt and isinstance(child.prev_sibling, Leaf) and child.prev_sibling.type == token.NAME @@ -1115,7 +1187,7 @@ def normalize_invisible_parens( remove_brackets_around_comma=True, ): wrap_in_parentheses(node, child, visible=False) - elif preview and isinstance(child, Node) and node.type == syms.with_stmt: + elif isinstance(child, Node) and node.type == syms.with_stmt: remove_with_parens(child, node) elif child.type == syms.atom: if maybe_make_parens_invisible_in_atom( @@ -1126,17 +1198,7 @@ def normalize_invisible_parens( elif is_one_tuple(child): wrap_in_parentheses(node, child, visible=True) elif node.type == syms.import_from: - # "import from" nodes store parentheses directly as part of - # the statement - if is_lpar_token(child): - assert is_rpar_token(node.children[-1]) - # make parentheses invisible - child.value = "" - node.children[-1].value = "" - elif child.type != token.STAR: - # insert invisible parentheses - node.insert_child(index, Leaf(token.LPAR, "")) - node.append_child(Leaf(token.RPAR, "")) + _normalize_import_from(node, child, index) break elif ( index == 1 @@ -1151,13 +1213,27 @@ def normalize_invisible_parens( elif not (isinstance(child, Leaf) and is_multiline_string(child)): wrap_in_parentheses(node, child, visible=False) - comma_check = child.type == token.COMMA if preview else False + comma_check = child.type == token.COMMA check_lpar = isinstance(child, Leaf) and ( child.value in parens_after or comma_check ) +def _normalize_import_from(parent: Node, child: LN, index: int) -> None: + # "import from" nodes store parentheses directly as part of + # the statement + if is_lpar_token(child): + assert is_rpar_token(parent.children[-1]) + # make parentheses invisible + child.value = "" + parent.children[-1].value = "" + elif child.type != token.STAR: + # insert invisible parentheses + parent.insert_child(index, Leaf(token.LPAR, "")) + parent.append_child(Leaf(token.RPAR, "")) + + def remove_await_parens(node: Node) -> None: if node.children[0].type == token.AWAIT and len(node.children) > 1: if ( @@ -1180,18 +1256,62 @@ def remove_await_parens(node: Node) -> None: # N.B. We've still removed any redundant nested brackets though :) opening_bracket = cast(Leaf, node.children[1].children[0]) closing_bracket = cast(Leaf, node.children[1].children[-1]) - bracket_contents = cast(Node, node.children[1].children[1]) - if bracket_contents.type != syms.power: - ensure_visible(opening_bracket) - ensure_visible(closing_bracket) - elif ( - bracket_contents.type == syms.power - and bracket_contents.children[0].type == token.AWAIT - ): - ensure_visible(opening_bracket) - ensure_visible(closing_bracket) - # If we are in a nested await then recurse down. - remove_await_parens(bracket_contents) + bracket_contents = node.children[1].children[1] + if isinstance(bracket_contents, Node): + if bracket_contents.type != syms.power: + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + elif ( + bracket_contents.type == syms.power + and bracket_contents.children[0].type == token.AWAIT + ): + ensure_visible(opening_bracket) + ensure_visible(closing_bracket) + # If we are in a nested await then recurse down. 
+ remove_await_parens(bracket_contents) + + +def _maybe_wrap_cms_in_parens( + node: Node, mode: Mode, features: Collection[Feature] +) -> None: + """When enabled and safe, wrap the multiple context managers in invisible parens. + + It is only safe when `features` contain Feature.PARENTHESIZED_CONTEXT_MANAGERS. + """ + if ( + Feature.PARENTHESIZED_CONTEXT_MANAGERS not in features + or Preview.wrap_multiple_context_managers_in_parens not in mode + or len(node.children) <= 2 + # If it's an atom, it's already wrapped in parens. + or node.children[1].type == syms.atom + ): + return + colon_index: Optional[int] = None + for i in range(2, len(node.children)): + if node.children[i].type == token.COLON: + colon_index = i + break + if colon_index is not None: + lpar = Leaf(token.LPAR, "") + rpar = Leaf(token.RPAR, "") + context_managers = node.children[1:colon_index] + for child in context_managers: + child.remove() + # After wrapping, the with_stmt will look like this: + # with_stmt + # NAME 'with' + # atom + # LPAR '' + # testlist_gexp + # ... <-- context_managers + # /testlist_gexp + # RPAR '' + # /atom + # COLON ':' + new_child = Node( + syms.atom, [lpar, Node(syms.testlist_gexp, context_managers), rpar] + ) + node.insert_child(1, new_child) def remove_with_parens(node: Node, parent: Node) -> None: @@ -1259,6 +1379,7 @@ def maybe_make_parens_invisible_in_atom( not remove_brackets_around_comma and max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY ) + or is_tuple_containing_walrus(node) ): return False @@ -1270,9 +1391,11 @@ def maybe_make_parens_invisible_in_atom( syms.return_stmt, syms.except_clause, syms.funcdef, + syms.with_stmt, # these ones aren't useful to end users, but they do please fuzzers syms.for_stmt, syms.del_stmt, + syms.for_stmt, ]: return False @@ -1281,8 +1404,13 @@ def maybe_make_parens_invisible_in_atom( if is_lpar_token(first) and is_rpar_token(last): middle = node.children[1] # make parentheses invisible - first.value = "" - last.value = "" + if ( + # If the prefix of `middle` includes a type comment with + # ignore annotation, then we do not remove the parentheses + not is_type_ignore_comment_string(middle.prefix.strip()) + ): + first.value = "" + last.value = "" maybe_make_parens_invisible_in_atom( middle, parent=parent, @@ -1413,7 +1541,7 @@ def run_transformer( if not line_str: line_str = line_to_string(line) result: List[Line] = [] - for transformed_line in transform(line, features): + for transformed_line in transform(line, features, mode): if str(transformed_line).strip("\n") == line_str: raise CannotTransform("Line transformer returned an unchanged result") @@ -1428,7 +1556,7 @@ def run_transformer( or line.contains_multiline_strings() or result[0].contains_uncollapsable_type_comments() or result[0].contains_unsplittable_type_ignore() - or is_line_short_enough(result[0], line_length=mode.line_length) + or is_line_short_enough(result[0], mode=mode) # If any leaves have no parents (which _can_ occur since # `transform(line)` potentially destroys the line's underlying node # structure), then we can't proceed. Doing so would cause the below @@ -1443,8 +1571,6 @@ def run_transformer( second_opinion = run_transformer( line_copy, transform, mode, features_fop, line_str=line_str ) - if all( - is_line_short_enough(ln, line_length=mode.line_length) for ln in second_opinion - ): + if all(is_line_short_enough(ln, mode=mode) for ln in second_opinion): result = second_opinion return result
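
The import swap at the top of this diff (dataclass -> replace) exists to support calls such as is_line_short_enough(rhs.head, mode=replace(mode, line_length=mode.line_length - 1)) in _maybe_split_omitting_optional_parens. A minimal sketch of that pattern follows; the Mode class below is a stand-in for illustration only, not black's real black.mode.Mode.

    from dataclasses import dataclass, replace


    @dataclass(frozen=True)
    class Mode:  # stand-in dataclass, just to show the replace() pattern
        line_length: int = 88
        preview: bool = False


    mode = Mode()
    # replace() builds a new frozen instance with one field overridden; here one
    # column is reserved for the trailing optional paren, as in the hunk above.
    narrower = replace(mode, line_length=mode.line_length - 1)
    assert narrower.line_length == 87
    assert mode.line_length == 88  # the original Mode is left untouched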
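
Several hunks gate new behaviour behind Preview flags (parenthesize_conditional_expressions, wrap_multiple_context_managers_in_parens, dummy_implementations) and, for context managers, additionally behind Feature.PARENTHESIZED_CONTEXT_MANAGERS. A hedged usage sketch through black's public format_str/Mode API is below; the exact output depends on the installed black version, so the snippet only prints the result rather than asserting it.

    import black

    SRC = """\
    with make_context_manager(1) as cm1, make_context_manager(2) as cm2, make_context_manager(3) as cm3:
        pass
    """

    # preview=True opts in to the Preview.* features referenced in this diff;
    # targeting 3.10 makes Feature.PARENTHESIZED_CONTEXT_MANAGERS available, so
    # _maybe_wrap_cms_in_parens may wrap the context managers in (invisible)
    # parentheses and let the line split inside them.
    mode = black.Mode(preview=True, target_versions={black.TargetVersion.PY310})
    print(black.format_str(SRC, mode=mode))

If any selected target version lacks parenthesized context managers, the early return in _maybe_wrap_cms_in_parens leaves the statement untouched, which is why the transformation is gated on both the Preview flag and the Feature.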