<!-- Changes that affect Black's stable style -->
+- Introduce the 2023 stable style, which incorporates most aspects of last year's
+ preview style (#3418). Specific changes:
+ - Enforce empty lines before classes and functions with sticky leading comments
+ (#3302) (22.12.0)
+ - Reformat empty and whitespace-only files as either an empty file (if no newline is
+ present) or as a single newline character (if a newline is present) (#3348)
+ (22.12.0)
+ - Implicitly concatenated strings used as function args are now wrapped inside
+ parentheses (#3307) (22.12.0)
+ - Correctly handle trailing commas that are inside a line's leading non-nested parens
+ (#3370) (22.12.0)
+ - `--skip-string-normalization` / `-S` now prevents docstring prefixes from being
+ normalized as expected (#3168) (since 22.8.0)
+ - When using `--skip-magic-trailing-comma` or `-C`, trailing commas are stripped from
+ subscript expressions with more than 1 element (#3209) (22.8.0)
+ - Implicitly concatenated strings inside a list, set, or tuple are now wrapped inside
+ parentheses (#3162) (22.8.0)
+ - Fix a string merging/split issue when a comment is present in the middle of
+ implicitly concatenated strings on its own line (#3227) (22.8.0)
+ - Docstring quotes are no longer moved if it would violate the line length limit
+ (#3044, #3430) (22.6.0)
+ - Parentheses around return annotations are now managed (#2990) (22.6.0)
+ - Remove unnecessary parentheses around awaited objects (#2991) (22.6.0)
+ - Remove unnecessary parentheses in `with` statements (#2926) (22.6.0)
+ - Remove trailing newlines after code block open (#3035) (22.6.0)
+ - Code cell separators `#%%` are now standardised to `# %%` (#2919) (22.3.0)
+ - Remove unnecessary parentheses from `except` statements (#2939) (22.3.0)
+ - Remove unnecessary parentheses from tuple unpacking in `for` loops (#2945) (22.3.0)
+ - Avoid magic-trailing-comma in single-element subscripts (#2942) (22.3.0)
- Fix a crash when a colon line is marked between `# fmt: off` and `# fmt: on` (#3439)
### Preview style
_Black_ will allow single empty lines inside functions, and single and double empty
lines on module level left by the original editors, except when they're within
parenthesized expressions. Since such expressions are always reformatted to fit minimal
-space, this whitespace is lost.
+space, this whitespace is lost. The other exception is that it will remove any empty
+lines immediately following a statement that introduces a new indentation level.
+
+```python
+# in:
+
+def foo():
+
+ print("All the newlines above me should be deleted!")
+
+
+if condition:
+
+ print("No newline above me!")
+
+ print("There is a newline above me, and that's OK!")
+
+
+class Point:
+
+ x: int
+ y: int
+
+# out:
+
+def foo():
+ print("All the newlines above me should be deleted!")
+
+
+if condition:
+ print("No newline above me!")
+
+ print("There is a newline above me, and that's OK!")
+
+
+class Point:
+ x: int
+ y: int
+```
It will also insert proper spacing before and after function definitions. It's one line
before and after inner functions and two lines before and after module-level functions
limit. Line continuation backslashes are converted into parenthesized strings.
Unnecessary parentheses are stripped. The stability and status of this feature is
tracked in [this issue](https://github.com/psf/black/issues/2188).
-
-### Improved empty line management
-
-1. _Black_ will remove newlines in the beginning of new code blocks, i.e. when the
- indentation level is increased. For example:
-
- ```python
- def my_func():
-
- print("The line above me will be deleted!")
- ```
-
- will be changed to:
-
- ```python
- def my_func():
- print("The line above me will be deleted!")
- ```
-
- This new feature will be applied to **all code blocks**: `def`, `class`, `if`,
- `for`, `while`, `with`, `case` and `match`.
-
-2. _Black_ will enforce empty lines before classes and functions with leading comments.
- For example:
-
- ```python
- some_var = 1
- # Leading sticky comment
- def my_func():
- ...
- ```
-
- will be changed to:
-
- ```python
- some_var = 1
-
-
- # Leading sticky comment
- def my_func():
- ...
- ```
-
-### Improved parentheses management
-
-_Black_ will format parentheses around return annotations similarly to other sets of
-parentheses. For example:
-
-```python
-def foo() -> (int):
- ...
-
-def foo() -> looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong:
- ...
-```
-
-will be changed to:
-
-```python
-def foo() -> int:
- ...
-
-
-def foo() -> (
- looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong
-):
- ...
-```
-
-And, extra parentheses in `await` expressions and `with` statements are removed. For
-example:
-
-```python
-with ((open("bla.txt")) as f, open("x")):
- ...
-
-async def main():
- await (asyncio.sleep(1))
-```
-
-will be changed to:
-
-```python
-with open("bla.txt") as f, open("x"):
- ...
-
-
-async def main():
- await asyncio.sleep(1)
-```
valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
`mode` is passed to :func:`format_str`.
"""
- if not mode.preview and not src_contents.strip():
- raise NothingChanged
-
if mode.is_ipynb:
dst_contents = format_ipynb_string(src_contents, fast=fast, mode=mode)
else:
Operate cell-by-cell, only on code cells, only for Python notebooks.
If the ``.ipynb`` originally had a trailing newline, it'll be preserved.
"""
- if mode.preview and not src_contents:
+ if not src_contents:
raise NothingChanged
trailing_newline = src_contents[-1] == "\n"
for feature in {Feature.PARENTHESIZED_CONTEXT_MANAGERS}
if supports_feature(versions, feature)
}
- normalize_fmt_off(src_node, preview=mode.preview)
+ normalize_fmt_off(src_node)
lines = LineGenerator(mode=mode, features=context_manager_features)
elt = EmptyLineTracker(mode=mode)
split_line_features = {
dst_contents = []
for block in dst_blocks:
dst_contents.extend(block.all_lines())
- if mode.preview and not dst_contents:
+ if not dst_contents:
# Use decode_bytes to retrieve the correct source newline (CRLF or LF),
# and check if normalized_content has more than one line
normalized_content, _, newline = decode_bytes(src_contents.encode("utf-8"))
FMT_PASS: Final = {*FMT_OFF, *FMT_SKIP}
FMT_ON: Final = {"# fmt: on", "# fmt:on", "# yapf: enable"}
-COMMENT_EXCEPTIONS = {True: " !:#'", False: " !:#'%"}
+COMMENT_EXCEPTIONS = " !:#'"
@dataclass
consumed: int # how many characters of the original leaf's prefix did we consume
-def generate_comments(leaf: LN, *, preview: bool) -> Iterator[Leaf]:
+def generate_comments(leaf: LN) -> Iterator[Leaf]:
"""Clean the prefix of the `leaf` and generate comments from it, if any.
Comments in lib2to3 are shoved into the whitespace prefix. This happens
Inline comments are emitted as regular token.COMMENT leaves. Standalone
are emitted with a fake STANDALONE_COMMENT token identifier.
"""
- for pc in list_comments(
- leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER, preview=preview
- ):
+ for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines)
@lru_cache(maxsize=4096)
-def list_comments(
- prefix: str, *, is_endmarker: bool, preview: bool
-) -> List[ProtoComment]:
+def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]:
"""Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
result: List[ProtoComment] = []
if not prefix or "#" not in prefix:
comment_type = token.COMMENT # simple trailing comment
else:
comment_type = STANDALONE_COMMENT
- comment = make_comment(line, preview=preview)
+ comment = make_comment(line)
result.append(
ProtoComment(
type=comment_type, value=comment, newlines=nlines, consumed=consumed
return result
-def make_comment(content: str, *, preview: bool) -> str:
+def make_comment(content: str) -> str:
"""Return a consistently formatted comment from the given `content` string.
All comments (except for "##", "#!", "#:", '#'") should have a single
and not content.lstrip().startswith("type:")
):
content = " " + content[1:] # Replace NBSP by a simple space
- if content and content[0] not in COMMENT_EXCEPTIONS[preview]:
+ if content and content[0] not in COMMENT_EXCEPTIONS:
content = " " + content
return "#" + content
-def normalize_fmt_off(node: Node, *, preview: bool) -> None:
+def normalize_fmt_off(node: Node) -> None:
"""Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
try_again = True
while try_again:
- try_again = convert_one_fmt_off_pair(node, preview=preview)
+ try_again = convert_one_fmt_off_pair(node)
-def convert_one_fmt_off_pair(node: Node, *, preview: bool) -> bool:
+def convert_one_fmt_off_pair(node: Node) -> bool:
"""Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
Returns True if a pair was converted.
"""
for leaf in node.leaves():
previous_consumed = 0
- for comment in list_comments(leaf.prefix, is_endmarker=False, preview=preview):
+ for comment in list_comments(leaf.prefix, is_endmarker=False):
if comment.value not in FMT_PASS:
previous_consumed = comment.consumed
continue
if comment.value in FMT_SKIP and prev.type in WHITESPACE:
continue
- ignored_nodes = list(generate_ignored_nodes(leaf, comment, preview=preview))
+ ignored_nodes = list(generate_ignored_nodes(leaf, comment))
if not ignored_nodes:
continue
return False
-def generate_ignored_nodes(
- leaf: Leaf, comment: ProtoComment, *, preview: bool
-) -> Iterator[LN]:
+def generate_ignored_nodes(leaf: Leaf, comment: ProtoComment) -> Iterator[LN]:
"""Starting from the container of `leaf`, generate all leaves until `# fmt: on`.
If comment is skip, returns leaf only.
Stops at the end of the block.
"""
if comment.value in FMT_SKIP:
- yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment, preview=preview)
+ yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment)
return
container: Optional[LN] = container_of(leaf)
while container is not None and container.type != token.ENDMARKER:
- if is_fmt_on(container, preview=preview):
+ if is_fmt_on(container):
return
# fix for fmt: on in children
- if children_contains_fmt_on(container, preview=preview):
+ if children_contains_fmt_on(container):
for index, child in enumerate(container.children):
- if isinstance(child, Leaf) and is_fmt_on(child, preview=preview):
+ if isinstance(child, Leaf) and is_fmt_on(child):
if child.type in CLOSING_BRACKETS:
# This means `# fmt: on` is placed at a different bracket level
# than `# fmt: off`. This is an invalid use, but as a courtesy,
if (
child.type == token.INDENT
and index < len(container.children) - 1
- and children_contains_fmt_on(
- container.children[index + 1], preview=preview
- )
+ and children_contains_fmt_on(container.children[index + 1])
):
# This means `# fmt: on` is placed right after an indentation
# level, and we shouldn't swallow the previous INDENT token.
return
- if children_contains_fmt_on(child, preview=preview):
+ if children_contains_fmt_on(child):
return
yield child
else:
def _generate_ignored_nodes_from_fmt_skip(
- leaf: Leaf, comment: ProtoComment, *, preview: bool
+ leaf: Leaf, comment: ProtoComment
) -> Iterator[LN]:
"""Generate all leaves that should be ignored by the `# fmt: skip` from `leaf`."""
prev_sibling = leaf.prev_sibling
parent = leaf.parent
# Need to properly format the leaf prefix to compare it to comment.value,
# which is also formatted
- comments = list_comments(leaf.prefix, is_endmarker=False, preview=preview)
+ comments = list_comments(leaf.prefix, is_endmarker=False)
if not comments or comment.value != comments[0].value:
return
if prev_sibling is not None:
yield from iter(ignored_nodes)
-def is_fmt_on(container: LN, preview: bool) -> bool:
+def is_fmt_on(container: LN) -> bool:
"""Determine whether formatting is switched on within a container.
Determined by whether the last `# fmt:` comment is `on` or `off`.
"""
fmt_on = False
- for comment in list_comments(container.prefix, is_endmarker=False, preview=preview):
+ for comment in list_comments(container.prefix, is_endmarker=False):
if comment.value in FMT_ON:
fmt_on = True
elif comment.value in FMT_OFF:
return fmt_on
-def children_contains_fmt_on(container: LN, *, preview: bool) -> bool:
+def children_contains_fmt_on(container: LN) -> bool:
"""Determine if children have formatting switched on."""
for child in container.children:
leaf = first_leaf_of(child)
- if leaf is not None and is_fmt_on(leaf, preview=preview):
+ if leaf is not None and is_fmt_on(leaf):
return True
return False
"""Default `visit_*()` implementation. Recurses to children of `node`."""
if isinstance(node, Leaf):
any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
- for comment in generate_comments(node, preview=self.mode.preview):
+ for comment in generate_comments(node):
if any_open_brackets:
# any comment within brackets is subject to splitting
self.current_line.append(comment)
def visit_funcdef(self, node: Node) -> Iterator[Line]:
"""Visit function definition."""
- if Preview.annotation_parens not in self.mode:
- yield from self.visit_stmt(node, keywords={"def"}, parens=set())
- else:
- yield from self.line()
+ yield from self.line()
- # Remove redundant brackets around return type annotation.
- is_return_annotation = False
- for child in node.children:
- if child.type == token.RARROW:
- is_return_annotation = True
- elif is_return_annotation:
- if child.type == syms.atom and child.children[0].type == token.LPAR:
- if maybe_make_parens_invisible_in_atom(
- child,
- parent=node,
- remove_brackets_around_comma=False,
- ):
- wrap_in_parentheses(node, child, visible=False)
- else:
+ # Remove redundant brackets around return type annotation.
+ is_return_annotation = False
+ for child in node.children:
+ if child.type == token.RARROW:
+ is_return_annotation = True
+ elif is_return_annotation:
+ if child.type == syms.atom and child.children[0].type == token.LPAR:
+ if maybe_make_parens_invisible_in_atom(
+ child,
+ parent=node,
+ remove_brackets_around_comma=False,
+ ):
wrap_in_parentheses(node, child, visible=False)
- is_return_annotation = False
+ else:
+ wrap_in_parentheses(node, child, visible=False)
+ is_return_annotation = False
- for child in node.children:
- yield from self.visit(child)
+ for child in node.children:
+ yield from self.visit(child)
def visit_match_case(self, node: Node) -> Iterator[Line]:
"""Visit either a match or case statement."""
):
wrap_in_parentheses(node, leaf)
- if Preview.remove_redundant_parens in self.mode:
- remove_await_parens(node)
+ remove_await_parens(node)
yield from self.visit_default(node)
if is_docstring(leaf) and "\\\n" not in leaf.value:
# We're ignoring docstrings with backslash newline escapes because changing
# indentation of those changes the AST representation of the code.
- if Preview.normalize_docstring_quotes_and_prefixes_properly in self.mode:
- # There was a bug where --skip-string-normalization wouldn't stop us
- # from normalizing docstring prefixes. To maintain stability, we can
- # only address this buggy behaviour while the preview style is enabled.
- if self.mode.string_normalization:
- docstring = normalize_string_prefix(leaf.value)
- # visit_default() does handle string normalization for us, but
- # since this method acts differently depending on quote style (ex.
- # see padding logic below), there's a possibility for unstable
- # formatting as visit_default() is called *after*. To avoid a
- # situation where this function formats a docstring differently on
- # the second pass, normalize it early.
- docstring = normalize_string_quotes(docstring)
- else:
- docstring = leaf.value
- else:
- # ... otherwise, we'll keep the buggy behaviour >.<
+ if self.mode.string_normalization:
docstring = normalize_string_prefix(leaf.value)
+ # visit_default() does handle string normalization for us, but
+ # since this method acts differently depending on quote style (ex.
+ # see padding logic below), there's a possibility for unstable
+ # formatting as visit_default() is called *after*. To avoid a
+ # situation where this function formats a docstring differently on
+ # the second pass, normalize it early.
+ docstring = normalize_string_quotes(docstring)
+ else:
+ docstring = leaf.value
prefix = get_string_prefix(docstring)
docstring = docstring[len(prefix) :] # Remove the prefix
quote_char = docstring[0]
quote = quote_char * quote_len
# It's invalid to put closing single-character quotes on a new line.
- if Preview.long_docstring_quotes_on_newline in self.mode and quote_len == 3:
+ if quote_len == 3:
# We need to find the length of the last line of the docstring
# to find if we can add the closing quotes to the line without
# exceeding the maximum line length.
self.visit_try_stmt = partial(
v, keywords={"try", "except", "else", "finally"}, parens=Ø
)
- if self.mode.preview:
- self.visit_except_clause = partial(
- v, keywords={"except"}, parens={"except"}
- )
- self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"})
- else:
- self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø)
- self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø)
+ self.visit_except_clause = partial(v, keywords={"except"}, parens={"except"})
+ self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"})
self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
break
leaves_to_track: Set[LeafID] = set()
- if (
- Preview.handle_trailing_commas_in_head in original.mode
- and component is _BracketSplitComponent.head
- ):
+ if component is _BracketSplitComponent.head:
leaves_to_track = get_leaves_inside_matching_brackets(leaves)
# Populate the line
for leaf in leaves:
Standardizes on visible parentheses for single-element tuples, and keeps
existing visible parentheses for other tuples and generator expressions.
"""
- for pc in list_comments(node.prefix, is_endmarker=False, preview=mode.preview):
+ for pc in list_comments(node.prefix, is_endmarker=False):
if pc.value in FMT_OFF:
# This `node` has a prefix with `# fmt: off`, don't mess with parens.
return
if check_lpar:
if (
- mode.preview
- and child.type == syms.atom
+ child.type == syms.atom
and node.type == syms.for_stmt
and isinstance(child.prev_sibling, Leaf)
and child.prev_sibling.type == token.NAME
remove_brackets_around_comma=True,
):
wrap_in_parentheses(node, child, visible=False)
- elif (
- mode.preview and isinstance(child, Node) and node.type == syms.with_stmt
- ):
+ elif isinstance(child, Node) and node.type == syms.with_stmt:
remove_with_parens(child, node)
elif child.type == syms.atom:
if maybe_make_parens_invisible_in_atom(
elif not (isinstance(child, Leaf) and is_multiline_string(child)):
wrap_in_parentheses(node, child, visible=False)
- comma_check = child.type == token.COMMA if mode.preview else False
+ comma_check = child.type == token.COMMA
check_lpar = isinstance(child, Leaf) and (
child.value in parens_after or comma_check
)
from black.brackets import DOT_PRIORITY, BracketTracker
-from black.mode import Mode, Preview
+from black.mode import Mode
from black.nodes import (
BRACKETS,
CLOSING_BRACKETS,
- it's not a single-element subscript
Additionally, if ensure_removable:
- it's not from square bracket indexing
- (specifically, single-element square bracket indexing with
- Preview.skip_magic_trailing_comma_in_subscript)
+ (specifically, single-element square bracket indexing)
"""
if not (
closing.type in CLOSING_BRACKETS
if closing.type == token.RSQB:
if (
- Preview.one_element_subscript in self.mode
- and closing.parent
+ closing.parent
and closing.parent.type == syms.trailer
and closing.opening_bracket
and is_one_sequence_between(
comma = self.leaves[-1]
if comma.parent is None:
return False
- if Preview.skip_magic_trailing_comma_in_subscript in self.mode:
- return (
- comma.parent.type != syms.subscriptlist
- or closing.opening_bracket is None
- or not is_one_sequence_between(
- closing.opening_bracket,
- closing,
- self.leaves,
- brackets=(token.LSQB, token.RSQB),
- )
+ return (
+ comma.parent.type != syms.subscriptlist
+ or closing.opening_bracket is None
+ or not is_one_sequence_between(
+ closing.opening_bracket,
+ closing,
+ self.leaves,
+ brackets=(token.LSQB, token.RSQB),
)
- return comma.parent.type == syms.listmaker
+ )
if self.is_import:
return True
):
return before, 1
- if (
- Preview.remove_block_trailing_newline in current_line.mode
- and self.previous_line
- and self.previous_line.opens_block
- ):
+ if self.previous_line and self.previous_line.opens_block:
return 0, 0
return before, 0
):
slc = self.semantic_leading_comment
if (
- Preview.empty_lines_before_class_or_def_with_leading_comments
- in current_line.mode
- and slc is not None
+ slc is not None
and slc.previous_block is not None
and not slc.previous_block.original_line.is_class
and not slc.previous_block.original_line.opens_block
"""Individual preview style features."""
hex_codes_in_unicode_sequences = auto()
- annotation_parens = auto()
- empty_lines_before_class_or_def_with_leading_comments = auto()
- handle_trailing_commas_in_head = auto()
- long_docstring_quotes_on_newline = auto()
- normalize_docstring_quotes_and_prefixes_properly = auto()
- one_element_subscript = auto()
prefer_splitting_right_hand_side_of_assignments = auto()
- remove_block_trailing_newline = auto()
- remove_redundant_parens = auto()
# NOTE: string_processing requires wrap_long_dict_values_in_parens
# for https://github.com/psf/black/issues/3117 to be fixed.
string_processing = auto()
-tuple[
- str, int, float, dict[str, int]
-]
+-tuple[str, int, float, dict[str, int],]
++tuple[str, int, float, dict[str, int]]
+tuple[str, int, float, dict[str, int]]
- tuple[str, int, float, dict[str, int],]
very_long_variable_name_filters: t.List[
t.Tuple[str, t.Union[str, t.List[t.Optional[str]]]],
]
--- /dev/null
+# Long string example
+def frobnicate() -> "ThisIsTrulyUnreasonablyExtremelyLongClassName | list[ThisIsTrulyUnreasonablyExtremelyLongClassName]":
+ pass
+
+# output
+
+# Long string example
+def frobnicate() -> (
+ "ThisIsTrulyUnreasonablyExtremelyLongClassName |"
+ " list[ThisIsTrulyUnreasonablyExtremelyLongClassName]"
+):
+ pass
CtxManager3() as example3,
):
...
+
+# output
+
+with CtxManager() as example:
+ ...
+
+with CtxManager1(), CtxManager2():
+ ...
+
+with CtxManager1() as example, CtxManager2():
+ ...
+
+with CtxManager1(), CtxManager2() as example:
+ ...
+
+with CtxManager1() as example1, CtxManager2() as example2:
+ ...
+
+with (
+ CtxManager1() as example1,
+ CtxManager2() as example2,
+ CtxManager3() as example3,
+):
+ ...
except:
try:
raise TypeError(int)
- except* (Exception):
+ except* Exception:
pass
1 / 0
except Exception as e:
with (x := await a, y := await b):
pass
- # Ideally we should remove one set of parentheses
with ((x := await a, y := await b)):
pass
with (x := await a, y := await b):
pass
- # Ideally we should remove one set of parentheses
- with ((x := await a, y := await b)):
+ with (x := await a, y := await b):
pass
with (x := await a), (y := await b):
add_compiler(compilers[(7.0, 32)])
# add_compiler(compilers[(7.1, 64)])
+
# Comment before function.
def inline_comments_in_brackets_ruin_everything():
if typedargslist:
# The percent-percent comments are Spyder IDE cells.
-#%%
+
+# %%
def func():
x = """
a really long string
)
-#%%
\ No newline at end of file
+# %%
\ No newline at end of file
# Preview.empty_lines_before_class_or_def_with_leading_comments.
# In the current style, the user will have to split those lines by hand.
some_instruction
+
+
# This comment should be split from `some_instruction` by two lines but isn't.
def g():
...
if not prev:
prevp = preceding_leaf(p)
if not prevp or prevp.type in OPENING_BRACKETS:
-
return NO
if prevp.type == token.EQUAL:
# Comment 2
+
# fmt: off
def func_no_args():
a; b; c
pass
-def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> Set[
- "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-]:
+def xxxxxxxxxxxxxxxxxxxxxxxxxxxx() -> (
+ Set["xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"]
+):
json = {
"k": {
"k2": {
def some_method_with_a_really_long_name(
very_long_parameter_so_yeah: str, another_long_parameter: int
-) -> (
- another_case_of_returning_a_deeply_nested_import_of_a_type_i_suppose_cause_why_not
-):
+) -> another_case_of_returning_a_deeply_nested_import_of_a_type_i_suppose_cause_why_not:
pass
def func() -> (
- (
- also_super_long_type_annotation_that_may_cause_an_AST_related_crash_in_black(
- this_shouldn_t_get_a_trailing_comma_too
- )
+ also_super_long_type_annotation_that_may_cause_an_AST_related_crash_in_black(
+ this_shouldn_t_get_a_trailing_comma_too
)
):
pass
def foo() -> tuple[int, int, int,]:
return 2
-# Long string example
-def frobnicate() -> "ThisIsTrulyUnreasonablyExtremelyLongClassName | list[ThisIsTrulyUnreasonablyExtremelyLongClassName]":
- pass
-
# output
# Control
def double(a: int) -> int:
]
):
return 2
-
-
-# Long string example
-def frobnicate() -> (
- "ThisIsTrulyUnreasonablyExtremelyLongClassName |"
- " list[ThisIsTrulyUnreasonablyExtremelyLongClassName]"
-):
- pass
msg = (
"Expected diff isn't equal to the actual. If you made changes to"
" expression.py and this is an anticipated difference, overwrite"
- f" tests/data/expression_skip_magic_trailing_comma.diff with {dump}"
+ " tests/data/miscellaneous/expression_skip_magic_trailing_comma.diff"
+ f" with {dump}"
)
self.assertEqual(expected, actual, msg)
@pytest.mark.filterwarnings("ignore:invalid escape sequence.*:DeprecationWarning")
@pytest.mark.parametrize("filename", all_data_cases("simple_cases"))
def test_simple_format(filename: str) -> None:
- check_file("simple_cases", filename, DEFAULT_MODE)
-
-
-@pytest.mark.parametrize("filename", all_data_cases("preview"))
-def test_preview_format(filename: str) -> None:
magic_trailing_comma = filename != "skip_magic_trailing_comma"
check_file(
- "preview",
- filename,
- black.Mode(preview=True, magic_trailing_comma=magic_trailing_comma),
+ "simple_cases", filename, black.Mode(magic_trailing_comma=magic_trailing_comma)
)
-@pytest.mark.parametrize("filename", all_data_cases("preview_39"))
-def test_preview_minimum_python_39_format(filename: str) -> None:
- source, expected = read_data("preview_39", filename)
- mode = black.Mode(preview=True)
- assert_format(source, expected, mode, minimum_version=(3, 9))
-
-
-@pytest.mark.parametrize("filename", all_data_cases("preview_310"))
-def test_preview_minimum_python_310_format(filename: str) -> None:
- source, expected = read_data("preview_310", filename)
- mode = black.Mode(preview=True)
- assert_format(source, expected, mode, minimum_version=(3, 10))
+@pytest.mark.parametrize("filename", all_data_cases("preview"))
+def test_preview_format(filename: str) -> None:
+ check_file("preview", filename, black.Mode(preview=True))
def test_preview_context_managers_targeting_py38() -> None: