@click.argument(
'src',
nargs=-1,
- type=click.Path(exists=True, file_okay=True, dir_okay=True, readable=True),
+ type=click.Path(
+ exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
+ ),
)
@click.pass_context
def main(
elif p.is_file():
# if a file was explicitly given, we don't care about its extension
sources.append(p)
+ elif s == '-':
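+ # A bare "-" on the command line stands for standard input.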
+ sources.append(Path('-'))
else:
err(f'invalid path: {s}')
if len(sources) == 0:
p = sources[0]
report = Report()
try:
- changed = format_file_in_place(
- p, line_length=line_length, fast=fast, write_back=not check
- )
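+ # Only treat '-' as stdin when it isn't an actual file on disk.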
+ if not p.is_file() and str(p) == '-':
+ changed = format_stdin_to_stdout(
+ line_length=line_length, fast=fast, write_back=not check
+ )
+ else:
+ changed = format_file_in_place(
+ p, line_length=line_length, fast=fast, write_back=not check
+ )
report.done(p, changed)
except Exception as exc:
report.failed(p, str(exc))
src: Path, line_length: int, fast: bool, write_back: bool = False
) -> bool:
"""Format the file and rewrite if changed. Return True if changed."""
+ with tokenize.open(src) as src_buffer:
+ src_contents = src_buffer.read()
try:
- contents, encoding = format_file(src, line_length=line_length, fast=fast)
+ contents = format_file_contents(
+ src_contents, line_length=line_length, fast=fast
+ )
except NothingChanged:
return False
if write_back:
- with open(src, "w", encoding=encoding) as f:
+ with open(src, "w", encoding=src_buffer.encoding) as f:
f.write(contents)
return True
-def format_file(
- src: Path, line_length: int, fast: bool
-) -> Tuple[FileContent, Encoding]:
+def format_stdin_to_stdout(
+ line_length: int, fast: bool, write_back: bool = False
+) -> bool:
+ """Format file on stdin and pipe output to stdout. Return True if changed."""
+ contents = sys.stdin.read()
+ try:
+ contents = format_file_contents(contents, line_length=line_length, fast=fast)
+ return True
+
+ except NothingChanged:
+ return False
+
+ finally:
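+ # With write_back, echo the contents (reformatted or untouched) to
+ # stdout so the pipe always produces output.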
+ if write_back:
+ sys.stdout.write(contents)
+
+
+def format_file_contents(
+ src_contents: str, line_length: int, fast: bool
+) -> FileContent:
"""Reformats a file and returns its contents and encoding."""
- with tokenize.open(src) as src_buffer:
- src_contents = src_buffer.read()
if src_contents.strip() == '':
- raise NothingChanged(src)
+ raise NothingChanged
dst_contents = format_str(src_contents, line_length=line_length)
if src_contents == dst_contents:
- raise NothingChanged(src)
+ raise NothingChanged
if not fast:
assert_equivalent(src_contents, dst_contents)
assert_stable(src_contents, dst_contents, line_length=line_length)
- return dst_contents, src_buffer.encoding
+ return dst_contents
def format_str(src_contents: str, line_length: int) -> FileContent:
dst_contents += str(line)
else:
comments.append(current_line)
- for comment in comments:
- dst_contents += str(comment)
+ if comments:
+ if elt.previous_defs:
+ # Separate postscriptum comments from the last module-level def.
+ dst_contents += str(empty_line)
+ dst_contents += str(empty_line)
+ for comment in comments:
+ dst_contents += str(comment)
return dst_contents
return (
(first_leaf.type == token.NAME and first_leaf.value == 'def')
or (
- first_leaf.type == token.NAME
- and first_leaf.value == 'async'
+ first_leaf.type == token.ASYNC
and second_leaf is not None
and second_leaf.type == token.NAME
and second_leaf.value == 'def'
"""Provides a stateful method that returns the number of potential extra
empty lines needed before and after the currently processed line.
- Note: this tracker works on lines that haven't been split yet.
+ Note: this tracker works on lines that haven't been split yet. It assumes
+ the prefix of the first leaf consists of optional newlines. Those newlines
+ are consumed by `maybe_empty_lines()` and included in the computation.
"""
previous_line: Optional[Line] = None
previous_after: int = 0
(two on module-level), as well as providing an extra empty line after flow
control keywords to make them more prominent.
"""
+ if current_line.is_comment:
+ # Don't count standalone comments towards previous empty lines.
+ return 0, 0
+
before, after = self._maybe_empty_lines(current_line)
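+ # Empty lines already emitted after the previous line shouldn't be
+ # requested again before this one.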
+ before -= self.previous_after
self.previous_after = after
self.previous_line = current_line
return before, after
def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
- before = 0
+ if current_line.leaves:
+ # Consume the first leaf's extra newlines.
+ first_leaf = current_line.leaves[0]
+ before = int('\n' in first_leaf.prefix)
+ first_leaf.prefix = ''
+ else:
+ before = 0
depth = current_line.depth
while self.previous_defs and self.previous_defs[-1] >= depth:
self.previous_defs.pop()
- before = (1 if depth else 2) - self.previous_after
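+ # After a run of defs: one empty line when nested, two at module level.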
+ before = 1 if depth else 2
is_decorator = current_line.is_decorator
if is_decorator or current_line.is_def or current_line.is_class:
if not is_decorator:
newlines = 2
if current_line.depth:
newlines -= 1
- newlines -= self.previous_after
return newlines, 0
if current_line.is_flow_control:
def visit_default(self, node: LN) -> Iterator[Line]:
if isinstance(node, Leaf):
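+ # Snapshot the bracket state once; it decides comment placement here and
+ # prefix normalization below.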
+ any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
for comment in generate_comments(node):
- if self.current_line.bracket_tracker.any_open_brackets():
+ if any_open_brackets:
# any comment within brackets is subject to splitting
self.current_line.append(comment)
elif comment.type == token.COMMENT:
# regular standalone comment, to be processed later (see
# docstring in `generate_comments()`)
self.standalone_comments.append(comment)
- normalize_prefix(node)
+ normalize_prefix(node, inside_brackets=any_open_brackets)
if node.type not in WHITESPACE:
for comment in self.standalone_comments:
yield from self.line()
for child in children:
yield from self.visit(child)
- if child.type == token.NAME and child.value == 'async': # type: ignore
+ if child.type == token.ASYNC:
break
internal_stmt = next(children)
OPENING_BRACKETS = set(BRACKET.keys())
CLOSING_BRACKETS = set(BRACKET.values())
BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
-ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, token.COLON, STANDALONE_COMMENT}
+ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT}
def whitespace(leaf: Leaf) -> str: # noqa: C901
return DOUBLESPACE
assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
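+ # No space before ':' outside of subscripts; slice colons are handled below.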
+ if t == token.COLON and p.type not in {syms.subscript, syms.subscriptlist}:
+ return NO
+
prev = leaf.prev_sibling
if not prev:
prevp = preceding_leaf(p)
if not prevp or prevp.type in OPENING_BRACKETS:
return NO
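+ # Within a subscript, a colon is preceded by a space only when it follows
+ # a comma.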
+ if t == token.COLON:
+ return SPACE if prevp.type == token.COMMA else NO
+
if prevp.type == token.EQUAL:
if prevp.parent and prevp.parent.type in {
syms.typedargslist,
return NO
elif prevp.type == token.COLON:
- if prevp.parent and prevp.parent.type == syms.subscript:
+ if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
return NO
elif prevp.parent and prevp.parent.type in {syms.factor, syms.star_expr}:
return NO
- elif prev.type == token.COLON:
+ else:
return NO
elif p.type == syms.atom:
if content and (content[0] not in {' ', '!', '#'}):
content = ' ' + content
is_standalone_comment = (
- '\n' in before_comment or '\n' in content or leaf.type == token.DEDENT
+ '\n' in before_comment or '\n' in content or leaf.type == token.ENDMARKER
)
if not is_standalone_comment:
# simple trailing comment
current_leaves = body_leaves
# Since body is a new indent level, remove spurious leading whitespace.
if body_leaves:
- normalize_prefix(body_leaves[0])
+ normalize_prefix(body_leaves[0], inside_brackets=True)
# Build the new lines.
for result, leaves in (
(head, head_leaves), (body, body_leaves), (tail, tail_leaves)
head_leaves.reverse()
# Since body is a new indent level, remove spurious leading whitespace.
if body_leaves:
- normalize_prefix(body_leaves[0])
+ normalize_prefix(body_leaves[0], inside_brackets=True)
# Build the new lines.
for result, leaves in (
(head, head_leaves), (body, body_leaves), (tail, tail_leaves)
trailing_comma_safe = trailing_comma_safe and py36
leaf_priority = delimiters.get(id(leaf))
if leaf_priority == delimiter_priority:
- normalize_prefix(current_line.leaves[0])
+ normalize_prefix(current_line.leaves[0], inside_brackets=True)
yield current_line
current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
and trailing_comma_safe
):
current_line.append(Leaf(token.COMMA, ','))
- normalize_prefix(current_line.leaves[0])
+ normalize_prefix(current_line.leaves[0], inside_brackets=True)
yield current_line
)
-def normalize_prefix(leaf: Leaf) -> None:
- """Leave existing extra newlines for imports. Remove everything else."""
- if is_import(leaf):
+def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None:
+ """Leave existing extra newlines if not `inside_brackets`.
+
+ Remove everything else. Note: don't use backslashes for formatting or
+ you'll lose your voting rights.
+ """
+ if not inside_brackets:
spl = leaf.prefix.split('#', 1)
- nl_count = spl[0].count('\n')
- if len(spl) > 1:
- # Skip one newline since it was for a standalone comment.
- nl_count -= 1
- leaf.prefix = '\n' * nl_count
- return
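+ # A backslash in the prefix means an explicit line continuation was used;
+ # drop such prefixes entirely instead of keeping their newlines.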
+ if '\\' not in spl[0]:
+ nl_count = spl[0].count('\n')
+ leaf.prefix = '\n' * nl_count
+ return
leaf.prefix = ''