#!/usr/bin/env python3
+
import asyncio
from asyncio.base_events import BaseEventLoop
from concurrent.futures import Executor, ProcessPoolExecutor
from blib2to3.pgen2 import driver, token
from blib2to3.pgen2.parse import ParseError
-__version__ = "18.3a2"
+__version__ = "18.3a3"
DEFAULT_LINE_LENGTH = 88
# types
syms = pygram.python_symbols
class CannotSplit(Exception):
"""A readable split that fits the allotted line length is impossible.
- Raised by `left_hand_split()` and `right_hand_split()`.
+ Raised by `left_hand_split()`, `right_hand_split()`, and `delimiter_split()`.
"""
is_flag=True,
help=(
"Don't write back the files, just return the status. Return code 0 "
- "means nothing changed. Return code 1 means some files were "
+ "means nothing would change. Return code 1 means some files would be "
"reformatted. Return code 123 means there was an internal error."
),
)
ctx.exit(0)
elif len(sources) == 1:
p = sources[0]
- report = Report()
+ report = Report(check=check)
try:
if not p.is_file() and str(p) == '-':
changed = format_stdin_to_stdout(
"""Reformats a string and returns new contents."""
src_node = lib2to3_parse(src_contents)
dst_contents = ""
- comments: List[Line] = []
lines = LineGenerator()
elt = EmptyLineTracker()
py36 = is_python36(src_node)
before, after = elt.maybe_empty_lines(current_line)
for _ in range(before):
dst_contents += str(empty_line)
- if not current_line.is_comment:
- for comment in comments:
- dst_contents += str(comment)
- comments = []
- for line in split_line(current_line, line_length=line_length, py36=py36):
- dst_contents += str(line)
- else:
- comments.append(current_line)
- if comments:
- if elt.previous_defs:
- # Separate postscriptum comments from the last module-level def.
- dst_contents += str(empty_line)
- dst_contents += str(empty_line)
- for comment in comments:
- dst_contents += str(comment)
+ for line in split_line(current_line, line_length=line_length, py36=py36):
+ dst_contents += str(line)
return dst_contents
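+# Grammars tried in order by lib2to3_parse(): Python 3 style first, then grammars that also accept Python 2 print/exec statements.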
+GRAMMARS = [
+ pygram.python_grammar_no_print_statement_no_exec_statement,
+ pygram.python_grammar_no_print_statement,
+ pygram.python_grammar,
+]
+
+
def lib2to3_parse(src_txt: str) -> Node:
"""Given a string with source, return the lib2to3 Node."""
grammar = pygram.python_grammar_no_print_statement
- drv = driver.Driver(grammar, pytree.convert)
if src_txt[-1] != '\n':
nl = '\r\n' if '\r\n' in src_txt[:1024] else '\n'
src_txt += nl
- try:
- result = drv.parse_string(src_txt, True)
- except ParseError as pe:
- lineno, column = pe.context[1]
- lines = src_txt.splitlines()
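+ # Try each grammar in turn; if none of them parses the file, the for-else below raises the error from the last attempt.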
+ for grammar in GRAMMARS:
+ drv = driver.Driver(grammar, pytree.convert)
try:
- faulty_line = lines[lineno - 1]
- except IndexError:
- faulty_line = "<line number missing in source>"
- raise ValueError(f"Cannot parse: {lineno}:{column}: {faulty_line}") from None
+ result = drv.parse_string(src_txt, True)
+ break
+
+ except ParseError as pe:
+ lineno, column = pe.context[1]
+ lines = src_txt.splitlines()
+ try:
+ faulty_line = lines[lineno - 1]
+ except IndexError:
+ faulty_line = "<line number missing in source>"
+ exc = ValueError(f"Cannot parse: {lineno}:{column}: {faulty_line}")
+ else:
+ raise exc from None
if isinstance(result, Leaf):
result = Node(syms.file_input, [result])
token.AMPER,
token.PERCENT,
token.CIRCUMFLEX,
+ token.TILDE,
token.LEFTSHIFT,
token.RIGHTSHIFT,
token.DOUBLESTAR,
"""Returns True if there is an yet unmatched open bracket on the line."""
return bool(self.bracket_match)
- def max_priority(self, exclude: Iterable[LeafID] =()) -> int:
+ def max_priority(self, exclude: Iterable[LeafID] = ()) -> int:
"""Returns the highest priority of a delimiter found on the line.
Values are consistent with what `is_delimiter()` returns.
):
return False
- if closing.type == token.RSQB or closing.type == token.RBRACE:
+ if closing.type == token.RBRACE:
self.leaves.pop()
return True
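+ # In square brackets, only drop the trailing comma inside a list literal
+ # (listmaker); in a subscript the comma may be part of a tuple index.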
+ if closing.type == token.RSQB:
+ comma = self.leaves[-1]
+ if comma.parent and comma.parent.type == syms.listmaker:
+ self.leaves.pop()
+ return True
+
# For parens let's check if it's safe to remove the comma. If the
# trailing one is the only one, we might mistakenly change a tuple
# into a different type by removing the comma.
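# For example, (1,) is a one-element tuple while (1) is just the integer 1.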
(two at module level), as well as providing an extra empty line after flow
control keywords to make them more prominent.
"""
- if current_line.is_comment:
- # Don't count standalone comments towards previous empty lines.
- return 0, 0
-
before, after = self._maybe_empty_lines(current_line)
before -= self.previous_after
self.previous_after = after
return before, after
def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
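+ # Allow up to two blank lines from the original prefix before a module-level comment, otherwise at most one.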
+ max_allowed = 1
+ if current_line.is_comment and current_line.depth == 0:
+ max_allowed = 2
if current_line.leaves:
# Consume the first leaf's extra newlines.
first_leaf = current_line.leaves[0]
- before = int('\n' in first_leaf.prefix)
+ before = first_leaf.prefix.count('\n')
+ before = min(before, max_allowed)
first_leaf.prefix = ''
else:
before = 0
in ways that will no longer stringify to valid Python code on the tree.
"""
current_line: Line = Factory(Line)
- standalone_comments: List[Leaf] = Factory(list)
def line(self, indent: int = 0) -> Iterator[Line]:
"""Generate a line.
yield from self.line()
else:
- # regular standalone comment, to be processed later (see
- # docstring in `generate_comments()`
- self.standalone_comments.append(comment)
- normalize_prefix(node, inside_brackets=any_open_brackets)
- if node.type not in WHITESPACE:
- for comment in self.standalone_comments:
+ # regular standalone comment
yield from self.line()
self.current_line.append(comment)
yield from self.line()
- self.standalone_comments = []
+ normalize_prefix(node, inside_brackets=any_open_brackets)
+ if node.type not in WHITESPACE:
self.current_line.append(node)
yield from super().visit_default(node)
- def visit_suite(self, node: Node) -> Iterator[Line]:
- """Body of a statement after a colon."""
- children = iter(node.children)
- # Process newline before indenting. It might contain an inline
- # comment that should go right after the colon.
- newline = next(children)
- yield from self.visit(newline)
+ def visit_INDENT(self, node: Node) -> Iterator[Line]:
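+ """Increase indentation level, maybe yield a line."""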
yield from self.line(+1)
+ yield from self.visit_default(node)
- for child in children:
- yield from self.visit(child)
-
+ def visit_DEDENT(self, node: Node) -> Iterator[Line]:
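+ """Decrease indentation level, maybe yield a line."""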
yield from self.line(-1)
def visit_stmt(self, node: Node, keywords: Set[str]) -> Iterator[Line]:
return SPACE if prevp.type == token.COMMA else NO
if prevp.type == token.EQUAL:
- if prevp.parent and prevp.parent.type in {
- syms.typedargslist,
- syms.varargslist,
- syms.parameters,
- syms.arglist,
- syms.argument,
- }:
- return NO
+ if prevp.parent:
+ if prevp.parent.type in {
+ syms.arglist, syms.argument, syms.parameters, syms.varargslist
+ }:
+ return NO
+
+ elif prevp.parent.type == syms.typedargslist:
+ # A bit hacky: if the equal sign already has whitespace, we previously
+ # determined this is a typed argument, so reuse that whitespace here.
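+ # (e.g. "def f(a: int = 1)" keeps spaces around "=", "def f(a=1)" does not)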
+ return prevp.prefix
elif prevp.type == token.DOUBLESTAR:
if prevp.parent and prevp.parent.type in {
- syms.typedargslist,
- syms.varargslist,
- syms.parameters,
syms.arglist,
+ syms.argument,
syms.dictsetmaker,
+ syms.parameters,
+ syms.typedargslist,
+ syms.varargslist,
}:
return NO
if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
return NO
- elif prevp.parent and prevp.parent.type in {syms.factor, syms.star_expr}:
+ elif (
+ prevp.parent
+ and prevp.parent.type in {syms.factor, syms.star_expr}
+ and prevp.type in MATH_OPERATORS
+ ):
+ return NO
+
+ elif (
+ prevp.type == token.RIGHTSHIFT
+ and prevp.parent
+ and prevp.parent.type == syms.shift_expr
+ and prevp.prev_sibling
+ and prevp.prev_sibling.type == token.NAME
+ and prevp.prev_sibling.value == 'print' # type: ignore
+ ):
+ # Python 2 print chevron
return NO
elif prev.type in OPENING_BRACKETS:
if not prev or prev.type != token.COMMA:
return NO
- if p.type == syms.varargslist:
+ elif p.type == syms.varargslist:
# lambdas
if t == token.RPAR:
return NO
Inline comments are emitted as regular token.COMMENT leaves. Standalone
comments are emitted with a fake STANDALONE_COMMENT token identifier.
"""
- if not leaf.prefix:
+ p = leaf.prefix
+ if not p:
return
- if '#' not in leaf.prefix:
+ if '#' not in p:
return
- before_comment, content = leaf.prefix.split('#', 1)
- content = content.rstrip()
- if content and (content[0] not in {' ', '!', '#'}):
- content = ' ' + content
- is_standalone_comment = (
- '\n' in before_comment or '\n' in content or leaf.type == token.ENDMARKER
- )
- if not is_standalone_comment:
- # simple trailing comment
- yield Leaf(token.COMMENT, value='#' + content)
- return
-
- for line in ('#' + content).split('\n'):
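+ # Count blank prefix lines between comments; they become the prefix of the next emitted comment leaf.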
+ nlines = 0
+ for index, line in enumerate(p.split('\n')):
line = line.lstrip()
+ if not line:
+ nlines += 1
if not line.startswith('#'):
continue
- yield Leaf(STANDALONE_COMMENT, line)
+ if index == 0 and leaf.type != token.ENDMARKER:
+ comment_type = token.COMMENT # simple trailing comment
+ else:
+ comment_type = STANDALONE_COMMENT
+ yield Leaf(comment_type, make_comment(line), prefix='\n' * nlines)
+
+ nlines = 0
+
+
+def make_comment(content: str) -> str:
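+ """Normalize a comment: strip trailing whitespace and put a space after the leading '#' unless the text starts with '!', ':', or another '#'."""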
+ content = content.rstrip()
+ if not content:
+ return '#'
+
+ if content[0] == '#':
+ content = content[1:]
+ if content and content[0] not in ' !:#':
+ content = ' ' + content
+ return '#' + content
def split_line(
you'll lose your voting rights.
"""
if not inside_brackets:
- spl = leaf.prefix.split('#', 1)
+ spl = leaf.prefix.split('#')
if '\\' not in spl[0]:
- nl_count = spl[0].count('\n')
+ nl_count = spl[-1].count('\n')
+ if len(spl) > 1:
+ nl_count -= 1
leaf.prefix = '\n' * nl_count
return
@dataclass
class Report:
"""Provides a reformatting counter."""
+ check: bool = False
change_count: int = 0
same_count: int = 0
failure_count: int = 0
def done(self, src: Path, changed: bool) -> None:
"""Increment the counter for successful reformatting. Write out a message."""
if changed:
- out(f'reformatted {src}')
+ reformatted = 'would reformat' if self.check else 'reformatted'
+ out(f'{reformatted} {src}')
self.change_count += 1
else:
out(f'{src} already well formatted, good job.', bold=False)
if self.failure_count:
return 123
- elif self.change_count:
+ elif self.change_count and self.check:
return 1
return 0
Use `click.unstyle` to remove colors.
"""
+ if self.check:
+ reformatted = "would be reformatted"
+ unchanged = "would be left unchanged"
+ failed = "would fail to reformat"
+ else:
+ reformatted = "reformatted"
+ unchanged = "left unchanged"
+ failed = "failed to reformat"
report = []
if self.change_count:
s = 's' if self.change_count > 1 else ''
report.append(
- click.style(f'{self.change_count} file{s} reformatted', bold=True)
+ click.style(f'{self.change_count} file{s} {reformatted}', bold=True)
)
if self.same_count:
s = 's' if self.same_count > 1 else ''
- report.append(f'{self.same_count} file{s} left unchanged')
+ report.append(f'{self.same_count} file{s} {unchanged}')
if self.failure_count:
s = 's' if self.failure_count > 1 else ''
report.append(
- click.style(
- f'{self.failure_count} file{s} failed to reformat', fg='red'
- )
+ click.style(f'{self.failure_count} file{s} {failed}', fg='red')
)
return ', '.join(report) + '.'
try:
src_ast = ast.parse(src)
except Exception as exc:
- raise AssertionError(f"cannot parse source: {exc}") from None
+ major, minor = sys.version_info[:2]
+ raise AssertionError(
+ f"cannot use --safe with this file; failed to parse source file "
+ f"with Python {major}.{minor}'s builtin AST. Re-run with --fast "
+ f"or stop using deprecated Python 2 syntax. AST error message: {exc}"
+ )
try:
dst_ast = ast.parse(dst)