From 9ce3c806e402abdc8a5383df0f0d1f82d930bd2e Mon Sep 17 00:00:00 2001 From: Richard Si <63936253+ichard26@users.noreply.github.com> Date: Mon, 14 Mar 2022 19:41:46 -0400 Subject: [PATCH 01/16] Bump mypy, flake8, and pre-commit-hooks in pre-commit (GH-2922) --- .pre-commit-config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index af3c5c2..b96bc62 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -31,8 +31,8 @@ repos: files: '(CHANGES\.md|the_basics\.md)$' additional_dependencies: *version_check_dependencies - - repo: https://gitlab.com/pycqa/flake8 - rev: 3.9.2 + - repo: https://github.com/pycqa/flake8 + rev: 4.0.1 hooks: - id: flake8 additional_dependencies: @@ -41,7 +41,7 @@ repos: - flake8-simplify - repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.910-1 + rev: v0.940 hooks: - id: mypy exclude: ^docs/conf.py @@ -60,7 +60,7 @@ repos: exclude: \.github/workflows/diff_shades\.yml - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 + rev: v4.1.0 hooks: - id: end-of-file-fixer - id: trailing-whitespace -- 2.39.5 From a57ab326b20b720518ab6f513bd0f8ba357d8d86 Mon Sep 17 00:00:00 2001 From: Richard Si <63936253+ichard26@users.noreply.github.com> Date: Tue, 15 Mar 2022 15:57:59 -0400 Subject: [PATCH 02/16] Farewell black-primer, it was nice knowing you (#2924) Enjoy your retirement at https://github.com/cooperlees/black-primer --- CHANGES.md | 2 + docs/contributing/gauging_changes.md | 6 - mypy.ini | 8 - setup.py | 4 +- src/black_primer/__init__.py | 0 src/black_primer/cli.py | 195 ------------ src/black_primer/lib.py | 423 --------------------------- src/black_primer/primer.json | 181 ------------ tests/test_format.py | 3 - tests/test_primer.py | 291 ------------------ 10 files changed, 3 insertions(+), 1110 deletions(-) delete mode 100644 src/black_primer/__init__.py delete mode 100644 src/black_primer/cli.py delete mode 100644 src/black_primer/lib.py delete mode 100644 src/black_primer/primer.json delete mode 100644 tests/test_primer.py diff --git a/CHANGES.md b/CHANGES.md index edca0dc..da51e94 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -50,6 +50,8 @@ - On Python 3.11 and newer, use the standard library's `tomllib` instead of `tomli` (#2903) +- `black-primer`, the deprecated internal devtool, has been removed and copied to a + [separate repository](https://github.com/cooperlees/black-primer) (#2924) ### Parser diff --git a/docs/contributing/gauging_changes.md b/docs/contributing/gauging_changes.md index 59c40eb..f28e811 100644 --- a/docs/contributing/gauging_changes.md +++ b/docs/contributing/gauging_changes.md @@ -7,12 +7,6 @@ It's recommended you evaluate the quantifiable changes your _Black_ formatting modification causes before submitting a PR. Think about if the change seems disruptive enough to cause frustration to projects that are already "black formatted". -## black-primer - -`black-primer` is an obsolete tool (now replaced with `diff-shades`) that was used to -gauge the impact of changes in _Black_ on open-source code. It is no longer used -internally and will be removed from the _Black_ repository in the future. - ## diff-shades diff-shades is a tool that runs _Black_ across a list of Git cloneable OSS projects diff --git a/mypy.ini b/mypy.ini index cfceaa3..3bb92a6 100644 --- a/mypy.ini +++ b/mypy.ini @@ -39,11 +39,3 @@ cache_dir=/dev/null # The following is because of `patch_click()`. Remove when # we drop Python 3.6 support. 
warn_unused_ignores=False - -[mypy-black_primer.*] -# Until we're not supporting 3.6 primer needs this -disallow_any_generics=False - -[mypy-tests.test_primer] -# Until we're not supporting 3.6 primer needs this -disallow_any_generics=False diff --git a/setup.py b/setup.py index 6b5b957..e23a58c 100644 --- a/setup.py +++ b/setup.py @@ -58,7 +58,7 @@ if USE_MYPYC: "black/__main__.py", ] discovered = [] - # black-primer and blackd have no good reason to be compiled. + # There's no good reason for blackd to be compiled. discovered.extend(find_python_files(src / "black")) discovered.extend(find_python_files(src / "blib2to3")) mypyc_targets = [ @@ -92,7 +92,6 @@ setup( package_data={ "blib2to3": ["*.txt"], "black": ["py.typed"], - "black_primer": ["primer.json"], }, python_requires=">=3.6.2", zip_safe=False, @@ -132,7 +131,6 @@ setup( "console_scripts": [ "black=black:patched_main", "blackd=blackd:patched_main [d]", - "black-primer=black_primer.cli:main", ] }, ) diff --git a/src/black_primer/__init__.py b/src/black_primer/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/black_primer/cli.py b/src/black_primer/cli.py deleted file mode 100644 index 8524b59..0000000 --- a/src/black_primer/cli.py +++ /dev/null @@ -1,195 +0,0 @@ -# coding=utf8 - -import asyncio -import json -import logging -import sys -from datetime import datetime -from pathlib import Path -from shutil import rmtree, which -from tempfile import gettempdir -from typing import Any, List, Optional, Union - -import click - -from black_primer import lib - -# If our environment has uvloop installed lets use it -try: - import uvloop - - uvloop.install() -except ImportError: - pass - - -DEFAULT_CONFIG = Path(__file__).parent / "primer.json" -_timestamp = datetime.now().strftime("%Y%m%d%H%M%S") -DEFAULT_WORKDIR = Path(gettempdir()) / f"primer.{_timestamp}" -LOG = logging.getLogger(__name__) - - -def _handle_debug( - ctx: Optional[click.core.Context], - param: Optional[Union[click.core.Option, click.core.Parameter]], - debug: Union[bool, int, str], -) -> Union[bool, int, str]: - """Turn on debugging if asked otherwise INFO default""" - log_level = logging.DEBUG if debug else logging.INFO - logging.basicConfig( - format="[%(asctime)s] %(levelname)s: %(message)s (%(filename)s:%(lineno)d)", - level=log_level, - ) - return debug - - -def load_projects(config_path: Path) -> List[str]: - with open(config_path) as config: - return sorted(json.load(config)["projects"].keys()) - - -# Unfortunately does import time file IO - but appears to be the only -# way to get `black-primer --help` to show projects list -DEFAULT_PROJECTS = load_projects(DEFAULT_CONFIG) - - -def _projects_callback( - ctx: click.core.Context, - param: Optional[Union[click.core.Option, click.core.Parameter]], - projects: str, -) -> List[str]: - requested_projects = set(projects.split(",")) - available_projects = set( - DEFAULT_PROJECTS - if str(DEFAULT_CONFIG) == ctx.params["config"] - else load_projects(ctx.params["config"]) - ) - - unavailable = requested_projects - available_projects - if unavailable: - LOG.error(f"Projects not found: {unavailable}. 
Available: {available_projects}") - - return sorted(requested_projects & available_projects) - - -async def async_main( - config: str, - debug: bool, - keep: bool, - long_checkouts: bool, - no_diff: bool, - projects: List[str], - rebase: bool, - workdir: str, - workers: int, -) -> int: - work_path = Path(workdir) - if not work_path.exists(): - LOG.debug(f"Creating {work_path}") - work_path.mkdir() - - if not which("black"): - LOG.error("Can not find 'black' executable in PATH. No point in running") - return -1 - - try: - ret_val = await lib.process_queue( - config, - work_path, - workers, - projects, - keep, - long_checkouts, - rebase, - no_diff, - ) - return int(ret_val) - - finally: - if not keep and work_path.exists(): - LOG.debug(f"Removing {work_path}") - rmtree(work_path, onerror=lib.handle_PermissionError) - - -@click.command(context_settings={"help_option_names": ["-h", "--help"]}) -@click.option( - "-c", - "--config", - default=str(DEFAULT_CONFIG), - type=click.Path(exists=True), - show_default=True, - help="JSON config file path", - # Eager - because config path is used by other callback options - is_eager=True, -) -@click.option( - "--debug", - is_flag=True, - callback=_handle_debug, - show_default=True, - help="Turn on debug logging", -) -@click.option( - "-k", - "--keep", - is_flag=True, - show_default=True, - help="Keep workdir + repos post run", -) -@click.option( - "-L", - "--long-checkouts", - is_flag=True, - show_default=True, - help="Pull big projects to test", -) -@click.option( - "--no-diff", - is_flag=True, - show_default=True, - help="Disable showing source file changes in black output", -) -@click.option( - "--projects", - default=",".join(DEFAULT_PROJECTS), - callback=_projects_callback, - show_default=True, - help="Comma separated list of projects to run", -) -@click.option( - "-R", - "--rebase", - is_flag=True, - show_default=True, - help="Rebase project if already checked out", -) -@click.option( - "-w", - "--workdir", - default=str(DEFAULT_WORKDIR), - type=click.Path(exists=False), - show_default=True, - help="Directory path for repo checkouts", -) -@click.option( - "-W", - "--workers", - default=2, - type=int, - show_default=True, - help="Number of parallel worker coroutines", -) -@click.pass_context -def main(ctx: click.core.Context, **kwargs: Any) -> None: - """primer - prime projects for blackening... 
🏴""" - LOG.debug(f"Starting {sys.argv[0]}") - # TODO: Change to asyncio.run when Black >= 3.7 only - loop = asyncio.get_event_loop() - try: - ctx.exit(loop.run_until_complete(async_main(**kwargs))) - finally: - loop.close() - - -if __name__ == "__main__": # pragma: nocover - main() diff --git a/src/black_primer/lib.py b/src/black_primer/lib.py deleted file mode 100644 index 13724f4..0000000 --- a/src/black_primer/lib.py +++ /dev/null @@ -1,423 +0,0 @@ -import asyncio -import errno -import json -import logging -import os -import stat -import sys -from functools import partial -from pathlib import Path -from platform import system -from shutil import rmtree, which -from subprocess import CalledProcessError -from sys import version_info -from tempfile import TemporaryDirectory -from typing import ( - Any, - Callable, - Dict, - List, - NamedTuple, - Optional, - Sequence, - Tuple, - Union, -) -from urllib.parse import urlparse - -import click - - -TEN_MINUTES_SECONDS = 600 -WINDOWS = system() == "Windows" -BLACK_BINARY = "black.exe" if WINDOWS else "black" -GIT_BINARY = "git.exe" if WINDOWS else "git" -LOG = logging.getLogger(__name__) - - -# Windows needs a ProactorEventLoop if you want to exec subprocesses -# Starting with 3.8 this is the default - can remove when Black >= 3.8 -# mypy only respects sys.platform if directly in the evaluation -# https://mypy.readthedocs.io/en/latest/common_issues.html#python-version-and-system-platform-checks # noqa: B950 -if sys.platform == "win32": - asyncio.set_event_loop(asyncio.ProactorEventLoop()) - - -class Results(NamedTuple): - stats: Dict[str, int] = {} - failed_projects: Dict[str, CalledProcessError] = {} - - -async def _gen_check_output( - cmd: Sequence[str], - timeout: float = TEN_MINUTES_SECONDS, - env: Optional[Dict[str, str]] = None, - cwd: Optional[Path] = None, - stdin: Optional[bytes] = None, -) -> Tuple[bytes, bytes]: - process = await asyncio.create_subprocess_exec( - *cmd, - stdin=asyncio.subprocess.PIPE, - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.STDOUT, - env=env, - cwd=cwd, - ) - try: - (stdout, stderr) = await asyncio.wait_for(process.communicate(stdin), timeout) - except asyncio.TimeoutError: - process.kill() - await process.wait() - raise - - # A non-optional timeout was supplied to asyncio.wait_for, guaranteeing - # a timeout or completed process. A terminated Python process will have a - # non-empty returncode value. 
- assert process.returncode is not None - - if process.returncode != 0: - cmd_str = " ".join(cmd) - raise CalledProcessError( - process.returncode, cmd_str, output=stdout, stderr=stderr - ) - - return (stdout, stderr) - - -def analyze_results(project_count: int, results: Results) -> int: - failed_pct = round(((results.stats["failed"] / project_count) * 100), 2) - success_pct = round(((results.stats["success"] / project_count) * 100), 2) - - if results.failed_projects: - click.secho("\nFailed projects:\n", bold=True) - - for project_name, project_cpe in results.failed_projects.items(): - print(f"## {project_name}:") - print(f" - Returned {project_cpe.returncode}") - if project_cpe.stderr: - print(f" - stderr:\n{project_cpe.stderr.decode('utf8')}") - if project_cpe.stdout: - print(f" - stdout:\n{project_cpe.stdout.decode('utf8')}") - print("") - - click.secho("-- primer results 📊 --\n", bold=True) - click.secho( - f"{results.stats['success']} / {project_count} succeeded ({success_pct}%) ✅", - bold=True, - fg="green", - ) - click.secho( - f"{results.stats['failed']} / {project_count} FAILED ({failed_pct}%) 💩", - bold=bool(results.stats["failed"]), - fg="red", - ) - s = "" if results.stats["disabled"] == 1 else "s" - click.echo(f" - {results.stats['disabled']} project{s} disabled by config") - s = "" if results.stats["wrong_py_ver"] == 1 else "s" - click.echo( - f" - {results.stats['wrong_py_ver']} project{s} skipped due to Python version" - ) - click.echo( - f" - {results.stats['skipped_long_checkout']} skipped due to long checkout" - ) - - if results.failed_projects: - failed = ", ".join(results.failed_projects.keys()) - click.secho(f"\nFailed projects: {failed}\n", bold=True) - - return results.stats["failed"] - - -def _flatten_cli_args(cli_args: List[Union[Sequence[str], str]]) -> List[str]: - """Allow a user to put long arguments into a list of strs - to make the JSON human readable""" - flat_args = [] - for arg in cli_args: - if isinstance(arg, str): - flat_args.append(arg) - continue - - args_as_str = "".join(arg) - flat_args.append(args_as_str) - - return flat_args - - -async def black_run( - project_name: str, - repo_path: Optional[Path], - project_config: Dict[str, Any], - results: Results, - no_diff: bool = False, -) -> None: - """Run Black and record failures""" - if not repo_path: - results.stats["failed"] += 1 - results.failed_projects[project_name] = CalledProcessError( - 69, [], f"{project_name} has no repo_path: {repo_path}".encode(), b"" - ) - return - - stdin_test = project_name.upper() == "STDIN" - cmd = [str(which(BLACK_BINARY))] - if "cli_arguments" in project_config and project_config["cli_arguments"]: - cmd.extend(_flatten_cli_args(project_config["cli_arguments"])) - cmd.append("--check") - if not no_diff: - cmd.append("--diff") - - # Workout if we should read in a python file or search from cwd - stdin = None - if stdin_test: - cmd.append("-") - stdin = repo_path.read_bytes() - elif "base_path" in project_config: - cmd.append(project_config["base_path"]) - else: - cmd.append(".") - - timeout = ( - project_config["timeout_seconds"] - if "timeout_seconds" in project_config - else TEN_MINUTES_SECONDS - ) - with TemporaryDirectory() as tmp_path: - # Prevent reading top-level user configs by manipulating environment variables - env = { - **os.environ, - "XDG_CONFIG_HOME": tmp_path, # Unix-like - "USERPROFILE": tmp_path, # Windows (changes `Path.home()` output) - } - - cwd_path = repo_path.parent if stdin_test else repo_path - try: - LOG.debug(f"Running black for 
{project_name}: {' '.join(cmd)}") - _stdout, _stderr = await _gen_check_output( - cmd, cwd=cwd_path, env=env, stdin=stdin, timeout=timeout - ) - except asyncio.TimeoutError: - results.stats["failed"] += 1 - LOG.error(f"Running black for {repo_path} timed out ({cmd})") - except CalledProcessError as cpe: - # TODO: Tune for smarter for higher signal - # If any other return value than 1 we raise - can disable project in config - if cpe.returncode == 1: - if not project_config["expect_formatting_changes"]: - results.stats["failed"] += 1 - results.failed_projects[repo_path.name] = cpe - else: - results.stats["success"] += 1 - return - elif cpe.returncode > 1: - results.stats["failed"] += 1 - results.failed_projects[repo_path.name] = cpe - return - - LOG.error(f"Unknown error with {repo_path}") - raise - - # If we get here and expect formatting changes something is up - if project_config["expect_formatting_changes"]: - results.stats["failed"] += 1 - results.failed_projects[repo_path.name] = CalledProcessError( - 0, cmd, b"Expected formatting changes but didn't get any!", b"" - ) - return - - results.stats["success"] += 1 - - -async def git_checkout_or_rebase( - work_path: Path, - project_config: Dict[str, Any], - rebase: bool = False, - *, - depth: int = 1, -) -> Optional[Path]: - """git Clone project or rebase""" - git_bin = str(which(GIT_BINARY)) - if not git_bin: - LOG.error("No git binary found") - return None - - repo_url_parts = urlparse(project_config["git_clone_url"]) - path_parts = repo_url_parts.path[1:].split("/", maxsplit=1) - - repo_path: Path = work_path / path_parts[1].replace(".git", "") - cmd = [git_bin, "clone", "--depth", str(depth), project_config["git_clone_url"]] - cwd = work_path - if repo_path.exists() and rebase: - cmd = [git_bin, "pull", "--rebase"] - cwd = repo_path - elif repo_path.exists(): - return repo_path - - try: - _stdout, _stderr = await _gen_check_output(cmd, cwd=cwd) - except (asyncio.TimeoutError, CalledProcessError) as e: - LOG.error(f"Unable to git clone / pull {project_config['git_clone_url']}: {e}") - return None - - return repo_path - - -def handle_PermissionError( - func: Callable[..., None], path: Path, exc: Tuple[Any, Any, Any] -) -> None: - """ - Handle PermissionError during shutil.rmtree. - - This checks if the erroring function is either 'os.rmdir' or 'os.unlink', and that - the error was EACCES (i.e. Permission denied). If true, the path is set writable, - readable, and executable by everyone. Finally, it tries the error causing delete - operation again. - - If the check is false, then the original error will be reraised as this function - can't handle it. - """ - excvalue = exc[1] - LOG.debug(f"Handling {excvalue} from {func.__name__}... ") - if func in (os.rmdir, os.unlink) and excvalue.errno == errno.EACCES: - LOG.debug(f"Setting {path} writable, readable, and executable by everyone... ") - os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) # chmod 0777 - func(path) # Try the error causing delete operation again - else: - raise - - -async def load_projects_queue( - config_path: Path, - projects_to_run: List[str], -) -> Tuple[Dict[str, Any], asyncio.Queue]: - """Load project config and fill queue with all the project names""" - with config_path.open("r") as cfp: - config = json.load(cfp) - - # TODO: Offer more options here - # e.g. Run on X random packages etc. 
- queue: asyncio.Queue = asyncio.Queue(maxsize=len(projects_to_run)) - for project in projects_to_run: - await queue.put(project) - - return config, queue - - -async def project_runner( - idx: int, - config: Dict[str, Any], - queue: asyncio.Queue, - work_path: Path, - results: Results, - long_checkouts: bool = False, - rebase: bool = False, - keep: bool = False, - no_diff: bool = False, -) -> None: - """Check out project and run Black on it + record result""" - loop = asyncio.get_event_loop() - py_version = f"{version_info[0]}.{version_info[1]}" - while True: - try: - project_name = queue.get_nowait() - except asyncio.QueueEmpty: - LOG.debug(f"project_runner {idx} exiting") - return - LOG.debug(f"worker {idx} working on {project_name}") - - project_config = config["projects"][project_name] - - # Check if disabled by config - if "disabled" in project_config and project_config["disabled"]: - results.stats["disabled"] += 1 - LOG.info(f"Skipping {project_name} as it's disabled via config") - continue - - # Check if we should run on this version of Python - if ( - "all" not in project_config["py_versions"] - and py_version not in project_config["py_versions"] - ): - results.stats["wrong_py_ver"] += 1 - LOG.debug(f"Skipping {project_name} as it's not enabled for {py_version}") - continue - - # Check if we're doing big projects / long checkouts - if not long_checkouts and project_config["long_checkout"]: - results.stats["skipped_long_checkout"] += 1 - LOG.debug(f"Skipping {project_name} as it's configured as a long checkout") - continue - - repo_path: Optional[Path] = Path(__file__) - stdin_project = project_name.upper() == "STDIN" - if not stdin_project: - repo_path = await git_checkout_or_rebase(work_path, project_config, rebase) - if not repo_path: - continue - await black_run(project_name, repo_path, project_config, results, no_diff) - - if not keep and not stdin_project: - LOG.debug(f"Removing {repo_path}") - rmtree_partial = partial( - rmtree, path=repo_path, onerror=handle_PermissionError - ) - await loop.run_in_executor(None, rmtree_partial) - - LOG.info(f"Finished {project_name}") - - -async def process_queue( - config_file: str, - work_path: Path, - workers: int, - projects_to_run: List[str], - keep: bool = False, - long_checkouts: bool = False, - rebase: bool = False, - no_diff: bool = False, -) -> int: - """ - Process the queue with X workers and evaluate results - - Success is guaged via the config "expect_formatting_changes" - - Integer return equals the number of failed projects - """ - results = Results() - results.stats["disabled"] = 0 - results.stats["failed"] = 0 - results.stats["skipped_long_checkout"] = 0 - results.stats["success"] = 0 - results.stats["wrong_py_ver"] = 0 - - config, queue = await load_projects_queue(Path(config_file), projects_to_run) - project_count = queue.qsize() - s = "" if project_count == 1 else "s" - LOG.info(f"{project_count} project{s} to run Black over") - if project_count < 1: - return -1 - - s = "" if workers == 1 else "s" - LOG.debug(f"Using {workers} parallel worker{s} to run Black") - # Wait until we finish running all the projects before analyzing - await asyncio.gather( - *[ - project_runner( - i, - config, - queue, - work_path, - results, - long_checkouts, - rebase, - keep, - no_diff, - ) - for i in range(workers) - ] - ) - - LOG.info("Analyzing results") - return analyze_results(project_count, results) - - -if __name__ == "__main__": # pragma: nocover - raise NotImplementedError("lib is a library, funnily enough.") diff --git 
a/src/black_primer/primer.json b/src/black_primer/primer.json deleted file mode 100644 index a6bfd4a..0000000 --- a/src/black_primer/primer.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "configuration_format_version": 20210815, - "projects": { - "STDIN": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": false, - "git_clone_url": "", - "long_checkout": false, - "py_versions": ["all"] - }, - "aioexabgp": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": false, - "git_clone_url": "https://github.com/cooperlees/aioexabgp.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "attrs": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/python-attrs/attrs.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "bandersnatch": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/pypa/bandersnatch.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "channels": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/django/channels.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "cpython": { - "disabled": true, - "disabled_reason": "To big / slow for GitHub Actions but handy to keep config to use manually or in some other CI in the future", - "base_path": "Lib", - "cli_arguments": [ - "--experimental-string-processing", - "--extend-exclude", - [ - "Lib/lib2to3/tests/data/different_encoding.py", - "|Lib/lib2to3/tests/data/false_encoding.py", - "|Lib/lib2to3/tests/data/py2_test_grammar.py", - "|Lib/test/bad_coding.py", - "|Lib/test/bad_coding2.py", - "|Lib/test/badsyntax_3131.py", - "|Lib/test/badsyntax_pep3120.py", - "|Lib/test/test_base64.py", - "|Lib/test/test_exceptions.py", - "|Lib/test/test_grammar.py", - "|Lib/test/test_named_expressions.py", - "|Lib/test/test_patma.py", - "|Lib/test/test_tokenize.py", - "|Lib/test/test_xml_etree.py", - "|Lib/traceback.py" - ] - ], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/python/cpython.git", - "long_checkout": false, - "py_versions": ["3.9", "3.10"], - "timeout_seconds": 900 - }, - "django": { - "cli_arguments": [ - "--experimental-string-processing", - "--skip-string-normalization", - "--extend-exclude", - "/((docs|scripts)/|django/forms/models.py|tests/gis_tests/test_spatialrefsys.py|tests/test_runner_apps/tagged/tests_syntax_error.py)" - ], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/django/django.git", - "long_checkout": false, - "py_versions": ["3.8", "3.9", "3.10"] - }, - "flake8-bugbear": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/PyCQA/flake8-bugbear.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "hypothesis": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/HypothesisWorks/hypothesis.git", - "long_checkout": false, - "py_versions": ["3.8", "3.9", "3.10"] - }, - "pandas": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/pandas-dev/pandas.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "pillow": { - "cli_arguments": 
["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/python-pillow/Pillow.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "poetry": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": false, - "git_clone_url": "https://github.com/python-poetry/poetry.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "pyanalyze": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": false, - "git_clone_url": "https://github.com/quora/pyanalyze.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "pyramid": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/Pylons/pyramid.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "ptr": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": false, - "git_clone_url": "https://github.com/facebookincubator/ptr.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "pytest": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/pytest-dev/pytest.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "scikit-lego": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/koaning/scikit-lego", - "long_checkout": false, - "py_versions": ["all"] - }, - "tox": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/tox-dev/tox.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "typeshed": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/python/typeshed.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "virtualenv": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/pypa/virtualenv.git", - "long_checkout": false, - "py_versions": ["all"] - }, - "warehouse": { - "cli_arguments": ["--experimental-string-processing"], - "expect_formatting_changes": true, - "git_clone_url": "https://github.com/pypa/warehouse.git", - "long_checkout": false, - "py_versions": ["all"] - } - } -} diff --git a/tests/test_format.py b/tests/test_format.py index 04eda43..269bbac 100644 --- a/tests/test_format.py +++ b/tests/test_format.py @@ -103,8 +103,6 @@ SOURCES: List[str] = [ "src/black/strings.py", "src/black/trans.py", "src/blackd/__init__.py", - "src/black_primer/cli.py", - "src/black_primer/lib.py", "src/blib2to3/pygram.py", "src/blib2to3/pytree.py", "src/blib2to3/pgen2/conv.py", @@ -119,7 +117,6 @@ SOURCES: List[str] = [ "tests/test_black.py", "tests/test_blackd.py", "tests/test_format.py", - "tests/test_primer.py", "tests/optional.py", "tests/util.py", "tests/conftest.py", diff --git a/tests/test_primer.py b/tests/test_primer.py deleted file mode 100644 index 0a9d2ae..0000000 --- a/tests/test_primer.py +++ /dev/null @@ -1,291 +0,0 @@ -#!/usr/bin/env python3 - -import asyncio -import sys -import unittest -from contextlib import contextmanager -from copy import deepcopy -from io import StringIO -from os import getpid -from pathlib import Path -from platform import system -from pytest import LogCaptureFixture -from subprocess import CalledProcessError 
-from tempfile import TemporaryDirectory, gettempdir -from typing import Any, Callable, Iterator, List, Tuple, TypeVar -from unittest.mock import Mock, patch - -from click.testing import CliRunner - -from black_primer import cli, lib - - -EXPECTED_ANALYSIS_OUTPUT = """\ - -Failed projects: - -## black: - - Returned 69 - - stdout: -Black didn't work - --- primer results 📊 -- - -68 / 69 succeeded (98.55%) ✅ -1 / 69 FAILED (1.45%) 💩 - - 0 projects disabled by config - - 0 projects skipped due to Python version - - 0 skipped due to long checkout - -Failed projects: black - -""" -FAKE_PROJECT_CONFIG = { - "cli_arguments": ["--unittest"], - "expect_formatting_changes": False, - "git_clone_url": "https://github.com/psf/black.git", -} - - -@contextmanager -def capture_stdout( - command: Callable[..., Any], *args: Any, **kwargs: Any -) -> Iterator[str]: - old_stdout, sys.stdout = sys.stdout, StringIO() - try: - command(*args, **kwargs) - sys.stdout.seek(0) - yield sys.stdout.read() - finally: - sys.stdout = old_stdout - - -@contextmanager -def event_loop() -> Iterator[None]: - policy = asyncio.get_event_loop_policy() - loop = policy.new_event_loop() - asyncio.set_event_loop(loop) - if sys.platform == "win32": - asyncio.set_event_loop(asyncio.ProactorEventLoop()) - try: - yield - finally: - loop.close() - - -async def raise_subprocess_error_1(*args: Any, **kwargs: Any) -> None: - raise CalledProcessError(1, ["unittest", "error"], b"", b"") - - -async def raise_subprocess_error_123(*args: Any, **kwargs: Any) -> None: - raise CalledProcessError(123, ["unittest", "error"], b"", b"") - - -async def return_false(*args: Any, **kwargs: Any) -> bool: - return False - - -async def return_subproccess_output(*args: Any, **kwargs: Any) -> Tuple[bytes, bytes]: - return (b"stdout", b"stderr") - - -async def return_zero(*args: Any, **kwargs: Any) -> int: - return 0 - - -if sys.version_info >= (3, 9): - T = TypeVar("T") - Q = asyncio.Queue[T] -else: - T = Any - Q = asyncio.Queue - - -def collect(queue: Q) -> List[T]: - ret = [] - while True: - try: - item = queue.get_nowait() - ret.append(item) - except asyncio.QueueEmpty: - return ret - - -class PrimerLibTests(unittest.TestCase): - def test_analyze_results(self) -> None: - fake_results = lib.Results( - { - "disabled": 0, - "failed": 1, - "skipped_long_checkout": 0, - "success": 68, - "wrong_py_ver": 0, - }, - {"black": CalledProcessError(69, ["black"], b"Black didn't work", b"")}, - ) - with capture_stdout(lib.analyze_results, 69, fake_results) as analyze_stdout: - self.assertEqual(EXPECTED_ANALYSIS_OUTPUT, analyze_stdout) - - @event_loop() - def test_black_run(self) -> None: - """Pretend to run Black to ensure we cater for all scenarios""" - loop = asyncio.get_event_loop() - project_name = "unittest" - repo_path = Path(gettempdir()) - project_config = deepcopy(FAKE_PROJECT_CONFIG) - results = lib.Results({"failed": 0, "success": 0}, {}) - - # Test a successful Black run - with patch("black_primer.lib._gen_check_output", return_subproccess_output): - loop.run_until_complete( - lib.black_run(project_name, repo_path, project_config, results) - ) - self.assertEqual(1, results.stats["success"]) - self.assertFalse(results.failed_projects) - - # Test a fail based on expecting formatting changes but not getting any - project_config["expect_formatting_changes"] = True - results = lib.Results({"failed": 0, "success": 0}, {}) - with patch("black_primer.lib._gen_check_output", return_subproccess_output): - loop.run_until_complete( - lib.black_run(project_name, repo_path, 
project_config, results) - ) - self.assertEqual(1, results.stats["failed"]) - self.assertTrue(results.failed_projects) - - # Test a fail based on returning 1 and not expecting formatting changes - project_config["expect_formatting_changes"] = False - results = lib.Results({"failed": 0, "success": 0}, {}) - with patch("black_primer.lib._gen_check_output", raise_subprocess_error_1): - loop.run_until_complete( - lib.black_run(project_name, repo_path, project_config, results) - ) - self.assertEqual(1, results.stats["failed"]) - self.assertTrue(results.failed_projects) - - # Test a formatting error based on returning 123 - with patch("black_primer.lib._gen_check_output", raise_subprocess_error_123): - loop.run_until_complete( - lib.black_run(project_name, repo_path, project_config, results) - ) - self.assertEqual(2, results.stats["failed"]) - - def test_flatten_cli_args(self) -> None: - fake_long_args = ["--arg", ["really/", "|long", "|regex", "|splitup"], "--done"] - expected = ["--arg", "really/|long|regex|splitup", "--done"] - self.assertEqual(expected, lib._flatten_cli_args(fake_long_args)) - - @event_loop() - def test_gen_check_output(self) -> None: - loop = asyncio.get_event_loop() - stdout, stderr = loop.run_until_complete( - lib._gen_check_output([lib.BLACK_BINARY, "--help"]) - ) - self.assertIn("The uncompromising code formatter", stdout.decode("utf8")) - self.assertEqual(None, stderr) - - # TODO: Add a test to see failure works on Windows - if lib.WINDOWS: - return - - false_bin = "/usr/bin/false" if system() == "Darwin" else "/bin/false" - with self.assertRaises(CalledProcessError): - loop.run_until_complete(lib._gen_check_output([false_bin])) - - with self.assertRaises(asyncio.TimeoutError): - loop.run_until_complete( - lib._gen_check_output(["/bin/sleep", "2"], timeout=0.1) - ) - - @event_loop() - def test_git_checkout_or_rebase(self) -> None: - loop = asyncio.get_event_loop() - project_config = deepcopy(FAKE_PROJECT_CONFIG) - work_path = Path(gettempdir()) - - expected_repo_path = work_path / "black" - with patch("black_primer.lib._gen_check_output", return_subproccess_output): - returned_repo_path = loop.run_until_complete( - lib.git_checkout_or_rebase(work_path, project_config) - ) - self.assertEqual(expected_repo_path, returned_repo_path) - - @patch("sys.stdout", new_callable=StringIO) - @event_loop() - def test_process_queue(self, mock_stdout: Mock) -> None: - """Test the process queue on primer itself - - If you have non black conforming formatting in primer itself this can fail""" - loop = asyncio.get_event_loop() - config_path = Path(lib.__file__).parent / "primer.json" - with patch("black_primer.lib.git_checkout_or_rebase", return_false): - with TemporaryDirectory() as td: - return_val = loop.run_until_complete( - lib.process_queue( - str(config_path), Path(td), 2, ["django", "pyramid"] - ) - ) - self.assertEqual(0, return_val) - - @event_loop() - def test_load_projects_queue(self) -> None: - """Test the process queue on primer itself - - If you have non black conforming formatting in primer itself this can fail""" - loop = asyncio.get_event_loop() - config_path = Path(lib.__file__).parent / "primer.json" - - config, projects_queue = loop.run_until_complete( - lib.load_projects_queue(config_path, ["django", "pyramid"]) - ) - projects = collect(projects_queue) - self.assertEqual(projects, ["django", "pyramid"]) - - -class PrimerCLITests(unittest.TestCase): - @event_loop() - def test_async_main(self) -> None: - loop = asyncio.get_event_loop() - work_dir = Path(gettempdir()) 
/ f"primer_ut_{getpid()}" - args = { - "config": "/config", - "debug": False, - "keep": False, - "long_checkouts": False, - "rebase": False, - "workdir": str(work_dir), - "workers": 69, - "no_diff": False, - "projects": "", - } - with patch("black_primer.cli.lib.process_queue", return_zero): - return_val = loop.run_until_complete(cli.async_main(**args)) # type: ignore - self.assertEqual(0, return_val) - - def test_handle_debug(self) -> None: - self.assertTrue(cli._handle_debug(None, None, True)) - - def test_help_output(self) -> None: - runner = CliRunner() - result = runner.invoke(cli.main, ["--help"]) - self.assertEqual(result.exit_code, 0) - - -def test_projects(caplog: LogCaptureFixture) -> None: - with event_loop(): - runner = CliRunner() - result = runner.invoke(cli.main, ["--projects=STDIN,asdf"]) - assert result.exit_code == 0 - assert "1 / 1 succeeded" in result.output - assert "Projects not found: {'asdf'}" in caplog.text - - caplog.clear() - - with event_loop(): - runner = CliRunner() - result = runner.invoke(cli.main, ["--projects=fdsa,STDIN"]) - assert result.exit_code == 0 - assert "1 / 1 succeeded" in result.output - assert "Projects not found: {'fdsa'}" in caplog.text - - -if __name__ == "__main__": - unittest.main() -- 2.39.5 From 086ae68076de570b0cb1881a3c3b9da592b46ee0 Mon Sep 17 00:00:00 2001 From: Shivansh-007 Date: Wed, 16 Mar 2022 08:43:56 +0530 Subject: [PATCH 03/16] Remove power hugging formatting from preview (#2928) It is falsely placed in preview features and always formats the power operators, it was added in #2789 but there is no check for formatting added along with it. --- src/black/mode.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/black/mode.py b/src/black/mode.py index 455ed36..35a072c 100644 --- a/src/black/mode.py +++ b/src/black/mode.py @@ -127,7 +127,6 @@ class Preview(Enum): """Individual preview style features.""" string_processing = auto() - hug_simple_powers = auto() class Deprecated(UserWarning): @@ -163,7 +162,9 @@ class Mode: """ if feature is Preview.string_processing: return self.preview or self.experimental_string_processing - return self.preview + # TODO: Remove type ignore comment once preview contains more features + # than just ESP + return self.preview # type: ignore def get_cache_key(self) -> str: if self.target_versions: -- 2.39.5 From fa7f01592b02229ff47f3bcab39a9b7d6c59f07c Mon Sep 17 00:00:00 2001 From: Joseph Young <80432516+jpy-git@users.noreply.github.com> Date: Wed, 16 Mar 2022 17:00:30 +0000 Subject: [PATCH 04/16] Update pylint config docs (#2931) --- CHANGES.md | 2 ++ docs/compatible_configs/pylint/pylintrc | 3 -- docs/compatible_configs/pylint/pyproject.toml | 3 -- docs/compatible_configs/pylint/setup.cfg | 3 -- docs/guides/using_black_with_other_tools.md | 32 +++---------------- 5 files changed, 6 insertions(+), 37 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index da51e94..bb3ccb9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -32,6 +32,8 @@ +- Update pylint config documentation (#2931) + ### Integrations diff --git a/docs/compatible_configs/pylint/pylintrc b/docs/compatible_configs/pylint/pylintrc index 7abddd2..e863488 100644 --- a/docs/compatible_configs/pylint/pylintrc +++ b/docs/compatible_configs/pylint/pylintrc @@ -1,5 +1,2 @@ -[MESSAGES CONTROL] -disable = C0330, C0326 - [format] max-line-length = 88 diff --git a/docs/compatible_configs/pylint/pyproject.toml b/docs/compatible_configs/pylint/pyproject.toml index 49ad7a2..ef51f98 100644 --- 
a/docs/compatible_configs/pylint/pyproject.toml +++ b/docs/compatible_configs/pylint/pyproject.toml @@ -1,5 +1,2 @@ -[tool.pylint.messages_control] -disable = "C0330, C0326" - [tool.pylint.format] max-line-length = "88" diff --git a/docs/compatible_configs/pylint/setup.cfg b/docs/compatible_configs/pylint/setup.cfg index 3ada245..0b754cd 100644 --- a/docs/compatible_configs/pylint/setup.cfg +++ b/docs/compatible_configs/pylint/setup.cfg @@ -1,5 +1,2 @@ [pylint] max-line-length = 88 - -[pylint.messages_control] -disable = C0330, C0326 diff --git a/docs/guides/using_black_with_other_tools.md b/docs/guides/using_black_with_other_tools.md index bde99f7..1d380bd 100644 --- a/docs/guides/using_black_with_other_tools.md +++ b/docs/guides/using_black_with_other_tools.md @@ -210,31 +210,16 @@ mixed feelings about _Black_'s formatting style. #### Configuration ``` -disable = C0326, C0330 max-line-length = 88 ``` #### Why those options above? -When _Black_ is folding very long expressions, the closing brackets will -[be dedented](../the_black_code_style/current_style.md#how-black-wraps-lines). +Pylint should be configured to only complain about lines that surpass `88` characters +via `max-line-length = 88`. -```py3 -ImportantClass.important_method( - exc, limit, lookup_lines, capture_locals, callback -) -``` - -Although this style is PEP 8 compliant, Pylint will raise -`C0330: Wrong hanging indentation before block (add 4 spaces)` warnings. Since _Black_ -isn't configurable on this style, Pylint should be told to ignore these warnings via -`disable = C0330`. - -Also, since _Black_ deals with whitespace around operators and brackets, Pylint's -warning `C0326: Bad whitespace` should be disabled using `disable = C0326`. - -And as usual, Pylint should be configured to only complain about lines that surpass `88` -characters via `max-line-length = 88`. +If using `pylint<2.6.0`, also disable `C0326` and `C0330` as these are incompatible with +_Black_ formatting and have since been removed. #### Formats @@ -242,9 +227,6 @@ characters via `max-line-length = 88`. 
pylintrc ```ini -[MESSAGES CONTROL] -disable = C0326, C0330 - [format] max-line-length = 88 ``` @@ -257,9 +239,6 @@ max-line-length = 88 ```cfg [pylint] max-line-length = 88 - -[pylint.messages_control] -disable = C0326, C0330 ``` @@ -268,9 +247,6 @@ disable = C0326, C0330 pyproject.toml ```toml -[tool.pylint.messages_control] -disable = "C0326, C0330" - [tool.pylint.format] max-line-length = "88" ``` -- 2.39.5 From f87df0e3c8735de416b6392ce7f21c6ba194424d Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Mon, 21 Mar 2022 21:51:07 +0000 Subject: [PATCH 05/16] dont skip formatting #%% (#2919) Fixes #2588 --- CHANGES.md | 2 ++ src/black/__init__.py | 2 +- src/black/comments.py | 48 ++++++++++++++++++++++++----------------- src/black/linegen.py | 16 ++++++++------ tests/data/comments8.py | 15 +++++++++++++ tests/test_format.py | 1 + 6 files changed, 57 insertions(+), 27 deletions(-) create mode 100644 tests/data/comments8.py diff --git a/CHANGES.md b/CHANGES.md index bb3ccb9..d0faf7c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -14,6 +14,8 @@ +- Code cell separators `#%%` are now standardised to `# %%` (#2919) + ### _Blackd_ diff --git a/src/black/__init__.py b/src/black/__init__.py index c4ec99b..51e31e9 100644 --- a/src/black/__init__.py +++ b/src/black/__init__.py @@ -1166,7 +1166,7 @@ def _format_str_once(src_contents: str, *, mode: Mode) -> str: else: versions = detect_target_versions(src_node, future_imports=future_imports) - normalize_fmt_off(src_node) + normalize_fmt_off(src_node, preview=mode.preview) lines = LineGenerator(mode=mode) elt = EmptyLineTracker(is_pyi=mode.is_pyi) empty_line = Line(mode=mode) diff --git a/src/black/comments.py b/src/black/comments.py index 28b9117..4553264 100644 --- a/src/black/comments.py +++ b/src/black/comments.py @@ -23,6 +23,8 @@ FMT_SKIP: Final = {"# fmt: skip", "# fmt:skip"} FMT_PASS: Final = {*FMT_OFF, *FMT_SKIP} FMT_ON: Final = {"# fmt: on", "# fmt:on", "# yapf: enable"} +COMMENT_EXCEPTIONS = {True: " !:#'", False: " !:#'%"} + @dataclass class ProtoComment: @@ -42,7 +44,7 @@ class ProtoComment: consumed: int # how many characters of the original leaf's prefix did we consume -def generate_comments(leaf: LN) -> Iterator[Leaf]: +def generate_comments(leaf: LN, *, preview: bool) -> Iterator[Leaf]: """Clean the prefix of the `leaf` and generate comments from it, if any. Comments in lib2to3 are shoved into the whitespace prefix. This happens @@ -61,12 +63,16 @@ def generate_comments(leaf: LN) -> Iterator[Leaf]: Inline comments are emitted as regular token.COMMENT leaves. Standalone are emitted with a fake STANDALONE_COMMENT token identifier. 
""" - for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER): + for pc in list_comments( + leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER, preview=preview + ): yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines) @lru_cache(maxsize=4096) -def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]: +def list_comments( + prefix: str, *, is_endmarker: bool, preview: bool +) -> List[ProtoComment]: """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`.""" result: List[ProtoComment] = [] if not prefix or "#" not in prefix: @@ -92,7 +98,7 @@ def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]: comment_type = token.COMMENT # simple trailing comment else: comment_type = STANDALONE_COMMENT - comment = make_comment(line) + comment = make_comment(line, preview=preview) result.append( ProtoComment( type=comment_type, value=comment, newlines=nlines, consumed=consumed @@ -102,10 +108,10 @@ def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]: return result -def make_comment(content: str) -> str: +def make_comment(content: str, *, preview: bool) -> str: """Return a consistently formatted comment from the given `content` string. - All comments (except for "##", "#!", "#:", '#'", "#%%") should have a single + All comments (except for "##", "#!", "#:", '#'") should have a single space between the hash sign and the content. If `content` didn't start with a hash sign, one is provided. @@ -123,26 +129,26 @@ def make_comment(content: str) -> str: and not content.lstrip().startswith("type:") ): content = " " + content[1:] # Replace NBSP by a simple space - if content and content[0] not in " !:#'%": + if content and content[0] not in COMMENT_EXCEPTIONS[preview]: content = " " + content return "#" + content -def normalize_fmt_off(node: Node) -> None: +def normalize_fmt_off(node: Node, *, preview: bool) -> None: """Convert content between `# fmt: off`/`# fmt: on` into standalone comments.""" try_again = True while try_again: - try_again = convert_one_fmt_off_pair(node) + try_again = convert_one_fmt_off_pair(node, preview=preview) -def convert_one_fmt_off_pair(node: Node) -> bool: +def convert_one_fmt_off_pair(node: Node, *, preview: bool) -> bool: """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment. Returns True if a pair was converted. """ for leaf in node.leaves(): previous_consumed = 0 - for comment in list_comments(leaf.prefix, is_endmarker=False): + for comment in list_comments(leaf.prefix, is_endmarker=False, preview=preview): if comment.value not in FMT_PASS: previous_consumed = comment.consumed continue @@ -157,7 +163,7 @@ def convert_one_fmt_off_pair(node: Node) -> bool: if comment.value in FMT_SKIP and prev.type in WHITESPACE: continue - ignored_nodes = list(generate_ignored_nodes(leaf, comment)) + ignored_nodes = list(generate_ignored_nodes(leaf, comment, preview=preview)) if not ignored_nodes: continue @@ -197,7 +203,9 @@ def convert_one_fmt_off_pair(node: Node) -> bool: return False -def generate_ignored_nodes(leaf: Leaf, comment: ProtoComment) -> Iterator[LN]: +def generate_ignored_nodes( + leaf: Leaf, comment: ProtoComment, *, preview: bool +) -> Iterator[LN]: """Starting from the container of `leaf`, generate all leaves until `# fmt: on`. If comment is skip, returns leaf only. 
@@ -221,13 +229,13 @@ def generate_ignored_nodes(leaf: Leaf, comment: ProtoComment) -> Iterator[LN]: yield leaf.parent return while container is not None and container.type != token.ENDMARKER: - if is_fmt_on(container): + if is_fmt_on(container, preview=preview): return # fix for fmt: on in children - if contains_fmt_on_at_column(container, leaf.column): + if contains_fmt_on_at_column(container, leaf.column, preview=preview): for child in container.children: - if contains_fmt_on_at_column(child, leaf.column): + if contains_fmt_on_at_column(child, leaf.column, preview=preview): return yield child else: @@ -235,12 +243,12 @@ def generate_ignored_nodes(leaf: Leaf, comment: ProtoComment) -> Iterator[LN]: container = container.next_sibling -def is_fmt_on(container: LN) -> bool: +def is_fmt_on(container: LN, preview: bool) -> bool: """Determine whether formatting is switched on within a container. Determined by whether the last `# fmt:` comment is `on` or `off`. """ fmt_on = False - for comment in list_comments(container.prefix, is_endmarker=False): + for comment in list_comments(container.prefix, is_endmarker=False, preview=preview): if comment.value in FMT_ON: fmt_on = True elif comment.value in FMT_OFF: @@ -248,7 +256,7 @@ def is_fmt_on(container: LN) -> bool: return fmt_on -def contains_fmt_on_at_column(container: LN, column: int) -> bool: +def contains_fmt_on_at_column(container: LN, column: int, *, preview: bool) -> bool: """Determine if children at a given column have formatting switched on.""" for child in container.children: if ( @@ -257,7 +265,7 @@ def contains_fmt_on_at_column(container: LN, column: int) -> bool: or isinstance(child, Leaf) and child.column == column ): - if is_fmt_on(child): + if is_fmt_on(child, preview=preview): return True return False diff --git a/src/black/linegen.py b/src/black/linegen.py index 4dc242a..79475a8 100644 --- a/src/black/linegen.py +++ b/src/black/linegen.py @@ -72,7 +72,7 @@ class LineGenerator(Visitor[Line]): """Default `visit_*()` implementation. Recurses to children of `node`.""" if isinstance(node, Leaf): any_open_brackets = self.current_line.bracket_tracker.any_open_brackets() - for comment in generate_comments(node): + for comment in generate_comments(node, preview=self.mode.preview): if any_open_brackets: # any comment within brackets is subject to splitting self.current_line.append(comment) @@ -132,7 +132,7 @@ class LineGenerator(Visitor[Line]): `parens` holds a set of string leaf values immediately after which invisible parens should be put. """ - normalize_invisible_parens(node, parens_after=parens) + normalize_invisible_parens(node, parens_after=parens, preview=self.mode.preview) for child in node.children: if is_name_token(child) and child.value in keywords: yield from self.line() @@ -141,7 +141,7 @@ class LineGenerator(Visitor[Line]): def visit_match_case(self, node: Node) -> Iterator[Line]: """Visit either a match or case statement.""" - normalize_invisible_parens(node, parens_after=set()) + normalize_invisible_parens(node, parens_after=set(), preview=self.mode.preview) yield from self.line() for child in node.children: @@ -802,7 +802,9 @@ def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None: leaf.prefix = "" -def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None: +def normalize_invisible_parens( + node: Node, parens_after: Set[str], *, preview: bool +) -> None: """Make existing optional parentheses invisible or create new ones. 
`parens_after` is a set of string leaf values immediately after which parens @@ -811,7 +813,7 @@ def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None: Standardizes on visible parentheses for single-element tuples, and keeps existing visible parentheses for other tuples and generator expressions. """ - for pc in list_comments(node.prefix, is_endmarker=False): + for pc in list_comments(node.prefix, is_endmarker=False, preview=preview): if pc.value in FMT_OFF: # This `node` has a prefix with `# fmt: off`, don't mess with parens. return @@ -820,7 +822,9 @@ def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None: # Fixes a bug where invisible parens are not properly stripped from # assignment statements that contain type annotations. if isinstance(child, Node) and child.type == syms.annassign: - normalize_invisible_parens(child, parens_after=parens_after) + normalize_invisible_parens( + child, parens_after=parens_after, preview=preview + ) # Add parentheses around long tuple unpacking in assignments. if ( diff --git a/tests/data/comments8.py b/tests/data/comments8.py new file mode 100644 index 0000000..a2030c2 --- /dev/null +++ b/tests/data/comments8.py @@ -0,0 +1,15 @@ +# The percent-percent comments are Spyder IDE cells. +# Both `#%%`` and `# %%` are accepted, so `black` standardises +# to the latter. + +#%% +# %% + +# output + +# The percent-percent comments are Spyder IDE cells. +# Both `#%%`` and `# %%` are accepted, so `black` standardises +# to the latter. + +# %% +# %% diff --git a/tests/test_format.py b/tests/test_format.py index 269bbac..667d5c1 100644 --- a/tests/test_format.py +++ b/tests/test_format.py @@ -75,6 +75,7 @@ PREVIEW_CASES: List[str] = [ # string processing "cantfit", "comments7", + "comments8", "long_strings", "long_strings__edge_case", "long_strings__regression", -- 2.39.5 From 5379d4f3f460ec9b7063dd1cc10f437b0edf9ae3 Mon Sep 17 00:00:00 2001 From: Jelle Zijlstra Date: Mon, 21 Mar 2022 15:20:41 -0700 Subject: [PATCH 06/16] stub style: remove some possible future changes (#2940) Fixes #2938. All of these suggested future changes are out of scope for an autoformatter and should be handled by a linter instead. --- docs/the_black_code_style/current_style.md | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/docs/the_black_code_style/current_style.md b/docs/the_black_code_style/current_style.md index 0bf5894..d54c7ab 100644 --- a/docs/the_black_code_style/current_style.md +++ b/docs/the_black_code_style/current_style.md @@ -399,16 +399,11 @@ recommended code style for those files is more terse than PEP 8: _Black_ enforces the above rules. There are additional guidelines for formatting `.pyi` file that are not enforced yet but might be in a future version of the formatter: -- all function bodies should be empty (contain `...` instead of the body); -- do not use docstrings; - prefer `...` over `pass`; -- for arguments with a default, use `...` instead of the actual default; - avoid using string literals in type annotations, stub files support forward references natively (like Python 3.7 code with `from __future__ import annotations`); - use variable annotations instead of type comments, even for stubs that target older - versions of Python; -- for arguments that default to `None`, use `Optional[]` explicitly; -- use `float` instead of `Union[int, float]`. + versions of Python. 
## Pragmatism -- 2.39.5 From 062b54931dc3ea35f673e755893fe28ff1f5a889 Mon Sep 17 00:00:00 2001 From: Richard Si <63936253+ichard26@users.noreply.github.com> Date: Wed, 23 Mar 2022 13:31:13 -0400 Subject: [PATCH 07/16] Github now supports .git-blame-ignore-revs (GH-2948) It's in beta. https://docs.github.com/en/repositories/working-with-files/using-files/viewing-a-file#ignore-commits-in-the-blame-view --- docs/guides/introducing_black_to_your_project.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/docs/guides/introducing_black_to_your_project.md b/docs/guides/introducing_black_to_your_project.md index 71ccf7c..9ae40a1 100644 --- a/docs/guides/introducing_black_to_your_project.md +++ b/docs/guides/introducing_black_to_your_project.md @@ -43,8 +43,10 @@ call to `git blame`. $ git config blame.ignoreRevsFile .git-blame-ignore-revs ``` -**The one caveat is that GitHub and GitLab do not yet support ignoring revisions using -their native UI of blame.** So blame information will be cluttered with a reformatting -commit on those platforms. (If you'd like this feature, there's an open issue for -[GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/31423) and please let GitHub -know!) +**The one caveat is that some online Git-repositories like GitLab do not yet support +ignoring revisions using their native blame UI.** So blame information will be cluttered +with a reformatting commit on those platforms. (If you'd like this feature, there's an +open issue for [GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/31423)). This is +however supported by +[GitHub](https://docs.github.com/en/repositories/working-with-files/using-files/viewing-a-file#ignore-commits-in-the-blame-view), +currently in beta. -- 2.39.5 From 3800ebd81df6a1c31d1eac8cc15899537b9cbb61 Mon Sep 17 00:00:00 2001 From: Joe Young <80432516+jpy-git@users.noreply.github.com> Date: Thu, 24 Mar 2022 02:16:09 +0000 Subject: [PATCH 08/16] Avoid magic-trailing-comma in single-element subscripts (#2942) Closes #2918. 
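For illustration, the intended behaviour is roughly as follows (the authoritative before/after cases are in the new tests/data/one_element_subscript.py added below):

```python
# A trailing comma in a single-element subscript no longer forces a split.
a: tuple[int,]

# Multi-element subscripts keep the magic trailing comma and still explode.
c: tuple[
    int,
    int,
]

# Non-subscript collections are unchanged; the magic trailing comma still applies.
small_list = [
    1,
]
```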
--- CHANGES.md | 1 + src/black/linegen.py | 11 ++++++--- src/black/lines.py | 21 ++++++++++++++--- src/black/mode.py | 5 ++-- src/black/nodes.py | 12 +++++++--- tests/data/one_element_subscript.py | 36 +++++++++++++++++++++++++++++ tests/test_format.py | 1 + 7 files changed, 75 insertions(+), 12 deletions(-) create mode 100644 tests/data/one_element_subscript.py diff --git a/CHANGES.md b/CHANGES.md index d0faf7c..d753a24 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -15,6 +15,7 @@ - Code cell separators `#%%` are now standardised to `# %%` (#2919) +- Avoid magic-trailing-comma in single-element subscripts (#2942) ### _Blackd_ diff --git a/src/black/linegen.py b/src/black/linegen.py index 79475a8..5d92011 100644 --- a/src/black/linegen.py +++ b/src/black/linegen.py @@ -8,7 +8,12 @@ from typing import Collection, Iterator, List, Optional, Set, Union from black.nodes import WHITESPACE, RARROW, STATEMENT, STANDALONE_COMMENT from black.nodes import ASSIGNMENTS, OPENING_BRACKETS, CLOSING_BRACKETS from black.nodes import Visitor, syms, is_arith_like, ensure_visible -from black.nodes import is_docstring, is_empty_tuple, is_one_tuple, is_one_tuple_between +from black.nodes import ( + is_docstring, + is_empty_tuple, + is_one_tuple, + is_one_sequence_between, +) from black.nodes import is_name_token, is_lpar_token, is_rpar_token from black.nodes import is_walrus_assignment, is_yield, is_vararg, is_multiline_string from black.nodes import is_stub_suite, is_stub_body, is_atom_with_invisible_parens @@ -973,7 +978,7 @@ def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[Leaf prev and prev.type == token.COMMA and leaf.opening_bracket is not None - and not is_one_tuple_between( + and not is_one_sequence_between( leaf.opening_bracket, leaf, line.leaves ) ): @@ -1001,7 +1006,7 @@ def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[Leaf prev and prev.type == token.COMMA and leaf.opening_bracket is not None - and not is_one_tuple_between(leaf.opening_bracket, leaf, line.leaves) + and not is_one_sequence_between(leaf.opening_bracket, leaf, line.leaves) ): # Never omit bracket pairs with trailing commas. # We need to explode on those. 
diff --git a/src/black/lines.py b/src/black/lines.py index f35665c..e455a50 100644 --- a/src/black/lines.py +++ b/src/black/lines.py @@ -17,12 +17,12 @@ from blib2to3.pytree import Node, Leaf from blib2to3.pgen2 import token from black.brackets import BracketTracker, DOT_PRIORITY -from black.mode import Mode +from black.mode import Mode, Preview from black.nodes import STANDALONE_COMMENT, TEST_DESCENDANTS from black.nodes import BRACKETS, OPENING_BRACKETS, CLOSING_BRACKETS from black.nodes import syms, whitespace, replace_child, child_towards from black.nodes import is_multiline_string, is_import, is_type_comment -from black.nodes import is_one_tuple_between +from black.nodes import is_one_sequence_between # types T = TypeVar("T") @@ -254,6 +254,7 @@ class Line: """Return True if we have a magic trailing comma, that is when: - there's a trailing comma here - it's not a one-tuple + - it's not a single-element subscript Additionally, if ensure_removable: - it's not from square bracket indexing """ @@ -268,6 +269,20 @@ class Line: return True if closing.type == token.RSQB: + if ( + Preview.one_element_subscript in self.mode + and closing.parent + and closing.parent.type == syms.trailer + and closing.opening_bracket + and is_one_sequence_between( + closing.opening_bracket, + closing, + self.leaves, + brackets=(token.LSQB, token.RSQB), + ) + ): + return False + if not ensure_removable: return True comma = self.leaves[-1] @@ -276,7 +291,7 @@ class Line: if self.is_import: return True - if closing.opening_bracket is not None and not is_one_tuple_between( + if closing.opening_bracket is not None and not is_one_sequence_between( closing.opening_bracket, closing, self.leaves ): return True diff --git a/src/black/mode.py b/src/black/mode.py index 35a072c..77b1cab 100644 --- a/src/black/mode.py +++ b/src/black/mode.py @@ -127,6 +127,7 @@ class Preview(Enum): """Individual preview style features.""" string_processing = auto() + one_element_subscript = auto() class Deprecated(UserWarning): @@ -162,9 +163,7 @@ class Mode: """ if feature is Preview.string_processing: return self.preview or self.experimental_string_processing - # TODO: Remove type ignore comment once preview contains more features - # than just ESP - return self.preview # type: ignore + return self.preview def get_cache_key(self) -> str: if self.target_versions: diff --git a/src/black/nodes.py b/src/black/nodes.py index f130bff..d18d4bd 100644 --- a/src/black/nodes.py +++ b/src/black/nodes.py @@ -9,6 +9,7 @@ from typing import ( List, Optional, Set, + Tuple, TypeVar, Union, ) @@ -559,9 +560,14 @@ def is_one_tuple(node: LN) -> bool: ) -def is_one_tuple_between(opening: Leaf, closing: Leaf, leaves: List[Leaf]) -> bool: - """Return True if content between `opening` and `closing` looks like a one-tuple.""" - if opening.type != token.LPAR and closing.type != token.RPAR: +def is_one_sequence_between( + opening: Leaf, + closing: Leaf, + leaves: List[Leaf], + brackets: Tuple[int, int] = (token.LPAR, token.RPAR), +) -> bool: + """Return True if content between `opening` and `closing` is a one-sequence.""" + if (opening.type, closing.type) != brackets: return False depth = closing.bracket_depth + 1 diff --git a/tests/data/one_element_subscript.py b/tests/data/one_element_subscript.py new file mode 100644 index 0000000..39205ba --- /dev/null +++ b/tests/data/one_element_subscript.py @@ -0,0 +1,36 @@ +# We should not treat the trailing comma +# in a single-element subscript. 
+a: tuple[int,] +b = tuple[int,] + +# The magic comma still applies to multi-element subscripts. +c: tuple[int, int,] +d = tuple[int, int,] + +# Magic commas still work as expected for non-subscripts. +small_list = [1,] +list_of_types = [tuple[int,],] + +# output +# We should not treat the trailing comma +# in a single-element subscript. +a: tuple[int,] +b = tuple[int,] + +# The magic comma still applies to multi-element subscripts. +c: tuple[ + int, + int, +] +d = tuple[ + int, + int, +] + +# Magic commas still work as expected for non-subscripts. +small_list = [ + 1, +] +list_of_types = [ + tuple[int,], +] diff --git a/tests/test_format.py b/tests/test_format.py index 667d5c1..4de3170 100644 --- a/tests/test_format.py +++ b/tests/test_format.py @@ -80,6 +80,7 @@ PREVIEW_CASES: List[str] = [ "long_strings__edge_case", "long_strings__regression", "percent_precedence", + "one_element_subscript", ] SOURCES: List[str] = [ -- 2.39.5 From 14e5ce5412efa53438df0180e735b3834df3b579 Mon Sep 17 00:00:00 2001 From: Joe Young <80432516+jpy-git@users.noreply.github.com> Date: Thu, 24 Mar 2022 14:59:54 +0000 Subject: [PATCH 09/16] Remove unnecessary parentheses from tuple unpacking in `for` loops (#2945) --- CHANGES.md | 1 + src/black/linegen.py | 26 ++++++++++++++--- src/black/mode.py | 1 + src/black/trans.py | 12 ++++---- tests/data/long_strings__regression.py | 2 +- tests/data/remove_for_brackets.py | 40 ++++++++++++++++++++++++++ tests/test_format.py | 1 + 7 files changed, 72 insertions(+), 11 deletions(-) create mode 100644 tests/data/remove_for_brackets.py diff --git a/CHANGES.md b/CHANGES.md index d753a24..b2325b6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -15,6 +15,7 @@ - Code cell separators `#%%` are now standardised to `# %%` (#2919) +- Remove unnecessary parentheses from tuple unpacking in `for` loops (#2945) - Avoid magic-trailing-comma in single-element subscripts (#2942) ### _Blackd_ diff --git a/src/black/linegen.py b/src/black/linegen.py index 5d92011..cb605ee 100644 --- a/src/black/linegen.py +++ b/src/black/linegen.py @@ -841,7 +841,11 @@ def normalize_invisible_parens( if check_lpar: if child.type == syms.atom: - if maybe_make_parens_invisible_in_atom(child, parent=node): + if maybe_make_parens_invisible_in_atom( + child, + parent=node, + preview=preview, + ): wrap_in_parentheses(node, child, visible=False) elif is_one_tuple(child): wrap_in_parentheses(node, child, visible=True) @@ -865,7 +869,11 @@ def normalize_invisible_parens( check_lpar = isinstance(child, Leaf) and child.value in parens_after -def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool: +def maybe_make_parens_invisible_in_atom( + node: LN, + parent: LN, + preview: bool = False, +) -> bool: """If it's safe, make the parens in the atom `node` invisible, recursively. Additionally, remove repeated, adjacent invisible parens from the atom `node` as they are redundant. @@ -873,13 +881,23 @@ def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool: Returns whether the node should itself be wrapped in invisible parentheses. 
""" + if ( + preview + and parent.type == syms.for_stmt + and isinstance(node.prev_sibling, Leaf) + and node.prev_sibling.type == token.NAME + and node.prev_sibling.value == "for" + ): + for_stmt_check = False + else: + for_stmt_check = True if ( node.type != syms.atom or is_empty_tuple(node) or is_one_tuple(node) or (is_yield(node) and parent.type != syms.expr_stmt) - or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY + or (max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY and for_stmt_check) ): return False @@ -902,7 +920,7 @@ def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool: # make parentheses invisible first.value = "" last.value = "" - maybe_make_parens_invisible_in_atom(middle, parent=parent) + maybe_make_parens_invisible_in_atom(middle, parent=parent, preview=preview) if is_atom_with_invisible_parens(middle): # Strip the invisible parens from `middle` by replacing diff --git a/src/black/mode.py b/src/black/mode.py index 77b1cab..6b74c14 100644 --- a/src/black/mode.py +++ b/src/black/mode.py @@ -127,6 +127,7 @@ class Preview(Enum): """Individual preview style features.""" string_processing = auto() + remove_redundant_parens = auto() one_element_subscript = auto() diff --git a/src/black/trans.py b/src/black/trans.py index 74d052f..01aa80e 100644 --- a/src/black/trans.py +++ b/src/black/trans.py @@ -365,7 +365,7 @@ class StringMerger(StringTransformer, CustomSplitMapMixin): is_valid_index = is_valid_index_factory(LL) - for (i, leaf) in enumerate(LL): + for i, leaf in enumerate(LL): if ( leaf.type == token.STRING and is_valid_index(i + 1) @@ -570,7 +570,7 @@ class StringMerger(StringTransformer, CustomSplitMapMixin): # Build the final line ('new_line') that this method will later return. new_line = line.clone() - for (i, leaf) in enumerate(LL): + for i, leaf in enumerate(LL): if i == string_idx: new_line.append(string_leaf) @@ -691,7 +691,7 @@ class StringParenStripper(StringTransformer): is_valid_index = is_valid_index_factory(LL) - for (idx, leaf) in enumerate(LL): + for idx, leaf in enumerate(LL): # Should be a string... if leaf.type != token.STRING: continue @@ -1713,7 +1713,7 @@ class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin): if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert": is_valid_index = is_valid_index_factory(LL) - for (i, leaf) in enumerate(LL): + for i, leaf in enumerate(LL): # We MUST find a comma... if leaf.type == token.COMMA: idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 @@ -1751,7 +1751,7 @@ class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin): ): is_valid_index = is_valid_index_factory(LL) - for (i, leaf) in enumerate(LL): + for i, leaf in enumerate(LL): # We MUST find either an '=' or '+=' symbol... if leaf.type in [token.EQUAL, token.PLUSEQUAL]: idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 @@ -1794,7 +1794,7 @@ class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin): if syms.dictsetmaker in [parent_type(LL[0]), parent_type(LL[0].parent)]: is_valid_index = is_valid_index_factory(LL) - for (i, leaf) in enumerate(LL): + for i, leaf in enumerate(LL): # We MUST find a colon... 
if leaf.type == token.COLON: idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 diff --git a/tests/data/long_strings__regression.py b/tests/data/long_strings__regression.py index 36f323e..58ccc4a 100644 --- a/tests/data/long_strings__regression.py +++ b/tests/data/long_strings__regression.py @@ -599,7 +599,7 @@ class A: def foo(xxxx): - for (xxx_xxxx, _xxx_xxx, _xxx_xxxxx, xxx_xxxx) in xxxx: + for xxx_xxxx, _xxx_xxx, _xxx_xxxxx, xxx_xxxx in xxxx: for xxx in xxx_xxxx: assert ("x" in xxx) or (xxx in xxx_xxx_xxxxx), ( "{0} xxxxxxx xx {1}, xxx {1} xx xxx xx xxxx xx xxx xxxx: xxx xxxx {2}" diff --git a/tests/data/remove_for_brackets.py b/tests/data/remove_for_brackets.py new file mode 100644 index 0000000..c8d88ab --- /dev/null +++ b/tests/data/remove_for_brackets.py @@ -0,0 +1,40 @@ +# Only remove tuple brackets after `for` +for (k, v) in d.items(): + print(k, v) + +# Don't touch tuple brackets after `in` +for module in (core, _unicodefun): + if hasattr(module, "_verify_python3_env"): + module._verify_python3_env = lambda: None + +# Brackets remain for long for loop lines +for (why_would_anyone_choose_to_name_a_loop_variable_with_a_name_this_long, i_dont_know_but_we_should_still_check_the_behaviour_if_they_do) in d.items(): + print(k, v) + +for (k, v) in dfkasdjfldsjflkdsjflkdsjfdslkfjldsjfgkjdshgkljjdsfldgkhsdofudsfudsofajdslkfjdslkfjldisfjdffjsdlkfjdlkjjkdflskadjldkfjsalkfjdasj.items(): + print(k, v) + +# output +# Only remove tuple brackets after `for` +for k, v in d.items(): + print(k, v) + +# Don't touch tuple brackets after `in` +for module in (core, _unicodefun): + if hasattr(module, "_verify_python3_env"): + module._verify_python3_env = lambda: None + +# Brackets remain for long for loop lines +for ( + why_would_anyone_choose_to_name_a_loop_variable_with_a_name_this_long, + i_dont_know_but_we_should_still_check_the_behaviour_if_they_do, +) in d.items(): + print(k, v) + +for ( + k, + v, +) in ( + dfkasdjfldsjflkdsjflkdsjfdslkfjldsjfgkjdshgkljjdsfldgkhsdofudsfudsofajdslkfjdslkfjldisfjdffjsdlkfjdlkjjkdflskadjldkfjsalkfjdasj.items() +): + print(k, v) diff --git a/tests/test_format.py b/tests/test_format.py index 4de3170..b744685 100644 --- a/tests/test_format.py +++ b/tests/test_format.py @@ -80,6 +80,7 @@ PREVIEW_CASES: List[str] = [ "long_strings__edge_case", "long_strings__regression", "percent_precedence", + "remove_for_brackets", "one_element_subscript", ] -- 2.39.5 From 14d84ba2e96c5ca1351b8fe4d0d415cc148f4117 Mon Sep 17 00:00:00 2001 From: Joe Young <80432516+jpy-git@users.noreply.github.com> Date: Thu, 24 Mar 2022 15:14:21 +0000 Subject: [PATCH 10/16] Resolve new flake8-bugbear errors (B020) (GH-2950) Fixes a couple places where we were using the same variable name as we are iterating over. 
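
For context, B020 flags a `for` loop whose target rebinds the iterable it loops over. A minimal, hypothetical sketch of the pattern — not Black's own code; the actual renames are in `src/black/linegen.py` and `src/black/parsing.py` below:

```python
# Hypothetical, self-contained illustration of the B020 pattern.
words = ["alpha", "beta"]

# Before: B020 - the loop target reuses the name of the iterable it loops over.
# for words in words:
#     print(words)

# After: a distinct loop-variable name leaves the original `words` intact.
for word in words:
    print(word)
```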
Co-authored-by: Jelle Zijlstra --- src/black/linegen.py | 6 +++--- src/black/parsing.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/black/linegen.py b/src/black/linegen.py index cb605ee..9c85e76 100644 --- a/src/black/linegen.py +++ b/src/black/linegen.py @@ -670,9 +670,9 @@ def dont_increase_indentation(split_func: Transformer) -> Transformer: @wraps(split_func) def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: - for line in split_func(line, features): - normalize_prefix(line.leaves[0], inside_brackets=True) - yield line + for split_line in split_func(line, features): + normalize_prefix(split_line.leaves[0], inside_brackets=True) + yield split_line return split_wrapper diff --git a/src/black/parsing.py b/src/black/parsing.py index db48ae4..1272656 100644 --- a/src/black/parsing.py +++ b/src/black/parsing.py @@ -225,8 +225,8 @@ def stringify_ast(node: Union[ast.AST, ast3.AST], depth: int = 0) -> Iterator[st and isinstance(node, (ast.Delete, ast3.Delete)) and isinstance(item, (ast.Tuple, ast3.Tuple)) ): - for item in item.elts: - yield from stringify_ast(item, depth + 2) + for elt in item.elts: + yield from stringify_ast(elt, depth + 2) elif isinstance(item, (ast.AST, ast3.AST)): yield from stringify_ast(item, depth + 2) -- 2.39.5 From bd1e98034907463f5d86f4d87e89202dc6c34dd4 Mon Sep 17 00:00:00 2001 From: Joe Young <80432516+jpy-git@users.noreply.github.com> Date: Sat, 26 Mar 2022 16:56:50 +0000 Subject: [PATCH 11/16] Remove unnecessary parentheses from `except` clauses (#2939) Co-authored-by: Jelle Zijlstra --- CHANGES.md | 1 + src/black/linegen.py | 7 ++- tests/data/remove_except_parens.py | 79 ++++++++++++++++++++++++++++++ tests/test_format.py | 1 + 4 files changed, 87 insertions(+), 1 deletion(-) create mode 100644 tests/data/remove_except_parens.py diff --git a/CHANGES.md b/CHANGES.md index b2325b6..d34bd4e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -15,6 +15,7 @@ - Code cell separators `#%%` are now standardised to `# %%` (#2919) +- Remove unnecessary parentheses from `except` statements (#2939) - Remove unnecessary parentheses from tuple unpacking in `for` loops (#2945) - Avoid magic-trailing-comma in single-element subscripts (#2942) diff --git a/src/black/linegen.py b/src/black/linegen.py index 9c85e76..8a28c39 100644 --- a/src/black/linegen.py +++ b/src/black/linegen.py @@ -318,7 +318,12 @@ class LineGenerator(Visitor[Line]): self.visit_try_stmt = partial( v, keywords={"try", "except", "else", "finally"}, parens=Ø ) - self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø) + if self.mode.preview: + self.visit_except_clause = partial( + v, keywords={"except"}, parens={"except"} + ) + else: + self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø) self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø) self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø) self.visit_classdef = partial(v, keywords={"class"}, parens=Ø) diff --git a/tests/data/remove_except_parens.py b/tests/data/remove_except_parens.py new file mode 100644 index 0000000..322c5b7 --- /dev/null +++ b/tests/data/remove_except_parens.py @@ -0,0 +1,79 @@ +# These brackets are redundant, therefore remove. +try: + a.something +except (AttributeError) as err: + raise err + +# This is tuple of exceptions. +# Although this could be replaced with just the exception, +# we do not remove brackets to preserve AST. +try: + a.something +except (AttributeError,) as err: + raise err + +# This is a tuple of exceptions. 
Do not remove brackets. +try: + a.something +except (AttributeError, ValueError) as err: + raise err + +# Test long variants. +try: + a.something +except (some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error) as err: + raise err + +try: + a.something +except (some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error,) as err: + raise err + +try: + a.something +except (some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error, some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error) as err: + raise err + +# output +# These brackets are redundant, therefore remove. +try: + a.something +except AttributeError as err: + raise err + +# This is tuple of exceptions. +# Although this could be replaced with just the exception, +# we do not remove brackets to preserve AST. +try: + a.something +except (AttributeError,) as err: + raise err + +# This is a tuple of exceptions. Do not remove brackets. +try: + a.something +except (AttributeError, ValueError) as err: + raise err + +# Test long variants. +try: + a.something +except ( + some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error +) as err: + raise err + +try: + a.something +except ( + some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error, +) as err: + raise err + +try: + a.something +except ( + some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error, + some.really.really.really.looooooooooooooooooooooooooooooooong.module.over89.chars.Error, +) as err: + raise err diff --git a/tests/test_format.py b/tests/test_format.py index b744685..a995bd3 100644 --- a/tests/test_format.py +++ b/tests/test_format.py @@ -80,6 +80,7 @@ PREVIEW_CASES: List[str] = [ "long_strings__edge_case", "long_strings__regression", "percent_precedence", + "remove_except_parens", "remove_for_brackets", "one_element_subscript", ] -- 2.39.5 From f239d227c003c52126239e1b9a37c36c2b2b8305 Mon Sep 17 00:00:00 2001 From: Richard Si <63936253+ichard26@users.noreply.github.com> Date: Sat, 26 Mar 2022 17:22:38 -0400 Subject: [PATCH 12/16] Enforce no formatting changes for PRs via CI (GH-2951) Now PRs will run two diff-shades jobs, "preview-changes" which formats all projects with preview=True, and "assert-no-changes" which formats all projects with preview=False. The latter also fails if any changes were made. Pushes to main will only run "preview-changes" Also the workflow_dispatch feature was dropped since it was complicating everything for little gain. --- .github/workflows/diff_shades.yml | 114 ++++++++++++++------------- docs/contributing/gauging_changes.md | 50 ++++++------ scripts/diff_shades_gha_helper.py | 105 ++++++++---------------- src/black/__init__.py | 3 + 4 files changed, 121 insertions(+), 151 deletions(-) diff --git a/.github/workflows/diff_shades.yml b/.github/workflows/diff_shades.yml index e9deaba..51fcebc 100644 --- a/.github/workflows/diff_shades.yml +++ b/.github/workflows/diff_shades.yml @@ -3,54 +3,61 @@ name: diff-shades on: push: branches: [main] - paths-ignore: ["docs/**", "tests/**", "**.md", "**.rst"] + paths: ["src/**", "setup.*", "pyproject.toml", ".github/workflows/*"] pull_request: - paths-ignore: ["docs/**", "tests/**", "**.md", "**.rst"] - - workflow_dispatch: - inputs: - baseline: - description: > - The baseline revision. Pro-tip, use `.pypi` to use the latest version - on PyPI or `.XXX` to use a PR. 
- required: true - default: main - baseline-args: - description: "Custom Black arguments (eg. -l 79)" - required: false - target: - description: > - The target revision to compare against the baseline. Same tip applies here. - required: true - target-args: - description: "Custom Black arguments (eg. -S)" - required: false + paths: ["src/**", "setup.*", "pyproject.toml", ".github/workflows/*"] concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} cancel-in-progress: true jobs: + configure: + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.set-config.outputs.matrix }} + + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 + + - name: Install diff-shades and support dependencies + run: | + python -m pip install click packaging urllib3 + python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip + + - name: Calculate run configuration & metadata + id: set-config + env: + GITHUB_TOKEN: ${{ github.token }} + run: > + python scripts/diff_shades_gha_helper.py config ${{ github.event_name }} ${{ matrix.mode }} + analysis: - name: analysis / linux + name: analysis / ${{ matrix.mode }} + needs: configure runs-on: ubuntu-latest env: # Clang is less picky with the C code it's given than gcc (and may # generate faster binaries too). CC: clang-12 + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.configure.outputs.matrix )}} steps: - name: Checkout this repository (full clone) uses: actions/checkout@v3 with: + # The baseline revision could be rather old so a full clone is ideal. fetch-depth: 0 - uses: actions/setup-python@v3 - name: Install diff-shades and support dependencies run: | - python -m pip install pip --upgrade python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip python -m pip install click packaging urllib3 python -m pip install -r .github/mypyc-requirements.txt @@ -59,28 +66,19 @@ jobs: git config user.name "diff-shades-gha" git config user.email "diff-shades-gha@example.com" - - name: Calculate run configuration & metadata - id: config - env: - GITHUB_TOKEN: ${{ github.token }} - run: > - python helper.py config ${{ github.event_name }} - ${{ github.event.inputs.baseline }} ${{ github.event.inputs.target }} - --baseline-args "${{ github.event.inputs.baseline-args }}" - - name: Attempt to use cached baseline analysis id: baseline-cache uses: actions/cache@v2.1.7 with: - path: ${{ steps.config.outputs.baseline-analysis }} - key: ${{ steps.config.outputs.baseline-cache-key }} + path: ${{ matrix.baseline-analysis }} + key: ${{ matrix.baseline-cache-key }} - name: Build and install baseline revision if: steps.baseline-cache.outputs.cache-hit != 'true' env: GITHUB_TOKEN: ${{ github.token }} run: > - ${{ steps.config.outputs.baseline-setup-cmd }} + ${{ matrix.baseline-setup-cmd }} && python setup.py --use-mypyc bdist_wheel && python -m pip install dist/*.whl && rm build dist -r @@ -88,63 +86,69 @@ jobs: if: steps.baseline-cache.outputs.cache-hit != 'true' run: > diff-shades analyze -v --work-dir projects-cache/ - ${{ steps.config.outputs.baseline-analysis }} -- ${{ github.event.inputs.baseline-args }} + ${{ matrix.baseline-analysis }} ${{ matrix.force-flag }} - name: Build and install target revision env: GITHUB_TOKEN: ${{ github.token }} run: > - ${{ steps.config.outputs.target-setup-cmd }} + ${{ matrix.target-setup-cmd }} && python setup.py --use-mypyc bdist_wheel && python -m pip install dist/*.whl - name: Analyze target revision run: > diff-shades analyze 
-v --work-dir projects-cache/ - ${{ steps.config.outputs.target-analysis }} --repeat-projects-from - ${{ steps.config.outputs.baseline-analysis }} -- ${{ github.event.inputs.target-args }} + ${{ matrix.target-analysis }} --repeat-projects-from + ${{ matrix.baseline-analysis }} ${{ matrix.force-flag }} - name: Generate HTML diff report run: > - diff-shades --dump-html diff.html compare --diff --quiet - ${{ steps.config.outputs.baseline-analysis }} ${{ steps.config.outputs.target-analysis }} + diff-shades --dump-html diff.html compare --diff + ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }} - name: Upload diff report uses: actions/upload-artifact@v2 with: - name: diff.html + name: ${{ matrix.mode }}-diff.html path: diff.html - name: Upload baseline analysis uses: actions/upload-artifact@v2 with: - name: ${{ steps.config.outputs.baseline-analysis }} - path: ${{ steps.config.outputs.baseline-analysis }} + name: ${{ matrix.baseline-analysis }} + path: ${{ matrix.baseline-analysis }} - name: Upload target analysis uses: actions/upload-artifact@v2 with: - name: ${{ steps.config.outputs.target-analysis }} - path: ${{ steps.config.outputs.target-analysis }} + name: ${{ matrix.target-analysis }} + path: ${{ matrix.target-analysis }} - name: Generate summary file (PR only) - if: github.event_name == 'pull_request' + if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes' run: > python helper.py comment-body - ${{ steps.config.outputs.baseline-analysis }} ${{ steps.config.outputs.target-analysis }} - ${{ steps.config.outputs.baseline-sha }} ${{ steps.config.outputs.target-sha }} + ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }} + ${{ matrix.baseline-sha }} ${{ matrix.target-sha }} ${{ github.event.pull_request.number }} - name: Upload summary file (PR only) - if: github.event_name == 'pull_request' + if: github.event_name == 'pull_request' && matrix.mode == 'preview-changes' uses: actions/upload-artifact@v2 with: name: .pr-comment.json path: .pr-comment.json - # This is last so the diff-shades-comment workflow can still work even if we - # end up detecting failed files and failing the run. - - name: Check for failed files in both analyses + - name: Verify zero changes (PR only) + if: matrix.mode == 'assert-no-changes' + run: > + diff-shades compare --check ${{ matrix.baseline-analysis }} ${{ matrix.target-analysis }} + || (echo "Please verify you didn't change the stable code style unintentionally!" && exit 1) + + - name: Check for failed files for target revision + # Even if the previous step failed, we should still check for failed files. + if: always() run: > - diff-shades show-failed --check --show-log ${{ steps.config.outputs.baseline-analysis }}; - diff-shades show-failed --check --show-log ${{ steps.config.outputs.target-analysis }} + diff-shades show-failed --check --show-log ${{ matrix.target-analysis }} + --check-allow 'sqlalchemy:test/orm/test_relationship_criteria.py' diff --git a/docs/contributing/gauging_changes.md b/docs/contributing/gauging_changes.md index f28e811..8562a83 100644 --- a/docs/contributing/gauging_changes.md +++ b/docs/contributing/gauging_changes.md @@ -9,10 +9,10 @@ enough to cause frustration to projects that are already "black formatted". ## diff-shades -diff-shades is a tool that runs _Black_ across a list of Git cloneable OSS projects -recording the results. The main highlight feature of diff-shades is being able to -compare two revisions of _Black_. 
This is incredibly useful as it allows us to see what -exact changes will occur, say merging a certain PR. +diff-shades is a tool that runs _Black_ across a list of open-source projects recording +the results. The main highlight feature of diff-shades is being able to compare two +revisions of _Black_. This is incredibly useful as it allows us to see what exact +changes will occur, say merging a certain PR. For more information, please see the [diff-shades documentation][diff-shades]. @@ -20,35 +20,39 @@ For more information, please see the [diff-shades documentation][diff-shades]. diff-shades is also the tool behind the "diff-shades results comparing ..." / "diff-shades reports zero changes ..." comments on PRs. The project has a GitHub Actions -workflow which runs diff-shades twice against two revisions of _Black_ according to -these rules: +workflow that analyzes and compares two revisions of _Black_ according to these rules: | | Baseline revision | Target revision | | --------------------- | ----------------------- | ---------------------------- | | On PRs | latest commit on `main` | PR commit with `main` merged | | On pushes (main only) | latest PyPI version | the pushed commit | -Once finished, a PR comment will be posted embedding a summary of the changes and links -to further information. If there's a pre-existing diff-shades comment, it'll be updated -instead the next time the workflow is triggered on the same PR. +For pushes to main, there's only one analysis job named `preview-changes` where the +preview style is used for all projects. -The workflow uploads 3-4 artifacts upon completion: the two generated analyses (they -have the .json file extension), `diff.html`, and `.pr-comment.json` if triggered by a -PR. The last one is downloaded by the `diff-shades-comment` workflow and shouldn't be -downloaded locally. `diff.html` comes in handy for push-based or manually triggered -runs. And the analyses exist just in case you want to do further analysis using the -collected data locally. +For PRs they get one more analysis job: `assert-no-changes`. It's similar to +`preview-changes` but runs with the stable code style. It will fail if changes were +made. This makes sure code won't be reformatted again and again within the same year in +accordance to Black's stability policy. -Note that the workflow will only fail intentionally if while analyzing a file failed to +Additionally for PRs, a PR comment will be posted embedding a summary of the preview +changes and links to further information. If there's a pre-existing diff-shades comment, +it'll be updated instead the next time the workflow is triggered on the same PR. + +```{note} +The `preview-changes` job will only fail intentionally if while analyzing a file failed to format. Otherwise a failure indicates a bug in the workflow. +``` -```{tip} -Maintainers with write access or higher can trigger the workflow manually from the -Actions tab using the `workflow_dispatch` event. Simply select "diff-shades" -from the workflows list on the left, press "Run workflow", and fill in which revisions -and command line arguments to use. +The workflow uploads several artifacts upon completion: -Once finished, check the logs or download the artifacts for local use. -``` +- The raw analyses (.json) +- HTML diffs (.html) +- `.pr-comment.json` (if triggered by a PR) + +The last one is downloaded by the `diff-shades-comment` workflow and shouldn't be +downloaded locally. The HTML diffs come in handy for push-based where there's no PR to +post a comment. 
And the analyses exist just in case you want to do further analysis +using the collected data locally. [diff-shades]: https://github.com/ichard26/diff-shades#readme diff --git a/scripts/diff_shades_gha_helper.py b/scripts/diff_shades_gha_helper.py index f1f7f2b..b5fea5a 100644 --- a/scripts/diff_shades_gha_helper.py +++ b/scripts/diff_shades_gha_helper.py @@ -23,7 +23,7 @@ import sys import zipfile from io import BytesIO from pathlib import Path -from typing import Any, Optional, Tuple +from typing import Any import click import urllib3 @@ -55,7 +55,7 @@ def set_output(name: str, value: str) -> None: print(f"::set-output name={name}::{value}") -def http_get(url: str, is_json: bool = True, **kwargs: Any) -> Any: +def http_get(url: str, *, is_json: bool = True, **kwargs: Any) -> Any: headers = kwargs.get("headers") or {} headers["User-Agent"] = USER_AGENT if "github" in url: @@ -78,10 +78,10 @@ def http_get(url: str, is_json: bool = True, **kwargs: Any) -> Any: return data -def get_branch_or_tag_revision(sha: str = "main") -> str: +def get_main_revision() -> str: data = http_get( f"https://api.github.com/repos/{REPO}/commits", - fields={"per_page": "1", "sha": sha}, + fields={"per_page": "1", "sha": "main"}, ) assert isinstance(data[0]["sha"], str) return data[0]["sha"] @@ -100,53 +100,18 @@ def get_pypi_version() -> Version: return sorted_versions[0] -def resolve_custom_ref(ref: str) -> Tuple[str, str]: - if ref == ".pypi": - # Special value to get latest PyPI version. - version = str(get_pypi_version()) - return version, f"git checkout {version}" - - if ref.startswith(".") and ref[1:].isnumeric(): - # Special format to get a PR. - number = int(ref[1:]) - revision = get_pr_revision(number) - return ( - f"pr-{number}-{revision[:SHA_LENGTH]}", - f"gh pr checkout {number} && git merge origin/main", - ) - - # Alright, it's probably a branch, tag, or a commit SHA, let's find out! - revision = get_branch_or_tag_revision(ref) - # We're cutting the revision short as we might be operating on a short commit SHA. - if revision == ref or revision[: len(ref)] == ref: - # It's *probably* a commit as the resolved SHA isn't different from the REF. - return revision[:SHA_LENGTH], f"git checkout {revision}" - - # It's *probably* a pre-existing branch or tag, yay! - return f"{ref}-{revision[:SHA_LENGTH]}", f"git checkout {revision}" - - @click.group() def main() -> None: pass @main.command("config", help="Acquire run configuration and metadata.") -@click.argument( - "event", type=click.Choice(["push", "pull_request", "workflow_dispatch"]) -) -@click.argument("custom_baseline", required=False) -@click.argument("custom_target", required=False) -@click.option("--baseline-args", default="") -def config( - event: Literal["push", "pull_request", "workflow_dispatch"], - custom_baseline: Optional[str], - custom_target: Optional[str], - baseline_args: str, -) -> None: +@click.argument("event", type=click.Choice(["push", "pull_request"])) +def config(event: Literal["push", "pull_request"]) -> None: import diff_shades if event == "push": + jobs = [{"mode": "preview-changes", "force-flag": "--force-preview-style"}] # Push on main, let's use PyPI Black as the baseline. 
baseline_name = str(get_pypi_version()) baseline_cmd = f"git checkout {baseline_name}" @@ -156,11 +121,14 @@ def config( target_cmd = f"git checkout {target_rev}" elif event == "pull_request": + jobs = [ + {"mode": "preview-changes", "force-flag": "--force-preview-style"}, + {"mode": "assert-no-changes", "force-flag": "--force-stable-style"}, + ] # PR, let's use main as the baseline. - baseline_rev = get_branch_or_tag_revision() + baseline_rev = get_main_revision() baseline_name = "main-" + baseline_rev[:SHA_LENGTH] baseline_cmd = f"git checkout {baseline_rev}" - pr_ref = os.getenv("GITHUB_REF") assert pr_ref is not None pr_num = int(pr_ref[10:-6]) @@ -168,27 +136,20 @@ def config( target_name = f"pr-{pr_num}-{pr_rev[:SHA_LENGTH]}" target_cmd = f"gh pr checkout {pr_num} && git merge origin/main" - # These are only needed for the PR comment. - set_output("baseline-sha", baseline_rev) - set_output("target-sha", pr_rev) - else: - assert custom_baseline is not None and custom_target is not None - baseline_name, baseline_cmd = resolve_custom_ref(custom_baseline) - target_name, target_cmd = resolve_custom_ref(custom_target) - if baseline_name == target_name: - # Alright we're using the same revisions but we're (hopefully) using - # different command line arguments, let's support that too. - baseline_name += "-1" - target_name += "-2" - - set_output("baseline-analysis", baseline_name + ".json") - set_output("baseline-setup-cmd", baseline_cmd) - set_output("target-analysis", target_name + ".json") - set_output("target-setup-cmd", target_cmd) + env = f"{platform.system()}-{platform.python_version()}-{diff_shades.__version__}" + for entry in jobs: + entry["baseline-analysis"] = f"{entry['mode']}-{baseline_name}.json" + entry["baseline-setup-cmd"] = baseline_cmd + entry["target-analysis"] = f"{entry['mode']}-{target_name}.json" + entry["target-setup-cmd"] = target_cmd + entry["baseline-cache-key"] = f"{env}-{baseline_name}-{entry['mode']}" + if event == "pull_request": + # These are only needed for the PR comment. 
+ entry["baseline-sha"] = baseline_rev + entry["target-sha"] = pr_rev - key = f"{platform.system()}-{platform.python_version()}-{diff_shades.__version__}" - key += f"-{baseline_name}-{baseline_args.encode('utf-8').hex()}" - set_output("baseline-cache-key", key) + set_output("matrix", json.dumps(jobs, indent=None)) + pprint.pprint(jobs) @main.command("comment-body", help="Generate the body for a summary PR comment.") @@ -238,15 +199,13 @@ def comment_details(run_id: str) -> None: set_output("needs-comment", "true") jobs = http_get(data["jobs_url"])["jobs"] - assert len(jobs) == 1, "multiple jobs not supported nor tested" - job = jobs[0] - steps = {s["name"]: s["number"] for s in job["steps"]} - diff_step = steps[DIFF_STEP_NAME] - diff_url = job["html_url"] + f"#step:{diff_step}:1" - - artifacts_data = http_get(data["artifacts_url"])["artifacts"] - artifacts = {a["name"]: a["archive_download_url"] for a in artifacts_data} - comment_url = artifacts[COMMENT_FILE] + job = next(j for j in jobs if j["name"] == "analysis / preview-changes") + diff_step = next(s for s in job["steps"] if s["name"] == DIFF_STEP_NAME) + diff_url = job["html_url"] + f"#step:{diff_step['number']}:1" + + artifacts = http_get(data["artifacts_url"])["artifacts"] + comment_artifact = next(a for a in artifacts if a["name"] == COMMENT_FILE) + comment_url = comment_artifact["archive_download_url"] comment_zip = BytesIO(http_get(comment_url, is_json=False)) with zipfile.ZipFile(comment_zip) as zfile: with zfile.open(COMMENT_FILE) as rf: diff --git a/src/black/__init__.py b/src/black/__init__.py index 51e31e9..a82cf6a 100644 --- a/src/black/__init__.py +++ b/src/black/__init__.py @@ -697,6 +697,9 @@ def reformat_code( report.failed(path, str(exc)) +# diff-shades depends on being to monkeypatch this function to operate. I know it's +# not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26 +@mypyc_attr(patchable=True) def reformat_one( src: Path, fast: bool, write_back: WriteBack, mode: Mode, report: "Report" ) -> None: -- 2.39.5 From ac7402cbf6a0deb5c74e9abcffc5bd7b1148fda5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Mar 2022 13:21:13 -0400 Subject: [PATCH 13/16] Bump sphinx from 4.4.0 to 4.5.0 in /docs (GH-2959) Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 4.4.0 to 4.5.0. - [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/4.x/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v4.4.0...v4.5.0) --- updated-dependencies: - dependency-name: sphinx dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index 5ca7a6f..63c9c8f 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,7 +1,7 @@ # Used by ReadTheDocs; pinned requirements for stability. 
myst-parser==0.16.1 -Sphinx==4.4.0 +Sphinx==4.5.0 sphinxcontrib-programoutput==0.17 sphinx_copybutton==0.5.0 furo==2022.3.4 -- 2.39.5 From e9681a40dcb3d38b56b301d811bb1c55201fd97e Mon Sep 17 00:00:00 2001 From: Jelle Zijlstra Date: Mon, 28 Mar 2022 12:01:13 -0700 Subject: [PATCH 14/16] Fix _unicodefun patch code for Click 8.1.0 (#2966) Fixes #2964 --- .github/workflows/diff_shades.yml | 4 ++-- CHANGES.md | 1 + src/black/__init__.py | 14 +++++++++++--- tests/test_black.py | 2 +- 4 files changed, 15 insertions(+), 6 deletions(-) diff --git a/.github/workflows/diff_shades.yml b/.github/workflows/diff_shades.yml index 51fcebc..0529b13 100644 --- a/.github/workflows/diff_shades.yml +++ b/.github/workflows/diff_shades.yml @@ -24,7 +24,7 @@ jobs: - name: Install diff-shades and support dependencies run: | - python -m pip install click packaging urllib3 + python -m pip install 'click<8.1.0' packaging urllib3 python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip - name: Calculate run configuration & metadata @@ -59,7 +59,7 @@ jobs: - name: Install diff-shades and support dependencies run: | python -m pip install https://github.com/ichard26/diff-shades/archive/stable.zip - python -m pip install click packaging urllib3 + python -m pip install 'click<8.1.0' packaging urllib3 python -m pip install -r .github/mypyc-requirements.txt # After checking out old revisions, this might not exist so we'll use a copy. cat scripts/diff_shades_gha_helper.py > helper.py diff --git a/CHANGES.md b/CHANGES.md index d34bd4e..02327fc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -55,6 +55,7 @@ +- Fix Black to work with Click 8.1.0 (#2966) - On Python 3.11 and newer, use the standard library's `tomllib` instead of `tomli` (#2903) - `black-primer`, the deprecated internal devtool, has been removed and copied to a diff --git a/src/black/__init__.py b/src/black/__init__.py index a82cf6a..e9d3c1b 100644 --- a/src/black/__init__.py +++ b/src/black/__init__.py @@ -1427,13 +1427,21 @@ def patch_click() -> None: file paths is minimal since it's Python source code. Moreover, this crash was spurious on Python 3.7 thanks to PEP 538 and PEP 540. 
""" + modules: List[Any] = [] try: from click import core + except ImportError: + pass + else: + modules.append(core) + try: from click import _unicodefun - except ModuleNotFoundError: - return + except ImportError: + pass + else: + modules.append(_unicodefun) - for module in (core, _unicodefun): + for module in modules: if hasattr(module, "_verify_python3_env"): module._verify_python3_env = lambda: None # type: ignore if hasattr(module, "_verify_python_env"): diff --git a/tests/test_black.py b/tests/test_black.py index b1bf177..ce7bab2 100644 --- a/tests/test_black.py +++ b/tests/test_black.py @@ -1257,7 +1257,7 @@ class BlackTestCase(BlackBaseTestCase): def test_shhh_click(self) -> None: try: from click import _unicodefun - except ModuleNotFoundError: + except ImportError: self.skipTest("Incompatible Click version") if not hasattr(_unicodefun, "_verify_python3_env"): self.skipTest("Incompatible Click version") -- 2.39.5 From ae2c0758c9e61a385df9700dc9c231bf54887041 Mon Sep 17 00:00:00 2001 From: Jelle Zijlstra Date: Mon, 28 Mar 2022 12:08:29 -0700 Subject: [PATCH 15/16] Prepare release 22.3.0 (#2968) --- CHANGES.md | 57 +++++++++++++-------- docs/integrations/source_version_control.md | 2 +- docs/usage_and_configuration/the_basics.md | 2 +- 3 files changed, 39 insertions(+), 22 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 02327fc..f68bc8f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -14,11 +14,6 @@ -- Code cell separators `#%%` are now standardised to `# %%` (#2919) -- Remove unnecessary parentheses from `except` statements (#2939) -- Remove unnecessary parentheses from tuple unpacking in `for` loops (#2945) -- Avoid magic-trailing-comma in single-element subscripts (#2942) - ### _Blackd_ @@ -27,34 +22,62 @@ +### Documentation + + + +### Integrations + + + +### Output + + + +### Packaging + + + +### Parser + + + +### Performance + + + +## 22.3.0 + +### Preview style + +- Code cell separators `#%%` are now standardised to `# %%` (#2919) +- Remove unnecessary parentheses from `except` statements (#2939) +- Remove unnecessary parentheses from tuple unpacking in `for` loops (#2945) +- Avoid magic-trailing-comma in single-element subscripts (#2942) + +### Configuration + - Do not format `__pypackages__` directories by default (#2836) - Add support for specifying stable version with `--required-version` (#2832). - Avoid crashing when the user has no homedir (#2814) - Avoid crashing when md5 is not available (#2905) +- Fix handling of directory junctions on Windows (#2904) ### Documentation - - - Update pylint config documentation (#2931) ### Integrations - - - Move test to disable plugin in Vim/Neovim, which speeds up loading (#2896) ### Output - - - In verbose, mode, log when _Black_ is using user-level config (#2861) ### Packaging - - - Fix Black to work with Click 8.1.0 (#2966) - On Python 3.11 and newer, use the standard library's `tomllib` instead of `tomli` (#2903) @@ -66,12 +89,6 @@ - Black can now parse starred expressions in the target of `for` and `async for` statements, e.g `for item in *items_1, *items_2: pass` (#2879). -- Fix handling of directory junctions on Windows (#2904) - -### Performance - - - ## 22.1.0 At long last, _Black_ is no longer a beta product! 
This is the first non-beta release diff --git a/docs/integrations/source_version_control.md b/docs/integrations/source_version_control.md index 7215e11..d7d3da4 100644 --- a/docs/integrations/source_version_control.md +++ b/docs/integrations/source_version_control.md @@ -7,7 +7,7 @@ Use [pre-commit](https://pre-commit.com/). Once you ```yaml repos: - repo: https://github.com/psf/black - rev: 22.1.0 + rev: 22.3.0 hooks: - id: black # It is recommended to specify the latest version of Python diff --git a/docs/usage_and_configuration/the_basics.md b/docs/usage_and_configuration/the_basics.md index 48dda3b..4c793f4 100644 --- a/docs/usage_and_configuration/the_basics.md +++ b/docs/usage_and_configuration/the_basics.md @@ -173,7 +173,7 @@ You can check the version of _Black_ you have installed using the `--version` fl ```console $ black --version -black, version 22.1.0 +black, version 22.3.0 ``` An option to require a specific version to be running is also provided. -- 2.39.5 From 2d62a09e838d2df98961e6e93036abad54f28c5f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Mar 2022 21:42:53 -0400 Subject: [PATCH 16/16] Bump actions/cache from 2.1.7 to 3 (GH-2962) Bumps [actions/cache](https://github.com/actions/cache) from 2.1.7 to 3. - [Release notes](https://github.com/actions/cache/releases) - [Commits](https://github.com/actions/cache/compare/v2.1.7...v3) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/diff_shades.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/diff_shades.yml b/.github/workflows/diff_shades.yml index 0529b13..611d8bc 100644 --- a/.github/workflows/diff_shades.yml +++ b/.github/workflows/diff_shades.yml @@ -68,7 +68,7 @@ jobs: - name: Attempt to use cached baseline analysis id: baseline-cache - uses: actions/cache@v2.1.7 + uses: actions/cache@v3 with: path: ${{ matrix.baseline-analysis }} key: ${{ matrix.baseline-cache-key }} -- 2.39.5