from platform import system
from subprocess import CalledProcessError
from tempfile import TemporaryDirectory, gettempdir
-from typing import Any, Callable, Generator, Iterator, Tuple
+from typing import Any, Callable, Iterator, Tuple
from unittest.mock import Mock, patch
from click.testing import CliRunner
68 / 69 succeeded (98.55%) ✅
1 / 69 FAILED (1.45%) 💩
- - 0 projects Disabled by config
- - 0 projects skipped due to Python Version
+ - 0 projects disabled by config
+ - 0 projects skipped due to Python version
- 0 skipped due to long checkout
-Failed Projects:
+Failed projects:
## black:
- Returned 69
- stdout:
-black didn't work
+Black didn't work
"""
FAKE_PROJECT_CONFIG = {
@contextmanager
-def capture_stdout(command: Callable, *args: Any, **kwargs: Any) -> Generator:
+def capture_stdout(
+ command: Callable[..., Any], *args: Any, **kwargs: Any
+) -> Iterator[str]:
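+ """Run command and yield everything it wrote to stdout as one string."""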
old_stdout, sys.stdout = sys.stdout, StringIO()
try:
command(*args, **kwargs)
loop.close()
-async def raise_subprocess_error(*args: Any, **kwargs: Any) -> None:
+async def raise_subprocess_error_1(*args: Any, **kwargs: Any) -> None:
raise CalledProcessError(1, ["unittest", "error"], b"", b"")
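+# 123 is the exit code Black uses when an internal error stops it from formatting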
+async def raise_subprocess_error_123(*args: Any, **kwargs: Any) -> None:
+ raise CalledProcessError(123, ["unittest", "error"], b"", b"")
+
+
async def return_false(*args: Any, **kwargs: Any) -> bool:
return False
"success": 68,
"wrong_py_ver": 0,
},
- {"black": CalledProcessError(69, ["black"], b"black didn't work", b"")},
+ {"black": CalledProcessError(69, ["black"], b"Black didn't work", b"")},
)
with capture_stdout(lib.analyze_results, 69, fake_results) as analyze_stdout:
self.assertEqual(EXPECTED_ANALYSIS_OUTPUT, analyze_stdout)
@event_loop()
def test_black_run(self) -> None:
- """Pretend run black to ensure we cater for all scenarios"""
+ """Pretend to run Black to ensure we cater for all scenarios"""
loop = asyncio.get_event_loop()
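+ # black_run now takes the project name as its first argument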
+ project_name = "unittest"
repo_path = Path(gettempdir())
project_config = deepcopy(FAKE_PROJECT_CONFIG)
results = lib.Results({"failed": 0, "success": 0}, {})
- # Test a successful black run
+ # Test a successful Black run
with patch("black_primer.lib._gen_check_output", return_subproccess_output):
- loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+ loop.run_until_complete(
+ lib.black_run(project_name, repo_path, project_config, results)
+ )
self.assertEqual(1, results.stats["success"])
self.assertFalse(results.failed_projects)
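+ # Test a fail when formatting changes were expected but none were made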
project_config["expect_formatting_changes"] = True
results = lib.Results({"failed": 0, "success": 0}, {})
with patch("black_primer.lib._gen_check_output", return_subproccess_output):
- loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+ loop.run_until_complete(
+ lib.black_run(project_name, repo_path, project_config, results)
+ )
self.assertEqual(1, results.stats["failed"])
self.assertTrue(results.failed_projects)
# Test a fail based on returning 1 and not expecting formatting changes
project_config["expect_formatting_changes"] = False
results = lib.Results({"failed": 0, "success": 0}, {})
- with patch("black_primer.lib._gen_check_output", raise_subprocess_error):
- loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+ with patch("black_primer.lib._gen_check_output", raise_subprocess_error_1):
+ loop.run_until_complete(
+ lib.black_run(project_name, repo_path, project_config, results)
+ )
self.assertEqual(1, results.stats["failed"])
self.assertTrue(results.failed_projects)
+ # Test a formatting error based on returning 123
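+ # results is reused from the previous scenario, so this failure brings stats["failed"] to 2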
+ with patch("black_primer.lib._gen_check_output", raise_subprocess_error_123):
+ loop.run_until_complete(
+ lib.black_run(project_name, repo_path, project_config, results)
+ )
+ self.assertEqual(2, results.stats["failed"])
+
+ def test_flatten_cli_args(self) -> None:
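+ """_flatten_cli_args should join nested list fragments into single arguments"""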
+ fake_long_args = ["--arg", ["really/", "|long", "|regex", "|splitup"], "--done"]
+ expected = ["--arg", "really/|long|regex|splitup", "--done"]
+ self.assertEqual(expected, lib._flatten_cli_args(fake_long_args))
+
@event_loop()
def test_gen_check_output(self) -> None:
loop = asyncio.get_event_loop()
@patch("sys.stdout", new_callable=StringIO)
@event_loop()
def test_process_queue(self, mock_stdout: Mock) -> None:
+ """Test the process queue on primer itself
+ - If you have non black conforming formatting in primer itself this can fail"""
loop = asyncio.get_event_loop()
config_path = Path(lib.__file__).parent / "primer.json"
with patch("black_primer.lib.git_checkout_or_rebase", return_false):
with TemporaryDirectory() as td:
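+ # process_queue now expects the work directory as a Path rather than a str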
return_val = loop.run_until_complete(
- lib.process_queue(str(config_path), td, 2)
+ lib.process_queue(str(config_path), Path(td), 2)
)
self.assertEqual(0, return_val)
"rebase": False,
"workdir": str(work_dir),
"workers": 69,
+ "no_diff": False,
}
with patch("black_primer.cli.lib.process_queue", return_zero):
- return_val = loop.run_until_complete(cli.async_main(**args))
+ return_val = loop.run_until_complete(cli.async_main(**args)) # type: ignore
self.assertEqual(0, return_val)
def test_handle_debug(self) -> None: