68 / 69 succeeded (98.55%) ✅
1 / 69 FAILED (1.45%) 💩
- - 0 projects Disabled by config
- - 0 projects skipped due to Python Version
+ - 0 projects disabled by config
+ - 0 projects skipped due to Python version
- 0 skipped due to long checkout
-Failed Projects:
+Failed projects:
## black:
- Returned 69
- stdout:
-black didn't work
+Black didn't work
"""
FAKE_PROJECT_CONFIG = {
loop.close()
-async def raise_subprocess_error(*args: Any, **kwargs: Any) -> None:
+async def raise_subprocess_error_1(*args: Any, **kwargs: Any) -> None:
raise CalledProcessError(1, ["unittest", "error"], b"", b"")
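+ # Black exits with code 123 when it hits an internal error while formatting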
+async def raise_subprocess_error_123(*args: Any, **kwargs: Any) -> None:
+ raise CalledProcessError(123, ["unittest", "error"], b"", b"")
+
+
async def return_false(*args: Any, **kwargs: Any) -> bool:
return False
"success": 68,
"wrong_py_ver": 0,
},
- {"black": CalledProcessError(69, ["black"], b"black didn't work", b"")},
+ {"black": CalledProcessError(69, ["black"], b"Black didn't work", b"")},
)
with capture_stdout(lib.analyze_results, 69, fake_results) as analyze_stdout:
self.assertEqual(EXPECTED_ANALYSIS_OUTPUT, analyze_stdout)
@event_loop()
def test_black_run(self) -> None:
- """Pretend run black to ensure we cater for all scenarios"""
+ """Pretend to run Black to ensure we cater for all scenarios"""
loop = asyncio.get_event_loop()
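+ # black_run now takes the project's name as its first argument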
+ project_name = "unittest"
repo_path = Path(gettempdir())
project_config = deepcopy(FAKE_PROJECT_CONFIG)
results = lib.Results({"failed": 0, "success": 0}, {})
- # Test a successful black run
+ # Test a successful Black run
with patch("black_primer.lib._gen_check_output", return_subproccess_output):
- loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+ loop.run_until_complete(
+ lib.black_run(project_name, repo_path, project_config, results)
+ )
self.assertEqual(1, results.stats["success"])
self.assertFalse(results.failed_projects)
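+ # Test a failure when formatting changes were expected but none were made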
project_config["expect_formatting_changes"] = True
results = lib.Results({"failed": 0, "success": 0}, {})
with patch("black_primer.lib._gen_check_output", return_subproccess_output):
- loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+ loop.run_until_complete(
+ lib.black_run(project_name, repo_path, project_config, results)
+ )
self.assertEqual(1, results.stats["failed"])
self.assertTrue(results.failed_projects)
# Test a failure when Black returns 1 and no formatting changes are expected
project_config["expect_formatting_changes"] = False
results = lib.Results({"failed": 0, "success": 0}, {})
- with patch("black_primer.lib._gen_check_output", raise_subprocess_error):
- loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+ with patch("black_primer.lib._gen_check_output", raise_subprocess_error_1):
+ loop.run_until_complete(
+ lib.black_run(project_name, repo_path, project_config, results)
+ )
self.assertEqual(1, results.stats["failed"])
self.assertTrue(results.failed_projects)
+ # Test a failure when Black returns 123 (an internal error during formatting)
+ with patch("black_primer.lib._gen_check_output", raise_subprocess_error_123):
+ loop.run_until_complete(
+ lib.black_run(project_name, repo_path, project_config, results)
+ )
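+ # results is reused from the previous case, so failures accumulate to 2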
+ self.assertEqual(2, results.stats["failed"])
+
@event_loop()
def test_gen_check_output(self) -> None:
loop = asyncio.get_event_loop()
"rebase": False,
"workdir": str(work_dir),
"workers": 69,
+ "no_diff": False,
}
with patch("black_primer.cli.lib.process_queue", return_zero):
return_val = loop.run_until_complete(cli.async_main(**args))