from os import getpid
from pathlib import Path
from platform import system
+from pytest import LogCaptureFixture
from subprocess import CalledProcessError
from tempfile import TemporaryDirectory, gettempdir
-from typing import Any, Callable, Generator, Iterator, Tuple
+from typing import Any, Callable, Iterator, List, Tuple, TypeVar
from unittest.mock import Mock, patch
from click.testing import CliRunner
EXPECTED_ANALYSIS_OUTPUT = """\
+
+Failed projects:
+
+## black:
+ - Returned 69
+ - stdout:
+Black didn't work
+
-- primer results 📊 --
68 / 69 succeeded (98.55%) ✅
- 0 projects skipped due to Python version
- 0 skipped due to long checkout
-Failed projects:
-
-## black:
- - Returned 69
- - stdout:
-Black didn't work
+Failed projects: black
"""
FAKE_PROJECT_CONFIG = {
@contextmanager
-def capture_stdout(command: Callable, *args: Any, **kwargs: Any) -> Generator:
+def capture_stdout(
+ command: Callable[..., Any], *args: Any, **kwargs: Any
+) -> Iterator[str]:
old_stdout, sys.stdout = sys.stdout, StringIO()
try:
command(*args, **kwargs)
return 0
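+
+# asyncio.Queue only supports subscripting at runtime on Python 3.9+,
+# so older versions fall back to the unparameterized Queue with Any items.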
+if sys.version_info >= (3, 9):
+ T = TypeVar("T")
+ Q = asyncio.Queue[T]
+else:
+ T = Any
+ Q = asyncio.Queue
+
+
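+# Synchronously drain a queue; get_nowait() raises QueueEmpty once all
+# items have been consumed, which ends the loop.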
+def collect(queue: Q) -> List[T]:
+ ret = []
+ while True:
+ try:
+ item = queue.get_nowait()
+ ret.append(item)
+ except asyncio.QueueEmpty:
+ return ret
+
+
class PrimerLibTests(unittest.TestCase):
def test_analyze_results(self) -> None:
fake_results = lib.Results(
def test_black_run(self) -> None:
"""Pretend to run Black to ensure we cater for all scenarios"""
loop = asyncio.get_event_loop()
+ project_name = "unittest"
repo_path = Path(gettempdir())
project_config = deepcopy(FAKE_PROJECT_CONFIG)
results = lib.Results({"failed": 0, "success": 0}, {})
# Test a successful Black run
with patch("black_primer.lib._gen_check_output", return_subproccess_output):
- loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+ loop.run_until_complete(
+ lib.black_run(project_name, repo_path, project_config, results)
+ )
self.assertEqual(1, results.stats["success"])
self.assertFalse(results.failed_projects)
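        # Test a fail when formatting changes were expected but none were made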
project_config["expect_formatting_changes"] = True
results = lib.Results({"failed": 0, "success": 0}, {})
with patch("black_primer.lib._gen_check_output", return_subproccess_output):
- loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+ loop.run_until_complete(
+ lib.black_run(project_name, repo_path, project_config, results)
+ )
self.assertEqual(1, results.stats["failed"])
self.assertTrue(results.failed_projects)
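        # Test a fail when the Black run raises CalledProcessError with code 1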
project_config["expect_formatting_changes"] = False
results = lib.Results({"failed": 0, "success": 0}, {})
with patch("black_primer.lib._gen_check_output", raise_subprocess_error_1):
- loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+ loop.run_until_complete(
+ lib.black_run(project_name, repo_path, project_config, results)
+ )
self.assertEqual(1, results.stats["failed"])
self.assertTrue(results.failed_projects)
# Test a formatting error based on returning 123
with patch("black_primer.lib._gen_check_output", raise_subprocess_error_123):
- loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+ loop.run_until_complete(
+ lib.black_run(project_name, repo_path, project_config, results)
+ )
self.assertEqual(2, results.stats["failed"])
+ def test_flatten_cli_args(self) -> None:
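+        """Test that nested lists of CLI args are joined into single strings,
+        e.g. so a long regex can be split across lines in the config"""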
+ fake_long_args = ["--arg", ["really/", "|long", "|regex", "|splitup"], "--done"]
+ expected = ["--arg", "really/|long|regex|splitup", "--done"]
+ self.assertEqual(expected, lib._flatten_cli_args(fake_long_args))
+
@event_loop()
def test_gen_check_output(self) -> None:
loop = asyncio.get_event_loop()
@patch("sys.stdout", new_callable=StringIO)
@event_loop()
def test_process_queue(self, mock_stdout: Mock) -> None:
+        """Test the process queue on primer itself
+        - This can fail if primer itself contains non-Black-conformant formatting"""
loop = asyncio.get_event_loop()
config_path = Path(lib.__file__).parent / "primer.json"
with patch("black_primer.lib.git_checkout_or_rebase", return_false):
with TemporaryDirectory() as td:
return_val = loop.run_until_complete(
- lib.process_queue(str(config_path), td, 2)
+ lib.process_queue(
+ str(config_path), Path(td), 2, ["django", "pyramid"]
+ )
)
self.assertEqual(0, return_val)
+ @event_loop()
+ def test_load_projects_queue(self) -> None:
+        """Test loading the projects queue from primer's own config"""
+ loop = asyncio.get_event_loop()
+ config_path = Path(lib.__file__).parent / "primer.json"
+
+ config, projects_queue = loop.run_until_complete(
+ lib.load_projects_queue(config_path, ["django", "pyramid"])
+ )
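+        # Drain the queue and verify the requested projects were queued in order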
+ projects = collect(projects_queue)
+ self.assertEqual(projects, ["django", "pyramid"])
+
class PrimerCLITests(unittest.TestCase):
@event_loop()
"workdir": str(work_dir),
"workers": 69,
"no_diff": False,
+ "projects": "",
}
with patch("black_primer.cli.lib.process_queue", return_zero):
- return_val = loop.run_until_complete(cli.async_main(**args))
+ return_val = loop.run_until_complete(cli.async_main(**args)) # type: ignore
self.assertEqual(0, return_val)
def test_handle_debug(self) -> None:
self.assertEqual(result.exit_code, 0)
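+
+# Assumes "STDIN" names a real entry in primer.json, so only the bogus
+# project should be reported as not found.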
+def test_projects(caplog: LogCaptureFixture) -> None:
+ with event_loop():
+ runner = CliRunner()
+ result = runner.invoke(cli.main, ["--projects=STDIN,asdf"])
+ assert result.exit_code == 0
+ assert "1 / 1 succeeded" in result.output
+ assert "Projects not found: {'asdf'}" in caplog.text
+
+ caplog.clear()
+
+ with event_loop():
+ runner = CliRunner()
+ result = runner.invoke(cli.main, ["--projects=fdsa,STDIN"])
+ assert result.exit_code == 0
+ assert "1 / 1 succeeded" in result.output
+ assert "Projects not found: {'fdsa'}" in caplog.text
+
+
if __name__ == "__main__":
unittest.main()