Elaborate on Python support policy (#2819)
diff --git a/tests/test_primer.py b/tests/test_primer.py
index 8bfecd61a5723fc72a9d06f3e0f0d1ef12c5f757..0a9d2aec495042229c7e93ea011d361ba7f48302 100644
--- a/tests/test_primer.py
+++ b/tests/test_primer.py
@@ -9,9 +9,10 @@ from io import StringIO
 from os import getpid
 from pathlib import Path
 from platform import system
+from pytest import LogCaptureFixture
 from subprocess import CalledProcessError
 from tempfile import TemporaryDirectory, gettempdir
-from typing import Any, Callable, Generator, Iterator, Tuple
+from typing import Any, Callable, Iterator, List, Tuple, TypeVar
 from unittest.mock import Mock, patch
 
 from click.testing import CliRunner
@@ -20,6 +21,14 @@ from black_primer import cli, lib
 
 
 EXPECTED_ANALYSIS_OUTPUT = """\
+
+Failed projects:
+
+## black:
+ - Returned 69
+ - stdout:
+Black didn't work
+
 -- primer results 📊 --
 
 68 / 69 succeeded (98.55%) ✅
@@ -28,12 +37,7 @@ EXPECTED_ANALYSIS_OUTPUT = """\
  - 0 projects skipped due to Python version
  - 0 skipped due to long checkout
 
-Failed projects:
-
-## black:
- - Returned 69
- - stdout:
-Black didn't work
+Failed projects: black
 
 """
 FAKE_PROJECT_CONFIG = {
@@ -44,7 +48,9 @@ FAKE_PROJECT_CONFIG = {
 
 
 @contextmanager
-def capture_stdout(command: Callable, *args: Any, **kwargs: Any) -> Generator:
+def capture_stdout(
+    command: Callable[..., Any], *args: Any, **kwargs: Any
+) -> Iterator[str]:
     old_stdout, sys.stdout = sys.stdout, StringIO()
     try:
         command(*args, **kwargs)
@@ -87,6 +93,24 @@ async def return_zero(*args: Any, **kwargs: Any) -> int:
     return 0
 
 
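+# asyncio.Queue is only subscriptable (asyncio.Queue[T]) at runtime on
+# Python 3.9+, so older interpreters fall back to the unparametrized class.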
+if sys.version_info >= (3, 9):
+    T = TypeVar("T")
+    Q = asyncio.Queue[T]
+else:
+    T = Any
+    Q = asyncio.Queue
+
+
+def collect(queue: Q) -> List[T]:
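+    """Drain the queue without blocking and return its items in order."""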
+    ret = []
+    while True:
+        try:
+            item = queue.get_nowait()
+            ret.append(item)
+        except asyncio.QueueEmpty:
+            return ret
+
+
 class PrimerLibTests(unittest.TestCase):
     def test_analyze_results(self) -> None:
         fake_results = lib.Results(
@@ -106,13 +130,16 @@ class PrimerLibTests(unittest.TestCase):
     def test_black_run(self) -> None:
         """Pretend to run Black to ensure we cater for all scenarios"""
         loop = asyncio.get_event_loop()
+        project_name = "unittest"
         repo_path = Path(gettempdir())
         project_config = deepcopy(FAKE_PROJECT_CONFIG)
         results = lib.Results({"failed": 0, "success": 0}, {})
 
         # Test a successful Black run
         with patch("black_primer.lib._gen_check_output", return_subproccess_output):
-            loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+            loop.run_until_complete(
+                lib.black_run(project_name, repo_path, project_config, results)
+            )
         self.assertEqual(1, results.stats["success"])
         self.assertFalse(results.failed_projects)
 
@@ -120,7 +147,9 @@ class PrimerLibTests(unittest.TestCase):
         project_config["expect_formatting_changes"] = True
         results = lib.Results({"failed": 0, "success": 0}, {})
         with patch("black_primer.lib._gen_check_output", return_subproccess_output):
-            loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+            loop.run_until_complete(
+                lib.black_run(project_name, repo_path, project_config, results)
+            )
         self.assertEqual(1, results.stats["failed"])
         self.assertTrue(results.failed_projects)
 
@@ -128,22 +157,31 @@ class PrimerLibTests(unittest.TestCase):
         project_config["expect_formatting_changes"] = False
         results = lib.Results({"failed": 0, "success": 0}, {})
         with patch("black_primer.lib._gen_check_output", raise_subprocess_error_1):
-            loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+            loop.run_until_complete(
+                lib.black_run(project_name, repo_path, project_config, results)
+            )
         self.assertEqual(1, results.stats["failed"])
         self.assertTrue(results.failed_projects)
 
         # Test a formatting error based on returning 123
         with patch("black_primer.lib._gen_check_output", raise_subprocess_error_123):
-            loop.run_until_complete(lib.black_run(repo_path, project_config, results))
+            loop.run_until_complete(
+                lib.black_run(project_name, repo_path, project_config, results)
+            )
         self.assertEqual(2, results.stats["failed"])
 
+    def test_flatten_cli_args(self) -> None:
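+        # _flatten_cli_args is expected to join nested lists into single
+        # strings, e.g. so a long regex can be split across lines in the config.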
+        fake_long_args = ["--arg", ["really/", "|long", "|regex", "|splitup"], "--done"]
+        expected = ["--arg", "really/|long|regex|splitup", "--done"]
+        self.assertEqual(expected, lib._flatten_cli_args(fake_long_args))
+
     @event_loop()
     def test_gen_check_output(self) -> None:
         loop = asyncio.get_event_loop()
         stdout, stderr = loop.run_until_complete(
             lib._gen_check_output([lib.BLACK_BINARY, "--help"])
         )
-        self.assertTrue("The uncompromising code formatter" in stdout.decode("utf8"))
+        self.assertIn("The uncompromising code formatter", stdout.decode("utf8"))
         self.assertEqual(None, stderr)
 
         # TODO: Add a test to see failure works on Windows
@@ -175,15 +213,32 @@ class PrimerLibTests(unittest.TestCase):
     @patch("sys.stdout", new_callable=StringIO)
     @event_loop()
     def test_process_queue(self, mock_stdout: Mock) -> None:
+        """Test the process queue on primer itself
+        - If primer itself has non-Black-conforming formatting, this can fail"""
         loop = asyncio.get_event_loop()
         config_path = Path(lib.__file__).parent / "primer.json"
         with patch("black_primer.lib.git_checkout_or_rebase", return_false):
             with TemporaryDirectory() as td:
                 return_val = loop.run_until_complete(
-                    lib.process_queue(str(config_path), td, 2)
+                    lib.process_queue(
+                        str(config_path), Path(td), 2, ["django", "pyramid"]
+                    )
                 )
                 self.assertEqual(0, return_val)
 
+    @event_loop()
+    def test_load_projects_queue(self) -> None:
+        """Test loading the projects queue from primer's own config
+        - Only the projects requested should end up queued"""
+        loop = asyncio.get_event_loop()
+        config_path = Path(lib.__file__).parent / "primer.json"
+
+        config, projects_queue = loop.run_until_complete(
+            lib.load_projects_queue(config_path, ["django", "pyramid"])
+        )
+        projects = collect(projects_queue)
+        self.assertEqual(projects, ["django", "pyramid"])
+
 
 class PrimerCLITests(unittest.TestCase):
     @event_loop()
@@ -199,9 +254,10 @@ class PrimerCLITests(unittest.TestCase):
             "workdir": str(work_dir),
             "workers": 69,
             "no_diff": False,
+            "projects": "",
         }
         with patch("black_primer.cli.lib.process_queue", return_zero):
-            return_val = loop.run_until_complete(cli.async_main(**args))
+            return_val = loop.run_until_complete(cli.async_main(**args))  # type: ignore
             self.assertEqual(0, return_val)
 
     def test_handle_debug(self) -> None:
@@ -213,5 +269,23 @@ class PrimerCLITests(unittest.TestCase):
         self.assertEqual(result.exit_code, 0)
 
 
+def test_projects(caplog: LogCaptureFixture) -> None:
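+    # Unknown project names should be logged as not found while the run
+    # itself still succeeds.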
+    with event_loop():
+        runner = CliRunner()
+        result = runner.invoke(cli.main, ["--projects=STDIN,asdf"])
+        assert result.exit_code == 0
+        assert "1 / 1 succeeded" in result.output
+        assert "Projects not found: {'asdf'}" in caplog.text
+
+    caplog.clear()
+
+    with event_loop():
+        runner = CliRunner()
+        result = runner.invoke(cli.main, ["--projects=fdsa,STDIN"])
+        assert result.exit_code == 0
+        assert "1 / 1 succeeded" in result.output
+        assert "Projects not found: {'fdsa'}" in caplog.text
+
+
 if __name__ == "__main__":
     unittest.main()