import asyncio
import logging
+import os
import signal
import sys
from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
from mypy_extensions import mypyc_attr
-from black import DEFAULT_WORKERS, WriteBack, format_file_in_place
+from black import WriteBack, format_file_in_place
from black.cache import Cache, filter_cached, read_cache, write_cache
from black.mode import Mode
from black.output import err
for task in to_cancel:
task.cancel()
- if sys.version_info >= (3, 7):
- loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
- else:
- loop.run_until_complete(
- asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)
- )
+ loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
finally:
        # `concurrent.futures.Future` objects cannot be cancelled once they
        # are already running. There might still be some running when
        # `shutdown()` was called; silence their logger before closing the loop.
        cf_logger = logging.getLogger("concurrent.futures")
        cf_logger.setLevel(logging.CRITICAL)
        loop.close()
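
# A minimal standalone sketch of the same cancel-then-drain pattern
# (illustrative only, not black's code). With Python >= 3.7 guaranteed, the
# single gather() call above suffices: asyncio.gather's `loop` argument was
# deprecated in 3.8 and removed in 3.10. `_sleeper` and `_demo_shutdown` are
# hypothetical names:
import asyncio

async def _sleeper() -> None:
    await asyncio.sleep(3600)

def _demo_shutdown() -> None:
    loop = asyncio.new_event_loop()
    loop.create_task(_sleeper())
    to_cancel = [task for task in asyncio.all_tasks(loop) if not task.done()]
    for task in to_cancel:
        task.cancel()
    # return_exceptions=True turns each CancelledError into a gathered result
    # instead of an exception, so every task is awaited before the loop closes.
    loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
    loop.close()
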
maybe_install_uvloop()
executor: Executor
- worker_count = workers if workers is not None else DEFAULT_WORKERS
+ if workers is None:
+ workers = os.cpu_count() or 1
if sys.platform == "win32":
# Work around https://bugs.python.org/issue26903
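        # (Windows' WaitForMultipleObjects can wait on at most 64 handles;
        # ProcessPoolExecutor needs one per worker plus a few of its own, so
        # more than about 60 workers fails with ValueError.)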
- assert worker_count is not None
- worker_count = min(worker_count, 60)
+ workers = min(workers, 60)
try:
- executor = ProcessPoolExecutor(max_workers=worker_count)
+ executor = ProcessPoolExecutor(max_workers=workers)
except (ImportError, NotImplementedError, OSError):
        # we arrive here if the underlying system does not support multi-processing
        # like in AWS Lambda or Termux, in which case we gracefully fall back to
        # a ThreadPoolExecutor with just a single worker (more workers would not
        # do us any good due to the Global Interpreter Lock)
        executor = ThreadPoolExecutor(max_workers=1)
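
# The same executor-selection policy, factored into a standalone helper so the
# fallback path is easy to exercise in isolation (a sketch; `_make_executor`
# is a hypothetical name, not black's API):
import os
import sys
from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
from typing import Optional

def _make_executor(workers: Optional[int] = None) -> Executor:
    if workers is None:
        workers = os.cpu_count() or 1
    if sys.platform == "win32":
        workers = min(workers, 60)  # https://bugs.python.org/issue26903
    try:
        # Constructing the pool is what fails on platforms without working
        # multiprocessing primitives (e.g. a missing sem_open implementation).
        return ProcessPoolExecutor(max_workers=workers)
    except (ImportError, NotImplementedError, OSError):
        # One thread is enough: the GIL would serialize CPU-bound formatting
        # across extra threads anyway.
        return ThreadPoolExecutor(max_workers=1)
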
sources_to_cache.append(src)
report.done(src, changed)
if cancelled:
- if sys.version_info >= (3, 7):
- await asyncio.gather(*cancelled, return_exceptions=True)
- else:
- await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
+ await asyncio.gather(*cancelled, return_exceptions=True)
if sources_to_cache:
write_cache(cache, sources_to_cache, mode)
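
# How the cache round-trip fits together, using the black.cache helpers
# imported at the top of this file (a sketch; the temp file is illustrative).
# filter_cached() splits sources into (needs formatting, already up to date),
# and only successfully formatted files should be written back, as
# sources_to_cache is above:
import tempfile
from pathlib import Path

from black.cache import filter_cached, read_cache, write_cache
from black.mode import Mode

mode = Mode()
src = Path(tempfile.mkdtemp()) / "example.py"  # stand-in for a real source
src.write_text("x = 1\n")

cache = read_cache(mode)                 # {} when no cache file exists yet
todo, cached = filter_cached(cache, {src})
# ... format each path in `todo`, collecting the ones that succeeded ...
write_cache(cache, todo, mode)           # recorded for the next run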