git.madduck.net Git - etc/vim.git/blobdiff - src/black/concurrency.py
import asyncio
import logging
import signal
import sys
from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
from mypy_extensions import mypyc_attr
-from black import DEFAULT_WORKERS, WriteBack, format_file_in_place
+from black import WriteBack, format_file_in_place
from black.cache import Cache, filter_cached, read_cache, write_cache
from black.mode import Mode
from black.output import err
for task in to_cancel:
task.cancel()
- if sys.version_info >= (3, 7):
- loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
- else:
- loop.run_until_complete(
- asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)
- )
+ loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
finally:
# `concurrent.futures.Future` objects cannot be cancelled once they
# are already running. There might be some when the `shutdown()` happened.
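The branch removed above existed only to support Python 3.6: the explicit loop= argument to asyncio.gather() was deprecated in Python 3.8 and removed in 3.10, and on 3.7+ gather() finds the loop from the tasks it is given, so the two calls collapse into one (presumably now that Black no longer needs to run on 3.6). A minimal, self-contained sketch of the same shutdown pattern follows; the helper name drain_and_close is illustrative, not Black's actual code.

    import asyncio

    def drain_and_close(loop: asyncio.AbstractEventLoop) -> None:
        # Cancel every task still pending on the loop, then run the loop just
        # long enough for the cancellations to be delivered and retrieved, so
        # nothing is logged as "Task exception was never retrieved" on close.
        to_cancel = asyncio.all_tasks(loop)  # module-level API, Python >= 3.7
        if not to_cancel:
            loop.close()
            return
        for task in to_cancel:
            task.cancel()
        # gather() picks the loop up from the tasks themselves; no loop= needed.
        loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
        loop.close()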
maybe_install_uvloop()
executor: Executor
- worker_count = workers if workers is not None else DEFAULT_WORKERS
+ if workers is None:
+ workers = os.cpu_count() or 1
if sys.platform == "win32":
# Work around https://bugs.python.org/issue26903
- assert worker_count is not None
- worker_count = min(worker_count, 60)
+ workers = min(workers, 60)
try:
- executor = ProcessPoolExecutor(max_workers=worker_count)
+ executor = ProcessPoolExecutor(max_workers=workers)
except (ImportError, NotImplementedError, OSError):
# we arrive here if the underlying system does not support multi-processing
# like in AWS Lambda or Termux, in which case we gracefully fallback to
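Taken together, this hunk replaces the DEFAULT_WORKERS constant that used to be imported from black with an inline default of one worker per CPU, keeps the Windows cap from bpo-26903 (WaitForMultipleObjects waits on at most 64 handles, and ProcessPoolExecutor itself refuses more than 61 workers on Windows), and falls back when process pools are unavailable. The sketch below restates that selection logic on its own; it assumes import os at module level, and the single-worker ThreadPoolExecutor fallback is inferred from the imports because the hunk cuts the explanatory comment off. The helper name choose_executor is hypothetical.

    import os
    import sys
    from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
    from typing import Optional

    def choose_executor(workers: Optional[int] = None) -> Executor:
        if workers is None:
            workers = os.cpu_count() or 1  # cpu_count() can return None
        if sys.platform == "win32":
            # Work around https://bugs.python.org/issue26903: stay below the
            # 61-worker limit that ProcessPoolExecutor enforces on Windows.
            workers = min(workers, 60)
        try:
            return ProcessPoolExecutor(max_workers=workers)
        except (ImportError, NotImplementedError, OSError):
            # No usable multiprocessing (e.g. AWS Lambda, Termux); a single
            # thread is assumed to be an acceptable fallback here.
            return ThreadPoolExecutor(max_workers=1)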
sources_to_cache.append(src)
report.done(src, changed)
if cancelled:
- if sys.version_info >= (3, 7):
- await asyncio.gather(*cancelled, return_exceptions=True)
- else:
- await asyncio.gather(*cancelled, loop=loop, return_exceptions=True)
+ await asyncio.gather(*cancelled, return_exceptions=True)
if sources_to_cache:
write_cache(cache, sources_to_cache, mode)
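As in the shutdown hunk, the only change here is dropping the Python 3.6 code path. The surviving call awaits the cancelled tasks with return_exceptions=True so each CancelledError is collected as a result instead of propagating, which lets the function still reach write_cache(). A toy example of that behaviour, not Black's code:

    import asyncio

    async def main() -> None:
        task = asyncio.create_task(asyncio.sleep(60))
        task.cancel()
        # Without return_exceptions=True this await would re-raise CancelledError;
        # with it, the error becomes a result and execution continues.
        results = await asyncio.gather(task, return_exceptions=True)
        assert isinstance(results[0], asyncio.CancelledError)
        print("cancelled task drained; safe to continue (e.g. write the cache)")

    asyncio.run(main())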