diff --git a/CHANGELOG.md b/CHANGELOG.md
index aba3b3551..c9c068667 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,8 @@

 ## Unreleased

+- Using pytest-xdist for faster local tests
+  [PR #440](https://github.com/aai-institute/pyDVL/pull/440)
 - Added `AntitheticPermutationSampler`
   [PR #439](https://github.com/aai-institute/pyDVL/pull/439)
 - Implementation of Data-OOB by @BastienZim
diff --git a/requirements-dev.txt b/requirements-dev.txt
index deeabebee..6bf514bb2 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -19,5 +19,6 @@ pytest-docker==2.0.0
 pytest-mock
 pytest-timeout
 pytest-lazy-fixture
+pytest-xdist>=3.3.1
 wheel
 twine==4.0.2
diff --git a/tests/conftest.py b/tests/conftest.py
index c99a38d3f..19ad9bf7a 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -196,7 +196,6 @@ def seed_numpy(seed=42):
     np.random.seed(seed)


-@pytest.fixture(scope="session")
 def num_workers():
     # Run with 2 CPUs inside GitHub actions
     if os.getenv("CI"):
@@ -205,9 +204,22 @@ def num_workers():
     return max(1, min(available_cpus() - 1, 4))


-@pytest.fixture
-def n_jobs(num_workers):
-    return num_workers
+@pytest.fixture(scope="session")
+def n_jobs():
+    return num_workers()
+
+
+def pytest_xdist_auto_num_workers(config) -> Optional[int]:
+    """Return the number of workers to use for pytest-xdist.
+
+    This is used by pytest-xdist to automatically determine the number of
+    workers to use. We want to use all available CPUs, but leave one CPU for
+    the main process.
+    """
+
+    if config.option.numprocesses == "auto":
+        return max(1, (available_cpus() - 1) // num_workers())
+    return None


 ################################################################################
diff --git a/tests/utils/conftest.py b/tests/utils/conftest.py
index 19c8a0798..276972c73 100644
--- a/tests/utils/conftest.py
+++ b/tests/utils/conftest.py
@@ -2,17 +2,19 @@

 from pydvl.parallel.config import ParallelConfig

+from ..conftest import num_workers
+

 @pytest.fixture(scope="module", params=["joblib", "ray-local", "ray-external"])
-def parallel_config(request, num_workers):
+def parallel_config(request):
     if request.param == "joblib":
-        yield ParallelConfig(backend="joblib", n_cpus_local=num_workers)
+        yield ParallelConfig(backend="joblib", n_cpus_local=num_workers())
     elif request.param == "ray-local":
         try:
             import ray
         except ImportError:
             pytest.skip("Ray not installed.")
-        yield ParallelConfig(backend="ray", n_cpus_local=num_workers)
+        yield ParallelConfig(backend="ray", n_cpus_local=num_workers())
         ray.shutdown()
     elif request.param == "ray-external":
         try:
@@ -22,11 +24,8 @@
         except ImportError:
             pytest.skip("Ray not installed.")
         # Starts a head-node for the cluster.
         cluster = Cluster(
-            initialize_head=True,
-            head_node_args={
-                "num_cpus": num_workers,
-            },
+            initialize_head=True, head_node_args={"num_cpus": num_workers()}
         )
         yield ParallelConfig(backend="ray", address=cluster.address)
         ray.shutdown()
diff --git a/tests/utils/test_parallel.py b/tests/utils/test_parallel.py
index 4677de4ee..bca0c56af 100644
--- a/tests/utils/test_parallel.py
+++ b/tests/utils/test_parallel.py
@@ -12,15 +12,17 @@
 from pydvl.parallel.futures import init_executor
 from pydvl.utils.types import Seed

+from ..conftest import num_workers

-def test_effective_n_jobs(parallel_config, num_workers):
+
+def test_effective_n_jobs(parallel_config):
     parallel_backend = init_parallel_backend(parallel_config)
     assert parallel_backend.effective_n_jobs(1) == 1
-    assert parallel_backend.effective_n_jobs(4) == min(4, num_workers)
+    assert parallel_backend.effective_n_jobs(4) == min(4, num_workers())
     if parallel_config.address is None:
-        assert parallel_backend.effective_n_jobs(-1) == num_workers
+        assert parallel_backend.effective_n_jobs(-1) == num_workers()
     else:
-        assert parallel_backend.effective_n_jobs(-1) == num_workers
+        assert parallel_backend.effective_n_jobs(-1) == num_workers()

     for n_jobs in [-1, 1, 2]:
         assert parallel_backend.effective_n_jobs(n_jobs) == effective_n_jobs(
@@ -166,7 +168,7 @@ def test_map_reduce_seeding(parallel_config, seed_1, seed_2, op):
     assert op(result_1, result_2)


-def test_wrap_function(parallel_config, num_workers):
+def test_wrap_function(parallel_config):
     if parallel_config.backend != "ray":
         pytest.skip("Only makes sense for ray")

@@ -188,8 +190,8 @@ def get_pid():
         return os.getpid()

     wrapped_func = parallel_backend.wrap(get_pid, num_cpus=1)
-    pids = parallel_backend.get([wrapped_func() for _ in range(num_workers)])
-    assert len(set(pids)) == num_workers
+    pids = parallel_backend.get([wrapped_func() for _ in range(num_workers())])
+    assert len(set(pids)) == num_workers()


 def test_futures_executor_submit(parallel_config):
@@ -205,7 +207,7 @@ def test_futures_executor_map(parallel_config):
     assert results == [1, 2, 3]


-def test_futures_executor_map_with_max_workers(parallel_config, num_workers):
+def test_futures_executor_map_with_max_workers(parallel_config):
     if parallel_config.backend != "ray":
         pytest.skip("Currently this test only works with Ray")

@@ -215,12 +217,12 @@ def func(_):

     start_time = time.monotonic()
     with init_executor(config=parallel_config) as executor:
-        assert executor._max_workers == num_workers
+        assert executor._max_workers == num_workers()
         list(executor.map(func, range(3)))
     end_time = time.monotonic()
     total_time = end_time - start_time
-    # We expect the time difference to be > 3 / num_workers, but has to be at least 1
-    assert total_time > max(1.0, 3 / num_workers)
+    # We expect the time difference to be > 3 / num_workers(), but has to be at least 1
+    assert total_time > max(1.0, 3 / num_workers())


 def test_future_cancellation(parallel_config):
diff --git a/tests/value/conftest.py b/tests/value/conftest.py
index 38cddfdf8..75fedb38d 100644
--- a/tests/value/conftest.py
+++ b/tests/value/conftest.py
@@ -10,6 +10,7 @@
 from pydvl.utils.status import Status
 from pydvl.value import ValuationResult

+from ..conftest import num_workers
 from . import polynomial


@@ -122,5 +123,5 @@ def linear_shapley(linear_dataset, scorer, n_jobs):


 @pytest.fixture(scope="module")
-def parallel_config(num_workers):
-    yield ParallelConfig(backend="joblib", n_cpus_local=num_workers, wait_timeout=0.1)
+def parallel_config():
+    yield ParallelConfig(backend="joblib", n_cpus_local=num_workers(), wait_timeout=0.1)
diff --git a/tox.ini b/tox.ini
index b01f1dfb5..6b6e14c75 100644
--- a/tox.ini
+++ b/tox.ini
@@ -12,12 +12,12 @@ setenv =

 [testenv:base]
 description = Tests base modules
 commands =
-    pytest --cov "{envsitepackagesdir}/pydvl" -m "not torch" {posargs}
+    pytest -n auto --cov "{envsitepackagesdir}/pydvl" -m "not torch" {posargs}

 [testenv:torch]
 description = Tests modules that rely on pytorch
 commands =
-    pytest --cov "{envsitepackagesdir}/pydvl" -m torch {posargs}
+    pytest -n auto --cov "{envsitepackagesdir}/pydvl" -m torch {posargs}
 extras =
     influence
@@ -26,7 +26,7 @@ description = Tests notebooks
 setenv =
     PYTHONPATH={toxinidir}/notebooks
 commands =
-    pytest notebooks/ --cov "{envsitepackagesdir}/pydvl"
+    pytest -n auto notebooks/ --cov "{envsitepackagesdir}/pydvl" {posargs}
 deps =
     {[testenv]deps}
     jupyter==1.0.0
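How the pieces fit: with `pytest -n auto`, pytest-xdist calls the `pytest_xdist_auto_num_workers` hook added in tests/conftest.py to pick the number of test processes. Because each test process may itself start up to `num_workers()` subprocesses, the hook hands out the spare CPUs in blocks of that size. Below is a minimal sketch of the resulting CPU budget; it mirrors the two functions from the diff but is not part of the PR, and it assumes a machine with 8 CPUs and the CI environment variable unset:

    # Sketch only: standalone reimplementation of the diff's logic for
    # illustration, with the CPU count passed in explicitly (assumed 8).
    def num_workers(available_cpus: int = 8) -> int:
        # Mirrors tests/conftest.py outside CI: leave one CPU free, cap at 4.
        return max(1, min(available_cpus - 1, 4))

    def xdist_processes(available_cpus: int = 8) -> int:
        # Mirrors pytest_xdist_auto_num_workers under `pytest -n auto`.
        return max(1, (available_cpus - 1) // num_workers(available_cpus))

    assert num_workers() == 4        # max(1, min(7, 4))
    assert xdist_processes() == 1    # max(1, 7 // 4)
    # Total demand: 1 xdist process x 4 subprocesses = 4 of 8 CPUs.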