diff --git a/.github/workflows/push_flow.yml b/.github/workflows/push_flow.yml
index b2de5504..01d7f089 100644
--- a/.github/workflows/push_flow.yml
+++ b/.github/workflows/push_flow.yml
@@ -62,6 +62,7 @@ jobs:
         python -m pip install --upgrade pip
         pip install -r requirements.txt
         python setup.py develop
+        pip install -r tests/requirements.txt
     - name: Test with pytest
      run: |
         pytest --cov
@@ -96,7 +97,7 @@ jobs:
     - uses: actions/setup-python@v3
     - name: Install CLI
      run: |
-        pip install -r requirements.txt
+        pip install -r requirements.txt -r tests/requirements.txt
         pip install codecov-cli
     - name: Label Analysis
      run: |
diff --git a/codecov_cli/runners/python_standard_runner.py b/codecov_cli/runners/python_standard_runner.py
index 3678fbae..af8aa76b 100644
--- a/codecov_cli/runners/python_standard_runner.py
+++ b/codecov_cli/runners/python_standard_runner.py
@@ -1,17 +1,13 @@
 import logging
 import random
 import subprocess
-from contextlib import redirect_stdout
-from io import StringIO, TextIOWrapper
-from multiprocessing import Process, Queue, get_context
-from os import getcwd
+from multiprocessing import Process, Queue
 from queue import Empty
 from subprocess import CalledProcessError
 from sys import path, stdout
 from typing import List, Optional
 
 import click
-import pytest
 
 from codecov_cli.runners.types import (
     LabelAnalysisRequestResult,
@@ -43,58 +39,6 @@ def coverage_root(self) -> str:
         """
         return self.get("coverage_root", "./")
 
-    @property
-    def strict_mode(self) -> bool:
-        """
-        Run pytest from within Python instead of using subprocess.run
-        This is potentailly safer than using subprocess.run because it guarantees better that
-        the program running is indeed pytest.
-        But it might not work everytime due to import issues related to Python caching modules.
-        """
-        return self.get("strict_mode", False)
-
-
-def _include_curr_dir(method):
-    """
-    Account for the difference 'pytest' vs 'python -m pytest'
-    https://docs.pytest.org/en/7.1.x/how-to/usage.html#calling-pytest-through-python-m-pytest
-    Used only in strict_mode
-    """
-
-    def call_method(self, *args, **kwargs):
-        curr_dir = getcwd()
-        path.append(curr_dir)
-
-        result = method(self, *args, **kwargs)
-
-        path.remove(curr_dir)
-        return result
-
-    return call_method
-
-
-def _execute_pytest_subprocess(
-    pytest_args: List[str],
-    queue: Queue,
-    parent_stdout: TextIOWrapper,
-    capture_output: bool = True,
-):
-    """Runs pytest from python in a subprocess.
-    This is because we call it twice in the label-analysis process,
-    so we might have import errors if calling it directly.
-    Check the warning: https://docs.pytest.org/en/7.1.x/how-to/usage.html#calling-pytest-from-python-code
-
-    Returns the output value and pytest exit code via queue
-    """
-    subproces_stdout = parent_stdout
-    if capture_output:
-        subproces_stdout = StringIO()
-    with redirect_stdout(subproces_stdout):
-        result = pytest.main(pytest_args)
-    if capture_output:
-        queue.put({"output": subproces_stdout.getvalue()})
-    queue.put({"result": result})
-
-
 class PythonStandardRunner(LabelAnalysisRunnerInterface):
@@ -125,35 +69,6 @@ def _wait_pytest(self, pytest_process: Process, queue: Queue):
         pytest_process.join()
         return result, output
 
-    @_include_curr_dir
-    def _execute_pytest_strict(
-        self, pytest_args: List[str], capture_output: bool = True
-    ) -> str:
-        """Handles calling pytest from Python in a subprocess.
-        Raises Exception if pytest fails
-        Returns the complete pytest output
-        """
-        ctx = get_context(method="fork")
-        queue = ctx.Queue(2)
-        p = ctx.Process(
-            target=_execute_pytest_subprocess,
-            args=[pytest_args, queue, stdout, capture_output],
-        )
-        result, output = self._wait_pytest(p, queue)
-
-        if p.exitcode != 0 or (result != pytest.ExitCode.OK and result != 0):
-            message = f"Pytest exited with non-zero code {result}."
-            message += "\nThis is likely not a problem with label-analysis. Check pytest's output and options."
-            if capture_output:
-                # If pytest failed but we captured its output the user won't know what's wrong
-                # So we need to include that in the error message
-                message += "\nPYTEST OUTPUT:"
-                message += "\n" + output
-            else:
-                message += "\n(you can check pytest options on the logs before the test session start)"
-            raise click.ClickException(message)
-        return output
-
     def parse_captured_output_error(self, exp: CalledProcessError) -> str:
         result = ""
         for out_stream in [exp.stdout, exp.stderr]:
@@ -202,10 +117,7 @@ def collect_tests(self):
             ),
         )
 
-        if self.params.strict_mode:
-            output = self._execute_pytest_strict(options_to_use)
-        else:
-            output = self._execute_pytest(options_to_use)
+        output = self._execute_pytest(options_to_use)
         lines = output.split(sep="\n")
         test_names = list(line for line in lines if ("::" in line and "test" in line))
         return test_names
@@ -254,10 +166,7 @@ def process_labelanalysis_result(self, result: LabelAnalysisRequestResult):
             "List of tests executed",
             extra=dict(extra_log_attributes=dict(executed_tests=tests_to_run)),
         )
-        if self.params.strict_mode:
-            output = self._execute_pytest_strict(command_array, capture_output=False)
-        else:
-            output = self._execute_pytest(command_array, capture_output=False)
+        output = self._execute_pytest(command_array, capture_output=False)
         logger.info(f"Finished running {len(tests_to_run)} tests successfully")
         logger.info(f" pytest options: \"{' '.join(default_options)}\"")
         logger.debug(output)
diff --git a/requirements.txt b/requirements.txt
index 4e97e220..20b98444 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,23 +2,19 @@
 # This file is autogenerated by pip-compile with Python 3.10
 # by the following command:
 #
-#    pip-compile --output-file=requirements.txt requirements.in setup.py
+#    pip-compile setup.py
 #
 anyio==4.0.0
     # via httpcore
-attrs==21.4.0
-    # via pytest
 certifi==2023.7.22
     # via
     #   httpcore
     #   httpx
     #   requests
-charset-normalizer==3.2.0
+charset-normalizer==3.3.0
     # via requests
 click==8.1.7
     # via codecov-cli (setup.py)
-coverage[toml]==7.3.1
-    # via pytest-cov
 exceptiongroup==1.1.3
     # via anyio
 h11==0.14.0
@@ -34,28 +30,6 @@ idna==3.4
     #   rfc3986
 ijson==3.2.3
     # via codecov-cli (setup.py)
-iniconfig==1.1.1
-    # via pytest
-packaging==21.3
-    # via pytest
-pluggy==1.0.0
-    # via pytest
-py==1.11.0
-    # via pytest
-pyparsing==3.0.9
-    # via packaging
-pytest==7.1.2
-    # via
-    #   codecov-cli (setup.py)
-    #   pytest-asyncio
-    #   pytest-cov
-    #   pytest-mock
-pytest-asyncio==0.21.1
-    # via -r requirements.in
-pytest-cov==4.1.0
-    # via codecov-cli (setup.py)
-pytest-mock==3.11.1
-    # via -r requirements.in
 pyyaml==6.0.1
     # via codecov-cli (setup.py)
 requests==2.31.0
@@ -71,13 +45,9 @@ sniffio==1.3.0
     #   anyio
     #   httpcore
     #   httpx
-tomli==2.0.1
-    # via
-    #   coverage
-    #   pytest
 tree-sitter==0.20.2
     # via codecov-cli (setup.py)
-urllib3==2.0.4
+urllib3==2.0.6
     # via
     #   requests
     #   responses
diff --git a/setup.py b/setup.py
index e66e5d7b..1eb2836b 100644
--- a/setup.py
+++ b/setup.py
@@ -24,8 +24,6 @@
         "click==8.*",
         "httpx==0.23.*",
         "ijson==3.*",
-        "pytest==7.*",
-        "pytest-cov>=3",
         "pyyaml==6.*",
         "responses==0.21.*",
         "smart-open==6.*",
diff --git a/requirements.in b/tests/requirements.in
similarity index 60%
rename from requirements.in
rename to tests/requirements.in
index 3c764f05..57ce4337 100644
--- a/requirements.in
+++ b/tests/requirements.in
@@ -1,2 +1,4 @@
+pytest
+pytest-cov
 pytest-mock
 pytest-asyncio
diff --git a/tests/requirements.txt b/tests/requirements.txt
new file mode 100644
index 00000000..a0dbd1ed
--- /dev/null
+++ b/tests/requirements.txt
@@ -0,0 +1,30 @@
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+#    pip-compile tests/requirements.in
+#
+coverage[toml]==7.3.1
+    # via pytest-cov
+exceptiongroup==1.1.3
+    # via pytest
+iniconfig==2.0.0
+    # via pytest
+packaging==23.2
+    # via pytest
+pluggy==1.3.0
+    # via pytest
+pytest==7.4.2
+    # via
+    #   -r tests/requirements.in
+    #   pytest-asyncio
+    #   pytest-cov
+    #   pytest-mock
+pytest-asyncio==0.21.1
+    # via -r tests/requirements.in
+pytest-cov==4.1.0
+    # via -r tests/requirements.in
+pytest-mock==3.11.1
+    # via -r tests/requirements.in
+tomli==2.0.1
+    # via pytest
diff --git a/tests/runners/test_python_standard_runner.py b/tests/runners/test_python_standard_runner.py
index 1724287d..75235009 100644
--- a/tests/runners/test_python_standard_runner.py
+++ b/tests/runners/test_python_standard_runner.py
@@ -5,51 +5,17 @@
 import pytest
 from pytest import ExitCode
 
-from codecov_cli.runners.python_standard_runner import (
-    PythonStandardRunner,
-    _execute_pytest_subprocess,
-)
+from codecov_cli.runners.python_standard_runner import PythonStandardRunner
 from codecov_cli.runners.python_standard_runner import logger as runner_logger
 from codecov_cli.runners.python_standard_runner import stdout as pyrunner_stdout
 from codecov_cli.runners.types import LabelAnalysisRequestResult
 
 
-@patch("codecov_cli.runners.python_standard_runner.pytest")
-def test_execute_pytest_subprocess(mock_pytest, mocker):
-    def side_effect(*args, **kwargs):
-        print("Pytest output")
-        return ExitCode.OK
-
-    mock_pytest.main.side_effect = side_effect
-    mock_queue = MagicMock()
-    _execute_pytest_subprocess(["pytest", "args"], mock_queue, MagicMock())
-    mock_pytest.main.assert_called_with(["pytest", "args"])
-    assert mock_queue.put.call_count == 2
-    mock_queue.put.assert_has_calls(
-        [call({"output": "Pytest output\n"}), call({"result": ExitCode.OK})]
-    )
-
-
-@patch("codecov_cli.runners.python_standard_runner.pytest")
-def test_execute_pytest_subprocess_no_capture_stdout(mock_pytest, mocker):
-    def side_effect(*args, **kwargs):
-        print("Pytest output")
-        return ExitCode.OK
-
-    mock_pytest.main.side_effect = side_effect
-    mock_queue = MagicMock()
-    _execute_pytest_subprocess(["pytest", "args"], mock_queue, MagicMock(), False)
-    mock_pytest.main.assert_called_with(["pytest", "args"])
-    assert mock_queue.put.call_count == 1
-    mock_queue.put.assert_has_calls([call({"result": ExitCode.OK})])
-
-
 class TestPythonStandardRunner(object):
     runner = PythonStandardRunner()
 
     def test_init_with_params(self):
         assert self.runner.params.collect_tests_options == []
-        assert self.runner.params.strict_mode == False
         assert self.runner.params.coverage_root == "./"
 
         config_params = dict(
@@ -73,102 +39,6 @@ def test_execute_pytest(self, mock_subprocess):
         )
         assert result == output
 
-    @patch(
-        "codecov_cli.runners.python_standard_runner.getcwd",
-        return_value="current directory",
-    )
-    @patch("codecov_cli.runners.python_standard_runner.path")
-    @patch("codecov_cli.runners.python_standard_runner.get_context")
-    def test_execute_pytest_strict_mode(
-        self, mock_get_context, mock_sys_path, mock_getcwd
-    ):
-        output = "Output in stdout"
-        mock_queue = MagicMock()
-        mock_queue.get.side_effect = [{"output": output}, {"result": ExitCode.OK}]
-        mock_process = MagicMock()
-        mock_process.exitcode = 0
-        mock_get_context.return_value.Queue.return_value = mock_queue
-        mock_get_context.return_value.Process.return_value = mock_process
-
-        result = self.runner._execute_pytest_strict(["--option", "--ignore=batata"])
-        mock_get_context.return_value.Queue.assert_called_with(2)
-        mock_get_context.return_value.Process.assert_called_with(
-            target=_execute_pytest_subprocess,
-            args=[["--option", "--ignore=batata"], mock_queue, pyrunner_stdout, True],
-        )
-        mock_sys_path.append.assert_called_with("current directory")
-        mock_sys_path.remove.assert_called_with("current directory")
-        assert mock_queue.get.call_count == 2
-        assert result == output
-
-    @patch(
-        "codecov_cli.runners.python_standard_runner.getcwd",
-        return_value="current directory",
-    )
-    @patch("codecov_cli.runners.python_standard_runner.path")
-    @patch("codecov_cli.runners.python_standard_runner.get_context")
-    def test_execute_pytest_fail_strict_mode_collection(
-        self, mock_get_context, mock_sys_path, mock_getcwd
-    ):
-        output = "Output in stdout"
-        mock_queue = MagicMock()
-        mock_queue.get.side_effect = [
-            {"output": output},
-            {"result": ExitCode.INTERNAL_ERROR},
-        ]
-        mock_process = MagicMock()
-        mock_process.exitcode = 0
-        mock_get_context.return_value.Queue.return_value = mock_queue
-        mock_get_context.return_value.Process.return_value = mock_process
-
-        with pytest.raises(Exception) as exp:
-            _ = self.runner._execute_pytest_strict(["--option", "--ignore=batata"])
-        assert (
-            str(exp.value)
-            == "Pytest exited with non-zero code 3.\nThis is likely not a problem with label-analysis. Check pytest's output and options.\nPYTEST OUTPUT:\nOutput in stdout"
-        )
-        mock_get_context.return_value.Queue.assert_called_with(2)
-        mock_get_context.return_value.Process.assert_called_with(
-            target=_execute_pytest_subprocess,
-            args=[["--option", "--ignore=batata"], mock_queue, pyrunner_stdout, True],
-        )
-        mock_sys_path.append.assert_called_with("current directory")
-
-    @patch(
-        "codecov_cli.runners.python_standard_runner.getcwd",
-        return_value="current directory",
-    )
-    @patch("codecov_cli.runners.python_standard_runner.path")
-    @patch("codecov_cli.runners.python_standard_runner.get_context")
-    def test_execute_pytest_fail_strict_mode_execution(
-        self, mock_get_context, mock_sys_path, mock_getcwd
-    ):
-        output = "Output in stdout"
-        mock_queue = MagicMock()
-        mock_queue.get.side_effect = [
-            {"output": output},
-            {"result": ExitCode.INTERNAL_ERROR},
-        ]
-        mock_process = MagicMock()
-        mock_process.exitcode = 0
-        mock_get_context.return_value.Queue.return_value = mock_queue
-        mock_get_context.return_value.Process.return_value = mock_process
-
-        with pytest.raises(Exception) as exp:
-            _ = self.runner._execute_pytest_strict(
-                ["--option", "--ignore=batata"], capture_output=False
-            )
-        assert (
-            str(exp.value)
-            == "Pytest exited with non-zero code 3.\nThis is likely not a problem with label-analysis. Check pytest's output and options.\n(you can check pytest options on the logs before the test session start)"
-        )
-        mock_get_context.return_value.Queue.assert_called_with(2)
-        mock_get_context.return_value.Process.assert_called_with(
-            target=_execute_pytest_subprocess,
-            args=[["--option", "--ignore=batata"], mock_queue, pyrunner_stdout, False],
-        )
-        mock_sys_path.append.assert_called_with("current directory")
-
     @patch("codecov_cli.runners.python_standard_runner.subprocess")
     def test_execute_pytest_fail_collection(self, mock_subprocess):
         def side_effect(command, *args, **kwargs):
@@ -257,40 +127,9 @@ def test_collect_tests(self, mocker):
             "_execute_pytest",
             return_value="\n".join(collected_test_list),
         )
-        mock_execute_strict = mocker.patch.object(
-            PythonStandardRunner,
-            "_execute_pytest_strict",
-            return_value="\n".join(collected_test_list),
-        )
 
         collected_tests_from_runner = self.runner.collect_tests()
         mock_execute.assert_called_with(["-q", "--collect-only"])
-        mock_execute_strict.assert_not_called()
-        assert collected_tests_from_runner == collected_test_list
-
-    def test_collect_tests_strict(self, mocker):
-        collected_test_list = [
-            "tests/services/upload/test_upload_service.py::test_do_upload_logic_happy_path_legacy_uploader"
-            "tests/services/upload/test_upload_service.py::test_do_upload_logic_happy_path"
-            "tests/services/upload/test_upload_service.py::test_do_upload_logic_dry_run"
-            "tests/services/upload/test_upload_service.py::test_do_upload_logic_verbose"
-        ]
-        mock_execute = mocker.patch.object(
-            PythonStandardRunner,
-            "_execute_pytest",
-            return_value="\n".join(collected_test_list),
-        )
-        mock_execute_strict = mocker.patch.object(
-            PythonStandardRunner,
-            "_execute_pytest_strict",
-            return_value="\n".join(collected_test_list),
-        )
-
-        runner_config = {"strict_mode": True}
-        runner = PythonStandardRunner(runner_config)
-        collected_tests_from_runner = runner.collect_tests()
-        mock_execute_strict.assert_called_with(["-q", "--collect-only"])
-        mock_execute.assert_not_called()
         assert collected_tests_from_runner == collected_test_list
 
     def test_collect_tests_with_options(self, mocker):
@@ -367,38 +206,6 @@ def test_process_label_analysis_result_diff_coverage_root(self, mocker):
             "test_in_diff",
         ]
 
-    def test_process_label_analysis_result_strict(self, mocker):
-        label_analysis_result = {
-            "present_report_labels": ["test_present"],
-            "absent_labels": ["test_absent"],
-            "present_diff_labels": ["test_in_diff"],
-            "global_level_labels": ["test_global"],
-        }
-        mock_execute = mocker.patch.object(PythonStandardRunner, "_execute_pytest")
-        mock_execute_strict = mocker.patch.object(
-            PythonStandardRunner, "_execute_pytest_strict"
-        )
-
-        runner_config = {"strict_mode": True}
-        runner = PythonStandardRunner(runner_config)
-        runner.process_labelanalysis_result(
-            LabelAnalysisRequestResult(label_analysis_result)
-        )
-        mock_execute.assert_not_called()
-        args, kwargs = mock_execute_strict.call_args
-        assert kwargs == {"capture_output": False}
-        assert isinstance(args[0], list)
-        actual_command = args[0]
-        assert actual_command[:2] == [
-            "--cov=./",
-            "--cov-context=test",
-        ]
-        assert sorted(actual_command[2:]) == [
-            "test_absent",
-            "test_global",
-            "test_in_diff",
-        ]
-
     def test_process_label_analysis_result_with_options(self, mocker):
         label_analysis_result = {
             "present_report_labels": ["test_present"],
@@ -408,9 +215,6 @@ def test_process_label_analysis_result_with_options(self, mocker):
         }
         mock_execute = mocker.patch.object(PythonStandardRunner, "_execute_pytest")
         mock_warning = mocker.patch.object(runner_logger, "warning")
-        mock_execute_strict = mocker.patch.object(
-            PythonStandardRunner, "_execute_pytest_strict"
-        )
 
         runner_config = {
             "execute_tests_options": ["-s", "--cov-report=xml", "--cov=something"]
         }
         runner = PythonStandardRunner(runner_config)
         runner.process_labelanalysis_result(
             LabelAnalysisRequestResult(label_analysis_result)
         )
-        mock_execute_strict.assert_not_called()
         args, kwargs = mock_execute.call_args
         assert kwargs == {"capture_output": False}
         assert isinstance(args[0], list)
diff --git a/tests/runners/test_runners.py b/tests/runners/test_runners.py
index fae40d36..69117101 100644
--- a/tests/runners/test_runners.py
+++ b/tests/runners/test_runners.py
@@ -24,7 +24,6 @@ def test_python_standard_runner_with_options(self):
             runner_instance.params.collect_tests_options
             == config_params["collect_tests_options"]
         )
-        assert runner_instance.params.strict_mode == False
         assert runner_instance.params.coverage_root == "./"
 
     def test_get_dan_runner_with_params(self):