chore: Remove requirement for pytest #273

Merged · 2 commits · Oct 5, 2023
.github/workflows/push_flow.yml (3 changes: 2 additions & 1 deletion)
@@ -62,6 +62,7 @@ jobs:
python -m pip install --upgrade pip
pip install -r requirements.txt
python setup.py develop
pip install -r tests/requirements.txt
- name: Test with pytest
run: |
pytest --cov
@@ -96,7 +97,7 @@ jobs:
- uses: actions/setup-python@v3
- name: Install CLI
run: |
pip install -r requirements.txt
pip install -r requirements.txt -r tests/requirements.txt
pip install codecov-cli
- name: Label Analysis
run: |
codecov_cli/runners/python_standard_runner.py (118 changes: 3 additions & 115 deletions)
@@ -1,17 +1,11 @@
import logging
import random
import subprocess
from contextlib import redirect_stdout
from io import StringIO, TextIOWrapper
from multiprocessing import Process, Queue, get_context
from os import getcwd
from queue import Empty
from subprocess import CalledProcessError
from sys import path, stdout
from sys import stdout
from typing import List, Optional

import click
import pytest

from codecov_cli.runners.types import (
LabelAnalysisRequestResult,
@@ -43,58 +37,6 @@ def coverage_root(self) -> str:
"""
return self.get("coverage_root", "./")

@property
def strict_mode(self) -> bool:
"""
Run pytest from within Python instead of using subprocess.run
This is potentially safer than using subprocess.run because it better guarantees that
the program running is indeed pytest.
But it might not work every time due to import issues related to Python caching modules.
"""
return self.get("strict_mode", False)


def _include_curr_dir(method):
"""
Account for the difference 'pytest' vs 'python -m pytest'
https://docs.pytest.org/en/7.1.x/how-to/usage.html#calling-pytest-through-python-m-pytest
Used only in strict_mode
"""

def call_method(self, *args, **kwargs):
curr_dir = getcwd()
path.append(curr_dir)

result = method(self, *args, **kwargs)

path.remove(curr_dir)
return result

return call_method


def _execute_pytest_subprocess(
pytest_args: List[str],
queue: Queue,
parent_stdout: TextIOWrapper,
capture_output: bool = True,
):
"""Runs pytest from python in a subprocess.
This is because we call it twice in the label-analysis process,
so we might have import errors if calling it directly.
Check the warning: https://docs.pytest.org/en/7.1.x/how-to/usage.html#calling-pytest-from-python-code

Returns the output value and pytest exit code via queue
"""
subproces_stdout = parent_stdout
if capture_output:
subproces_stdout = StringIO()
with redirect_stdout(subproces_stdout):
result = pytest.main(pytest_args)
if capture_output:
queue.put({"output": subproces_stdout.getvalue()})
queue.put({"result": result})


class PythonStandardRunner(LabelAnalysisRunnerInterface):

@@ -106,54 +48,6 @@ def __init__(self, config_params: Optional[dict] = None) -> None:
config_params = {}
self.params = PythonStandardRunnerConfigParams(config_params)

def _wait_pytest(self, pytest_process: Process, queue: Queue):
pytest_process.start()
result = None
output = None
while pytest_process.exitcode == 0 or pytest_process.exitcode == None:
from_queue = None
try:
from_queue = queue.get(timeout=1)
except Empty:
pass
if from_queue and "output" in from_queue:
output = from_queue["output"]
if from_queue and "result" in from_queue:
result = from_queue["result"]
if result is not None:
break
pytest_process.join()
return result, output

@_include_curr_dir
def _execute_pytest_strict(
self, pytest_args: List[str], capture_output: bool = True
) -> str:
"""Handles calling pytest from Python in a subprocess.
Raises Exception if pytest fails
Returns the complete pytest output
"""
ctx = get_context(method="fork")
queue = ctx.Queue(2)
p = ctx.Process(
target=_execute_pytest_subprocess,
args=[pytest_args, queue, stdout, capture_output],
)
result, output = self._wait_pytest(p, queue)

if p.exitcode != 0 or (result != pytest.ExitCode.OK and result != 0):
message = f"Pytest exited with non-zero code {result}."
message += "\nThis is likely not a problem with label-analysis. Check pytest's output and options."
if capture_output:
# If pytest failed but we captured its output the user won't know what's wrong
# So we need to include that in the error message
message += "\nPYTEST OUTPUT:"
message += "\n" + output
else:
message += "\n(you can check pytest options on the logs before the test session start)"
raise click.ClickException(message)
return output

def parse_captured_output_error(self, exp: CalledProcessError) -> str:
result = ""
for out_stream in [exp.stdout, exp.stderr]:
@@ -202,10 +96,7 @@ def collect_tests(self):
),
)

if self.params.strict_mode:
output = self._execute_pytest_strict(options_to_use)
else:
output = self._execute_pytest(options_to_use)
output = self._execute_pytest(options_to_use)
lines = output.split(sep="\n")
test_names = list(line for line in lines if ("::" in line and "test" in line))
return test_names
@@ -254,10 +145,7 @@ def process_labelanalysis_result(self, result: LabelAnalysisRequestResult):
"List of tests executed",
extra=dict(extra_log_attributes=dict(executed_tests=tests_to_run)),
)
if self.params.strict_mode:
output = self._execute_pytest_strict(command_array, capture_output=False)
else:
output = self._execute_pytest(command_array, capture_output=False)
output = self._execute_pytest(command_array, capture_output=False)
logger.info(f"Finished running {len(tests_to_run)} tests successfully")
logger.info(f" pytest options: \"{' '.join(default_options)}\"")
logger.debug(output)
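
With strict mode gone, both collect_tests and process_labelanalysis_result go through the runner's single subprocess-based _execute_pytest path. That method is not touched by this diff, so the snippet below is only a minimal sketch of what such a call could look like; the exact signature, command line, and error handling are assumptions rather than code from the repository.

import subprocess
from typing import List


def _execute_pytest(pytest_args: List[str], capture_output: bool = True) -> str:
    """Sketch: run pytest as an external process and return its captured output.

    Written as a free function here for brevity; in the runner it is a method.
    """
    completed = subprocess.run(
        ["python", "-m", "pytest"] + pytest_args,
        capture_output=capture_output,
        check=True,  # a non-zero exit raises subprocess.CalledProcessError,
        # which a helper like parse_captured_output_error can then turn into a message
    )
    return completed.stdout.decode() if capture_output else ""

One side note on the deleted helpers: invoking pytest as "python -m pytest" adds the current directory to sys.path on its own, which is the behaviour the removed _include_curr_dir decorator had to reproduce for the in-process (strict mode) path.
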
requirements.txt (36 changes: 3 additions & 33 deletions)
@@ -2,23 +2,19 @@
# This file is autogenerated by pip-compile with Python 3.10
# by the following command:
#
# pip-compile --output-file=requirements.txt requirements.in setup.py
# pip-compile setup.py
#
anyio==4.0.0
# via httpcore
attrs==21.4.0
# via pytest
certifi==2023.7.22
# via
# httpcore
# httpx
# requests
charset-normalizer==3.2.0
charset-normalizer==3.3.0
# via requests
click==8.1.7
# via codecov-cli (setup.py)
coverage[toml]==7.3.1
# via pytest-cov
exceptiongroup==1.1.3
# via anyio
h11==0.14.0
@@ -34,28 +30,6 @@ idna==3.4
# rfc3986
ijson==3.2.3
# via codecov-cli (setup.py)
iniconfig==1.1.1
# via pytest
packaging==21.3
# via pytest
pluggy==1.0.0
# via pytest
py==1.11.0
# via pytest
pyparsing==3.0.9
# via packaging
pytest==7.1.2
# via
# codecov-cli (setup.py)
# pytest-asyncio
# pytest-cov
# pytest-mock
pytest-asyncio==0.21.1
# via -r requirements.in
pytest-cov==4.1.0
# via codecov-cli (setup.py)
pytest-mock==3.11.1
# via -r requirements.in
pyyaml==6.0.1
# via codecov-cli (setup.py)
requests==2.31.0
@@ -71,13 +45,9 @@ sniffio==1.3.0
# anyio
# httpcore
# httpx
tomli==2.0.1
# via
# coverage
# pytest
tree-sitter==0.20.2
# via codecov-cli (setup.py)
urllib3==2.0.4
urllib3==2.0.6
# via
# requests
# responses
setup.py (2 changes: 0 additions & 2 deletions)
@@ -24,8 +24,6 @@
"click==8.*",
"httpx==0.23.*",
"ijson==3.*",
"pytest==7.*",
"pytest-cov>=3",
"pyyaml==6.*",
"responses==0.21.*",
"smart-open==6.*",
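
For reference, after dropping the two pytest pins the runtime dependency list in setup.py presumably ends up looking roughly like the sketch below. Only the entries visible in this diff's context are listed; the package name, the find_packages() call, and everything else in setup() are assumptions made for illustration.

from setuptools import find_packages, setup

setup(
    name="codecov-cli",
    packages=find_packages(),
    install_requires=[
        "click==8.*",
        "httpx==0.23.*",
        "ijson==3.*",
        # pytest==7.* and pytest-cov>=3 removed: they now live in tests/requirements.in
        "pyyaml==6.*",
        "responses==0.21.*",
        "smart-open==6.*",
    ],
)
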
requirements.in → tests/requirements.in (2 changes: 2 additions & 0 deletions)
@@ -1,2 +1,4 @@
pytest
pytest-cov
pytest-mock
pytest-asyncio
tests/requirements.txt (30 changes: 30 additions & 0 deletions)
@@ -0,0 +1,30 @@
#
# This file is autogenerated by pip-compile with Python 3.10
# by the following command:
#
# pip-compile tests/requirements.in
#
coverage[toml]==7.3.1
# via pytest-cov
exceptiongroup==1.1.3
# via pytest
iniconfig==2.0.0
# via pytest
packaging==23.2
# via pytest
pluggy==1.3.0
# via pytest
pytest==7.4.2
# via
# -r tests/requirements.in
# pytest-asyncio
# pytest-cov
# pytest-mock
pytest-asyncio==0.21.1
# via -r tests/requirements.in
pytest-cov==4.1.0
# via -r tests/requirements.in
pytest-mock==3.11.1
# via -r tests/requirements.in
tomli==2.0.1
# via pytest