diff --git a/.github/workflows/build_assets.yml b/.github/workflows/build_assets.yml
index 8c02cc61..b12941e6 100644
--- a/.github/workflows/build_assets.yml
+++ b/.github/workflows/build_assets.yml
@@ -17,8 +17,6 @@ jobs:
matrix:
include:
- os: macos-latest
- env:
- CFLAGS: -arch arm64 -arch x86_64
TARGET: macos
CMD_REQS: >
mkdir -p pip-packages && cd pip-packages && pip wheel --no-cache-dir --no-binary tree_sitter,ijson,charset_normalizer,PyYAML .. && cd .. &&
@@ -33,8 +31,7 @@ jobs:
- os: ubuntu-20.04
TARGET: ubuntu
CMD_REQS: >
- pip install -r requirements.txt
- pip install .
+ pip install -r requirements.txt && pip install .
CMD_BUILD: >
STATICCODECOV_LIB_PATH=$(find build/ -maxdepth 1 -type d -name 'lib.*' -print -quit | xargs -I {} sh -c "find {} -type f -name 'staticcodecov*' -print -quit | sed 's|^./||'") &&
pyinstaller --add-binary ${STATICCODECOV_LIB_PATH}:. --copy-metadata codecov-cli --hidden-import staticcodecov_languages -F codecov_cli/main.py &&
@@ -44,8 +41,7 @@ jobs:
- os: windows-latest
TARGET: windows
CMD_REQS: >
- pip install -r requirements.txt
- pip install .
+ pip install -r requirements.txt && pip install .
CMD_BUILD: >
pyinstaller --add-binary "build\lib.win-amd64-cpython-311\staticcodecov_languages.cp311-win_amd64.pyd;." --copy-metadata codecov-cli --hidden-import staticcodecov_languages -F codecov_cli\main.py &&
Copy-Item -Path ".\dist\main.exe" -Destination ".\dist\codecovcli_windows.exe"
@@ -83,5 +79,54 @@ jobs:
tag: ${{ github.ref }}
overwrite: true
+ build_assets_alpine_arm:
+ name: Build assets - Alpine and ARM
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ include:
+ - distro: "python:3.11-alpine3.18"
+ arch: arm64
+ distro_name: alpine
+ - distro: "python:3.11-alpine3.18"
+ arch: x86_64
+ distro_name: alpine
+ - distro: "python:3.11-bullseye"
+ arch: arm64
+ distro_name: linux
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ submodules: true
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v1
+ with:
+ platforms: ${{ matrix.arch }}
+ - name: Run in Docker
+ run: |
+ docker run \
+ --rm \
+ -v $(pwd):/${{ github.workspace }} \
+ -w ${{ github.workspace }} \
+ --platform linux/${{ matrix.arch }} \
+ ${{ matrix.distro }} \
+ ./scripts/build_${{ matrix.distro_name }}_arm.sh ${{ matrix.distro_name }}_${{ matrix.arch }}
+ - name: Upload a Build Artifact
+ uses: actions/upload-artifact@v3.1.3
+ if: inputs.release == false
+ with:
+ path: ./dist/codecovcli_${{ matrix.distro_name }}_${{ matrix.arch }}
+ - name: Upload Release Asset
+ if: inputs.release == true
+ id: upload-release-asset
+ uses: svenstaro/upload-release-action@v2
+ with:
+ repo_token: ${{ secrets.GITHUB_TOKEN }}
+ file: ./dist/codecovcli_${{ matrix.distro_name }}_${{ matrix.arch }}
+ asset_name: codecovcli_${{ matrix.distro_name }}_${{ matrix.arch }}
+ tag: ${{ github.ref }}
+ overwrite: true
+
diff --git a/.github/workflows/build_for_pypi.yml b/.github/workflows/build_for_pypi.yml
index 05d9793e..1315f24a 100644
--- a/.github/workflows/build_for_pypi.yml
+++ b/.github/workflows/build_for_pypi.yml
@@ -36,6 +36,6 @@ jobs:
- name: Publish package to PyPi
if: inputs.publish == true
uses: pypa/gh-action-pypi-publish@release/v1
-
-
-
+ with:
+ attestations: false
+ verbose: true
diff --git a/.github/workflows/ci-job.yml b/.github/workflows/ci-job.yml
new file mode 100644
index 00000000..2379156c
--- /dev/null
+++ b/.github/workflows/ci-job.yml
@@ -0,0 +1,41 @@
+# This workflow will install Python dependencies and run tests with Python 3.12
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: CLI CI Job
+
+on:
+ pull_request:
+ push:
+ branches:
+ - main
+
+jobs:
+ build-test-upload:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ submodules: true
+ fetch-depth: 2
+ - name: Set up Python 3.12
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ python -m pip install -e .
+ pip install -r tests/requirements.txt
+ - name: Test with pytest
+ run: |
+ pytest --cov --junitxml=3.12junit.xml
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v4
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
+ files: ./coverage.xml
+ flags: python3.12
+ fail_ci_if_error: true
+ verbose: true
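The test step in this new workflow is plain pytest with coverage plus a JUnit XML report. For reference only, a local equivalent is sketched below (assuming pytest and pytest-cov are installed, as they are via tests/requirements.txt):

```python
# Local equivalent of the "Test with pytest" step above.
# Assumes pytest and pytest-cov are installed (tests/requirements.txt).
import sys

import pytest

# --cov collects coverage; --junitxml writes the JUnit report that the
# test-results processing steps elsewhere in these workflows consume.
sys.exit(pytest.main(["--cov", "--junitxml=3.12junit.xml"]))
```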
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index d6006669..434fd283 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -16,25 +16,22 @@ jobs:
- uses: actions/checkout@v4
with:
submodules: true
- - name: Install dependencies
+ - name: Check linting with ruff
run: |
- python -m pip install --upgrade pip
- pip install black==22.3.0 isort==5.10.1
- - name: Check linting with black
- run: |
- black --check codecov_cli
- - name: Check imports order with isort
- run: |
- isort --check --profile=black codecov_cli -p staticcodecov_languages
+ make lint
+
codecov-startup:
runs-on: ubuntu-latest
+ if: ${{ !github.event.pull_request.head.repo.fork && github.repository_owner == 'codecov' }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
fetch-depth: 2
- - uses: actions/setup-python@v3
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
- name: Install CLI
run: |
pip install codecov-cli
@@ -47,11 +44,11 @@ jobs:
build-test-upload:
runs-on: ubuntu-latest
- needs: codecov-startup
strategy:
fail-fast: false
matrix:
include:
+ - python-version: "3.12"
- python-version: "3.11"
- python-version: "3.10"
- python-version: "3.9"
@@ -62,31 +59,42 @@ jobs:
submodules: true
fetch-depth: 2
- name: Set up Python ${{matrix.python-version}}
- uses: actions/setup-python@v3
+ uses: actions/setup-python@v5
with:
python-version: "${{matrix.python-version}}"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- python setup.py develop
+ python -m pip install -e .
pip install -r tests/requirements.txt
- name: Test with pytest
run: |
- pytest --cov
+ pytest --cov --junitxml=${{matrix.python-version}}junit.xml
- name: Dogfooding codecov-cli
+ if: ${{ !github.event.pull_request.head.repo.fork && github.repository_owner == 'codecov' }}
run: |
- codecovcli do-upload --fail-on-error -t ${{ secrets.CODECOV_TOKEN }} --plugin pycoverage --flag python${{matrix.python-version}}
+ codecovcli -v do-upload --fail-on-error -t ${{ secrets.CODECOV_TOKEN }} --plugin pycoverage --flag python${{matrix.python-version}}
+ codecovcli do-upload --report-type test_results --fail-on-error -t ${{ secrets.CODECOV_TOKEN }} --plugin pycoverage --flag python${{matrix.python-version}}
+ - name: Upload artifacts for test-results-processing
+ if: ${{ !cancelled() }}
+ uses: actions/upload-artifact@v4
+ with:
+ name: ${{matrix.python-version}}junit.xml
+ path: ${{matrix.python-version}}junit.xml
static-analysis:
runs-on: ubuntu-latest
needs: codecov-startup
+ if: ${{ !github.event.pull_request.head.repo.fork && github.repository_owner == 'codecov' }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
fetch-depth: 2
- - uses: actions/setup-python@v3
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
- name: Install CLI
run: |
pip install codecov-cli
@@ -101,16 +109,21 @@ jobs:
label-analysis:
runs-on: ubuntu-latest
needs: static-analysis
+ if: ${{ !github.event.pull_request.head.repo.fork && github.repository_owner == 'codecov' }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
fetch-depth: 0
- - uses: actions/setup-python@v3
- - name: Install CLI
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+ - name: Install dependencies for Dogfooding
run: |
- pip install -r requirements.txt -r tests/requirements.txt
- pip install codecov-cli
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ python -m pip install -e .
+ pip install -r tests/requirements.txt
- name: Label Analysis
run: |
BASE_SHA=$(git merge-base HEAD^ origin/main)
@@ -119,3 +132,33 @@ jobs:
- name: Upload smart-labels
run: |
codecovcli --codecov-yml-path=codecov.yml do-upload --plugin pycoverage --plugin compress-pycoverage --fail-on-error -t ${{ secrets.CODECOV_TOKEN }} --flag smart-labels
+
+ process-test-results:
+ if: ${{ always() }}
+ needs: build-test-upload
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ submodules: true
+ fetch-depth: 2
+ - uses: actions/setup-python@v5
+ with:
+ python-version: "3.12"
+ - name: Install dependencies for Dogfooding
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ python -m pip install -e .
+ pip install -r tests/requirements.txt
+ - name: Download all test results
+ uses: actions/download-artifact@v4
+ with:
+ pattern: "*junit.xml"
+ path: "test_results"
+ merge-multiple: true
+
+ - name: Dogfooding codecov-cli
+ if: ${{ !cancelled() && github.ref && contains(github.ref, 'pull') }}
+ run: |
+ codecovcli process-test-results --dir test_results --github-token ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml
index b4e93b07..c0b75dd8 100644
--- a/.github/workflows/create_release.yml
+++ b/.github/workflows/create_release.yml
@@ -9,8 +9,6 @@ on:
jobs:
create-release:
if: ${{ github.event.pull_request.merged == true && startsWith(github.head_ref, 'release/') && github.repository_owner == 'codecov' }}
- env:
- GITHUB_TOKEN: ${{ secrets.CODECOV_RELEASE_PAT }}
name: Create Github Release
runs-on: ubuntu-latest
steps:
@@ -22,11 +20,8 @@ jobs:
run: |
echo release_version=$(grep -E "version=\"[0-9]\.[0-9]\.[0-9]\"" setup.py | grep -Eo "[0-9]\.[0-9]\.[0-9]") >> "$GITHUB_OUTPUT"
- - name: Create GH Release
- uses: softprops/action-gh-release@v0.1.15
- with:
- name: Release v${{ steps.get-release-vars.outputs.release_version }}
- tag_name: v${{ steps.get-release-vars.outputs.release_version }}
- generate_release_notes: true
- body: Autogenerated for ${{ steps.get-release-vars.outputs.release_version }}. Created for ${{ github.event.pull_request.html_url }}
-
+ - name: Create GitHub Release
+ env:
+ GITHUB_TOKEN: ${{ secrets.CODECOV_RELEASE_PAT }}
+ run: |
+ gh release create v${{ steps.get-release-vars.outputs.release_version }} --title "Release v${{ steps.get-release-vars.outputs.release_version }}" --notes "Autogenerated for v${{ steps.get-release-vars.outputs.release_version }}. Created for ${{ github.event.pull_request.html_url }}" --generate-notes --target ${{ github.event.pull_request.head.sha }}
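The release step above pulls the version out of setup.py with grep before calling `gh release create`. A rough Python equivalent of that extraction, shown only to make the regex explicit (it matches single-digit x.y.z components, exactly like the workflow's pattern):

```python
# Rough Python equivalent of the grep in the get-release-vars step above.
# Assumes it runs from the repository root where setup.py lives.
import re

with open("setup.py") as f:
    match = re.search(r'version="([0-9]\.[0-9]\.[0-9])"', f.read())

if match:
    # Same output shape the workflow writes to $GITHUB_OUTPUT.
    print(f"release_version={match.group(1)}")
```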
diff --git a/.github/workflows/enforce-license-compliance.yml b/.github/workflows/enforce-license-compliance.yml
new file mode 100644
index 00000000..86be7410
--- /dev/null
+++ b/.github/workflows/enforce-license-compliance.yml
@@ -0,0 +1,14 @@
+name: Enforce License Compliance
+
+on:
+ pull_request:
+ branches: [main, master]
+
+jobs:
+ enforce-license-compliance:
+ runs-on: ubuntu-latest
+ steps:
+ - name: 'Enforce License Compliance'
+ uses: getsentry/action-enforce-license-compliance@57ba820387a1a9315a46115ee276b2968da51f3d # main
+ with:
+ fossa_api_key: ${{ secrets.FOSSA_API_KEY }}
diff --git a/.gitignore b/.gitignore
index ea23c4f1..62c63e0a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -163,3 +163,5 @@ cython_debug/
# Vim temporary files
*.swp
*.swo
+
+.debug
\ No newline at end of file
diff --git a/Makefile b/Makefile
index f70eab15..96f12b10 100644
--- a/Makefile
+++ b/Makefile
@@ -2,12 +2,26 @@ name ?= codecovcli
# Semantic versioning format https://semver.org/
tag_regex := ^v([0-9]{1,}\.){2}[0-9]{1,}([-_]\w+)?$
+lint.install:
+ echo "Installing ruff..."
+ pip install -Iv ruff
+
+# The preferred method (for now) w.r.t. fixable rules is to manually update the Makefile
+# with --fix and re-run 'make lint'. Since ruff is constantly adding rules, this small
+# amount of friction is intentional.
+lint.run:
+ ruff check --ignore F401 --exclude languages --exclude samples
+ ruff format --exclude languages --exclude samples
+
+lint.check:
+ echo "Linting..."
+ ruff check --ignore F401 --exclude languages --exclude samples
+ echo "Formatting..."
+ ruff format --check --exclude languages --exclude samples
+
lint:
- pip install black==22.3.0 isort==5.10.1
- black codecov_cli
- isort --profile=black codecov_cli -p staticcodecov_languages
- black tests
- isort --profile black tests
+ make lint.install
+ make lint.run
tag.release:
ifeq ($(shell echo ${version} | egrep "${tag_regex}"),)
@@ -16,4 +30,4 @@ else
@echo "Tagging new release ${version}"
git tag -a ${version} -m "Autogenerated release tag for codecov-cli"
git push origin ${version}
-endif
\ No newline at end of file
+endif
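For environments without make, the checks run by `lint.check` can be invoked directly. A minimal sketch, assuming ruff is installed (`pip install ruff`):

```python
# Minimal sketch of what `make lint.check` runs, for environments without make.
# Assumes ruff is installed; check=True raises on the first failing check.
import subprocess

EXCLUDES = ["--exclude", "languages", "--exclude", "samples"]
subprocess.run(["ruff", "check", "--ignore", "F401", *EXCLUDES], check=True)
subprocess.run(["ruff", "format", "--check", *EXCLUDES], check=True)
```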
diff --git a/README.md b/README.md
index 000699d4..81c2b179 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,8 @@ CodecovCLI is a new way for users to interact with Codecov directly from the use
- [create-report-results](#create-report-results)
- [get-report-results](#get-report-results)
- [pr-base-picking](#pr-base-picking)
+ - [send-notifications](#send-notifications)
+ - [empty-upload](#empty-upload)
- [How to Use Local Upload](#how-to-use-local-upload)
- [Work in Progress Features](#work-in-progress-features)
- [Plugin System](#plugin-system)
@@ -27,6 +29,7 @@ CodecovCLI is a new way for users to interact with Codecov directly from the use
- [Contributions](#contributions)
- [Requirements](#requirements)
- [Guidelines](#guidelines)
+ - [Dependencies](#dependencies)
- [Releases](#releases)
# Installing
@@ -121,7 +124,7 @@ Codecov-cli supports user input. These inputs, along with their descriptions and
| `get-report-results` | Used for local upload. It asks codecov to provide you the report results you calculated with the previous command.
| `pr-base-picking` | Tells codecov that you want to explicitly define a base for your PR
| `upload-process` | A wrapper for 3 commands. Create-commit, create-report and do-upload. You can use this command to upload to codecov instead of using the previosly mentioned commands.
-| `send-notification` | A command that tells Codecov that you finished uploading and you want to be sent notifications. To disable automatically sent notifications please consider adding manual_trigger to your codecov.yml, so it will look like codecov: notify: manual_trigger: true.
+| `send-notifications` | A command that tells Codecov that you have finished uploading and want to be sent notifications. To disable automatically sent notifications, consider adding manual_trigger to your codecov.yml, so it will look like codecov: notify: manual_trigger: true.
>**Note**: Every command has its own different options that will be mentioned later in this doc. Codecov will try to load these options from your CI environment variables, if not, it will try to load them from git, if not found, you may need to add them manually.
@@ -225,10 +228,28 @@ Codecov-cli supports user input. These inputs, along with their descriptions and
| :---: | :---: | :---: |
| -C, --sha, --commit-sha TEXT |Commit SHA (with 40 chars) | Required
| -r, --slug TEXT |owner/repo slug used instead of the private repo token in Self-hosted | Required
-| -t, --token UUID |Codecov upload token | Required
+| -t, --token TEXT |Codecov upload token | Required
| --git-service | Git provider. Options: github, gitlab, bitbucket, github_enterprise, gitlab_enterprise, bitbucket_server | Optional
| -h,--help |Show this message and exit.
+## empty-upload
+
+Used when the changes in a commit don't need testing, but PRs still require a passing Codecov status to be merged.
+This command scans the files changed in the commit and, if all of them are ignored by Codecov
+(including README and configuration files), sends passing values for the configured status checks.
+
+`Usage: codecovcli empty-upload [OPTIONS]`
+
+| Options | Description | usage |
+| :--------------------------: | :----------------------------------------------------------------------------------------: | :------: |
+| -C, --sha, --commit-sha TEXT | Commit SHA (with 40 chars) | Required |
+| -t, --token TEXT | Codecov upload token | Required |
+| -r, --slug TEXT | owner/repo slug used instead of the private repo token in Self-hosted | Optional |
+| --force | Always emit passing checks regardless of changed files | Optional |
+| -Z, --fail-on-error | Exit with non-zero code in case of error | Optional |
+| --git-service | Options: github, gitlab, bitbucket, github_enterprise, gitlab_enterprise, bitbucket_server | Optional |
+| -h, --help | Show this message and exit. | Optional |
+
# How to Use Local Upload
The CLI also supports "dry run" local uploading. This is useful if you prefer to see Codecov status checks and coverage reporting locally, in your terminal, as opposed to opening a PR and waiting for your full CI to run. Local uploads do not interfere with regular uploads made from your CI for any given commit / Pull Request.
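As a usage illustration for the `empty-upload` command documented above, the sketch below drives it through click's test runner. It assumes the top-level click group is importable as `codecov_cli.main.cli` (not shown in this diff) and uses placeholder values for the SHA and token:

```python
# Illustration only: exercise empty-upload via click's CliRunner.
# `codecov_cli.main.cli` is assumed to be the top-level group; the SHA and
# token below are placeholders, not real values.
from click.testing import CliRunner

from codecov_cli.main import cli

runner = CliRunner()
result = runner.invoke(
    cli,
    ["empty-upload", "--commit-sha", "<40-char-sha>", "--token", "<upload-token>", "--force"],
)
print(result.exit_code, result.output)
```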
diff --git a/codecov_cli/commands/base_picking.py b/codecov_cli/commands/base_picking.py
index afe0f531..3c536751 100644
--- a/codecov_cli/commands/base_picking.py
+++ b/codecov_cli/commands/base_picking.py
@@ -1,12 +1,13 @@
import logging
import typing
-import uuid
import click
from codecov_cli.fallbacks import CodecovOption, FallbackFieldEnum
+from codecov_cli.helpers.args import get_cli_args
from codecov_cli.helpers.encoder import slug_without_subgroups_is_invalid
from codecov_cli.services.commit.base_picking import base_picking_logic
+from codecov_cli.types import CommandContext
logger = logging.getLogger("codecovcli")
@@ -36,7 +37,6 @@
"-t",
"--token",
help="Codecov upload token",
- type=click.UUID,
envvar="CODECOV_TOKEN",
)
@click.option(
@@ -47,24 +47,19 @@
)
@click.pass_context
def pr_base_picking(
- ctx,
+ ctx: CommandContext,
base_sha: str,
pr: typing.Optional[int],
slug: typing.Optional[str],
- token: typing.Optional[uuid.UUID],
+ token: typing.Optional[str],
service: typing.Optional[str],
):
enterprise_url = ctx.obj.get("enterprise_url")
+ args = get_cli_args(ctx)
logger.debug(
"Starting base picking process",
extra=dict(
- extra_log_attributes=dict(
- pr=pr,
- slug=slug,
- token=token,
- service=service,
- enterprise_url=enterprise_url,
- )
+ extra_log_attributes=args,
),
)
@@ -74,4 +69,4 @@ def pr_base_picking(
)
return
- base_picking_logic(base_sha, pr, slug, token, service, enterprise_url)
+ base_picking_logic(base_sha, pr, slug, token, service, enterprise_url, args)
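This is the first of several commands in this diff that swap a hand-built dict of logged fields for `get_cli_args(ctx)`. The helper itself is not part of the diff; the sketch below is only a plausible minimal shape of it, to make the pattern readable, and should not be read as its actual source:

```python
# Plausible minimal shape of codecov_cli.helpers.args.get_cli_args (assumption:
# the real helper is not included in this diff). The idea: collect the resolved
# CLI parameters plus the command name for structured logging, while keeping
# sensitive values such as the upload token out of the logged dict.
import click


def get_cli_args(ctx: click.Context) -> dict:
    args = dict(ctx.params)
    args["command"] = ctx.command.name
    args.pop("token", None)  # never log the upload token
    return args
```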
diff --git a/codecov_cli/commands/commit.py b/codecov_cli/commands/commit.py
index 1b2bbb98..b2b14a7e 100644
--- a/codecov_cli/commands/commit.py
+++ b/codecov_cli/commands/commit.py
@@ -1,13 +1,14 @@
import logging
import typing
-import uuid
import click
from codecov_cli.fallbacks import CodecovOption, FallbackFieldEnum
+from codecov_cli.helpers.args import get_cli_args
from codecov_cli.helpers.git import GitService
from codecov_cli.helpers.options import global_options
from codecov_cli.services.commit import create_commit_logic
+from codecov_cli.types import CommandContext
logger = logging.getLogger("codecovcli")
@@ -36,30 +37,22 @@
@global_options
@click.pass_context
def create_commit(
- ctx,
+ ctx: CommandContext,
commit_sha: str,
parent_sha: typing.Optional[str],
pull_request_number: typing.Optional[int],
branch: typing.Optional[str],
slug: typing.Optional[str],
- token: typing.Optional[uuid.UUID],
+ token: typing.Optional[str],
git_service: typing.Optional[str],
fail_on_error: bool,
):
enterprise_url = ctx.obj.get("enterprise_url")
+ args = get_cli_args(ctx)
logger.debug(
"Starting create commit process",
extra=dict(
- extra_log_attributes=dict(
- commit_sha=commit_sha,
- parent_sha=parent_sha,
- pr=pull_request_number,
- branch=branch,
- slug=slug,
- token=token,
- service=git_service,
- enterprise_url=enterprise_url,
- )
+ extra_log_attributes=args,
),
)
create_commit_logic(
@@ -72,4 +65,5 @@ def create_commit(
git_service,
enterprise_url,
fail_on_error,
+ args,
)
diff --git a/codecov_cli/commands/create_report_result.py b/codecov_cli/commands/create_report_result.py
index 94bae460..28648f23 100644
--- a/codecov_cli/commands/create_report_result.py
+++ b/codecov_cli/commands/create_report_result.py
@@ -1,12 +1,11 @@
import logging
-import uuid
import click
-from codecov_cli.fallbacks import CodecovOption, FallbackFieldEnum
-from codecov_cli.helpers.git import GitService
+from codecov_cli.helpers.args import get_cli_args
from codecov_cli.helpers.options import global_options
from codecov_cli.services.report import create_report_results_logic
+from codecov_cli.types import CommandContext
logger = logging.getLogger("codecovcli")
@@ -18,28 +17,22 @@
@global_options
@click.pass_context
def create_report_results(
- ctx,
+ ctx: CommandContext,
commit_sha: str,
code: str,
slug: str,
git_service: str,
- token: uuid.UUID,
+ token: str,
fail_on_error: bool,
):
enterprise_url = ctx.obj.get("enterprise_url")
+ args = get_cli_args(ctx)
logger.debug(
"Creating report results",
extra=dict(
- extra_log_attributes=dict(
- commit_sha=commit_sha,
- code=code,
- slug=slug,
- service=git_service,
- enterprise_url=enterprise_url,
- token=token,
- )
+ extra_log_attributes=args,
),
)
create_report_results_logic(
- commit_sha, code, slug, git_service, token, enterprise_url, fail_on_error
+ commit_sha, code, slug, git_service, token, enterprise_url, fail_on_error, args
)
diff --git a/codecov_cli/commands/empty_upload.py b/codecov_cli/commands/empty_upload.py
index 4c429144..d68e0224 100644
--- a/codecov_cli/commands/empty_upload.py
+++ b/codecov_cli/commands/empty_upload.py
@@ -1,42 +1,39 @@
import logging
import typing
-import uuid
import click
from codecov_cli.fallbacks import CodecovOption, FallbackFieldEnum
+from codecov_cli.helpers.args import get_cli_args
from codecov_cli.helpers.git import GitService
from codecov_cli.helpers.options import global_options
from codecov_cli.services.empty_upload import empty_upload_logic
+from codecov_cli.types import CommandContext
logger = logging.getLogger("codecovcli")
@click.command()
+@click.option("--force", is_flag=True, default=False)
@global_options
@click.pass_context
def empty_upload(
- ctx,
+ ctx: CommandContext,
commit_sha: str,
+ force: bool,
slug: typing.Optional[str],
- token: typing.Optional[uuid.UUID],
+ token: typing.Optional[str],
git_service: typing.Optional[str],
fail_on_error: typing.Optional[bool],
):
enterprise_url = ctx.obj.get("enterprise_url")
+ args = get_cli_args(ctx)
logger.debug(
"Starting empty upload process",
extra=dict(
- extra_log_attributes=dict(
- commit_sha=commit_sha,
- slug=slug,
- token=token,
- service=git_service,
- enterprise_url=enterprise_url,
- fail_on_error=fail_on_error,
- )
+ extra_log_attributes=args,
),
)
return empty_upload_logic(
- commit_sha, slug, token, git_service, enterprise_url, fail_on_error
+ commit_sha, slug, token, git_service, enterprise_url, fail_on_error, force, args
)
diff --git a/codecov_cli/commands/get_report_results.py b/codecov_cli/commands/get_report_results.py
index e676afac..4e02a1f9 100644
--- a/codecov_cli/commands/get_report_results.py
+++ b/codecov_cli/commands/get_report_results.py
@@ -1,13 +1,15 @@
import logging
-import uuid
import click
from codecov_cli.fallbacks import CodecovOption, FallbackFieldEnum
+from codecov_cli.helpers.args import get_cli_args
from codecov_cli.helpers.encoder import encode_slug
from codecov_cli.helpers.git import GitService
from codecov_cli.helpers.options import global_options
from codecov_cli.services.report import send_reports_result_get_request
+from codecov_cli.types import CommandContext
+
logger = logging.getLogger("codecovcli")
@@ -19,26 +21,20 @@
@global_options
@click.pass_context
def get_report_results(
- ctx,
+ ctx: CommandContext,
commit_sha: str,
code: str,
slug: str,
git_service: str,
- token: uuid.UUID,
+ token: str,
fail_on_error: bool,
):
enterprise_url = ctx.obj.get("enterprise_url")
+ args = get_cli_args(ctx)
logger.debug(
"Getting report results",
extra=dict(
- extra_log_attributes=dict(
- commit_sha=commit_sha,
- code=code,
- slug=slug,
- service=git_service,
- enterprise_url=enterprise_url,
- token=token,
- )
+ extra_log_attributes=args,
),
)
encoded_slug = encode_slug(slug)
diff --git a/codecov_cli/commands/labelanalysis.py b/codecov_cli/commands/labelanalysis.py
index 5a0e1503..d384083d 100644
--- a/codecov_cli/commands/labelanalysis.py
+++ b/codecov_cli/commands/labelanalysis.py
@@ -2,13 +2,14 @@
import logging
import pathlib
import time
-from typing import List, Optional
+from typing import Dict, List, Optional
import click
import requests
from codecov_cli.fallbacks import CodecovOption, FallbackFieldEnum
from codecov_cli.helpers import request
+from codecov_cli.helpers.args import get_cli_args
from codecov_cli.helpers.config import CODECOV_API_URL
from codecov_cli.helpers.validators import validate_commit_sha
from codecov_cli.runners import get_runner
@@ -16,6 +17,7 @@
LabelAnalysisRequestResult,
LabelAnalysisRunnerInterface,
)
+from codecov_cli.types import CommandContext
logger = logging.getLogger("codecovcli")
@@ -58,11 +60,8 @@
"--dry-run",
"dry_run",
help=(
- "Print list of tests to run AND tests skipped (and options that need to be added to the test runner) to stdout. "
- + "Also prints the same information in JSON format. "
- + "JSON will have keys 'ats_tests_to_run', 'ats_tests_to_skip' and 'runner_options'. "
- + "List of tests to run is prefixed with ATS_TESTS_TO_RUN= "
- + "List of tests to skip is prefixed with ATS_TESTS_TO_SKIP="
+ "Print list of tests to run AND tests skipped AND options that need to be added to the test runner to stdout. "
+ + "Choose format with --dry-run-format option. Default is JSON. "
),
is_flag=True,
)
@@ -70,11 +69,17 @@
"--dry-run-format",
"dry_run_format",
type=click.Choice(["json", "space-separated-list"]),
+ help="Format in which --dry-run data is printed. Default is JSON.",
default="json",
)
+@click.option(
+ "--runner-param",
+ "runner_params",
+ multiple=True,
+)
@click.pass_context
def label_analysis(
- ctx: click.Context,
+ ctx: CommandContext,
token: str,
head_commit_sha: str,
base_commit_sha: str,
@@ -82,20 +87,14 @@ def label_analysis(
max_wait_time: str,
dry_run: bool,
dry_run_format: str,
+ runner_params: List[str],
):
enterprise_url = ctx.obj.get("enterprise_url")
+ args = get_cli_args(ctx)
logger.debug(
"Starting label analysis",
extra=dict(
- extra_log_attributes=dict(
- head_commit_sha=head_commit_sha,
- base_commit_sha=base_commit_sha,
- token=token,
- runner_name=runner_name,
- enterprise_url=enterprise_url,
- max_wait_time=max_wait_time,
- dry_run=dry_run,
- )
+ extra_log_attributes=args,
),
)
if head_commit_sha == base_commit_sha:
@@ -115,7 +114,8 @@ def label_analysis(
codecov_yaml = ctx.obj["codecov_yaml"] or {}
cli_config = codecov_yaml.get("cli", {})
# Raises error if no runner is found
- runner = get_runner(cli_config, runner_name)
+ parsed_runner_params = _parse_runner_params(runner_params)
+ runner = get_runner(cli_config, runner_name, parsed_runner_params)
logger.debug(
f"Selected runner: {runner}",
extra=dict(extra_log_attributes=dict(config=runner.params)),
@@ -157,6 +157,7 @@ def label_analysis(
runner,
dry_run=dry_run,
dry_run_format=dry_run_format,
+ fallback_reason="codecov_unavailable",
)
return
@@ -189,6 +190,13 @@ def label_analysis(
LabelAnalysisRequestResult(request_result),
runner,
dry_run_format,
+                # It's possible that the task had processing errors and fell back to all tests.
+                # Even though it's marked as FINISHED (not ERROR), it's not a true success.
+ fallback_reason=(
+ "test_list_processing_errors"
+ if resp_json.get("errors", None)
+ else None
+ ),
)
return
if resp_json["state"] == "error":
@@ -207,6 +215,7 @@ def label_analysis(
runner=runner,
dry_run=dry_run,
dry_run_format=dry_run_format,
+ fallback_reason="test_list_processing_failed",
)
return
if max_wait_time and (time.monotonic() - start_wait) > max_wait_time:
@@ -218,12 +227,45 @@ def label_analysis(
runner=runner,
dry_run=dry_run,
dry_run_format=dry_run_format,
+ fallback_reason="max_wait_time_exceeded",
)
return
logger.info("Waiting more time for result...")
time.sleep(5)
+def _parse_runner_params(runner_params: List[str]) -> Dict[str, str]:
+ """Parses the structured list of dynamic runner params into a dictionary.
+    Each item has the form `key=value`; to pass a list, make the value comma-separated.
+    If an item doesn't contain '=', it is treated as a key whose value is set to None.
+
+ EXAMPLE:
+ runner_params = ['key=value', 'null_item', 'list=item1,item2,item3']
+ _parse_runner_params(runner_params) == {
+ 'key': 'value',
+ 'null_item': None,
+ 'list': ['item1', 'item2', 'item3']
+ }
+ """
+ final_params = {}
+ for param in runner_params:
+ # Emit warning if param is not well formatted
+ # Using == 0 rather than != 1 because there might be
+ # a good reason for the param to include '=' in the value.
+ if param.count("=") == 0:
+ logger.warning(
+ f"Runner param {param} is not well formated. Setting value to None. Use '--runner-param key=value' to set value"
+ )
+ final_params[param] = None
+ else:
+ key, value = param.split("=", 1)
+ # For list values we need to split the list too
+ if "," in value:
+ value = value.split(",")
+ final_params[key] = value
+ return final_params
+
+
def _potentially_calculate_absent_labels(
request_result, requested_labels
) -> LabelAnalysisRequestResult:
@@ -322,12 +364,16 @@ def _send_labelanalysis_request(payload, url, token_header):
def _dry_run_json_output(
- labels_to_run: set, labels_to_skip: set, runner_options: List[str]
+ labels_to_run: set,
+ labels_to_skip: set,
+ runner_options: List[str],
+ fallback_reason: str = None,
) -> None:
output_as_dict = dict(
runner_options=runner_options,
ats_tests_to_run=sorted(labels_to_run),
ats_tests_to_skip=sorted(labels_to_skip),
+ ats_fallback_reason=fallback_reason,
)
# ⚠️ DON'T use logger
# logger goes to stderr, we want it in stdout
@@ -335,15 +381,21 @@ def _dry_run_json_output(
def _dry_run_list_output(
- labels_to_run: set, labels_to_skip: set, runner_options: List[str]
+ labels_to_run: set,
+ labels_to_skip: set,
+ runner_options: List[str],
+ fallback_reason: str = None,
) -> None:
+ if fallback_reason:
+ logger.warning(f"label-analysis didn't run correctly. Error: {fallback_reason}")
+
to_run_line = " ".join(
- sorted(map(lambda l: f"'{l}'", runner_options))
- + sorted(map(lambda l: f"'{l}'", labels_to_run))
+ sorted(map(lambda option: f"'{option}'", runner_options))
+ + sorted(map(lambda label: f"'{label}'", labels_to_run))
)
to_skip_line = " ".join(
- sorted(map(lambda l: f"'{l}'", runner_options))
- + sorted(map(lambda l: f"'{l}'", labels_to_skip))
+ sorted(map(lambda option: f"'{option}'", runner_options))
+ + sorted(map(lambda label: f"'{label}'", labels_to_skip))
)
# ⚠️ DON'T use logger
# logger goes to stderr, we want it in stdout
@@ -355,6 +407,10 @@ def _dry_run_output(
result: LabelAnalysisRequestResult,
runner: LabelAnalysisRunnerInterface,
dry_run_format: str,
+ *,
+ # If we have a fallback reason it means that calculating the list of tests to run
+ # failed at some point. So it was not a completely successful task.
+ fallback_reason: str = None,
):
labels_to_run = set(
result.absent_labels + result.global_level_labels + result.present_diff_labels
@@ -368,13 +424,16 @@ def _dry_run_output(
# Because dry_run_format is a click.Choice we can
# be sure the value will be in the dict of choices
fn_to_use = format_lookup[dry_run_format]
- fn_to_use(labels_to_run, labels_to_skip, runner.dry_run_runner_options)
+ fn_to_use(
+ labels_to_run, labels_to_skip, runner.dry_run_runner_options, fallback_reason
+ )
def _fallback_to_collected_labels(
collected_labels: List[str],
runner: LabelAnalysisRunnerInterface,
*,
+ fallback_reason: str = None,
dry_run: bool = False,
dry_run_format: Optional[pathlib.Path] = None,
) -> dict:
@@ -393,7 +452,10 @@ def _fallback_to_collected_labels(
return runner.process_labelanalysis_result(fake_response)
else:
return _dry_run_output(
- LabelAnalysisRequestResult(fake_response), runner, dry_run_format
+ LabelAnalysisRequestResult(fake_response),
+ runner,
+ dry_run_format,
+ fallback_reason=fallback_reason,
)
logger.error("Cannot fallback to collected labels because no labels were collected")
raise click.ClickException("Failed to get list of labels to run")
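The new `--runner-param` option is parsed by `_parse_runner_params` above; its docstring example, restated as a runnable check:

```python
# Runnable restatement of the _parse_runner_params docstring example above.
from codecov_cli.commands.labelanalysis import _parse_runner_params

params = _parse_runner_params(["key=value", "null_item", "list=item1,item2,item3"])
assert params == {
    "key": "value",
    "null_item": None,  # no '=' -> key kept with value None (a warning is logged)
    "list": ["item1", "item2", "item3"],  # comma-separated value becomes a list
}
```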
diff --git a/codecov_cli/commands/process_test_results.py b/codecov_cli/commands/process_test_results.py
new file mode 100644
index 00000000..b95cc30c
--- /dev/null
+++ b/codecov_cli/commands/process_test_results.py
@@ -0,0 +1,265 @@
+import json
+import logging
+import os
+import pathlib
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional
+
+import click
+from test_results_parser import (
+ Outcome,
+ ParserError,
+ Testrun,
+ build_message,
+ parse_junit_xml,
+)
+
+from codecov_cli.helpers.args import get_cli_args
+from codecov_cli.helpers.request import (
+ log_warnings_and_errors_if_any,
+ send_get_request,
+ send_post_request,
+)
+from codecov_cli.services.upload.file_finder import select_file_finder
+from codecov_cli.types import CommandContext, RequestResult, UploadCollectionResultFile
+
+logger = logging.getLogger("codecovcli")
+
+# Search marker so that we can find the comment when looking for previously created comments
+CODECOV_SEARCH_MARKER = "<!-- Codecov -->"
+
+
+_process_test_results_options = [
+ click.option(
+ "-s",
+ "--dir",
+ "--files-search-root-folder",
+ "dir",
+ help="Folder where to search for test results files",
+ type=click.Path(path_type=pathlib.Path),
+ default=pathlib.Path.cwd,
+ show_default="Current Working Directory",
+ ),
+ click.option(
+ "-f",
+ "--file",
+ "--files-search-direct-file",
+ "files",
+ help="Explicit files to upload. These will be added to the test results files to be processed. If you wish to only process the specified files, please consider using --disable-search to disable processing other files.",
+ type=click.Path(path_type=pathlib.Path),
+ multiple=True,
+ default=[],
+ ),
+ click.option(
+ "--exclude",
+ "--files-search-exclude-folder",
+ "exclude_folders",
+ help="Folders to exclude from search",
+ type=click.Path(path_type=pathlib.Path),
+ multiple=True,
+ default=[],
+ ),
+ click.option(
+ "--disable-search",
+ help="Disable search for coverage files. This is helpful when specifying what files you want to upload with the --file option.",
+ is_flag=True,
+ default=False,
+ ),
+ click.option(
+ "--github-token",
+ help="If specified, output the message to the specified GitHub PR.",
+ type=str,
+ default=None,
+ ),
+]
+
+
+def process_test_results_options(func):
+ for option in reversed(_process_test_results_options):
+ func = option(func)
+ return func
+
+
+@dataclass
+class TestResultsNotificationPayload:
+ failures: List[Testrun]
+ failed: int = 0
+ passed: int = 0
+ skipped: int = 0
+
+
+@click.command()
+@process_test_results_options
+@click.pass_context
+def process_test_results(
+ ctx: CommandContext,
+ dir=None,
+ files=None,
+ exclude_folders=None,
+ disable_search=None,
+ github_token=None,
+):
+ file_finder = select_file_finder(
+ dir, exclude_folders, files, disable_search, report_type="test_results"
+ )
+
+ upload_collection_results: List[UploadCollectionResultFile] = (
+ file_finder.find_files()
+ )
+ if len(upload_collection_results) == 0:
+ raise click.ClickException(
+ "No JUnit XML files were found. Make sure to specify them using the --file option."
+ )
+
+ payload: TestResultsNotificationPayload = generate_message_payload(
+ upload_collection_results
+ )
+
+ message: str = f"{build_message(payload)} {CODECOV_SEARCH_MARKER}"
+
+ args: Dict[str, str] = get_cli_args(ctx)
+
+ maybe_write_to_github_action(message, github_token, args)
+
+ click.echo(message)
+
+
+def maybe_write_to_github_action(
+ message: str, github_token: str, args: Dict[str, str]
+) -> None:
+ if github_token is None:
+ # If no token is passed, then we will assume users are not running in a GitHub Action
+ return
+
+ maybe_write_to_github_comment(message, github_token, args)
+
+
+def maybe_write_to_github_comment(
+ message: str, github_token: str, args: Dict[str, str]
+) -> None:
+ slug = os.getenv("GITHUB_REPOSITORY")
+ if slug is None:
+ raise click.ClickException(
+ "Error getting repo slug from environment. "
+ "Can't find GITHUB_REPOSITORY environment variable."
+ )
+
+ ref = os.getenv("GITHUB_REF")
+ if ref is None or "pull" not in ref:
+ raise click.ClickException(
+ "Error getting PR number from environment. "
+ "Can't find GITHUB_REF environment variable."
+ )
+ # GITHUB_REF is documented here: https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables
+ pr_number = ref.split("/")[2]
+
+ existing_comment = find_existing_github_comment(github_token, slug, pr_number)
+ comment_id = None
+ if existing_comment is not None:
+ comment_id = existing_comment.get("id")
+
+ create_or_update_github_comment(
+ github_token, slug, pr_number, message, comment_id, args
+ )
+
+
+def find_existing_github_comment(
+ github_token: str, repo_slug: str, pr_number: int
+) -> Optional[Dict[str, Any]]:
+ url = f"https://api.github.com/repos/{repo_slug}/issues/{pr_number}/comments"
+
+ headers = {
+ "Accept": "application/vnd.github+json",
+ "Authorization": f"Bearer {github_token}",
+ "X-GitHub-Api-Version": "2022-11-28",
+ }
+ page = 1
+
+ results = get_github_response_or_error(url, headers, page)
+ while results != []:
+ for comment in results:
+ comment_user = comment.get("user")
+ if (
+ CODECOV_SEARCH_MARKER in comment.get("body", "")
+ and comment_user
+ and comment_user.get("login", "") == "github-actions[bot]"
+ ):
+ return comment
+
+ page += 1
+ results = get_github_response_or_error(url, headers, page)
+
+ # No matches, return None
+ return None
+
+
+def get_github_response_or_error(
+ url: str, headers: Dict[str, str], page: int
+) -> Dict[str, Any]:
+ request_results: RequestResult = send_get_request(
+ url, headers, params={"page": page}
+ )
+ if request_results.status_code != 200:
+ raise click.ClickException("Cannot find existing GitHub comment for PR.")
+ results = json.loads(request_results.text)
+ return results
+
+
+def create_or_update_github_comment(
+ token: str,
+ repo_slug: str,
+ pr_number: str,
+ message: str,
+ comment_id: Optional[str],
+ args: Dict[str, Any],
+) -> None:
+ if comment_id is not None:
+ url = f"https://api.github.com/repos/{repo_slug}/issues/comments/{comment_id}"
+ else:
+ url = f"https://api.github.com/repos/{repo_slug}/issues/{pr_number}/comments"
+
+ headers = {
+ "Accept": "application/vnd.github+json",
+ "Authorization": f"Bearer {token}",
+ "X-GitHub-Api-Version": "2022-11-28",
+ }
+ logger.info(f"Posting GitHub comment {comment_id}")
+
+ log_warnings_and_errors_if_any(
+ send_post_request(
+ url=url,
+ data={
+ "body": message,
+ "cli_args": args,
+ },
+ headers=headers,
+ ),
+ "Posting test results comment",
+ )
+
+
+def generate_message_payload(
+ upload_collection_results: List[UploadCollectionResultFile],
+) -> TestResultsNotificationPayload:
+ payload = TestResultsNotificationPayload(failures=[])
+
+ for result in upload_collection_results:
+ try:
+ logger.info(f"Parsing {result.get_filename()}")
+ parsed_info = parse_junit_xml(result.get_content())
+ for testrun in parsed_info.testruns:
+ if (
+ testrun.outcome == Outcome.Failure
+ or testrun.outcome == Outcome.Error
+ ):
+ payload.failed += 1
+ payload.failures.append(testrun)
+ elif testrun.outcome == Outcome.Skip:
+ payload.skipped += 1
+ else:
+ payload.passed += 1
+ except ParserError as err:
+ raise click.ClickException(
+ f"Error parsing {str(result.get_filename(), 'utf8')} with error: {err}"
+ )
+ return payload
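`maybe_write_to_github_comment` reads the PR number out of `GITHUB_REF`. A tiny illustration of that split, using the `refs/pull/<number>/merge` shape GitHub documents for pull_request events:

```python
# Illustration of the GITHUB_REF parsing used above. On pull_request events
# GitHub sets GITHUB_REF to "refs/pull/<pr_number>/merge".
ref = "refs/pull/123/merge"  # example value; normally os.getenv("GITHUB_REF")
assert "pull" in ref
pr_number = ref.split("/")[2]  # -> "123", later used in the comments API URL
print(pr_number)
```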
diff --git a/codecov_cli/commands/report.py b/codecov_cli/commands/report.py
index 02ea8ec3..7169ba4f 100644
--- a/codecov_cli/commands/report.py
+++ b/codecov_cli/commands/report.py
@@ -1,12 +1,12 @@
import logging
-import uuid
import click
from codecov_cli.fallbacks import CodecovOption, FallbackFieldEnum
-from codecov_cli.helpers.git import GitService
+from codecov_cli.helpers.args import get_cli_args
from codecov_cli.helpers.options import global_options
from codecov_cli.services.report import create_report_logic
+from codecov_cli.types import CommandContext
logger = logging.getLogger("codecovcli")
@@ -15,33 +15,45 @@
@click.option(
"--code", help="The code of the report. If unsure, leave default", default="default"
)
+@click.option(
+ "-P",
+ "--pr",
+ "--pull-request-number",
+ "pull_request_number",
+ help="Specify the pull request number mannually. Used to override pre-existing CI environment variables",
+ cls=CodecovOption,
+ fallback_field=FallbackFieldEnum.pull_request_number,
+)
@global_options
@click.pass_context
def create_report(
- ctx,
+ ctx: CommandContext,
commit_sha: str,
code: str,
slug: str,
git_service: str,
- token: uuid.UUID,
+ token: str,
fail_on_error: bool,
+ pull_request_number: int,
):
enterprise_url = ctx.obj.get("enterprise_url")
+ args = get_cli_args(ctx)
logger.debug(
"Starting create report process",
extra=dict(
- extra_log_attributes=dict(
- commit_sha=commit_sha,
- code=code,
- slug=slug,
- service=git_service,
- enterprise_url=enterprise_url,
- token=token,
- )
+ extra_log_attributes=args,
),
)
res = create_report_logic(
- commit_sha, code, slug, git_service, token, enterprise_url, fail_on_error
+ commit_sha,
+ code,
+ slug,
+ git_service,
+ token,
+ enterprise_url,
+ pull_request_number,
+ fail_on_error,
+ args,
)
if not res.error:
logger.info(
diff --git a/codecov_cli/commands/send_notifications.py b/codecov_cli/commands/send_notifications.py
index ee962cfe..0b7b79d1 100644
--- a/codecov_cli/commands/send_notifications.py
+++ b/codecov_cli/commands/send_notifications.py
@@ -1,13 +1,14 @@
import logging
import typing
-import uuid
import click
from codecov_cli.fallbacks import CodecovOption, FallbackFieldEnum
+from codecov_cli.helpers.args import get_cli_args
from codecov_cli.helpers.git import GitService
from codecov_cli.helpers.options import global_options
from codecov_cli.services.upload_completion import upload_completion_logic
+from codecov_cli.types import CommandContext
logger = logging.getLogger("codecovcli")
@@ -16,24 +17,19 @@
@global_options
@click.pass_context
def send_notifications(
- ctx,
+ ctx: CommandContext,
commit_sha: str,
slug: typing.Optional[str],
- token: typing.Optional[uuid.UUID],
+ token: typing.Optional[str],
git_service: typing.Optional[str],
fail_on_error: bool,
):
enterprise_url = ctx.obj.get("enterprise_url")
+ args = get_cli_args(ctx)
logger.debug(
"Sending notifications process has started",
extra=dict(
- extra_log_attributes=dict(
- commit_sha=commit_sha,
- slug=slug,
- token=token,
- service=git_service,
- enterprise_url=enterprise_url,
- )
+ extra_log_attributes=args,
),
)
return upload_completion_logic(
@@ -43,4 +39,5 @@ def send_notifications(
git_service,
enterprise_url,
fail_on_error,
+ args,
)
diff --git a/codecov_cli/commands/staticanalysis.py b/codecov_cli/commands/staticanalysis.py
index 62bb65e3..a876f90b 100644
--- a/codecov_cli/commands/staticanalysis.py
+++ b/codecov_cli/commands/staticanalysis.py
@@ -6,8 +6,10 @@
import click
from codecov_cli.fallbacks import CodecovOption, FallbackFieldEnum
+from codecov_cli.helpers.args import get_cli_args
from codecov_cli.helpers.validators import validate_commit_sha
from codecov_cli.services.staticanalysis import run_analysis_entrypoint
+from codecov_cli.types import CommandContext
logger = logging.getLogger("codecovcli")
@@ -48,7 +50,7 @@
)
@click.pass_context
def static_analysis(
- ctx,
+ ctx: CommandContext,
foldertosearch,
numberprocesses,
pattern,
@@ -58,19 +60,11 @@ def static_analysis(
folders_to_exclude: typing.List[pathlib.Path],
):
enterprise_url = ctx.obj.get("enterprise_url")
+ args = get_cli_args(ctx)
logger.debug(
"Starting Static Analysis processing",
extra=dict(
- extra_log_attributes=dict(
- foldertosearch=foldertosearch,
- numberprocesses=numberprocesses,
- pattern=pattern,
- commit_sha=commit,
- token=token,
- force=force,
- folders_to_exclude=folders_to_exclude,
- enterprise_url=enterprise_url,
- )
+ extra_log_attributes=args,
),
)
return asyncio.run(
@@ -84,5 +78,6 @@ def static_analysis(
force,
list(folders_to_exclude),
enterprise_url,
+ args,
)
)
diff --git a/codecov_cli/commands/upload.py b/codecov_cli/commands/upload.py
index cf6a4b1b..7d67ff30 100644
--- a/codecov_cli/commands/upload.py
+++ b/codecov_cli/commands/upload.py
@@ -2,13 +2,14 @@
import os
import pathlib
import typing
-import uuid
import click
from codecov_cli.fallbacks import CodecovOption, FallbackFieldEnum
+from codecov_cli.helpers.args import get_cli_args
from codecov_cli.helpers.options import global_options
from codecov_cli.services.upload import do_upload_logic
+from codecov_cli.types import CommandContext
logger = logging.getLogger("codecovcli")
@@ -19,7 +20,9 @@ def _turn_env_vars_into_dict(ctx, params, value):
_global_upload_options = [
click.option(
+ "--code",
"--report-code",
+ "report_code",
help="The code of the report. If unsure, leave default",
default="default",
),
@@ -34,7 +37,8 @@ def _turn_env_vars_into_dict(ctx, params, value):
"-s",
"--dir",
"--coverage-files-search-root-folder",
- "coverage_files_search_root_folder",
+ "--files-search-root-folder",
+ "files_search_root_folder",
help="Folder where to search for coverage files",
type=click.Path(path_type=pathlib.Path),
default=pathlib.Path.cwd,
@@ -43,7 +47,8 @@ def _turn_env_vars_into_dict(ctx, params, value):
click.option(
"--exclude",
"--coverage-files-search-exclude-folder",
- "coverage_files_search_exclude_folders",
+ "--files-search-exclude-folder",
+ "files_search_exclude_folders",
help="Folders to exclude from search",
type=click.Path(path_type=pathlib.Path),
multiple=True,
@@ -53,7 +58,8 @@ def _turn_env_vars_into_dict(ctx, params, value):
"-f",
"--file",
"--coverage-files-search-direct-file",
- "coverage_files_search_explicitly_listed_files",
+ "--files-search-direct-file",
+ "files_search_explicitly_listed_files",
help="Explicit files to upload. These will be added to the coverage files found for upload. If you wish to only upload the specified files, please consider using --disable-search to disable uploading other files.",
type=click.Path(path_type=pathlib.Path),
multiple=True,
@@ -61,7 +67,7 @@ def _turn_env_vars_into_dict(ctx, params, value):
),
click.option(
"--disable-search",
- help="Disable search for coverage files. This is helpful when specifying what files you want to uload with the --file option.",
+ help="Disable search for coverage files. This is helpful when specifying what files you want to upload with the --file option.",
is_flag=True,
default=False,
),
@@ -96,6 +102,8 @@ def _turn_env_vars_into_dict(ctx, params, value):
"-n",
"--name",
help="Custom defined name of the upload. Visible in Codecov UI",
+ cls=CodecovOption,
+ fallback_field=FallbackFieldEnum.build_code,
),
click.option(
"-B",
@@ -154,7 +162,41 @@ def _turn_env_vars_into_dict(ctx, params, value):
"--handle-no-reports-found",
"handle_no_reports_found",
is_flag=True,
- help="Raise no excpetions when no coverage reports found.",
+ help="Raise no exceptions when no coverage reports found.",
+ ),
+ click.option(
+ "--report-type",
+ help="The type of the file to upload, coverage by default. Possible values are: testing, coverage.",
+ default="coverage",
+ type=click.Choice(["coverage", "test_results"]),
+ ),
+ click.option(
+ "--network-filter",
+ help="Specify a filter on the files listed in the network section of the Codecov report. This will only add files whose path begin with the specified filter. Useful for upload-specific path fixing",
+ ),
+ click.option(
+ "--network-prefix",
+ help="Specify a prefix on files listed in the network section of the Codecov report. Useful to help resolve path fixing",
+ ),
+ click.option(
+ "--gcov-args",
+ help="Extra arguments to pass to gcov",
+ ),
+ click.option(
+ "--gcov-ignore",
+ help="Paths to ignore during gcov gathering",
+ ),
+ click.option(
+ "--gcov-include",
+ help="Paths to include during gcov gathering",
+ ),
+ click.option(
+ "--gcov-executable",
+ help="gcov executable to run. Defaults to 'gcov'",
+ ),
+ click.option(
+ "--swift-project",
+ help="Specify the swift project",
),
]
@@ -170,97 +212,88 @@ def global_upload_options(func):
@global_options
@click.pass_context
def do_upload(
- ctx: click.Context,
+ ctx: CommandContext,
commit_sha: str,
report_code: str,
+ branch: typing.Optional[str],
build_code: typing.Optional[str],
build_url: typing.Optional[str],
- job_code: typing.Optional[str],
+ disable_file_fixes: bool,
+ disable_search: bool,
+ dry_run: bool,
env_vars: typing.Dict[str, str],
+ fail_on_error: bool,
+ files_search_exclude_folders: typing.List[pathlib.Path],
+ files_search_explicitly_listed_files: typing.List[pathlib.Path],
+ files_search_root_folder: pathlib.Path,
flags: typing.List[str],
+ gcov_args: typing.Optional[str],
+ gcov_executable: typing.Optional[str],
+ gcov_ignore: typing.Optional[str],
+ gcov_include: typing.Optional[str],
+ git_service: typing.Optional[str],
+ handle_no_reports_found: bool,
+ job_code: typing.Optional[str],
name: typing.Optional[str],
+ network_filter: typing.Optional[str],
+ network_prefix: typing.Optional[str],
network_root_folder: pathlib.Path,
- coverage_files_search_root_folder: pathlib.Path,
- coverage_files_search_exclude_folders: typing.List[pathlib.Path],
- coverage_files_search_explicitly_listed_files: typing.List[pathlib.Path],
- disable_search: bool,
- disable_file_fixes: bool,
- token: typing.Optional[uuid.UUID],
plugin_names: typing.List[str],
- branch: typing.Optional[str],
- slug: typing.Optional[str],
pull_request_number: typing.Optional[str],
+ report_type: str,
+ slug: typing.Optional[str],
+ swift_project: typing.Optional[str],
+ token: typing.Optional[str],
use_legacy_uploader: bool,
- fail_on_error: bool,
- dry_run: bool,
- git_service: typing.Optional[str],
- handle_no_reports_found: bool,
):
versioning_system = ctx.obj["versioning_system"]
codecov_yaml = ctx.obj["codecov_yaml"] or {}
cli_config = codecov_yaml.get("cli", {})
ci_adapter = ctx.obj.get("ci_adapter")
enterprise_url = ctx.obj.get("enterprise_url")
+ args = get_cli_args(ctx)
logger.debug(
"Starting upload processing",
extra=dict(
- extra_log_attributes=dict(
- commit_sha=commit_sha,
- report_code=report_code,
- build_code=build_code,
- build_url=build_url,
- job_code=job_code,
- env_vars=env_vars,
- flags=flags,
- name=name,
- network_root_folder=network_root_folder,
- coverage_files_search_root_folder=coverage_files_search_root_folder,
- coverage_files_search_exclude_folders=coverage_files_search_exclude_folders,
- coverage_files_search_explicitly_listed_files=coverage_files_search_explicitly_listed_files,
- plugin_names=plugin_names,
- token=token,
- branch=branch,
- slug=slug,
- pull_request_number=pull_request_number,
- git_service=git_service,
- enterprise_url=enterprise_url,
- disable_search=disable_search,
- disable_file_fixes=disable_file_fixes,
- handle_no_reports_found=handle_no_reports_found,
- )
+ extra_log_attributes=args,
),
)
do_upload_logic(
cli_config,
versioning_system,
ci_adapter,
- commit_sha=commit_sha,
- report_code=report_code,
+ branch=branch,
build_code=build_code,
build_url=build_url,
- job_code=job_code,
+ commit_sha=commit_sha,
+ disable_file_fixes=disable_file_fixes,
+ disable_search=disable_search,
+ dry_run=dry_run,
+ enterprise_url=enterprise_url,
env_vars=env_vars,
+ fail_on_error=fail_on_error,
+ files_search_exclude_folders=list(files_search_exclude_folders),
+ files_search_explicitly_listed_files=list(files_search_explicitly_listed_files),
+ files_search_root_folder=files_search_root_folder,
flags=flags,
+ gcov_args=gcov_args,
+ gcov_executable=gcov_executable,
+ gcov_ignore=gcov_ignore,
+ gcov_include=gcov_include,
+ git_service=git_service,
+ handle_no_reports_found=handle_no_reports_found,
+ job_code=job_code,
name=name,
+ network_filter=network_filter,
+ network_prefix=network_prefix,
network_root_folder=network_root_folder,
- coverage_files_search_root_folder=coverage_files_search_root_folder,
- coverage_files_search_exclude_folders=list(
- coverage_files_search_exclude_folders
- ),
- coverage_files_search_explicitly_listed_files=list(
- coverage_files_search_explicitly_listed_files
- ),
plugin_names=plugin_names,
- token=token,
- branch=branch,
- slug=slug,
pull_request_number=pull_request_number,
+ report_code=report_code,
+ slug=slug,
+ swift_project=swift_project,
+ token=token,
+ upload_file_type=report_type,
use_legacy_uploader=use_legacy_uploader,
- fail_on_error=fail_on_error,
- dry_run=dry_run,
- git_service=git_service,
- enterprise_url=enterprise_url,
- disable_search=disable_search,
- handle_no_reports_found=handle_no_reports_found,
- disable_file_fixes=disable_file_fixes,
+ args=args,
)
diff --git a/codecov_cli/commands/upload_coverage.py b/codecov_cli/commands/upload_coverage.py
new file mode 100644
index 00000000..7f22c998
--- /dev/null
+++ b/codecov_cli/commands/upload_coverage.py
@@ -0,0 +1,175 @@
+import logging
+import pathlib
+import typing
+
+import click
+
+from codecov_cli.commands.commit import create_commit
+from codecov_cli.commands.report import create_report
+from codecov_cli.commands.upload import do_upload, global_upload_options
+from codecov_cli.helpers.args import get_cli_args
+from codecov_cli.helpers.options import global_options
+from codecov_cli.services.upload_coverage import upload_coverage_logic
+from codecov_cli.types import CommandContext
+
+logger = logging.getLogger("codecovcli")
+
+
+# These options are the combined options of commit, report and upload commands
+@click.command()
+@global_options
+@global_upload_options
+@click.option(
+ "--parent-sha",
+ help="SHA (with 40 chars) of what should be the parent of this commit",
+)
+@click.pass_context
+def upload_coverage(
+ ctx: CommandContext,
+ branch: typing.Optional[str],
+ build_code: typing.Optional[str],
+ build_url: typing.Optional[str],
+ commit_sha: str,
+ disable_file_fixes: bool,
+ disable_search: bool,
+ dry_run: bool,
+ env_vars: typing.Dict[str, str],
+ fail_on_error: bool,
+ files_search_exclude_folders: typing.List[pathlib.Path],
+ files_search_explicitly_listed_files: typing.List[pathlib.Path],
+ files_search_root_folder: pathlib.Path,
+ flags: typing.List[str],
+ gcov_args: typing.Optional[str],
+ gcov_executable: typing.Optional[str],
+ gcov_ignore: typing.Optional[str],
+ gcov_include: typing.Optional[str],
+ git_service: typing.Optional[str],
+ handle_no_reports_found: bool,
+ job_code: typing.Optional[str],
+ name: typing.Optional[str],
+ network_filter: typing.Optional[str],
+ network_prefix: typing.Optional[str],
+ network_root_folder: pathlib.Path,
+ parent_sha: typing.Optional[str],
+ plugin_names: typing.List[str],
+ pull_request_number: typing.Optional[str],
+ report_code: str,
+ report_type: str,
+ slug: typing.Optional[str],
+ swift_project: typing.Optional[str],
+ token: typing.Optional[str],
+ use_legacy_uploader: bool,
+):
+ args = get_cli_args(ctx)
+ logger.debug(
+ "Starting upload coverage",
+ extra=dict(
+ extra_log_attributes=args,
+ ),
+ )
+
+ if not use_legacy_uploader and report_type == "coverage":
+ versioning_system = ctx.obj["versioning_system"]
+ codecov_yaml = ctx.obj["codecov_yaml"] or {}
+ cli_config = codecov_yaml.get("cli", {})
+ ci_adapter = ctx.obj.get("ci_adapter")
+ enterprise_url = ctx.obj.get("enterprise_url")
+ args = get_cli_args(ctx)
+ ctx.invoke(
+ upload_coverage_logic,
+ cli_config,
+ versioning_system,
+ ci_adapter,
+ branch=branch,
+ build_code=build_code,
+ build_url=build_url,
+ commit_sha=commit_sha,
+ disable_file_fixes=disable_file_fixes,
+ disable_search=disable_search,
+ dry_run=dry_run,
+ enterprise_url=enterprise_url,
+ env_vars=env_vars,
+ fail_on_error=fail_on_error,
+ files_search_exclude_folders=files_search_exclude_folders,
+ files_search_explicitly_listed_files=files_search_explicitly_listed_files,
+ files_search_root_folder=files_search_root_folder,
+ flags=flags,
+ gcov_args=gcov_args,
+ gcov_executable=gcov_executable,
+ gcov_ignore=gcov_ignore,
+ gcov_include=gcov_include,
+ git_service=git_service,
+ handle_no_reports_found=handle_no_reports_found,
+ job_code=job_code,
+ name=name,
+ network_filter=network_filter,
+ network_prefix=network_prefix,
+ network_root_folder=network_root_folder,
+ parent_sha=parent_sha,
+ plugin_names=plugin_names,
+ pull_request_number=pull_request_number,
+ report_code=report_code,
+ slug=slug,
+ swift_project=swift_project,
+ token=token,
+ upload_file_type=report_type,
+ use_legacy_uploader=use_legacy_uploader,
+ args=args,
+ )
+ else:
+ ctx.invoke(
+ create_commit,
+ commit_sha=commit_sha,
+ parent_sha=parent_sha,
+ pull_request_number=pull_request_number,
+ branch=branch,
+ slug=slug,
+ token=token,
+ git_service=git_service,
+ fail_on_error=True,
+ )
+ if report_type == "coverage":
+ ctx.invoke(
+ create_report,
+ token=token,
+ code=report_code,
+ fail_on_error=True,
+ commit_sha=commit_sha,
+ slug=slug,
+ git_service=git_service,
+ )
+ ctx.invoke(
+ do_upload,
+ branch=branch,
+ build_code=build_code,
+ build_url=build_url,
+ commit_sha=commit_sha,
+ disable_file_fixes=disable_file_fixes,
+ disable_search=disable_search,
+ dry_run=dry_run,
+ env_vars=env_vars,
+ fail_on_error=fail_on_error,
+ files_search_exclude_folders=files_search_exclude_folders,
+ files_search_explicitly_listed_files=files_search_explicitly_listed_files,
+ files_search_root_folder=files_search_root_folder,
+ flags=flags,
+ gcov_args=gcov_args,
+ gcov_executable=gcov_executable,
+ gcov_ignore=gcov_ignore,
+ gcov_include=gcov_include,
+ git_service=git_service,
+ handle_no_reports_found=handle_no_reports_found,
+ job_code=job_code,
+ name=name,
+ network_filter=network_filter,
+ network_prefix=network_prefix,
+ network_root_folder=network_root_folder,
+ plugin_names=plugin_names,
+ pull_request_number=pull_request_number,
+ report_code=report_code,
+ report_type=report_type,
+ slug=slug,
+ swift_project=swift_project,
+ token=token,
+ use_legacy_uploader=use_legacy_uploader,
+ )
diff --git a/codecov_cli/commands/upload_process.py b/codecov_cli/commands/upload_process.py
index 67fd14da..1ee77eca 100644
--- a/codecov_cli/commands/upload_process.py
+++ b/codecov_cli/commands/upload_process.py
@@ -1,14 +1,15 @@
import logging
import pathlib
import typing
-import uuid
import click
from codecov_cli.commands.commit import create_commit
from codecov_cli.commands.report import create_report
from codecov_cli.commands.upload import do_upload, global_upload_options
+from codecov_cli.helpers.args import get_cli_args
from codecov_cli.helpers.options import global_options
+from codecov_cli.types import CommandContext
logger = logging.getLogger("codecovcli")
@@ -23,60 +24,46 @@
)
@click.pass_context
def upload_process(
- ctx,
- commit_sha: str,
- report_code: str,
+ ctx: CommandContext,
+ branch: typing.Optional[str],
build_code: typing.Optional[str],
build_url: typing.Optional[str],
- job_code: typing.Optional[str],
+ commit_sha: str,
+ disable_file_fixes: bool,
+ disable_search: bool,
+ dry_run: bool,
env_vars: typing.Dict[str, str],
+ fail_on_error: bool,
+ files_search_exclude_folders: typing.List[pathlib.Path],
+ files_search_explicitly_listed_files: typing.List[pathlib.Path],
+ files_search_root_folder: pathlib.Path,
flags: typing.List[str],
+ gcov_args: typing.Optional[str],
+ gcov_executable: typing.Optional[str],
+ gcov_ignore: typing.Optional[str],
+ gcov_include: typing.Optional[str],
+ git_service: typing.Optional[str],
+ handle_no_reports_found: bool,
+ job_code: typing.Optional[str],
name: typing.Optional[str],
+ network_filter: typing.Optional[str],
+ network_prefix: typing.Optional[str],
network_root_folder: pathlib.Path,
- coverage_files_search_root_folder: pathlib.Path,
- coverage_files_search_exclude_folders: typing.List[pathlib.Path],
- coverage_files_search_explicitly_listed_files: typing.List[pathlib.Path],
- disable_search: bool,
- disable_file_fixes: bool,
- token: typing.Optional[uuid.UUID],
+ parent_sha: typing.Optional[str],
plugin_names: typing.List[str],
- branch: typing.Optional[str],
- slug: typing.Optional[str],
pull_request_number: typing.Optional[str],
+ report_code: str,
+ report_type: str,
+ slug: typing.Optional[str],
+ swift_project: typing.Optional[str],
+ token: typing.Optional[str],
use_legacy_uploader: bool,
- fail_on_error: bool,
- dry_run: bool,
- git_service: typing.Optional[str],
- parent_sha: typing.Optional[str],
- handle_no_reports_found: bool,
):
+ args = get_cli_args(ctx)
logger.debug(
"Starting upload process",
extra=dict(
- extra_log_attributes=dict(
- commit_sha=commit_sha,
- report_code=report_code,
- build_code=build_code,
- build_url=build_url,
- job_code=job_code,
- env_vars=env_vars,
- flags=flags,
- name=name,
- network_root_folder=network_root_folder,
- coverage_files_search_root_folder=coverage_files_search_root_folder,
- coverage_files_search_exclude_folders=coverage_files_search_exclude_folders,
- coverage_files_search_explicitly_listed_files=coverage_files_search_explicitly_listed_files,
- plugin_names=plugin_names,
- token=token,
- branch=branch,
- slug=slug,
- pull_request_number=pull_request_number,
- git_service=git_service,
- disable_search=disable_search,
- disable_file_fixes=disable_file_fixes,
- fail_on_error=fail_on_error,
- handle_no_reports_found=handle_no_reports_found,
- )
+ extra_log_attributes=args,
),
)
@@ -91,39 +78,48 @@ def upload_process(
git_service=git_service,
fail_on_error=True,
)
- ctx.invoke(
- create_report,
- token=token,
- code=report_code,
- fail_on_error=True,
- commit_sha=commit_sha,
- slug=slug,
- git_service=git_service,
- )
+ if report_type == "coverage":
+ ctx.invoke(
+ create_report,
+ token=token,
+ code=report_code,
+ fail_on_error=True,
+ commit_sha=commit_sha,
+ slug=slug,
+ git_service=git_service,
+ )
ctx.invoke(
do_upload,
- commit_sha=commit_sha,
- report_code=report_code,
+ branch=branch,
build_code=build_code,
build_url=build_url,
- job_code=job_code,
+ commit_sha=commit_sha,
+ disable_file_fixes=disable_file_fixes,
+ disable_search=disable_search,
+ dry_run=dry_run,
env_vars=env_vars,
+ fail_on_error=fail_on_error,
+ files_search_exclude_folders=files_search_exclude_folders,
+ files_search_explicitly_listed_files=files_search_explicitly_listed_files,
+ files_search_root_folder=files_search_root_folder,
flags=flags,
+ gcov_args=gcov_args,
+ gcov_executable=gcov_executable,
+ gcov_ignore=gcov_ignore,
+ gcov_include=gcov_include,
+ git_service=git_service,
+ handle_no_reports_found=handle_no_reports_found,
+ job_code=job_code,
name=name,
+ network_filter=network_filter,
+ network_prefix=network_prefix,
network_root_folder=network_root_folder,
- coverage_files_search_root_folder=coverage_files_search_root_folder,
- coverage_files_search_exclude_folders=coverage_files_search_exclude_folders,
- coverage_files_search_explicitly_listed_files=coverage_files_search_explicitly_listed_files,
- disable_search=disable_search,
- token=token,
plugin_names=plugin_names,
- branch=branch,
- slug=slug,
pull_request_number=pull_request_number,
+ report_code=report_code,
+ report_type=report_type,
+ slug=slug,
+ swift_project=swift_project,
+ token=token,
use_legacy_uploader=use_legacy_uploader,
- fail_on_error=fail_on_error,
- dry_run=dry_run,
- git_service=git_service,
- handle_no_reports_found=handle_no_reports_found,
- disable_file_fixes=disable_file_fixes,
)
diff --git a/codecov_cli/fallbacks.py b/codecov_cli/fallbacks.py
index d84c6eb6..2c8f9b33 100644
--- a/codecov_cli/fallbacks.py
+++ b/codecov_cli/fallbacks.py
@@ -5,15 +5,15 @@
class FallbackFieldEnum(Enum):
- commit_sha = auto()
- build_url = auto()
+ branch = auto()
build_code = auto()
+ build_url = auto()
+ commit_sha = auto()
+ git_service = auto()
job_code = auto()
pull_request_number = auto()
- slug = auto()
- branch = auto()
service = auto()
- git_service = auto()
+ slug = auto()
class CodecovOption(click.Option):
diff --git a/codecov_cli/helpers/args.py b/codecov_cli/helpers/args.py
new file mode 100644
index 00000000..0d797692
--- /dev/null
+++ b/codecov_cli/helpers/args.py
@@ -0,0 +1,31 @@
+import json
+import logging
+from pathlib import PosixPath
+
+import click
+
+from codecov_cli import __version__
+
+logger = logging.getLogger("codecovcli")
+
+
+def get_cli_args(ctx: click.Context):
+ args = ctx.obj["cli_args"]
+ args["command"] = str(ctx.command.name)
+ args["version"] = f"cli-{__version__}"
+ args.update(ctx.params)
+ if "token" in args:
+ del args["token"]
+
+ filtered_args = {}
+ for k in args.keys():
+ try:
+ if isinstance(args[k], PosixPath):
+ filtered_args[k] = str(args[k])
+ else:
+ json.dumps(args[k])
+ filtered_args[k] = args[k]
+ except Exception:
+ continue
+
+ return filtered_args
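
A minimal sketch (not part of the diff) of what `get_cli_args` ends up keeping: the token is dropped, `PosixPath` values are stringified, and anything that is not JSON-serializable is silently skipped. The helper below applies the same rules to a plain dict instead of a real `click.Context`.

```python
import json
from pathlib import PosixPath


def filter_cli_args(args: dict) -> dict:
    # Mirrors get_cli_args' filtering rules on a plain dict.
    args = dict(args)
    args.pop("token", None)  # the upload token is never logged
    filtered = {}
    for key, value in args.items():
        try:
            if isinstance(value, PosixPath):
                filtered[key] = str(value)  # paths become plain strings
            else:
                json.dumps(value)  # keep only JSON-serializable values
                filtered[key] = value
        except Exception:
            continue
    return filtered


print(filter_cli_args(
    {"token": "secret", "flags": ["unit"], "root": PosixPath("."), "obj": object()}
))
# -> {'flags': ['unit'], 'root': '.'}
```
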
diff --git a/codecov_cli/helpers/ci_adapters/__init__.py b/codecov_cli/helpers/ci_adapters/__init__.py
index 460d78e6..fa142239 100644
--- a/codecov_cli/helpers/ci_adapters/__init__.py
+++ b/codecov_cli/helpers/ci_adapters/__init__.py
@@ -1,12 +1,15 @@
import logging
+from typing import Optional
from codecov_cli.helpers.ci_adapters.appveyor_ci import AppveyorCIAdapter
from codecov_cli.helpers.ci_adapters.azure_pipelines import AzurePipelinesCIAdapter
+from codecov_cli.helpers.ci_adapters.base import CIAdapterBase
from codecov_cli.helpers.ci_adapters.bitbucket_ci import BitbucketAdapter
from codecov_cli.helpers.ci_adapters.bitrise_ci import BitriseCIAdapter
from codecov_cli.helpers.ci_adapters.buildkite import BuildkiteAdapter
from codecov_cli.helpers.ci_adapters.circleci import CircleCICIAdapter
from codecov_cli.helpers.ci_adapters.cirrus_ci import CirrusCIAdapter
+from codecov_cli.helpers.ci_adapters.cloudbuild import GoogleCloudBuildAdapter
from codecov_cli.helpers.ci_adapters.codebuild import AWSCodeBuildCIAdapter
from codecov_cli.helpers.ci_adapters.droneci import DroneCIAdapter
from codecov_cli.helpers.ci_adapters.github_actions import GithubActionsCIAdapter
@@ -21,7 +24,7 @@
logger = logging.getLogger("codecovcli")
-def get_ci_adapter(provider_name: str = None):
+def get_ci_adapter(provider_name: str = None) -> Optional[CIAdapterBase]:
if provider_name:
for provider in get_ci_providers_list():
if provider.get_service_name().lower() == provider_name.lower():
@@ -54,6 +57,7 @@ def get_ci_providers_list():
TeamcityAdapter(),
TravisCIAdapter(),
AWSCodeBuildCIAdapter(),
+ GoogleCloudBuildAdapter(),
# local adapter should always be the last one
LocalAdapter(),
]
diff --git a/codecov_cli/helpers/ci_adapters/azure_pipelines.py b/codecov_cli/helpers/ci_adapters/azure_pipelines.py
index f444ecb1..4e253068 100644
--- a/codecov_cli/helpers/ci_adapters/azure_pipelines.py
+++ b/codecov_cli/helpers/ci_adapters/azure_pipelines.py
@@ -10,7 +10,9 @@ def detect(self) -> bool:
return bool(os.getenv("SYSTEM_TEAMFOUNDATIONCOLLECTIONURI"))
def _get_commit_sha(self):
- return os.getenv("BUILD_SOURCEVERSION")
+ return os.getenv("SYSTEM_PULLREQUEST_SOURCECOMMITID") or os.getenv(
+ "BUILD_SOURCEVERSION"
+ )
def _get_build_url(self):
if os.getenv("SYSTEM_TEAMPROJECT") and os.getenv("BUILD_BUILDID"):
diff --git a/codecov_cli/helpers/ci_adapters/cloudbuild.py b/codecov_cli/helpers/ci_adapters/cloudbuild.py
new file mode 100644
index 00000000..0f52a2e2
--- /dev/null
+++ b/codecov_cli/helpers/ci_adapters/cloudbuild.py
@@ -0,0 +1,70 @@
+import os
+
+from codecov_cli.helpers.ci_adapters.base import CIAdapterBase
+
+
+class GoogleCloudBuildAdapter(CIAdapterBase):
+ """
+    Google Cloud Build uses variable substitutions in its builds
+    https://cloud.google.com/build/docs/configuring-builds/substitute-variable-values
+    For this adapter to read those values, they have to be available as
+    environment variables, so you have to manually map the substitution
+    variables to environment variables on the build step, like this:
+ env:
+ - '_PR_NUMBER=$_PR_NUMBER'
+ - 'BRANCH_NAME=$BRANCH_NAME'
+ - 'BUILD_ID=$BUILD_ID'
+ - 'COMMIT_SHA=$COMMIT_SHA'
+ - 'LOCATION=$LOCATION'
+ - 'PROJECT_ID=$PROJECT_ID'
+ - 'PROJECT_NUMBER=$PROJECT_NUMBER'
+ - 'REF_NAME=$REF_NAME'
+ - 'REPO_FULL_NAME=$REPO_FULL_NAME'
+ - 'TRIGGER_NAME=$TRIGGER_NAME'
+ Read more about manual substitution mapping here:
+ https://cloud.google.com/build/docs/configuring-builds/substitute-variable-values#map_substitutions_manually
+ """
+
+ def detect(self) -> bool:
+ return all(
+ list(
+ map(os.getenv, ["LOCATION", "PROJECT_NUMBER", "PROJECT_ID", "BUILD_ID"])
+ )
+ )
+
+ def _get_branch(self):
+ return os.getenv("BRANCH_NAME")
+
+ def _get_build_code(self):
+ return os.getenv("BUILD_ID")
+
+ def _get_commit_sha(self):
+ return os.getenv("COMMIT_SHA")
+
+ def _get_slug(self):
+ return os.getenv("REPO_FULL_NAME")
+
+ def _get_build_url(self):
+ # to build the url, the environment variables LOCATION, PROJECT_ID and BUILD_ID are needed
+ if not all(list(map(os.getenv, ["LOCATION", "PROJECT_ID", "BUILD_ID"]))):
+ return None
+
+ location = os.getenv("LOCATION")
+ project_id = os.getenv("PROJECT_ID")
+ build_id = os.getenv("BUILD_ID")
+
+ return f"https://console.cloud.google.com/cloud-build/builds;region={location}/{build_id}?project={project_id}"
+
+ def _get_pull_request_number(self):
+ pr_num = os.getenv("_PR_NUMBER")
+ return pr_num if pr_num != "" else None
+
+ def _get_job_code(self):
+ job_code = os.getenv("TRIGGER_NAME")
+ return job_code if job_code != "" else None
+
+ def _get_service(self):
+ return "google_cloud_build"
+
+ def get_service_name(self):
+ return "GoogleCloudBuild"
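
A hedged usage sketch for the new adapter. The substitution values below are invented, and the private accessors are called directly only to show what they return once the variables from the docstring are mapped into the environment.

```python
import os

from codecov_cli.helpers.ci_adapters.cloudbuild import GoogleCloudBuildAdapter

# Invented values standing in for the mapped substitution variables.
os.environ.update({
    "LOCATION": "us-central1",
    "PROJECT_ID": "my-project",
    "PROJECT_NUMBER": "123456",
    "BUILD_ID": "7c0f2a7e",
    "BRANCH_NAME": "main",
    "COMMIT_SHA": "d34db33f" * 5,
})

adapter = GoogleCloudBuildAdapter()
print(adapter.detect())          # True once the four detection variables are set
print(adapter._get_build_url())
# https://console.cloud.google.com/cloud-build/builds;region=us-central1/7c0f2a7e?project=my-project
```
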
diff --git a/codecov_cli/helpers/ci_adapters/codebuild.py b/codecov_cli/helpers/ci_adapters/codebuild.py
index 056773c4..5b0b259c 100644
--- a/codecov_cli/helpers/ci_adapters/codebuild.py
+++ b/codecov_cli/helpers/ci_adapters/codebuild.py
@@ -12,7 +12,7 @@ def detect(self) -> bool:
def _get_branch(self):
branch = os.getenv("CODEBUILD_WEBHOOK_HEAD_REF")
if branch:
- return re.sub("^refs\/heads\/", "", branch)
+ return re.sub(r"^refs\/heads\/", "", branch)
return None
def _get_build_code(self):
@@ -27,10 +27,10 @@ def _get_commit_sha(self):
def _get_slug(self):
slug = os.getenv("CODEBUILD_SOURCE_REPO_URL")
if slug:
- slug = re.sub("^.*github.com\/", "", slug)
- slug = re.sub("^.*gitlab.com\/", "", slug)
- slug = re.sub("^.*bitbucket.com\/", "", slug)
- return re.sub("\.git$", "", slug)
+ slug = re.sub(r"^.*github.com\/", "", slug)
+ slug = re.sub(r"^.*gitlab.com\/", "", slug)
+ slug = re.sub(r"^.*bitbucket.com\/", "", slug)
+ return re.sub(r"\.git$", "", slug)
return None
def _get_service(self):
@@ -39,7 +39,7 @@ def _get_service(self):
def _get_pull_request_number(self):
pr = os.getenv("CODEBUILD_SOURCE_VERSION")
if pr and pr.startswith("pr/"):
- return re.sub("^pr\/", "", pr)
+ return re.sub(r"^pr\/", "", pr)
return None
def _get_job_code(self):
diff --git a/codecov_cli/helpers/config.py b/codecov_cli/helpers/config.py
index 1ae2ac03..2c87ded2 100644
--- a/codecov_cli/helpers/config.py
+++ b/codecov_cli/helpers/config.py
@@ -1,20 +1,66 @@
import logging
import pathlib
+import typing as t
import yaml
+from codecov_cli.helpers.versioning_systems import get_versioning_system
+
logger = logging.getLogger("codecovcli")
CODECOV_API_URL = "https://api.codecov.io"
+CODECOV_INGEST_URL = "https://ingest.codecov.io"
LEGACY_CODECOV_API_URL = "https://codecov.io"
+# Relative to the project root
+CODECOV_YAML_RECOGNIZED_DIRECTORIES = [
+ "",
+ ".github/",
+ "dev/",
+]
+
+CODECOV_YAML_RECOGNIZED_FILENAMES = [
+ "codecov.yml",
+ "codecov.yaml",
+ ".codecov.yml",
+ ".codecov.yaml",
+]
+
+
+def _find_codecov_yamls():
+ vcs = get_versioning_system()
+ vcs_root = vcs.get_network_root() if vcs else None
+ project_root = vcs_root if vcs_root else pathlib.Path.cwd()
+
+ yamls = []
+ for directory in CODECOV_YAML_RECOGNIZED_DIRECTORIES:
+ dir_candidate = project_root / directory
+ if not dir_candidate.exists() or not dir_candidate.is_dir():
+ continue
+
+ for filename in CODECOV_YAML_RECOGNIZED_FILENAMES:
+ file_candidate = dir_candidate / filename
+ if file_candidate.exists() and file_candidate.is_file():
+ yamls.append(file_candidate)
+
+ return yamls
+
+
+def load_cli_config(codecov_yml_path: t.Optional[pathlib.Path]) -> t.Optional[dict]:
+ if not codecov_yml_path:
+ yamls = _find_codecov_yamls()
+ codecov_yml_path = yamls[0] if yamls else None
+
+ if not codecov_yml_path:
+ logger.warning("No config file could be found. Ignoring config.")
+ return None
+
+ if not codecov_yml_path.exists() or not codecov_yml_path.is_file():
+ logger.warning(
+ f"Config file {codecov_yml_path} not found, or is not a file. Ignoring config."
+ )
+ return None
-def load_cli_config(codecov_yml_path: pathlib.Path):
- if codecov_yml_path.exists() and codecov_yml_path.is_file():
- logger.debug(f"Loading config from {codecov_yml_path}")
- with open(codecov_yml_path, "r") as file_stream:
- return yaml.safe_load(file_stream.read())
- logger.warning(
- f"Config file {codecov_yml_path} not found, or is not a file. Ignoring config."
- )
- return None
+ logger.debug(f"Loading config from {codecov_yml_path}")
+ with open(codecov_yml_path, "r") as file_stream:
+ return yaml.safe_load(file_stream.read())
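
A small sketch of the discovery order `_find_codecov_yamls` walks. The project root here is just the current directory, whereas the real helper prefers the git toplevel when one exists.

```python
from pathlib import Path

# Candidate config locations, in the priority order defined by the two constants above.
root = Path(".")
directories = ["", ".github/", "dev/"]
filenames = ["codecov.yml", "codecov.yaml", ".codecov.yml", ".codecov.yaml"]

for directory in directories:
    dir_candidate = root / directory
    for filename in filenames:
        file_candidate = dir_candidate / filename
        print(file_candidate, file_candidate.is_file())
```
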
diff --git a/codecov_cli/helpers/encoder.py b/codecov_cli/helpers/encoder.py
index 191e3bfe..167a817e 100644
--- a/codecov_cli/helpers/encoder.py
+++ b/codecov_cli/helpers/encoder.py
@@ -2,6 +2,7 @@
slug_without_subgroups_regex = re.compile(r"[^/\s]+\/[^/\s]+$")
slug_with_subgroups_regex = re.compile(r"[^/\s]+(\/[^/\s]+)+$")
+encoded_slug_regex = re.compile(r"[^:\s]+(:::[^:\s]+)*(::::[^:\s]+){1}$")
def encode_slug(slug: str):
@@ -13,6 +14,16 @@ def encode_slug(slug: str):
return encoded_slug
+def decode_slug(slug: str):
+ if slug_encoded_incorrectly(slug):
+ raise ValueError("The slug is not encoded correctly")
+
+ owner, repo = slug.split("::::", 1)
+ decoded_owner = "/".join(owner.split(":::"))
+ decoded_slug = "/".join([decoded_owner, repo])
+ return decoded_slug
+
+
def slug_without_subgroups_is_invalid(slug: str):
"""
Checks if slug is in the form of owner/repo
@@ -27,3 +38,12 @@ def slug_with_subgroups_is_invalid(slug: str):
Returns True if it's invalid, otherwise return False
"""
return not slug or not slug_with_subgroups_regex.match(slug)
+
+
+def slug_encoded_incorrectly(slug: str):
+ """
+ Checks if slug is encoded incorrectly based on the encoding mechanism we use.
+ Checks if slug is in the form of owner:::subowner::::repo or owner::::repo
+ Returns True if invalid, otherwise returns False
+ """
+ return not slug or not encoded_slug_regex.match(slug)
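
An illustrative round-trip for the new decoder, assuming (as the regex and docstring suggest) that `encode_slug` joins owner groups with `:::` and marks the repo with `::::`.

```python
from codecov_cli.helpers.encoder import decode_slug

print(decode_slug("owner::::repo"))             # owner/repo
print(decode_slug("group:::subgroup::::repo"))  # group/subgroup/repo

# decode_slug rejects anything that doesn't follow the encoding scheme
try:
    decode_slug("owner/repo")
except ValueError as e:
    print(e)  # The slug is not encoded correctly
```
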
diff --git a/codecov_cli/helpers/folder_searcher.py b/codecov_cli/helpers/folder_searcher.py
index 6abbd97b..cc87238a 100644
--- a/codecov_cli/helpers/folder_searcher.py
+++ b/codecov_cli/helpers/folder_searcher.py
@@ -2,13 +2,13 @@
import os
import pathlib
import re
-import typing
from fnmatch import translate
+from typing import Generator, List, Optional, Pattern
def _is_included(
- filename_include_regex: typing.Pattern,
- multipart_include_regex: typing.Optional[typing.Pattern],
+ filename_include_regex: Pattern,
+ multipart_include_regex: Optional[Pattern],
path: pathlib.Path,
):
return filename_include_regex.match(path.name) and (
@@ -18,8 +18,8 @@ def _is_included(
def _is_excluded(
- filename_exclude_regex: typing.Optional[typing.Pattern],
- multipart_exclude_regex: typing.Optional[typing.Pattern],
+ filename_exclude_regex: Optional[Pattern],
+ multipart_exclude_regex: Optional[Pattern],
path: pathlib.Path,
):
return (
@@ -31,14 +31,14 @@ def _is_excluded(
def search_files(
folder_to_search: pathlib.Path,
- folders_to_ignore: typing.List[str],
+ folders_to_ignore: List[str],
*,
- filename_include_regex: typing.Pattern,
- filename_exclude_regex: typing.Optional[typing.Pattern] = None,
- multipart_include_regex: typing.Optional[typing.Pattern] = None,
- multipart_exclude_regex: typing.Optional[typing.Pattern] = None,
- search_for_directories: bool = False
-) -> typing.Generator[pathlib.Path, None, None]:
+ filename_include_regex: Pattern,
+ filename_exclude_regex: Optional[Pattern] = None,
+ multipart_include_regex: Optional[Pattern] = None,
+ multipart_exclude_regex: Optional[Pattern] = None,
+ search_for_directories: bool = False,
+) -> Generator[pathlib.Path, None, None]:
""" "
Searches for files or directories in a given folder
@@ -58,7 +58,7 @@ def search_files(
this_is_excluded = functools.partial(
_is_excluded, filename_exclude_regex, multipart_exclude_regex
)
- for (dirpath, dirnames, filenames) in os.walk(folder_to_search):
+ for dirpath, dirnames, filenames in os.walk(folder_to_search):
dirs_to_remove = set(d for d in dirnames if d in folders_to_ignore)
if multipart_exclude_regex is not None:
@@ -85,7 +85,7 @@ def search_files(
yield file_path
-def globs_to_regex(patterns: typing.List[str]) -> typing.Optional[typing.Pattern]:
+def globs_to_regex(patterns: List[str]) -> Optional[Pattern]:
"""
Converts a list of glob patterns to a combined ORed regex
diff --git a/codecov_cli/helpers/git.py b/codecov_cli/helpers/git.py
index 1de19acd..780cfd55 100644
--- a/codecov_cli/helpers/git.py
+++ b/codecov_cli/helpers/git.py
@@ -1,8 +1,13 @@
import logging
import re
from enum import Enum
+from typing import Optional
from urllib.parse import urlparse
+from codecov_cli.helpers.encoder import decode_slug
+from codecov_cli.helpers.git_services import PullDict
+from codecov_cli.helpers.git_services.github import Github
+
slug_regex = re.compile(r"[^/\s]+\/[^/\s]+$")
logger = logging.getLogger("codecovcli")
@@ -17,6 +22,11 @@ class GitService(Enum):
BITBUCKET_SERVER = "bitbucket_server"
+def get_git_service(git):
+ if git == "github":
+ return Github()
+
+
def parse_slug(remote_repo_url: str):
"""
Extracts a slug from git remote urls. returns None if the url is invalid
@@ -51,13 +61,16 @@ def parse_git_service(remote_repo_url: str):
Possible cases we're considering:
- https://github.com/codecov/codecov-cli.git returns github
- git@github.com:codecov/codecov-cli.git returns github
+ - ssh://git@github.com/gitcodecov/codecov-cli returns github
+ - ssh://git@github.com:gitcodecov/codecov-cli returns github
- https://user-name@bitbucket.org/namespace-codecov/first_repo.git returns bitbucket
"""
services = [service.value for service in GitService]
parsed_url = urlparse(remote_repo_url)
service = None
- if remote_repo_url.startswith("https://"):
+ scheme = parsed_url.scheme
+ if scheme in ("https", "ssh"):
netloc = parsed_url.netloc
if "@" in netloc:
netloc = netloc.split("@", 1)[1]
diff --git a/codecov_cli/helpers/git_services/__init__.py b/codecov_cli/helpers/git_services/__init__.py
new file mode 100644
index 00000000..a1e580cf
--- /dev/null
+++ b/codecov_cli/helpers/git_services/__init__.py
@@ -0,0 +1,14 @@
+from typing import TypedDict
+
+
+class CommitInfo(TypedDict):
+ sha: str
+ label: str
+ ref: str
+ slug: str
+
+
+class PullDict(TypedDict):
+ url: str
+ head: CommitInfo
+ base: CommitInfo
diff --git a/codecov_cli/helpers/git_services/github.py b/codecov_cli/helpers/git_services/github.py
new file mode 100644
index 00000000..5aeb1744
--- /dev/null
+++ b/codecov_cli/helpers/git_services/github.py
@@ -0,0 +1,40 @@
+import json
+
+import requests
+
+from codecov_cli.helpers.git_services import PullDict
+
+
+class Github:
+ api_url = "https://api.github.com"
+ api_version = "2022-11-28"
+
+ def get_pull_request(self, slug, pr_number) -> PullDict:
+ pull_url = f"/repos/{slug}/pulls/{pr_number}"
+ url = self.api_url + pull_url
+ headers = {"X-GitHub-Api-Version": self.api_version}
+ response = requests.get(url, headers=headers)
+ if response.status_code == 200:
+ res = json.loads(response.text)
+ return {
+ "url": res["url"],
+ "head": {
+ "sha": res["head"]["sha"],
+ "label": res["head"]["label"],
+ "ref": res["head"]["ref"],
+                # From empirical test data, it seems that the "repo" key in "head" is set to None
+                # if the PR is from the same repo (i.e. not from a fork)
+ "slug": (
+ res["head"]["repo"]["full_name"]
+ if res["head"]["repo"]
+ else res["base"]["repo"]["full_name"]
+ ),
+ },
+ "base": {
+ "sha": res["base"]["sha"],
+ "label": res["base"]["label"],
+ "ref": res["base"]["ref"],
+ "slug": res["base"]["repo"]["full_name"],
+ },
+ }
+ return None
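
An illustrative call of the new GitHub helper. The slug and PR number are placeholders, and the request goes unauthenticated against the real GitHub API (so it is rate-limited and returns `None` on any non-200 response).

```python
from codecov_cli.helpers.git_services.github import Github

pull = Github().get_pull_request("codecov/codecov-cli", 1)
if pull is not None:
    print(pull["head"]["sha"], pull["base"]["ref"])
```
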
diff --git a/codecov_cli/helpers/options.py b/codecov_cli/helpers/options.py
index 6b9ceede..ae22c633 100644
--- a/codecov_cli/helpers/options.py
+++ b/codecov_cli/helpers/options.py
@@ -31,7 +31,6 @@
"-t",
"--token",
help="Codecov upload token",
- type=click.UUID,
envvar="CODECOV_TOKEN",
),
click.option(
diff --git a/codecov_cli/helpers/request.py b/codecov_cli/helpers/request.py
index 75222b99..27bd3be0 100644
--- a/codecov_cli/helpers/request.py
+++ b/codecov_cli/helpers/request.py
@@ -1,6 +1,8 @@
+import json
import logging
-import uuid
+from sys import exit
from time import sleep
+from typing import Optional
import click
import requests
@@ -15,7 +17,7 @@
USER_AGENT = f"codecov-cli/{__version__}"
-def _set_user_agent(headers: dict = None) -> dict:
+def _set_user_agent(headers: Optional[dict] = None) -> dict:
headers = headers or {}
headers.setdefault("User-Agent", USER_AGENT)
return headers
@@ -37,7 +39,10 @@ def put(url: str, data: dict = None, headers: dict = None) -> requests.Response:
def post(
- url: str, data: dict = None, headers: dict = None, params: dict = None
+ url: str,
+ data: Optional[dict] = None,
+ headers: Optional[dict] = None,
+ params: Optional[dict] = None,
) -> requests.Response:
headers = _set_user_agent(headers)
return requests.post(url, json=data, headers=headers, params=params)
@@ -47,16 +52,27 @@ def backoff_time(curr_retry):
return 2 ** (curr_retry - 1)
+class RetryException(Exception): ...
+
+
def retry_request(func):
def wrapper(*args, **kwargs):
retry = 0
while retry < MAX_RETRIES:
try:
- return func(*args, **kwargs)
+ response = func(*args, **kwargs)
+ if response.status_code >= 500:
+ logger.warning(
+ f"Response status code was {response.status_code}.",
+ extra=dict(extra_log_attributes=dict(retry=retry)),
+ )
+ raise RetryException
+ return response
except (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
- ) as exp:
+ RetryException,
+ ):
logger.warning(
"Request failed. Retrying",
extra=dict(extra_log_attributes=dict(retry=retry)),
@@ -70,19 +86,39 @@ def wrapper(*args, **kwargs):
@retry_request
def send_post_request(
- url: str, data: dict = None, headers: dict = None, params: dict = None
+ url: str,
+ data: Optional[dict] = None,
+ headers: Optional[dict] = None,
+ params: Optional[dict] = None,
):
return request_result(post(url=url, data=data, headers=headers, params=params))
-def get_token_header_or_fail(token: uuid.UUID) -> dict:
+@retry_request
+def send_get_request(
+ url: str, headers: dict = None, params: dict = None
+) -> RequestResult:
+ return request_result(get(url=url, headers=headers, params=params))
+
+
+def get_token_header_or_fail(token: Optional[str]) -> dict:
+ """
+ Rejects requests with no Authorization token. Prevents tokenless uploads.
+ """
if token is None:
raise click.ClickException(
"Codecov token not found. Please provide Codecov token with -t flag."
)
- if not isinstance(token, uuid.UUID):
- raise click.ClickException(f"Token must be UUID. Received {type(token)}")
- return {"Authorization": f"token {token.hex}"}
+ return {"Authorization": f"token {token}"}
+
+
+def get_token_header(token: Optional[str]) -> Optional[dict]:
+ """
+ Allows requests with no Authorization token.
+ """
+ if token is None:
+ return None
+ return {"Authorization": f"token {token}"}
@retry_request
@@ -94,7 +130,7 @@ def send_put_request(
return request_result(put(url=url, data=data, headers=headers))
-def request_result(resp):
+def request_result(resp: requests.Response) -> RequestResult:
if resp.status_code >= 400:
return RequestResult(
status_code=resp.status_code,
@@ -120,7 +156,9 @@ def log_warnings_and_errors_if_any(
)
logger.debug(
f"{process_desc} result",
- extra=dict(extra_log_attributes=dict(result=sending_result)),
+ extra=dict(
+ extra_log_attributes=dict(result=_sanitize_request_result(sending_result))
+ ),
)
if sending_result.warnings:
number_warnings = len(sending_result.warnings)
@@ -134,3 +172,27 @@ def log_warnings_and_errors_if_any(
logger.error(f"{process_desc} failed: {sending_result.error.description}")
if fail_on_error:
exit(1)
+
+
+def _sanitize_request_result(result: RequestResult):
+ if not hasattr(result, "text"):
+ return result
+
+ try:
+ text_as_dict = json.loads(result.text)
+ token = text_as_dict.get("repository").get("yaml").get("codecov").get("token")
+ if token:
+ sanitized_token = str(token)[:1] + 18 * "*"
+ text_as_dict["repository"]["yaml"]["codecov"]["token"] = sanitized_token
+ sanitized_text = json.dumps(text_as_dict)
+
+ return RequestResult(
+ status_code=result.status_code,
+ error=result.error,
+ warnings=result.warnings,
+ text=sanitized_text,
+ )
+ except (AttributeError, json.JSONDecodeError):
+ pass
+
+ return result
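
A sketch of the new 5xx retry behaviour, assuming the unchanged tail of the wrapper (outside this hunk) increments the retry counter and sleeps between attempts. The stub responses stand in for `requests.Response` objects.

```python
from types import SimpleNamespace

from codecov_cli.helpers.request import backoff_time, retry_request

# First call reports a 503 (>= 500 raises RetryException internally); the retry succeeds.
responses = iter([SimpleNamespace(status_code=503), SimpleNamespace(status_code=200)])


@retry_request
def flaky_request():
    return next(responses)


print(flaky_request().status_code)           # 200, after one warning and a backoff sleep
print([backoff_time(n) for n in (1, 2, 3)])  # [1, 2, 4] seconds between retries
```
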
diff --git a/codecov_cli/helpers/versioning_systems.py b/codecov_cli/helpers/versioning_systems.py
index 9de78419..143f8c88 100644
--- a/codecov_cli/helpers/versioning_systems.py
+++ b/codecov_cli/helpers/versioning_systems.py
@@ -1,6 +1,6 @@
import logging
import subprocess
-import typing
+import typing as t
from pathlib import Path
from shutil import which
@@ -14,21 +14,17 @@ class VersioningSystemInterface(object):
def __repr__(self) -> str:
return str(type(self))
- def get_fallback_value(
- self, fallback_field: FallbackFieldEnum
- ) -> typing.Optional[str]:
+ def get_fallback_value(self, fallback_field: FallbackFieldEnum) -> t.Optional[str]:
pass
- def get_network_root(self) -> typing.Optional[Path]:
+ def get_network_root(self) -> t.Optional[Path]:
pass
- def list_relevant_files(
- self, directory: typing.Optional[Path] = None
- ) -> typing.List[str]:
+ def list_relevant_files(self, directory: t.Optional[Path] = None) -> t.List[str]:
pass
-def get_versioning_system() -> VersioningSystemInterface:
+def get_versioning_system() -> t.Optional[VersioningSystemInterface]:
for klass in [GitVersioningSystem, NoVersioningSystem]:
if klass.is_available():
logger.debug(f"versioning system found: {klass}")
@@ -38,10 +34,30 @@ def get_versioning_system() -> VersioningSystemInterface:
class GitVersioningSystem(VersioningSystemInterface):
@classmethod
def is_available(cls):
- return which("git") is not None
+ if which("git") is not None:
+ p = subprocess.run(
+ ["git", "rev-parse", "--show-toplevel"], capture_output=True
+ )
+ if p.stdout:
+ return True
+ return False
def get_fallback_value(self, fallback_field: FallbackFieldEnum):
if fallback_field == FallbackFieldEnum.commit_sha:
+ # here we will get the commit SHA of the latest commit
+ # that is NOT a merge commit
+ p = subprocess.run(
+ # List current commit parent's SHA
+ ["git", "rev-parse", "HEAD^@"],
+ capture_output=True,
+ )
+ parents_hash = p.stdout.decode().strip().splitlines()
+ if len(parents_hash) == 2:
+ # IFF the current commit is a merge commit it will have 2 parents
+ # We return the 2nd one - The commit that came from the branch merged into ours
+ return parents_hash[1]
+            # At this point we know the current commit is not a merge commit
+            # so we get its SHA and return that
p = subprocess.run(["git", "log", "-1", "--format=%H"], capture_output=True)
if p.stdout:
return p.stdout.decode().strip()
@@ -56,7 +72,7 @@ def get_fallback_value(self, fallback_field: FallbackFieldEnum):
return branch_name if branch_name != "HEAD" else None
if fallback_field == FallbackFieldEnum.slug:
- # if there are multiple remotes, we will prioritize using the one called 'origin' if it exsits, else we will use the first one in 'git remote' list
+ # if there are multiple remotes, we will prioritize using the one called 'origin' if it exists, else we will use the first one in 'git remote' list
p = subprocess.run(["git", "remote"], capture_output=True)
@@ -78,7 +94,7 @@ def get_fallback_value(self, fallback_field: FallbackFieldEnum):
return parse_slug(remote_url)
if fallback_field == FallbackFieldEnum.git_service:
- # if there are multiple remotes, we will prioritize using the one called 'origin' if it exsits, else we will use the first one in 'git remote' list
+ # if there are multiple remotes, we will prioritize using the one called 'origin' if it exists, else we will use the first one in 'git remote' list
p = subprocess.run(["git", "remote"], capture_output=True)
if not p.stdout:
@@ -103,9 +119,7 @@ def get_network_root(self):
return Path(p.stdout.decode().rstrip())
return None
- def list_relevant_files(
- self, root_folder: typing.Optional[Path] = None
- ) -> typing.List[str]:
+ def list_relevant_files(self, root_folder: t.Optional[Path] = None) -> t.List[str]:
dir_to_use = root_folder or self.get_network_root()
if dir_to_use is None:
raise ValueError("Can't determine root folder")
@@ -115,10 +129,12 @@ def list_relevant_files(
)
return [
- filename[1:-1]
- if filename.startswith('"') and filename.endswith('"')
- else filename
- for filename in res.stdout.decode("unicode_escape").strip().split()
+ (
+ filename[1:-1]
+ if filename.startswith('"') and filename.endswith('"')
+ else filename
+ )
+ for filename in res.stdout.decode("unicode_escape").strip().split("\n")
]
diff --git a/codecov_cli/main.py b/codecov_cli/main.py
index a1113a01..0640fad8 100644
--- a/codecov_cli/main.py
+++ b/codecov_cli/main.py
@@ -11,10 +11,12 @@
from codecov_cli.commands.empty_upload import empty_upload
from codecov_cli.commands.get_report_results import get_report_results
from codecov_cli.commands.labelanalysis import label_analysis
+from codecov_cli.commands.process_test_results import process_test_results
from codecov_cli.commands.report import create_report
from codecov_cli.commands.send_notifications import send_notifications
from codecov_cli.commands.staticanalysis import static_analysis
from codecov_cli.commands.upload import do_upload
+from codecov_cli.commands.upload_coverage import upload_coverage
from codecov_cli.commands.upload_process import upload_process
from codecov_cli.helpers.ci_adapters import get_ci_adapter, get_ci_providers_list
from codecov_cli.helpers.config import load_cli_config
@@ -35,7 +37,7 @@
@click.option(
"--codecov-yml-path",
type=click.Path(path_type=pathlib.Path),
- default=pathlib.Path("codecov.yml"),
+ default=None,
)
@click.option(
"--enterprise-url", "--url", "-u", help="Change the upload host (Enterprise use)"
@@ -50,6 +52,8 @@ def cli(
enterprise_url: str,
verbose: bool = False,
):
+ ctx.obj["cli_args"] = ctx.params
+ ctx.obj["cli_args"]["version"] = f"cli-{__version__}"
configure_logger(logger, log_level=(logging.DEBUG if verbose else logging.INFO))
ctx.help_option_names = ["-h", "--help"]
ctx.obj["ci_adapter"] = get_ci_adapter(auto_load_params_from)
@@ -57,6 +61,8 @@ def cli(
ctx.obj["codecov_yaml"] = load_cli_config(codecov_yml_path)
if ctx.obj["codecov_yaml"] is None:
logger.debug("No codecov_yaml found")
+ elif (token := ctx.obj["codecov_yaml"].get("codecov", {}).get("token")) is not None:
+ ctx.default_map = {ctx.invoked_subcommand: {"token": token}}
ctx.obj["enterprise_url"] = enterprise_url
@@ -69,8 +75,10 @@ def cli(
cli.add_command(label_analysis)
cli.add_command(static_analysis)
cli.add_command(empty_upload)
+cli.add_command(upload_coverage)
cli.add_command(upload_process)
cli.add_command(send_notifications)
+cli.add_command(process_test_results)
def run():
diff --git a/codecov_cli/plugins/__init__.py b/codecov_cli/plugins/__init__.py
index 0f900529..db7a8a4d 100644
--- a/codecov_cli/plugins/__init__.py
+++ b/codecov_cli/plugins/__init__.py
@@ -17,12 +17,17 @@ def run_preparation(self, collector):
pass
-def select_preparation_plugins(cli_config: typing.Dict, plugin_names: typing.List[str]):
- plugins = [_get_plugin(cli_config, p) for p in plugin_names]
+def select_preparation_plugins(
+ cli_config: typing.Dict, plugin_names: typing.List[str], plugin_config: typing.Dict
+):
+ plugins = [_get_plugin(cli_config, p, plugin_config) for p in plugin_names]
logger.debug(
"Selected preparation plugins",
extra=dict(
- extra_log_attributes=dict(selected_plugins=list(map(type, plugins)))
+ extra_log_attributes=dict(
+ selected_plugins=list(map(type, plugins)),
+ cli_config=cli_config,
+ )
),
)
return plugins
@@ -59,14 +64,25 @@ def _load_plugin_from_yaml(plugin_dict: typing.Dict):
return NoopPlugin()
-def _get_plugin(cli_config, plugin_name):
+def _get_plugin(cli_config, plugin_name, plugin_config):
+ if plugin_name == "noop":
+ return NoopPlugin()
if plugin_name == "gcov":
- return GcovPlugin()
+ return GcovPlugin(
+ plugin_config.get("project_root", None),
+ plugin_config.get("folders_to_ignore", None),
+ plugin_config.get("gcov_executable", "gcov"),
+ plugin_config.get("gcov_include", None),
+ plugin_config.get("gcov_ignore", None),
+ plugin_config.get("gcov_args", None),
+ )
if plugin_name == "pycoverage":
config = cli_config.get("plugins", {}).get("pycoverage", {})
return Pycoverage(config)
if plugin_name == "xcode":
- return XcodePlugin()
+ return XcodePlugin(
+ plugin_config.get("swift_project", None),
+ )
if plugin_name == "compress-pycoverage":
config = cli_config.get("plugins", {}).get("compress-pycoverage", {})
return CompressPycoverageContexts(config)
diff --git a/codecov_cli/plugins/compress_pycoverage_contexts.py b/codecov_cli/plugins/compress_pycoverage_contexts.py
index 5455602f..ab9f8b36 100644
--- a/codecov_cli/plugins/compress_pycoverage_contexts.py
+++ b/codecov_cli/plugins/compress_pycoverage_contexts.py
@@ -5,7 +5,6 @@
from typing import Any, List
import ijson
-from smart_open import open
from codecov_cli.plugins.types import PreparationPluginReturn
diff --git a/codecov_cli/plugins/gcov.py b/codecov_cli/plugins/gcov.py
index d807ab6b..668095b2 100644
--- a/codecov_cli/plugins/gcov.py
+++ b/codecov_cli/plugins/gcov.py
@@ -15,24 +15,26 @@ class GcovPlugin(object):
def __init__(
self,
project_root: typing.Optional[pathlib.Path] = None,
+ folders_to_ignore: typing.Optional[typing.List[str]] = None,
+ executable: typing.Optional[str] = "gcov",
patterns_to_include: typing.Optional[typing.List[str]] = None,
patterns_to_ignore: typing.Optional[typing.List[str]] = None,
- folders_to_ignore: typing.Optional[typing.List[str]] = None,
extra_arguments: typing.Optional[typing.List[str]] = None,
):
- self.project_root = project_root or pathlib.Path(os.getcwd())
- self.patterns_to_include = patterns_to_include or []
- self.patterns_to_ignore = patterns_to_ignore or []
- self.folders_to_ignore = folders_to_ignore or []
+ self.executable = executable or "gcov"
self.extra_arguments = extra_arguments or []
+ self.folders_to_ignore = folders_to_ignore or []
+ self.patterns_to_ignore = patterns_to_ignore or []
+ self.patterns_to_include = patterns_to_include or []
+ self.project_root = project_root or pathlib.Path(os.getcwd())
def run_preparation(self, collector) -> PreparationPluginReturn:
logger.debug(
- "Running gcov plugin...",
+ f"Running {self.executable} plugin...",
)
- if shutil.which("gcov") is None:
- logger.warning("gcov is not installed or can't be found.")
+ if shutil.which(self.executable) is None:
+ logger.warning(f"{self.executable} is not installed or can't be found.")
return
filename_include_regex = globs_to_regex(["*.gcno", *self.patterns_to_include])
@@ -49,15 +51,15 @@ def run_preparation(self, collector) -> PreparationPluginReturn:
]
if not matched_paths:
- logger.warning("No gcov data found.")
+ logger.warning(f"No {self.executable} data found.")
return
- logger.warning("Running gcov on the following list of files:")
+ logger.warning(f"Running {self.executable} on the following list of files:")
for path in matched_paths:
logger.warning(path)
s = subprocess.run(
- ["gcov", "-pb", *self.extra_arguments, *matched_paths],
+ [self.executable, "-pb", *self.extra_arguments, *matched_paths],
cwd=self.project_root,
capture_output=True,
)
diff --git a/codecov_cli/plugins/pycoverage.py b/codecov_cli/plugins/pycoverage.py
index 6aba1575..99bbf96b 100644
--- a/codecov_cli/plugins/pycoverage.py
+++ b/codecov_cli/plugins/pycoverage.py
@@ -54,7 +54,6 @@ def __init__(self, config: dict):
self.config = PycoverageConfig(config)
def run_preparation(self, collector) -> PreparationPluginReturn:
-
if shutil.which("coverage") is None:
logger.warning("coverage.py is not installed or can't be found.")
return
diff --git a/codecov_cli/plugins/xcode.py b/codecov_cli/plugins/xcode.py
index a0dae751..d8e4d0db 100644
--- a/codecov_cli/plugins/xcode.py
+++ b/codecov_cli/plugins/xcode.py
@@ -16,12 +16,13 @@
class XcodePlugin(object):
def __init__(
self,
+ app_name: typing.Optional[str] = None,
derived_data_folder: typing.Optional[pathlib.Path] = None,
- app_name: typing.Optional[pathlib.Path] = None,
):
- self.derived_data_folder = pathlib.Path(
- derived_data_folder or "~/Library/Developer/Xcode/DerivedData"
- ).expanduser()
+ self.derived_data_folder = (
+ derived_data_folder
+ or pathlib.Path("~/Library/Developer/Xcode/DerivedData").expanduser()
+ )
# this is to speed up processing and to build reports for the project being tested,
# if empty the plugin will build reports for every xcode project it finds
diff --git a/codecov_cli/runners/__init__.py b/codecov_cli/runners/__init__.py
index aab02935..2c452c8e 100644
--- a/codecov_cli/runners/__init__.py
+++ b/codecov_cli/runners/__init__.py
@@ -15,7 +15,9 @@ class UnableToFindRunner(Exception):
pass
-def _load_runner_from_yaml(plugin_dict: typing.Dict) -> LabelAnalysisRunnerInterface:
+def _load_runner_from_yaml(
+ plugin_dict: typing.Dict, dynamic_params: typing.Dict
+) -> LabelAnalysisRunnerInterface:
try:
module_obj = import_module(plugin_dict["module"])
class_obj = getattr(module_obj, plugin_dict["class"])
@@ -32,16 +34,21 @@ def _load_runner_from_yaml(plugin_dict: typing.Dict) -> LabelAnalysisRunnerInter
)
raise
try:
- return class_obj(**plugin_dict["params"])
+ final_params = {**plugin_dict["params"], **dynamic_params}
+ return class_obj(**final_params)
except TypeError:
click.secho(
- f"Unable to instantiate {class_obj} with parameters {plugin_dict['params']}",
+ f"Unable to instantiate {class_obj} with parameters {final_params}",
err=True,
)
raise
-def get_runner(cli_config, runner_name) -> LabelAnalysisRunnerInterface:
+def get_runner(
+ cli_config, runner_name: str, dynamic_params: typing.Dict = None
+) -> LabelAnalysisRunnerInterface:
+ if dynamic_params is None:
+ dynamic_params = {}
if runner_name == "pytest":
config_params = cli_config.get("runners", {}).get("pytest", {})
# This is for backwards compatibility with versions <= 0.3.4
@@ -52,10 +59,12 @@ def get_runner(cli_config, runner_name) -> LabelAnalysisRunnerInterface:
logger.warning(
"Using 'python' to configure the PytestStandardRunner is deprecated. Please change to 'pytest'"
)
- return PytestStandardRunner(config_params)
+ final_params = {**config_params, **dynamic_params}
+ return PytestStandardRunner(final_params)
elif runner_name == "dan":
config_params = cli_config.get("runners", {}).get("dan", {})
- return DoAnythingNowRunner(config_params)
+ final_params = {**config_params, **dynamic_params}
+ return DoAnythingNowRunner(final_params)
logger.debug(
f"Trying to load runner {runner_name}",
extra=dict(
@@ -65,5 +74,7 @@ def get_runner(cli_config, runner_name) -> LabelAnalysisRunnerInterface:
),
)
if cli_config and runner_name in cli_config.get("runners", {}):
- return _load_runner_from_yaml(cli_config["runners"][runner_name])
+ return _load_runner_from_yaml(
+ cli_config["runners"][runner_name], dynamic_params=dynamic_params
+ )
raise UnableToFindRunner(f"Can't find runner {runner_name}")
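
A sketch of how the new `dynamic_params` interact with the YAML runner config in `get_runner`: a plain dict merge where the dynamically supplied values win over the file-based ones.

```python
# Example values only; mirrors the merge done for the pytest runner.
cli_config = {"runners": {"pytest": {"coverage_root": "./src", "python_path": "python3"}}}
dynamic_params = {"python_path": ".venv/bin/python"}

config_params = cli_config.get("runners", {}).get("pytest", {})
final_params = {**config_params, **dynamic_params}
print(final_params)
# {'coverage_root': './src', 'python_path': '.venv/bin/python'}
```
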
diff --git a/codecov_cli/runners/dan_runner.py b/codecov_cli/runners/dan_runner.py
index 556c4931..1fee9af5 100644
--- a/codecov_cli/runners/dan_runner.py
+++ b/codecov_cli/runners/dan_runner.py
@@ -54,7 +54,7 @@ def process_labelanalysis_result(self, result: LabelAnalysisRequestResult):
"DAN runner missing 'process_labelanalysis_result_command' configuration value"
)
command_list = []
- if type(command) == list:
+ if isinstance(command, list):
command_list.extend(command)
else:
command_list.append(command)
diff --git a/codecov_cli/runners/pytest_standard_runner.py b/codecov_cli/runners/pytest_standard_runner.py
index 29a13010..fd040429 100644
--- a/codecov_cli/runners/pytest_standard_runner.py
+++ b/codecov_cli/runners/pytest_standard_runner.py
@@ -1,3 +1,4 @@
+import inspect
import logging
import random
import subprocess
@@ -16,6 +17,11 @@
class PytestStandardRunnerConfigParams(dict):
+ @property
+ def python_path(self) -> str:
+ python_path = self.get("python_path")
+ return python_path or "python"
+
@property
def collect_tests_options(self) -> List[str]:
return self.get("collect_tests_options", [])
@@ -37,17 +43,41 @@ def coverage_root(self) -> str:
"""
return self.get("coverage_root", "./")
+ @classmethod
+ def get_available_params(cls) -> List[str]:
+ """Lists all the @property attribute names of this class.
+ These attributes are considered the 'valid config options'
+ """
+ klass_methods = [
+ x
+ for x in dir(cls)
+ if (inspect.isdatadescriptor(getattr(cls, x)) and not x.startswith("__"))
+ ]
+ return klass_methods
-class PytestStandardRunner(LabelAnalysisRunnerInterface):
+class PytestStandardRunner(LabelAnalysisRunnerInterface):
dry_run_runner_options = ["--cov-context=test"]
+ params: PytestStandardRunnerConfigParams
def __init__(self, config_params: Optional[dict] = None) -> None:
super().__init__()
if config_params is None:
config_params = {}
+ # Before we create the config params we emit warnings if any param is unknown
+ # So the user knows something is wrong with their config
+ self._possibly_warn_bad_config(config_params)
self.params = PytestStandardRunnerConfigParams(config_params)
+ def _possibly_warn_bad_config(self, config_params: dict):
+ available_config_params = (
+ PytestStandardRunnerConfigParams.get_available_params()
+ )
+ provided_config_params = config_params.keys()
+ for provided_param in provided_config_params:
+ if provided_param not in available_config_params:
+                logger.warning(f"Config parameter '{provided_param}' is unknown.")
+
def parse_captured_output_error(self, exp: CalledProcessError) -> str:
result = ""
for out_stream in [exp.stdout, exp.stderr]:
@@ -62,7 +92,7 @@ def _execute_pytest(self, pytest_args: List[str], capture_output: bool = True):
Raises Exception if pytest fails
Returns the complete pytest output
"""
- command = ["python", "-m", "pytest"] + pytest_args
+ command = [self.params.python_path, "-m", "pytest"] + pytest_args
try:
result = subprocess.run(
command,
@@ -92,7 +122,10 @@ def collect_tests(self):
logger.info(
"Collecting tests",
extra=dict(
- extra_log_attributes=dict(pytest_options=options_to_use),
+ extra_log_attributes=dict(
+ pytest_command=[self.params.python_path, "-m", "pytest"],
+ pytest_options=options_to_use,
+ ),
),
)
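
A hedged sketch of the new config validation and the `python_path` override. The exact list printed depends on every `@property` defined on the class, not just the ones visible in this hunk.

```python
from codecov_cli.runners.pytest_standard_runner import (
    PytestStandardRunner,
    PytestStandardRunnerConfigParams,
)

# Every @property name counts as a valid config option.
print(PytestStandardRunnerConfigParams.get_available_params())
# e.g. ['collect_tests_options', 'coverage_root', 'python_path'] plus any other @property options

runner = PytestStandardRunner({"python_path": ".venv/bin/python", "typo_option": True})
# would log: Config parameter 'typo_option' is unknown.
print(runner.params.python_path)  # .venv/bin/python
```
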
diff --git a/codecov_cli/services/commit/__init__.py b/codecov_cli/services/commit/__init__.py
index b07117eb..a872e1d1 100644
--- a/codecov_cli/services/commit/__init__.py
+++ b/codecov_cli/services/commit/__init__.py
@@ -1,11 +1,11 @@
import logging
+import os
import typing
-import uuid
-from codecov_cli.helpers.config import CODECOV_API_URL
+from codecov_cli.helpers.config import CODECOV_INGEST_URL
from codecov_cli.helpers.encoder import encode_slug
from codecov_cli.helpers.request import (
- get_token_header_or_fail,
+ get_token_header,
log_warnings_and_errors_if_any,
send_post_request,
)
@@ -19,10 +19,11 @@ def create_commit_logic(
pr: typing.Optional[str],
branch: typing.Optional[str],
slug: typing.Optional[str],
- token: uuid.UUID,
+ token: typing.Optional[str],
service: typing.Optional[str],
enterprise_url: typing.Optional[str] = None,
fail_on_error: bool = False,
+ args: dict = None,
):
encoded_slug = encode_slug(slug)
sending_result = send_commit_data(
@@ -34,6 +35,7 @@ def create_commit_logic(
token=token,
service=service,
enterprise_url=enterprise_url,
+ args=args,
)
log_warnings_and_errors_if_any(sending_result, "Commit creating", fail_on_error)
@@ -41,15 +43,44 @@ def create_commit_logic(
def send_commit_data(
- commit_sha, parent_sha, pr, branch, slug, token, service, enterprise_url
+ commit_sha,
+ parent_sha,
+ pr,
+ branch,
+ slug,
+ token,
+ service,
+ enterprise_url,
+ args,
):
+ # Old versions of the GHA use this env var instead of the regular branch
+ # argument to provide an unprotected branch name
+ if tokenless := os.environ.get("TOKENLESS"):
+ branch = tokenless
+
+ if branch and ":" in branch:
+ logger.info(f"Creating a commit for an unprotected branch: {branch}")
+ elif token is None:
+ logger.warning(
+ f"Branch `{branch}` is protected but no token was provided\nFor information on Codecov upload tokens, see https://docs.codecov.com/docs/codecov-tokens"
+ )
+ else:
+        logger.info(f"Using token to create a commit for protected branch `{branch}`")
+
+ headers = get_token_header(token)
+
data = {
+ "branch": branch,
+ "cli_args": args,
"commitid": commit_sha,
"parent_commit_id": parent_sha,
"pullid": pr,
- "branch": branch,
}
- headers = get_token_header_or_fail(token)
- upload_url = enterprise_url or CODECOV_API_URL
+
+ upload_url = enterprise_url or CODECOV_INGEST_URL
url = f"{upload_url}/upload/{service}/{slug}/commits"
- return send_post_request(url=url, data=data, headers=headers)
+ return send_post_request(
+ url=url,
+ data=data,
+ headers=headers,
+ )
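
A sketch of the branch/token decision now made in `send_commit_data`: branch names containing `:` (which appears to be how fork branches are labelled for tokenless uploads) skip token authentication, everything else needs a token. The helper below is hypothetical and only mirrors the logging branches.

```python
# Hypothetical helper mirroring the decision logic; not part of the diff.
def commit_auth_mode(branch, token):
    if branch and ":" in branch:
        return f"tokenless commit for unprotected branch `{branch}`"
    if token is None:
        return f"branch `{branch}` is protected but no token was provided"
    return f"token-authenticated commit for protected branch `{branch}`"


print(commit_auth_mode("some-fork:feature", None))
print(commit_auth_mode("main", "<upload token>"))
```
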
diff --git a/codecov_cli/services/commit/base_picking.py b/codecov_cli/services/commit/base_picking.py
index 811b2d7b..7332a462 100644
--- a/codecov_cli/services/commit/base_picking.py
+++ b/codecov_cli/services/commit/base_picking.py
@@ -2,7 +2,7 @@
from codecov_cli.helpers.config import CODECOV_API_URL
from codecov_cli.helpers.request import (
- get_token_header_or_fail,
+ get_token_header,
log_warnings_and_errors_if_any,
send_put_request,
)
@@ -10,11 +10,12 @@
logger = logging.getLogger("codecovcli")
-def base_picking_logic(base_sha, pr, slug, token, service, enterprise_url):
+def base_picking_logic(base_sha, pr, slug, token, service, enterprise_url, args):
data = {
+ "cli_args": args,
"user_provided_base_sha": base_sha,
}
- headers = get_token_header_or_fail(token)
+ headers = get_token_header(token)
upload_url = enterprise_url or CODECOV_API_URL
url = f"{upload_url}/api/v1/{service}/{slug}/pulls/{pr}"
sending_result = send_put_request(url=url, data=data, headers=headers)
diff --git a/codecov_cli/services/empty_upload/__init__.py b/codecov_cli/services/empty_upload/__init__.py
index 2e3ce68d..587bb756 100644
--- a/codecov_cli/services/empty_upload/__init__.py
+++ b/codecov_cli/services/empty_upload/__init__.py
@@ -4,7 +4,7 @@
from codecov_cli.helpers.config import CODECOV_API_URL
from codecov_cli.helpers.encoder import encode_slug
from codecov_cli.helpers.request import (
- get_token_header_or_fail,
+ get_token_header,
log_warnings_and_errors_if_any,
send_post_request,
)
@@ -13,13 +13,27 @@
def empty_upload_logic(
- commit_sha, slug, token, git_service, enterprise_url, fail_on_error
+ commit_sha,
+ slug,
+ token,
+ git_service,
+ enterprise_url,
+ fail_on_error,
+ should_force,
+ args,
):
encoded_slug = encode_slug(slug)
- headers = get_token_header_or_fail(token)
+ headers = get_token_header(token)
upload_url = enterprise_url or CODECOV_API_URL
url = f"{upload_url}/upload/{git_service}/{encoded_slug}/commits/{commit_sha}/empty-upload"
- sending_result = send_post_request(url=url, headers=headers)
+ sending_result = send_post_request(
+ url=url,
+ headers=headers,
+ data={
+ "cli_args": args,
+ "should_force": should_force,
+ },
+ )
log_warnings_and_errors_if_any(sending_result, "Empty Upload", fail_on_error)
if sending_result.status_code == 200:
response_json = json.loads(sending_result.text)
diff --git a/codecov_cli/services/report/__init__.py b/codecov_cli/services/report/__init__.py
index 4d57d66d..da2b5127 100644
--- a/codecov_cli/services/report/__init__.py
+++ b/codecov_cli/services/report/__init__.py
@@ -2,15 +2,14 @@
import logging
import time
import typing
-import uuid
import requests
from codecov_cli.helpers import request
-from codecov_cli.helpers.config import CODECOV_API_URL
+from codecov_cli.helpers.config import CODECOV_API_URL, CODECOV_INGEST_URL
from codecov_cli.helpers.encoder import encode_slug
from codecov_cli.helpers.request import (
- get_token_header_or_fail,
+ get_token_header,
log_warnings_and_errors_if_any,
request_result,
send_post_request,
@@ -25,24 +24,43 @@ def create_report_logic(
code: str,
slug: str,
service: str,
- token: uuid.UUID,
+ token: typing.Optional[str],
enterprise_url: str,
+ pull_request_number: int,
fail_on_error: bool = False,
+ args: typing.Union[dict, None] = None,
):
encoded_slug = encode_slug(slug)
sending_result = send_create_report_request(
- commit_sha, code, service, token, encoded_slug, enterprise_url
+ commit_sha,
+ code,
+ service,
+ token,
+ encoded_slug,
+ enterprise_url,
+ pull_request_number,
+ args,
)
log_warnings_and_errors_if_any(sending_result, "Report creating", fail_on_error)
return sending_result
def send_create_report_request(
- commit_sha, code, service, token, encoded_slug, enterprise_url
+ commit_sha,
+ code,
+ service,
+ token,
+ encoded_slug,
+ enterprise_url,
+ pull_request_number,
+ args,
):
- data = {"code": code}
- headers = get_token_header_or_fail(token)
- upload_url = enterprise_url or CODECOV_API_URL
+ data = {
+ "cli_args": args,
+ "code": code,
+ }
+ headers = get_token_header(token)
+ upload_url = enterprise_url or CODECOV_INGEST_URL
url = f"{upload_url}/upload/{service}/{encoded_slug}/commits/{commit_sha}/reports"
return send_post_request(url=url, headers=headers, data=data)
@@ -52,9 +70,10 @@ def create_report_results_logic(
code: str,
slug: str,
service: str,
- token: uuid.UUID,
+ token: typing.Optional[str],
enterprise_url: str,
fail_on_error: bool = False,
+ args: typing.Union[dict, None] = None,
):
encoded_slug = encode_slug(slug)
sending_result = send_reports_result_request(
@@ -64,6 +83,7 @@ def create_report_results_logic(
service=service,
token=token,
enterprise_url=enterprise_url,
+ args=args,
)
log_warnings_and_errors_if_any(
@@ -73,12 +93,21 @@ def create_report_results_logic(
def send_reports_result_request(
- commit_sha, report_code, encoded_slug, service, token, enterprise_url
+ commit_sha,
+ report_code,
+ encoded_slug,
+ service,
+ token,
+ enterprise_url,
+ args,
):
- headers = get_token_header_or_fail(token)
+ data = {
+ "cli_args": args,
+ }
+ headers = get_token_header(token)
upload_url = enterprise_url or CODECOV_API_URL
url = f"{upload_url}/upload/{service}/{encoded_slug}/commits/{commit_sha}/reports/{report_code}/results"
- return send_post_request(url=url, headers=headers)
+ return send_post_request(url=url, data=data, headers=headers)
def send_reports_result_get_request(
@@ -90,7 +119,7 @@ def send_reports_result_get_request(
enterprise_url,
fail_on_error=False,
):
- headers = get_token_header_or_fail(token)
+ headers = get_token_header(token)
upload_url = enterprise_url or CODECOV_API_URL
url = f"{upload_url}/upload/{service}/{encoded_slug}/commits/{commit_sha}/reports/{report_code}/results"
number_tries = 0
diff --git a/codecov_cli/services/staticanalysis/__init__.py b/codecov_cli/services/staticanalysis/__init__.py
index 55a651fd..3cde4313 100644
--- a/codecov_cli/services/staticanalysis/__init__.py
+++ b/codecov_cli/services/staticanalysis/__init__.py
@@ -3,7 +3,7 @@
import logging
import typing
from functools import partial
-from multiprocessing import get_context
+from multiprocessing import Pool
from pathlib import Path
import click
@@ -33,6 +33,7 @@ async def run_analysis_entrypoint(
should_force: bool,
folders_to_exclude: typing.List[Path],
enterprise_url: typing.Optional[str],
+ args: dict,
):
ff = select_file_finder(config)
files = list(ff.find_files(folder, pattern, folders_to_exclude))
@@ -110,7 +111,7 @@ async def run_analysis_entrypoint(
failed_uploads = []
with click.progressbar(
length=len(files_that_need_upload),
- label=f"Upload info to storage",
+ label="Upload info to storage",
) as bar:
# It's better to have less files competing over CPU time when uploading
# Especially if we might have large files
@@ -187,7 +188,9 @@ async def process_files(
length=len(files_to_analyze),
label="Analyzing files",
) as bar:
- with get_context("fork").Pool(processes=numberprocesses) as pool:
+ # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
+            # from the link above, we want to use the default start method
+ with Pool(processes=numberprocesses) as pool:
file_results = pool.imap_unordered(mapped_func, files_to_analyze)
for result in file_results:
bar.update(1, result)
diff --git a/codecov_cli/services/staticanalysis/analyzers/general.py b/codecov_cli/services/staticanalysis/analyzers/general.py
index b093fc7b..c0554f73 100644
--- a/codecov_cli/services/staticanalysis/analyzers/general.py
+++ b/codecov_cli/services/staticanalysis/analyzers/general.py
@@ -85,13 +85,13 @@ def _get_parent_chain(self, node):
def get_import_lines(self, root_node, imports_query):
import_lines = set()
- for (a, _) in imports_query.captures(root_node):
+ for a, _ in imports_query.captures(root_node):
import_lines.add((a.start_point[0] + 1, a.end_point[0] - a.start_point[0]))
return import_lines
def get_definition_lines(self, root_node, definitions_query):
definition_lines = set()
- for (a, _) in definitions_query.captures(root_node):
+ for a, _ in definitions_query.captures(root_node):
definition_lines.add(
(a.start_point[0] + 1, a.end_point[0] - a.start_point[0])
)
diff --git a/codecov_cli/services/staticanalysis/analyzers/python/__init__.py b/codecov_cli/services/staticanalysis/analyzers/python/__init__.py
index e535698b..d5e6db0c 100644
--- a/codecov_cli/services/staticanalysis/analyzers/python/__init__.py
+++ b/codecov_cli/services/staticanalysis/analyzers/python/__init__.py
@@ -44,7 +44,6 @@
class PythonAnalyzer(BaseAnalyzer):
-
condition_statements = [
"if_statement",
"while_statement",
diff --git a/codecov_cli/services/upload/__init__.py b/codecov_cli/services/upload/__init__.py
index 959d6ed9..003f84bc 100644
--- a/codecov_cli/services/upload/__init__.py
+++ b/codecov_cli/services/upload/__init__.py
@@ -1,6 +1,5 @@
import logging
import typing
-import uuid
from pathlib import Path
import click
@@ -10,7 +9,7 @@
from codecov_cli.helpers.request import log_warnings_and_errors_if_any
from codecov_cli.helpers.versioning_systems import VersioningSystemInterface
from codecov_cli.plugins import select_preparation_plugins
-from codecov_cli.services.upload.coverage_file_finder import select_coverage_file_finder
+from codecov_cli.services.upload.file_finder import select_file_finder
from codecov_cli.services.upload.legacy_upload_sender import LegacyUploadSender
from codecov_cli.services.upload.network_finder import select_network_finder
from codecov_cli.services.upload.upload_collector import UploadCollector
@@ -25,46 +24,81 @@ def do_upload_logic(
cli_config: typing.Dict,
versioning_system: VersioningSystemInterface,
ci_adapter: CIAdapterBase,
+ upload_coverage: bool = False,
*,
- commit_sha: str,
- report_code: str,
+ args: dict = None,
+ branch: typing.Optional[str],
build_code: typing.Optional[str],
build_url: typing.Optional[str],
- job_code: typing.Optional[str],
+ commit_sha: str,
+ disable_file_fixes: bool = False,
+ disable_search: bool = False,
+ dry_run: bool = False,
+ enterprise_url: typing.Optional[str],
env_vars: typing.Dict[str, str],
+ fail_on_error: bool = False,
+ files_search_exclude_folders: typing.List[Path],
+ files_search_explicitly_listed_files: typing.List[Path],
+ files_search_root_folder: Path,
flags: typing.List[str],
+ gcov_args: typing.Optional[str],
+ gcov_executable: typing.Optional[str],
+ gcov_ignore: typing.Optional[str],
+ gcov_include: typing.Optional[str],
+ git_service: typing.Optional[str],
+ handle_no_reports_found: bool = False,
+ job_code: typing.Optional[str],
name: typing.Optional[str],
+ network_filter: typing.Optional[str],
+ network_prefix: typing.Optional[str],
network_root_folder: Path,
- coverage_files_search_root_folder: Path,
- coverage_files_search_exclude_folders: typing.List[Path],
- coverage_files_search_explicitly_listed_files: typing.List[Path],
+ parent_sha: typing.Optional[str] = None,
plugin_names: typing.List[str],
- token: uuid.UUID,
- branch: typing.Optional[str],
- slug: typing.Optional[str],
pull_request_number: typing.Optional[str],
+ report_code: str,
+ slug: typing.Optional[str],
+ swift_project: typing.Optional[str],
+ token: typing.Optional[str],
+ upload_file_type: str = "coverage",
use_legacy_uploader: bool = False,
- fail_on_error: bool = False,
- dry_run: bool = False,
- git_service: typing.Optional[str],
- enterprise_url: typing.Optional[str],
- disable_search: bool = False,
- handle_no_reports_found: bool = False,
- disable_file_fixes: bool = False,
):
- preparation_plugins = select_preparation_plugins(cli_config, plugin_names)
- coverage_file_selector = select_coverage_file_finder(
- coverage_files_search_root_folder,
- coverage_files_search_exclude_folders,
- coverage_files_search_explicitly_listed_files,
+ plugin_config = {
+ "folders_to_ignore": files_search_exclude_folders,
+ "gcov_args": gcov_args,
+ "gcov_executable": gcov_executable,
+ "gcov_ignore": gcov_ignore,
+ "gcov_include": gcov_include,
+ "project_root": files_search_root_folder,
+ "swift_project": swift_project,
+ }
+ if upload_file_type == "coverage":
+ preparation_plugins = select_preparation_plugins(
+ cli_config, plugin_names, plugin_config
+ )
+ elif upload_file_type == "test_results":
+ preparation_plugins = []
+ file_selector = select_file_finder(
+ files_search_root_folder,
+ files_search_exclude_folders,
+ files_search_explicitly_listed_files,
disable_search,
+ upload_file_type,
+ )
+ network_finder = select_network_finder(
+ versioning_system,
+ network_filter=network_filter,
+ network_prefix=network_prefix,
+ network_root_folder=network_root_folder,
)
- network_finder = select_network_finder(versioning_system)
collector = UploadCollector(
- preparation_plugins, network_finder, coverage_file_selector, disable_file_fixes
+ preparation_plugins,
+ network_finder,
+ file_selector,
+ disable_file_fixes,
+ plugin_config,
)
try:
- upload_data = collector.generate_upload_data()
+ upload_data = collector.generate_upload_data(upload_file_type)
except click.ClickException as exp:
if handle_no_reports_found:
logger.info(
@@ -104,6 +138,7 @@ def do_upload_logic(
token,
env_vars,
report_code,
+ upload_file_type,
name,
branch,
slug,
@@ -115,6 +150,9 @@ def do_upload_logic(
ci_service,
git_service,
enterprise_url,
+ parent_sha,
+ upload_coverage,
+ args,
)
else:
logger.info("dry-run option activated. NOT sending data to Codecov.")
diff --git a/codecov_cli/services/upload/coverage_file_finder.py b/codecov_cli/services/upload/file_finder.py
similarity index 64%
rename from codecov_cli/services/upload/coverage_file_finder.py
rename to codecov_cli/services/upload/file_finder.py
index 555df796..f03745df 100644
--- a/codecov_cli/services/upload/coverage_file_finder.py
+++ b/codecov_cli/services/upload/file_finder.py
@@ -1,7 +1,7 @@
import logging
import os
-import typing
from pathlib import Path
+from typing import Iterable, List, Optional, Pattern
from codecov_cli.helpers.folder_searcher import globs_to_regex, search_files
from codecov_cli.types import UploadCollectionResultFile
@@ -35,6 +35,12 @@
"test_cov.xml",
]
+test_results_files_patterns = [
+ "*junit*.xml",
+ "*test*.xml",
+ # JUnit itself (the Java tool) prefixes its XML report files with "TEST-"
+ "*TEST-*.xml",
+]
coverage_files_excluded_patterns = [
"*.am",
@@ -108,6 +114,8 @@
"*.whl",
"*.xcconfig",
"*.xcoverage.*",
+ "*.yml",
+ "*.yaml",
"*/classycle/report.xml",
"*codecov.yml",
"*~",
@@ -134,6 +142,10 @@
"*.zip",
]
+test_results_files_excluded_patterns = (
+ coverage_files_patterns + coverage_files_excluded_patterns
+)
+
default_folders_to_ignore = [
"vendor",
@@ -170,49 +182,53 @@
]
-class CoverageFileFinder(object):
+class FileFinder(object):
def __init__(
self,
- project_root: Path = None,
- folders_to_ignore: typing.List[str] = None,
- explicitly_listed_files: typing.List[Path] = None,
+ search_root: Optional[Path] = None,
+ folders_to_ignore: Optional[List[str]] = None,
+ explicitly_listed_files: Optional[List[Path]] = None,
disable_search: bool = False,
+ report_type: str = "coverage",
):
- self.project_root = project_root or Path(os.getcwd())
+ self.search_root = search_root or Path(os.getcwd())
self.folders_to_ignore = folders_to_ignore or []
self.explicitly_listed_files = explicitly_listed_files or None
self.disable_search = disable_search
+ self.report_type = report_type
- def find_coverage_files(self) -> typing.List[UploadCollectionResultFile]:
- regex_patterns_to_exclude = globs_to_regex(coverage_files_excluded_patterns)
- coverage_files_paths = []
- user_coverage_files_paths = []
+ def find_files(self) -> List[UploadCollectionResultFile]:
+ if self.report_type == "coverage":
+ files_excluded_patterns = coverage_files_excluded_patterns
+ files_patterns = coverage_files_patterns
+ elif self.report_type == "test_results":
+ files_excluded_patterns = test_results_files_excluded_patterns
+ files_patterns = test_results_files_patterns
+ regex_patterns_to_exclude = globs_to_regex(files_excluded_patterns)
+ assert regex_patterns_to_exclude # this is never `None`
+ files_paths: Iterable[Path] = []
+ user_files_paths = []
if self.explicitly_listed_files:
- user_coverage_files_paths = self.get_user_specified_coverage_files(
- regex_patterns_to_exclude
- )
+ user_files_paths = self.get_user_specified_files(regex_patterns_to_exclude)
if not self.disable_search:
- regex_patterns_to_include = globs_to_regex(coverage_files_patterns)
- coverage_files_paths = search_files(
- self.project_root,
+ regex_patterns_to_include = globs_to_regex(files_patterns)
+ assert regex_patterns_to_include # this is never `None`
+ files_paths = search_files(
+ self.search_root,
default_folders_to_ignore + self.folders_to_ignore,
filename_include_regex=regex_patterns_to_include,
filename_exclude_regex=regex_patterns_to_exclude,
)
- result_files = [
- UploadCollectionResultFile(path)
- for path in coverage_files_paths
- if coverage_files_paths
- ]
+ result_files = [UploadCollectionResultFile(path) for path in files_paths]
user_result_files = [
UploadCollectionResultFile(path)
- for path in user_coverage_files_paths
- if user_coverage_files_paths
+ for path in user_files_paths
+ if user_files_paths
]
return list(set(result_files + user_result_files))
- def get_user_specified_coverage_files(self, regex_patterns_to_exclude):
+ def get_user_specified_files(self, regex_patterns_to_exclude: Pattern):
user_filenames_to_include = []
files_excluded_but_user_includes = []
for file in self.explicitly_listed_files:
@@ -221,7 +237,7 @@ def get_user_specified_coverage_files(self, regex_patterns_to_exclude):
files_excluded_but_user_includes.append(str(file))
if files_excluded_but_user_includes:
logger.warning(
- "Some files being explicitly added are found in the list of excluded files for upload.",
+ "Some files being explicitly added are found in the list of excluded files for upload. We are still going to search for the explicitly added files.",
extra=dict(
extra_log_attributes=dict(files=files_excluded_but_user_includes)
),
@@ -230,19 +246,23 @@ def get_user_specified_coverage_files(self, regex_patterns_to_exclude):
multipart_include_regex = globs_to_regex(
[str(path.resolve()) for path in self.explicitly_listed_files]
)
- user_coverage_files_paths = list(
+ user_files_paths = list(
search_files(
- self.project_root,
- default_folders_to_ignore + self.folders_to_ignore,
+ self.search_root,
+ self.folders_to_ignore,
filename_include_regex=regex_patterns_to_include,
- filename_exclude_regex=regex_patterns_to_exclude,
multipart_include_regex=multipart_include_regex,
)
)
not_found_files = []
+ user_files_paths_resolved = [path.resolve() for path in user_files_paths]
for filepath in self.explicitly_listed_files:
- if filepath.resolve() not in user_coverage_files_paths:
- not_found_files.append(filepath)
+ if filepath.resolve() not in user_files_paths_resolved:
+ ## The given file might be a symlink or live in a parent dir; check whether it exists
+ if filepath.exists():
+ user_files_paths.append(filepath)
+ else:
+ not_found_files.append(filepath)
if not_found_files:
logger.warning(
@@ -250,15 +270,20 @@ def get_user_specified_coverage_files(self, regex_patterns_to_exclude):
extra=dict(extra_log_attributes=dict(not_found_files=not_found_files)),
)
- return user_coverage_files_paths
+ return user_files_paths
-def select_coverage_file_finder(
- root_folder_to_search, folders_to_ignore, explicitly_listed_files, disable_search
+def select_file_finder(
+ root_folder_to_search,
+ folders_to_ignore,
+ explicitly_listed_files,
+ disable_search,
+ report_type="coverage",
):
- return CoverageFileFinder(
+ return FileFinder(
root_folder_to_search,
folders_to_ignore,
explicitly_listed_files,
disable_search,
+ report_type,
)
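The renamed FileFinder above keys its glob sets off report_type. A rough, self-contained approximation of that selection step, using fnmatch and trimmed-down pattern lists instead of the CLI's internal globs_to_regex/search_files helpers, and ignoring the exclusion patterns for brevity:

import fnmatch
from pathlib import Path

# stand-in subsets of the pattern lists defined earlier in this file
COVERAGE_PATTERNS = ["*coverage*.xml", "*.lcov"]
TEST_RESULTS_PATTERNS = ["*junit*.xml", "*test*.xml", "*TEST-*.xml"]

def pick_patterns(report_type):
    if report_type == "coverage":
        return COVERAGE_PATTERNS
    if report_type == "test_results":
        return TEST_RESULTS_PATTERNS
    raise ValueError(f"unknown report type: {report_type}")

def find(paths, report_type):
    patterns = pick_patterns(report_type)
    return [p for p in paths if any(fnmatch.fnmatch(p.name, pat) for pat in patterns)]

print(find([Path("junit.xml"), Path("coverage.xml")], "test_results"))  # keeps junit.xml only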
diff --git a/codecov_cli/services/upload/legacy_upload_sender.py b/codecov_cli/services/upload/legacy_upload_sender.py
index 99ff6429..67711ef3 100644
--- a/codecov_cli/services/upload/legacy_upload_sender.py
+++ b/codecov_cli/services/upload/legacy_upload_sender.py
@@ -1,10 +1,7 @@
import logging
import typing
-import uuid
from dataclasses import dataclass
-import requests
-
from codecov_cli import __version__ as codecov_cli_version
from codecov_cli.helpers.config import LEGACY_CODECOV_API_URL
from codecov_cli.helpers.request import send_post_request, send_put_request
@@ -39,9 +36,10 @@ def send_upload_data(
self,
upload_data: UploadCollectionResult,
commit_sha: str,
- token: uuid.UUID,
+ token: str,
env_vars: typing.Dict[str, str],
report_code: str = None,
+ upload_file_type: str = None,
name: typing.Optional[str] = None,
branch: typing.Optional[str] = None,
slug: typing.Optional[str] = None,
@@ -53,8 +51,8 @@ def send_upload_data(
ci_service: typing.Optional[str] = None,
git_service: typing.Optional[str] = None,
enterprise_url: typing.Optional[str] = None,
+ args: dict = None,
) -> UploadSendingResult:
-
params = {
"package": f"codecov-cli/{codecov_cli_version}",
"commit": commit_sha,
@@ -70,14 +68,18 @@ def send_upload_data(
}
if token:
- headers = {"X-Upload-Token": token.hex}
+ headers = {"X-Upload-Token": token}
else:
logger.warning("Token is empty.")
headers = {"X-Upload-Token": ""}
+ data = {
+ "cli_args": args,
+ }
+
upload_url = enterprise_url or LEGACY_CODECOV_API_URL
resp = send_post_request(
- f"{upload_url}/upload/v4", headers=headers, params=params
+ f"{upload_url}/upload/v4", data=data, headers=headers, params=params
)
if resp.status_code >= 400:
return resp
@@ -119,9 +121,7 @@ def _generate_network_section(self, upload_data: UploadCollectionResult) -> byte
return network_files_section.encode() + b"<<<<<< network\n"
def _generate_coverage_files_section(self, upload_data: UploadCollectionResult):
- return b"".join(
- self._format_coverage_file(file) for file in upload_data.coverage_files
- )
+ return b"".join(self._format_coverage_file(file) for file in upload_data.files)
def _format_coverage_file(self, file: UploadCollectionResultFile) -> bytes:
header = b"# path=" + file.get_filename() + b"\n"
diff --git a/codecov_cli/services/upload/network_finder.py b/codecov_cli/services/upload/network_finder.py
index 3ccfb463..8da568eb 100644
--- a/codecov_cli/services/upload/network_finder.py
+++ b/codecov_cli/services/upload/network_finder.py
@@ -5,17 +5,39 @@
class NetworkFinder(object):
- def __init__(self, versioning_system: VersioningSystemInterface):
+ def __init__(
+ self,
+ versioning_system: VersioningSystemInterface,
+ network_filter: typing.Optional[str],
+ network_prefix: typing.Optional[str],
+ network_root_folder: pathlib.Path,
+ ):
self.versioning_system = versioning_system
+ self.network_filter = network_filter
+ self.network_prefix = network_prefix
+ self.network_root_folder = network_root_folder
- def find_files(
- self,
- network_root: typing.Optional[pathlib.Path] = None,
- network_filter=None,
- network_adjuster=None,
- ) -> typing.List[str]:
- return self.versioning_system.list_relevant_files(network_root)
+ def find_files(self, ignore_filters=False) -> typing.List[str]:
+ files = self.versioning_system.list_relevant_files(self.network_root_folder)
+
+ if not ignore_filters:
+ if self.network_filter:
+ files = [file for file in files if file.startswith(self.network_filter)]
+ if self.network_prefix:
+ files = [self.network_prefix + file for file in files]
+
+ return files
-def select_network_finder(versioning_system: VersioningSystemInterface):
- return NetworkFinder(versioning_system)
+def select_network_finder(
+ versioning_system: VersioningSystemInterface,
+ network_filter: typing.Optional[str],
+ network_prefix: typing.Optional[str],
+ network_root_folder: pathlib.Path,
+):
+ return NetworkFinder(
+ versioning_system,
+ network_filter,
+ network_prefix,
+ network_root_folder,
+ )
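A small, hypothetical illustration of the filter/prefix behaviour NetworkFinder.find_files gains above, with a hard-coded file list standing in for the versioning system's output:

def apply_network_options(files, network_filter=None, network_prefix=None):
    # mirrors the two optional transforms applied when ignore_filters is False
    if network_filter:
        files = [f for f in files if f.startswith(network_filter)]
    if network_prefix:
        files = [network_prefix + f for f in files]
    return files

print(apply_network_options(["src/a.py", "tests/b.py"], network_filter="src/", network_prefix="backend/"))
# -> ['backend/src/a.py']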
diff --git a/codecov_cli/services/upload/upload_collector.py b/codecov_cli/services/upload/upload_collector.py
index d30e036e..5d0626af 100644
--- a/codecov_cli/services/upload/upload_collector.py
+++ b/codecov_cli/services/upload/upload_collector.py
@@ -8,7 +8,7 @@
import click
-from codecov_cli.services.upload.coverage_file_finder import CoverageFileFinder
+from codecov_cli.services.upload.file_finder import FileFinder
from codecov_cli.services.upload.network_finder import NetworkFinder
from codecov_cli.types import (
PreparationPluginInterface,
@@ -28,24 +28,27 @@ def __init__(
self,
preparation_plugins: typing.List[PreparationPluginInterface],
network_finder: NetworkFinder,
- coverage_file_finder: CoverageFileFinder,
+ file_finder: FileFinder,
+ plugin_config: dict,
disable_file_fixes: bool = False,
):
self.preparation_plugins = preparation_plugins
self.network_finder = network_finder
- self.coverage_file_finder = coverage_file_finder
+ self.file_finder = file_finder
self.disable_file_fixes = disable_file_fixes
+ self.plugin_config = plugin_config
- def _produce_file_fixes_for_network(
- self, network: typing.List[str]
+ def _produce_file_fixes(
+ self, files: typing.List[str]
) -> typing.List[UploadCollectionResultFileFixer]:
- if not network or self.disable_file_fixes:
+ if not files or self.disable_file_fixes:
return []
# patterns that we don't need to specify a reason for
empty_line_regex = re.compile(r"^\s*$")
comment_regex = re.compile(r"^\s*\/\/.*$")
bracket_regex = re.compile(r"^\s*[\{\}]\s*(\/\/.*)?$")
list_regex = re.compile(r"^\s*[\]\[]\s*(\/\/.*)?$")
+ parenthesis_regex = re.compile(r"^\s*[\(\)]\s*(\/\/.*)?$")
go_function_regex = re.compile(r"^\s*func\s*[\{]\s*(\/\/.*)?$")
php_end_bracket_regex = re.compile(r"^\s*\);\s*(\/\/.*)?$")
@@ -54,7 +57,7 @@ def _produce_file_fixes_for_network(
lcov_excel_regex = re.compile(r"\/\/ LCOV_EXCL")
kt_patterns_to_apply = fix_patterns_to_apply(
- [bracket_regex], [comment_block_regex], True
+ [bracket_regex, parenthesis_regex], [comment_block_regex], True
)
go_patterns_to_apply = fix_patterns_to_apply(
[empty_line_regex, comment_regex, bracket_regex, go_function_regex],
@@ -71,7 +74,6 @@ def _produce_file_fixes_for_network(
[],
False,
)
-
cpp_swift_vala_patterns_to_apply = fix_patterns_to_apply(
[empty_line_regex, bracket_regex],
[lcov_excel_regex],
@@ -94,7 +96,7 @@ def _produce_file_fixes_for_network(
}
result = []
- for filename in network:
+ for filename in files:
for glob, fix_patterns in file_regex_patterns.items():
if fnmatch(filename, glob):
result.append(self._get_file_fixes(filename, fix_patterns))
@@ -112,6 +114,12 @@ def _get_file_fixes(
try:
with open(filename, "r") as f:
+ # Seed lineno with -1 so that an empty file
+ # yields eof == lineno + 1 == 0; the for loop
+ # below overwrites lineno for non-empty files.
+ lineno = -1
+ # If f is empty the loop body never runs and
+ # lineno keeps its sentinel value of -1.
for lineno, line_content in enumerate(f):
if any(
pattern.match(line_content)
@@ -123,7 +131,6 @@ def _get_file_fixes(
for pattern in fix_patterns_to_apply.without_reason
):
fixed_lines_without_reason.add(lineno + 1)
-
if fix_patterns_to_apply.eof:
eof = lineno + 1
except UnicodeDecodeError as err:
@@ -134,30 +141,40 @@ def _get_file_fixes(
reason=err.reason,
),
)
+ except IsADirectoryError:
+ logger.info(f"Skipping {filename}, found a directory not a file")
return UploadCollectionResultFileFixer(
path, fixed_lines_without_reason, fixed_lines_with_reason, eof
)
- def generate_upload_data(self) -> UploadCollectionResult:
+ def generate_upload_data(self, report_type="coverage") -> UploadCollectionResult:
for prep in self.preparation_plugins:
logger.debug(f"Running preparation plugin: {type(prep)}")
prep.run_preparation(self)
logger.debug("Collecting relevant files")
network = self.network_finder.find_files()
- coverage_files = self.coverage_file_finder.find_coverage_files()
- logger.info(f"Found {len(coverage_files)} coverage files to upload")
- if not coverage_files:
+ report_files = self.file_finder.find_files()
+ logger.info(f"Found {len(report_files)} {report_type} files to report")
+ if not report_files:
+ if report_type == "test_results":
+ error_message = "No JUnit XML reports found. Please review our documentation (https://docs.codecov.com/docs/test-result-ingestion-beta) to generate and upload the file."
+ else:
+ error_message = "No coverage reports found. Please make sure you're generating reports successfully."
raise click.ClickException(
click.style(
- "No coverage reports found. Please make sure you're generating reports successfully.",
+ error_message,
fg="red",
)
)
- for file in coverage_files:
+ for file in report_files:
logger.info(f"> {file}")
return UploadCollectionResult(
network=network,
- coverage_files=coverage_files,
- file_fixes=self._produce_file_fixes_for_network(network),
+ files=report_files,
+ file_fixes=(
+ self._produce_file_fixes(self.network_finder.find_files(True))
+ if report_type == "coverage"
+ else []
+ ),
)
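Illustrative only: the reason _get_file_fixes seeds lineno with -1 (see the hunk above) is that for an empty file the enumerate loop never runs, so eof resolves to lineno + 1 == 0. A tiny standalone check of that arithmetic, using io.StringIO rather than real files:

import io

def count_eof(handle):
    lineno = -1
    for lineno, _line in enumerate(handle):
        pass  # the loop overwrites lineno for non-empty input
    return lineno + 1

print(count_eof(io.StringIO("")))        # 0 for empty input
print(count_eof(io.StringIO("a\nb\n")))  # 2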
diff --git a/codecov_cli/services/upload/upload_sender.py b/codecov_cli/services/upload/upload_sender.py
index af9dc7b5..6619401b 100644
--- a/codecov_cli/services/upload/upload_sender.py
+++ b/codecov_cli/services/upload/upload_sender.py
@@ -2,15 +2,14 @@
import json
import logging
import typing
-import uuid
import zlib
from typing import Any, Dict
from codecov_cli import __version__ as codecov_cli_version
-from codecov_cli.helpers.config import CODECOV_API_URL
+from codecov_cli.helpers.config import CODECOV_INGEST_URL
from codecov_cli.helpers.encoder import encode_slug
from codecov_cli.helpers.request import (
- get_token_header_or_fail,
+ get_token_header,
send_post_request,
send_put_request,
)
@@ -28,9 +27,10 @@ def send_upload_data(
self,
upload_data: UploadCollectionResult,
commit_sha: str,
- token: uuid.UUID,
+ token: typing.Optional[str],
env_vars: typing.Dict[str, str],
report_code: str,
+ upload_file_type: str = "coverage",
name: typing.Optional[str] = None,
branch: typing.Optional[str] = None,
slug: typing.Optional[str] = None,
@@ -42,23 +42,44 @@ def send_upload_data(
ci_service: typing.Optional[str] = None,
git_service: typing.Optional[str] = None,
enterprise_url: typing.Optional[str] = None,
+ parent_sha: typing.Optional[str] = None,
+ upload_coverage: bool = False,
+ args: dict = None,
) -> RequestResult:
data = {
+ "ci_service": ci_service,
"ci_url": build_url,
- "flags": flags,
+ "cli_args": args,
"env": env_vars,
- "name": name,
+ "flags": flags,
"job_code": job_code,
+ "name": name,
"version": codecov_cli_version,
}
-
- # Data to upload to Codecov
- headers = get_token_header_or_fail(token)
+ if upload_coverage:
+ data["branch"] = branch
+ data["code"] = report_code
+ data["commitid"] = commit_sha
+ data["parent_commit_id"] = parent_sha
+ data["pullid"] = pull_request_number
+ headers = get_token_header(token)
encoded_slug = encode_slug(slug)
- upload_url = enterprise_url or CODECOV_API_URL
- url = f"{upload_url}/upload/{git_service}/{encoded_slug}/commits/{commit_sha}/reports/{report_code}/uploads"
+ upload_url = enterprise_url or CODECOV_INGEST_URL
+ url, data = self.get_url_and_possibly_update_data(
+ data,
+ upload_file_type,
+ upload_url,
+ git_service,
+ branch,
+ encoded_slug,
+ commit_sha,
+ report_code,
+ upload_coverage,
+ )
# Data that goes to storage
- reports_payload = self._generate_payload(upload_data, env_vars)
+ reports_payload = self._generate_payload(
+ upload_data, env_vars, upload_file_type
+ )
logger.debug("Sending upload request to Codecov")
resp_from_codecov = send_post_request(
@@ -83,18 +104,26 @@ def send_upload_data(
return resp_from_storage
def _generate_payload(
- self, upload_data: UploadCollectionResult, env_vars: typing.Dict[str, str]
+ self,
+ upload_data: UploadCollectionResult,
+ env_vars: typing.Dict[str, str],
+ upload_file_type="coverage",
) -> bytes:
network_files = upload_data.network
- payload = {
- "path_fixes": {
- "format": "legacy",
- "value": self._get_file_fixers(upload_data),
- },
- "network_files": network_files if network_files is not None else [],
- "coverage_files": self._get_coverage_files(upload_data),
- "metadata": {},
- }
+ if upload_file_type == "coverage":
+ payload = {
+ "report_fixes": {
+ "format": "legacy",
+ "value": self._get_file_fixers(upload_data),
+ },
+ "network_files": network_files if network_files is not None else [],
+ "coverage_files": self._get_files(upload_data),
+ "metadata": {},
+ }
+ elif upload_file_type == "test_results":
+ payload = {
+ "test_results_files": self._get_files(upload_data),
+ }
json_data = json.dumps(payload)
return json_data.encode()
@@ -127,10 +156,10 @@ def _get_file_fixers(
return file_fixers
- def _get_coverage_files(self, upload_data: UploadCollectionResult):
- return [self._format_coverage_file(file) for file in upload_data.coverage_files]
+ def _get_files(self, upload_data: UploadCollectionResult):
+ return [self._format_file(file) for file in upload_data.files]
- def _format_coverage_file(self, file: UploadCollectionResultFile):
+ def _format_file(self, file: UploadCollectionResultFile):
format, formatted_content = self._get_format_info(file)
return {
"filename": file.get_filename().decode(),
@@ -145,3 +174,30 @@ def _get_format_info(self, file: UploadCollectionResultFile):
base64.b64encode(zlib.compress((file.get_content())))
).decode()
return format, formatted_content
+
+ def get_url_and_possibly_update_data(
+ self,
+ data,
+ report_type,
+ upload_url,
+ git_service,
+ branch,
+ encoded_slug,
+ commit_sha,
+ report_code,
+ upload_coverage=False,
+ ):
+ if report_type == "coverage":
+ base_url = f"{upload_url}/upload/{git_service}/{encoded_slug}"
+ if upload_coverage:
+ url = f"{base_url}/upload-coverage"
+ else:
+ url = f"{base_url}/commits/{commit_sha}/reports/{report_code}/uploads"
+ elif report_type == "test_results":
+ data["slug"] = encoded_slug
+ data["branch"] = branch
+ data["commit"] = commit_sha
+ data["service"] = git_service
+ url = f"{upload_url}/upload/test_results/v1"
+
+ return url, data
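A hedged sketch of the routing logic get_url_and_possibly_update_data introduces above, with placeholder host and slug values standing in for the configured ingest URL and the encoded slug, and the data-mutation part omitted:

def build_upload_url(report_type, upload_url, git_service, encoded_slug,
                     commit_sha, report_code, upload_coverage=False):
    if report_type == "coverage":
        base = f"{upload_url}/upload/{git_service}/{encoded_slug}"
        if upload_coverage:
            return f"{base}/upload-coverage"
        return f"{base}/commits/{commit_sha}/reports/{report_code}/uploads"
    if report_type == "test_results":
        return f"{upload_url}/upload/test_results/v1"
    raise ValueError(f"unknown report type: {report_type}")

print(build_upload_url("coverage", "https://ingest.example", "github",
                       "encoded-slug", "abc123", "default"))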
diff --git a/codecov_cli/services/upload_completion/__init__.py b/codecov_cli/services/upload_completion/__init__.py
index 9f7e2707..b595ba7f 100644
--- a/codecov_cli/services/upload_completion/__init__.py
+++ b/codecov_cli/services/upload_completion/__init__.py
@@ -4,7 +4,7 @@
from codecov_cli.helpers.config import CODECOV_API_URL
from codecov_cli.helpers.encoder import encode_slug
from codecov_cli.helpers.request import (
- get_token_header_or_fail,
+ get_token_header,
log_warnings_and_errors_if_any,
send_post_request,
)
@@ -13,13 +13,22 @@
def upload_completion_logic(
- commit_sha, slug, token, git_service, enterprise_url, fail_on_error=False
+ commit_sha,
+ slug,
+ token,
+ git_service,
+ enterprise_url,
+ fail_on_error=False,
+ args=None,
):
encoded_slug = encode_slug(slug)
- headers = get_token_header_or_fail(token)
+ headers = get_token_header(token)
upload_url = enterprise_url or CODECOV_API_URL
url = f"{upload_url}/upload/{git_service}/{encoded_slug}/commits/{commit_sha}/upload-complete"
- sending_result = send_post_request(url=url, headers=headers)
+ data = {
+ "cli_args": args,
+ }
+ sending_result = send_post_request(url=url, data=data, headers=headers)
log_warnings_and_errors_if_any(
sending_result, "Upload Completion", fail_on_error=fail_on_error
)
diff --git a/codecov_cli/services/upload_coverage/__init__.py b/codecov_cli/services/upload_coverage/__init__.py
new file mode 100644
index 00000000..9f53a554
--- /dev/null
+++ b/codecov_cli/services/upload_coverage/__init__.py
@@ -0,0 +1,90 @@
+import pathlib
+import typing
+
+from codecov_cli.helpers.ci_adapters.base import CIAdapterBase
+from codecov_cli.helpers.versioning_systems import VersioningSystemInterface
+from codecov_cli.services.upload import do_upload_logic
+
+
+def upload_coverage_logic(
+ cli_config: typing.Dict,
+ versioning_system: VersioningSystemInterface,
+ ci_adapter: CIAdapterBase,
+ *,
+ branch: typing.Optional[str],
+ build_code: typing.Optional[str],
+ build_url: typing.Optional[str],
+ commit_sha: str,
+ disable_file_fixes: bool,
+ disable_search: bool,
+ dry_run: bool,
+ enterprise_url: typing.Optional[str],
+ env_vars: typing.Dict[str, str],
+ fail_on_error: bool,
+ files_search_exclude_folders: typing.List[pathlib.Path],
+ files_search_explicitly_listed_files: typing.List[pathlib.Path],
+ files_search_root_folder: pathlib.Path,
+ flags: typing.List[str],
+ gcov_args: typing.Optional[str],
+ gcov_executable: typing.Optional[str],
+ gcov_ignore: typing.Optional[str],
+ gcov_include: typing.Optional[str],
+ git_service: typing.Optional[str],
+ handle_no_reports_found: bool,
+ job_code: typing.Optional[str],
+ name: typing.Optional[str],
+ network_filter: typing.Optional[str],
+ network_prefix: typing.Optional[str],
+ network_root_folder: pathlib.Path,
+ parent_sha: typing.Optional[str],
+ plugin_names: typing.List[str],
+ pull_request_number: typing.Optional[str],
+ report_code: str,
+ slug: typing.Optional[str],
+ swift_project: typing.Optional[str],
+ token: typing.Optional[str],
+ use_legacy_uploader: bool,
+ upload_file_type: str = "coverage",
+ args: dict = None,
+):
+ return do_upload_logic(
+ cli_config=cli_config,
+ versioning_system=versioning_system,
+ ci_adapter=ci_adapter,
+ upload_coverage=True,
+ args=args,
+ branch=branch,
+ build_code=build_code,
+ build_url=build_url,
+ commit_sha=commit_sha,
+ disable_file_fixes=disable_file_fixes,
+ disable_search=disable_search,
+ dry_run=dry_run,
+ enterprise_url=enterprise_url,
+ env_vars=env_vars,
+ fail_on_error=fail_on_error,
+ files_search_exclude_folders=files_search_exclude_folders,
+ files_search_explicitly_listed_files=files_search_explicitly_listed_files,
+ files_search_root_folder=files_search_root_folder,
+ flags=flags,
+ gcov_args=gcov_args,
+ gcov_executable=gcov_executable,
+ gcov_ignore=gcov_ignore,
+ gcov_include=gcov_include,
+ git_service=git_service,
+ handle_no_reports_found=handle_no_reports_found,
+ job_code=job_code,
+ name=name,
+ network_filter=network_filter,
+ network_prefix=network_prefix,
+ network_root_folder=network_root_folder,
+ parent_sha=parent_sha,
+ plugin_names=plugin_names,
+ pull_request_number=pull_request_number,
+ report_code=report_code,
+ slug=slug,
+ swift_project=swift_project,
+ token=token,
+ use_legacy_uploader=use_legacy_uploader,
+ upload_file_type=upload_file_type,
+ )
diff --git a/codecov_cli/types.py b/codecov_cli/types.py
index 95f9f759..4050c47e 100644
--- a/codecov_cli/types.py
+++ b/codecov_cli/types.py
@@ -1,7 +1,23 @@
import pathlib
-import typing
+import typing as t
from dataclasses import dataclass
+import click
+
+from codecov_cli.helpers.ci_adapters.base import CIAdapterBase
+from codecov_cli.helpers.versioning_systems import VersioningSystemInterface
+
+
+class ContextObject(t.TypedDict):
+ ci_adapter: t.Optional[CIAdapterBase]
+ versioning_system: t.Optional[VersioningSystemInterface]
+ codecov_yaml: t.Optional[dict]
+ enterprise_url: t.Optional[str]
+
+
+class CommandContext(click.Context):
+ obj: ContextObject
+
class UploadCollectionResultFile(object):
def __init__(self, path: pathlib.Path):
@@ -31,17 +47,17 @@ def __hash__(self) -> int:
class UploadCollectionResultFileFixer(object):
__slots__ = ["path", "fixed_lines_without_reason", "fixed_lines_with_reason", "eof"]
path: pathlib.Path
- fixed_lines_without_reason: typing.Set[int]
- fixed_lines_with_reason: typing.Optional[typing.Set[typing.Tuple[int, str]]]
- eof: typing.Optional[int]
+ fixed_lines_without_reason: t.Set[int]
+ fixed_lines_with_reason: t.Optional[t.Set[t.Tuple[int, str]]]
+ eof: t.Optional[int]
@dataclass
class UploadCollectionResult(object):
- __slots__ = ["network", "coverage_files", "file_fixes"]
- network: typing.List[str]
- coverage_files: typing.List[UploadCollectionResultFile]
- file_fixes: typing.List[UploadCollectionResultFileFixer]
+ __slots__ = ["network", "files", "file_fixes"]
+ network: t.List[str]
+ files: t.List[UploadCollectionResultFile]
+ file_fixes: t.List[UploadCollectionResultFileFixer]
class PreparationPluginInterface(object):
@@ -59,14 +75,14 @@ class RequestResultWarning(object):
class RequestError(object):
__slots__ = ("code", "params", "description")
code: str
- params: typing.Dict
+ params: t.Dict
description: str
@dataclass
class RequestResult(object):
__slots__ = ("error", "warnings", "status_code", "text")
- error: typing.Optional[RequestError]
- warnings: typing.List[RequestResultWarning]
+ error: t.Optional[RequestError]
+ warnings: t.List[RequestResultWarning]
status_code: int
text: str
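The new ContextObject/CommandContext typings above give click's ctx.obj a concrete shape. A small hypothetical usage, exercising a simplified ContextObject through click's test runner (the real commands live under codecov_cli/commands):

import typing as t

import click
from click.testing import CliRunner

class ContextObject(t.TypedDict):
    enterprise_url: t.Optional[str]

@click.command()
@click.pass_context
def show_url(ctx: click.Context) -> None:
    obj: ContextObject = ctx.obj
    click.echo(obj["enterprise_url"] or "no enterprise URL configured")

result = CliRunner().invoke(show_url, obj={"enterprise_url": "https://codecov.example"})
print(result.output)  # https://codecov.example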
diff --git a/requirements.txt b/requirements.txt
index e98a0464..f779500c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
#
-# This file is autogenerated by pip-compile with Python 3.10
+# This file is autogenerated by pip-compile with Python 3.11
# by the following command:
#
# pip-compile setup.py
@@ -15,8 +15,6 @@ charset-normalizer==3.3.0
# via requests
click==8.1.7
# via codecov-cli (setup.py)
-exceptiongroup==1.1.3
- # via anyio
h11==0.14.0
# via httpcore
httpcore==0.16.3
@@ -32,19 +30,21 @@ ijson==3.2.3
# via codecov-cli (setup.py)
pyyaml==6.0.1
# via codecov-cli (setup.py)
+regex==2023.12.25
+ # via codecov-cli (setup.py)
requests==2.31.0
# via responses
responses==0.21.0
# via codecov-cli (setup.py)
rfc3986[idna2008]==1.5.0
# via httpx
-smart-open==6.4.0
- # via codecov-cli (setup.py)
sniffio==1.3.0
# via
# anyio
# httpcore
# httpx
+test-results-parser==0.5.1
+ # via codecov-cli (setup.py)
tree-sitter==0.20.2
# via codecov-cli (setup.py)
urllib3==2.0.7
diff --git a/ruff.toml b/ruff.toml
new file mode 100644
index 00000000..42d4e461
--- /dev/null
+++ b/ruff.toml
@@ -0,0 +1,79 @@
+# Exclude a variety of commonly ignored directories.
+exclude = [
+ ".bzr",
+ ".direnv",
+ ".eggs",
+ ".git",
+ ".git-rewrite",
+ ".hg",
+ ".ipynb_checkpoints",
+ ".mypy_cache",
+ ".nox",
+ ".pants.d",
+ ".pyenv",
+ ".pytest_cache",
+ ".pytype",
+ ".ruff_cache",
+ ".svn",
+ ".tox",
+ ".venv",
+ ".vscode",
+ "__pypackages__",
+ "_build",
+ "buck-out",
+ "build",
+ "dist",
+ "node_modules",
+ "site-packages",
+ "venv",
+ "languages",
+ "samples"
+]
+
+# Same as Black.
+line-length = 88
+indent-width = 4
+
+# Assume Python 3.9
+target-version = "py39"
+
+[lint]
+# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
+# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or
+# McCabe complexity (`C901`) by default.
+select = ["E4", "E7", "E9", "F"]
+ignore = ["F401"]
+
+# Allow fix for all enabled rules (when `--fix` is provided).
+fixable = ["ALL"]
+unfixable = []
+
+# Allow unused variables when underscore-prefixed.
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+
+[format]
+# Like Black, use double quotes for strings.
+quote-style = "double"
+
+# Like Black, indent with spaces, rather than tabs.
+indent-style = "space"
+
+# Like Black, respect magic trailing commas.
+skip-magic-trailing-comma = false
+
+# Like Black, automatically detect the appropriate line ending.
+line-ending = "auto"
+
+# Enable auto-formatting of code examples in docstrings. Markdown,
+# reStructuredText code/literal blocks and doctests are all supported.
+#
+# This is currently disabled by default, but it is planned for this
+# to be opt-out in the future.
+docstring-code-format = false
+
+# Set the line length limit used when formatting code snippets in
+# docstrings.
+#
+# This only has an effect when the `docstring-code-format` setting is
+# enabled.
+docstring-code-line-length = "dynamic"
\ No newline at end of file
diff --git a/samples/fake_project/.codecov.yaml b/samples/fake_project/.codecov.yaml
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/fake_project/.codecov.yml b/samples/fake_project/.codecov.yml
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/fake_project/.github/.codecov.yaml b/samples/fake_project/.github/.codecov.yaml
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/fake_project/.github/.codecov.yml b/samples/fake_project/.github/.codecov.yml
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/fake_project/.github/codecov.yaml b/samples/fake_project/.github/codecov.yaml
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/fake_project/.github/codecov.yml b/samples/fake_project/.github/codecov.yml
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/fake_project/codecov.yaml b/samples/fake_project/codecov.yaml
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/fake_project/codecov.yml b/samples/fake_project/codecov.yml
new file mode 100644
index 00000000..1ba7121f
--- /dev/null
+++ b/samples/fake_project/codecov.yml
@@ -0,0 +1,5 @@
+runners:
+ python:
+ collect_tests_options:
+ - --ignore
+ - batata
diff --git a/samples/fake_project/dev/.codecov.yaml b/samples/fake_project/dev/.codecov.yaml
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/fake_project/dev/.codecov.yml b/samples/fake_project/dev/.codecov.yml
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/fake_project/dev/codecov.yaml b/samples/fake_project/dev/codecov.yaml
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/fake_project/dev/codecov.yml b/samples/fake_project/dev/codecov.yml
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/junit.xml b/samples/junit.xml
new file mode 100644
index 00000000..e698e007
--- /dev/null
+++ b/samples/junit.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites>
+  <testsuite name="pytest" tests="1" failures="1" errors="0" skipped="0">
+    <testcase classname="api.temp.calculator.test_calculator" name="test_divide">
+      <failure message="assert 1.0 == 0.5">
+        def
+        test_divide():
+        &gt; assert Calculator.divide(1, 2) == 0.5
+        E assert 1.0 == 0.5
+        E + where 1.0 = &lt;function Calculator.divide at 0x104c9eb90&gt;(1, 2)
+        E + where &lt;function Calculator.divide at 0x104c9eb90&gt; = Calculator.divide
+        api/temp/calculator/test_calculator.py:30: AssertionError
+      </failure>
+    </testcase>
+  </testsuite>
+</testsuites>
\ No newline at end of file
diff --git a/scripts/build_alpine_arm.sh b/scripts/build_alpine_arm.sh
new file mode 100755
index 00000000..1c220bac
--- /dev/null
+++ b/scripts/build_alpine_arm.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+apk add musl-dev build-base
+pip install -r requirements.txt
+pip install .
+python setup.py build
+STATICCODECOV_LIB_PATH=$(find build/ -maxdepth 1 -type d -name 'lib.*' -print -quit | xargs -I {} sh -c "find {} -type f -name 'staticcodecov*' -print -quit | sed 's|^./||'")
+pip install pyinstaller
+pyinstaller --add-binary ${STATICCODECOV_LIB_PATH}:. --copy-metadata codecov-cli --hidden-import staticcodecov_languages -F codecov_cli/main.py
+cp ./dist/main ./dist/codecovcli_$1
\ No newline at end of file
diff --git a/scripts/build_linux_arm.sh b/scripts/build_linux_arm.sh
new file mode 100755
index 00000000..f90abd02
--- /dev/null
+++ b/scripts/build_linux_arm.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+apt install build-essential
+pip install -r requirements.txt
+pip install .
+python setup.py build
+STATICCODECOV_LIB_PATH=$(find build/ -maxdepth 1 -type d -name 'lib.*' -print -quit | xargs -I {} sh -c "find {} -type f -name 'staticcodecov*' -print -quit | sed 's|^./||'")
+pip install pyinstaller
+pyinstaller --add-binary ${STATICCODECOV_LIB_PATH}:. --copy-metadata codecov-cli --hidden-import staticcodecov_languages -F codecov_cli/main.py
+cp ./dist/main ./dist/codecovcli_$1
\ No newline at end of file
diff --git a/setup.py b/setup.py
index e11f15da..4ffee44c 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
setup(
name="codecov-cli",
- version="0.4.0",
+ version="0.9.4",
packages=find_packages(exclude=["contrib", "docs", "tests*"]),
description="Codecov Command Line Interface",
long_description=long_description,
@@ -19,12 +19,13 @@
author_email="support@codecov.io",
install_requires=[
"click==8.*",
- "httpx==0.23.*",
+ "httpx==0.27.*",
"ijson==3.*",
"pyyaml==6.*",
"responses==0.21.*",
- "smart-open==6.*",
"tree-sitter==0.20.*",
+ "test-results-parser==0.5.*",
+ "regex",
],
entry_points={
"console_scripts": [
diff --git a/tests/ci_adapters/test_azure_pipelines.py b/tests/ci_adapters/test_azure_pipelines.py
index fe06cbc3..6a635bfb 100644
--- a/tests/ci_adapters/test_azure_pipelines.py
+++ b/tests/ci_adapters/test_azure_pipelines.py
@@ -14,6 +14,7 @@ class AzurePipelinesEnvEnum(str, Enum):
BUILD_SOURCEVERSION = "BUILD_SOURCEVERSION"
SYSTEM_PULLREQUEST_PULLREQUESTID = "SYSTEM_PULLREQUEST_PULLREQUESTID"
SYSTEM_PULLREQUEST_PULLREQUESTNUMBER = "SYSTEM_PULLREQUEST_PULLREQUESTNUMBER"
+ SYSTEM_PULLREQUEST_SOURCECOMMITID = "SYSTEM_PULLREQUEST_SOURCECOMMITID"
SYSTEM_TEAMFOUNDATIONCOLLECTIONURI = "SYSTEM_TEAMFOUNDATIONCOLLECTIONURI"
SYSTEM_TEAMPROJECT = "SYSTEM_TEAMPROJECT"
BUILD_REPOSITORY_NAME = "BUILD_REPOSITORY_NAME"
@@ -43,6 +44,13 @@ def test_detect(self, env_dict, expected, mocker):
{AzurePipelinesEnvEnum.BUILD_SOURCEVERSION: "123456789000111"},
"123456789000111",
),
+ (
+ {
+ AzurePipelinesEnvEnum.BUILD_SOURCEVERSION: "123456789000111",
+ AzurePipelinesEnvEnum.SYSTEM_PULLREQUEST_SOURCECOMMITID: "111000987654321",
+ },
+ "111000987654321",
+ ),
],
)
def test_commit_sha(self, env_dict, expected, mocker):
diff --git a/tests/ci_adapters/test_circleci.py b/tests/ci_adapters/test_circleci.py
index 6dc7e964..02073448 100644
--- a/tests/ci_adapters/test_circleci.py
+++ b/tests/ci_adapters/test_circleci.py
@@ -143,7 +143,7 @@ def test_branch(self, env_dict, expected, mocker):
assert actual == expected
def test_raises_value_error_if_invalid_field(self):
- with pytest.raises(ValueError) as ex:
+ with pytest.raises(ValueError):
CircleCICIAdapter().get_fallback_value("some random key x 123")
def test_service(self):
diff --git a/tests/ci_adapters/test_cloudbuild.py b/tests/ci_adapters/test_cloudbuild.py
new file mode 100644
index 00000000..ec4b0889
--- /dev/null
+++ b/tests/ci_adapters/test_cloudbuild.py
@@ -0,0 +1,213 @@
+import os
+from enum import Enum
+
+import pytest
+
+from codecov_cli.fallbacks import FallbackFieldEnum
+from codecov_cli.helpers.ci_adapters.cloudbuild import GoogleCloudBuildAdapter
+
+
+class CloudBuildEnvEnum(str, Enum):
+ BRANCH_NAME = "BRANCH_NAME"
+ BUILD_ID = "BUILD_ID"
+ COMMIT_SHA = "COMMIT_SHA"
+ LOCATION = "LOCATION"
+ PROJECT_ID = "PROJECT_ID"
+ PROJECT_NUMBER = "PROJECT_NUMBER"
+ REPO_FULL_NAME = "REPO_FULL_NAME"
+ _PR_NUMBER = "_PR_NUMBER"
+ TRIGGER_NAME = "TRIGGER_NAME"
+
+
+class TestCloudBuild(object):
+ @pytest.mark.parametrize(
+ "env_dict,expected",
+ [
+ ({}, False),
+ (
+ {
+ CloudBuildEnvEnum.LOCATION: "global",
+ CloudBuildEnvEnum.PROJECT_ID: "my_project",
+ CloudBuildEnvEnum.PROJECT_NUMBER: "123",
+ },
+ False,
+ ),
+ (
+ {
+ CloudBuildEnvEnum.BUILD_ID: "fd02b20f-72a3-41b5-862d-2c15e5f289de",
+ CloudBuildEnvEnum.PROJECT_ID: "my_project",
+ CloudBuildEnvEnum.PROJECT_NUMBER: "123",
+ },
+ False,
+ ),
+ (
+ {
+ CloudBuildEnvEnum.BUILD_ID: "fd02b20f-72a3-41b5-862d-2c15e5f289de",
+ CloudBuildEnvEnum.LOCATION: "global",
+ CloudBuildEnvEnum.PROJECT_NUMBER: "123",
+ },
+ False,
+ ),
+ (
+ {
+ CloudBuildEnvEnum.BUILD_ID: "fd02b20f-72a3-41b5-862d-2c15e5f289de",
+ CloudBuildEnvEnum.LOCATION: "global",
+ CloudBuildEnvEnum.PROJECT_ID: "my_project",
+ },
+ False,
+ ),
+ (
+ {
+ CloudBuildEnvEnum.BUILD_ID: "fd02b20f-72a3-41b5-862d-2c15e5f289de",
+ CloudBuildEnvEnum.LOCATION: "global",
+ CloudBuildEnvEnum.PROJECT_ID: "my_project",
+ CloudBuildEnvEnum.PROJECT_NUMBER: "123",
+ },
+ True,
+ ),
+ ],
+ )
+ def test_detect(self, env_dict, expected, mocker):
+ mocker.patch.dict(os.environ, env_dict)
+ actual = GoogleCloudBuildAdapter().detect()
+ assert actual == expected
+
+ @pytest.mark.parametrize(
+ "env_dict,expected",
+ [
+ ({}, None),
+ ({CloudBuildEnvEnum.BRANCH_NAME: "abc"}, "abc"),
+ ],
+ )
+ def test_branch(self, env_dict, expected, mocker):
+ mocker.patch.dict(os.environ, env_dict)
+ actual = GoogleCloudBuildAdapter().get_fallback_value(FallbackFieldEnum.branch)
+
+ assert actual == expected
+
+ @pytest.mark.parametrize(
+ "env_dict,expected",
+ [
+ ({}, None),
+ (
+ {CloudBuildEnvEnum.BUILD_ID: "52cbb633-aca0-4289-90bd-76e4e60baf82"},
+ "52cbb633-aca0-4289-90bd-76e4e60baf82",
+ ),
+ ],
+ )
+ def test_build_code(self, env_dict, expected, mocker):
+ mocker.patch.dict(os.environ, env_dict)
+ actual = GoogleCloudBuildAdapter().get_fallback_value(
+ FallbackFieldEnum.build_code
+ )
+
+ assert actual == expected
+
+ @pytest.mark.parametrize(
+ "env_dict,expected",
+ [
+ ({}, None),
+ (
+ {
+ CloudBuildEnvEnum.LOCATION: "global",
+ CloudBuildEnvEnum.PROJECT_ID: "my_project",
+ },
+ None,
+ ),
+ (
+ {
+ CloudBuildEnvEnum.BUILD_ID: "fd02b20f-72a3-41b5-862d-2c15e5f289de",
+ CloudBuildEnvEnum.PROJECT_ID: "my_project",
+ },
+ None,
+ ),
+ (
+ {
+ CloudBuildEnvEnum.BUILD_ID: "fd02b20f-72a3-41b5-862d-2c15e5f289de",
+ CloudBuildEnvEnum.LOCATION: "global",
+ },
+ None,
+ ),
+ (
+ {
+ CloudBuildEnvEnum.BUILD_ID: "fd02b20f-72a3-41b5-862d-2c15e5f289de",
+ CloudBuildEnvEnum.LOCATION: "global",
+ CloudBuildEnvEnum.PROJECT_ID: "my_project",
+ },
+ "https://console.cloud.google.com/cloud-build/builds;region=global/fd02b20f-72a3-41b5-862d-2c15e5f289de?project=my_project",
+ ),
+ ],
+ )
+ def test_build_url(self, env_dict, expected, mocker):
+ mocker.patch.dict(os.environ, env_dict)
+ actual = GoogleCloudBuildAdapter().get_fallback_value(
+ FallbackFieldEnum.build_url
+ )
+
+ assert actual == expected
+
+ @pytest.mark.parametrize(
+ "env_dict,expected",
+ [
+ ({}, None),
+ ({CloudBuildEnvEnum.COMMIT_SHA: "123456789000111"}, "123456789000111"),
+ ],
+ )
+ def test_commit_sha(self, env_dict, expected, mocker):
+ mocker.patch.dict(os.environ, env_dict)
+ actual = GoogleCloudBuildAdapter().get_fallback_value(
+ FallbackFieldEnum.commit_sha
+ )
+
+ assert actual == expected
+
+ @pytest.mark.parametrize(
+ "env_dict,expected",
+ [
+ ({}, None),
+ ({CloudBuildEnvEnum.TRIGGER_NAME: ""}, None),
+ ({CloudBuildEnvEnum.TRIGGER_NAME: "build-job-name"}, "build-job-name"),
+ ],
+ )
+ def test_job_code(self, env_dict, expected, mocker):
+ mocker.patch.dict(os.environ, env_dict)
+ actual = GoogleCloudBuildAdapter().get_fallback_value(
+ FallbackFieldEnum.job_code
+ )
+
+ assert actual == expected
+
+ @pytest.mark.parametrize(
+ "env_dict,expected",
+ [
+ ({}, None),
+ ({CloudBuildEnvEnum._PR_NUMBER: ""}, None),
+ ({CloudBuildEnvEnum._PR_NUMBER: "123"}, "123"),
+ ],
+ )
+ def test_pull_request_number(self, env_dict, expected, mocker):
+ mocker.patch.dict(os.environ, env_dict)
+ actual = GoogleCloudBuildAdapter().get_fallback_value(
+ FallbackFieldEnum.pull_request_number
+ )
+
+ assert actual == expected
+
+ @pytest.mark.parametrize(
+ "env_dict,expected",
+ [
+ ({}, None),
+ ({CloudBuildEnvEnum.REPO_FULL_NAME: "owner/repo"}, "owner/repo"),
+ ],
+ )
+ def test_slug(self, env_dict, expected, mocker):
+ mocker.patch.dict(os.environ, env_dict)
+ actual = GoogleCloudBuildAdapter().get_fallback_value(FallbackFieldEnum.slug)
+
+ assert actual == expected
+
+ def test_service(self):
+ assert (
+ GoogleCloudBuildAdapter().get_fallback_value(FallbackFieldEnum.service)
+ == "google_cloud_build"
+ )
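The new Cloud Build tests above encode the detection rule: the adapter activates only when all four environment variables are present. A minimal, hypothetical re-statement of that rule outside the adapter class:

import os

REQUIRED_VARS = ("BUILD_ID", "LOCATION", "PROJECT_ID", "PROJECT_NUMBER")

def looks_like_cloud_build(environ=os.environ):
    # every variable must be present and non-empty, matching the test matrix
    return all(environ.get(var) for var in REQUIRED_VARS)

print(looks_like_cloud_build({var: "x" for var in REQUIRED_VARS}))  # True
print(looks_like_cloud_build({"BUILD_ID": "x"}))                    # False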
diff --git a/tests/ci_adapters/test_gitlabci.py b/tests/ci_adapters/test_gitlabci.py
index 61e3e3ac..66cd7915 100644
--- a/tests/ci_adapters/test_gitlabci.py
+++ b/tests/ci_adapters/test_gitlabci.py
@@ -133,7 +133,6 @@ def test_pull_request_number(self, env_dict, expected, mocker):
],
)
def test_slug(self, env_dict, expected, mocker):
-
mocker.patch.dict(
os.environ,
env_dict,
diff --git a/tests/ci_adapters/test_herokuci.py b/tests/ci_adapters/test_herokuci.py
index 092ad266..5dc75e30 100644
--- a/tests/ci_adapters/test_herokuci.py
+++ b/tests/ci_adapters/test_herokuci.py
@@ -73,7 +73,7 @@ def test_branch(self, env_dict, expected, mocker):
assert actual == expected
def test_raises_value_error_if_invalid_field(self):
- with pytest.raises(ValueError) as ex:
+ with pytest.raises(ValueError):
HerokuCIAdapter().get_fallback_value("some_random_key")
def test_service(self):
@@ -82,7 +82,7 @@ def test_service(self):
)
def test_other_values_fallback_to_none(self):
- assert HerokuCIAdapter()._get_slug() == None
- assert HerokuCIAdapter()._get_build_url() == None
- assert HerokuCIAdapter()._get_job_code() == None
- assert HerokuCIAdapter()._get_pull_request_number() == None
+ assert HerokuCIAdapter()._get_slug() is None
+ assert HerokuCIAdapter()._get_build_url() is None
+ assert HerokuCIAdapter()._get_job_code() is None
+ assert HerokuCIAdapter()._get_pull_request_number() is None
diff --git a/tests/ci_adapters/test_jenkins.py b/tests/ci_adapters/test_jenkins.py
index 69f91454..525f7d3e 100644
--- a/tests/ci_adapters/test_jenkins.py
+++ b/tests/ci_adapters/test_jenkins.py
@@ -40,18 +40,6 @@ def test_build_url(self, env_dict, expected, mocker):
actual = JenkinsAdapter().get_fallback_value(FallbackFieldEnum.build_url)
assert actual == expected
- @pytest.mark.parametrize(
- "env_dict,expected",
- [
- ({}, None),
- ({JenkinsCIEnvEnum.BUILD_URL: "url"}, "url"),
- ],
- )
- def test_build_url(self, env_dict, expected, mocker):
- mocker.patch.dict(os.environ, env_dict)
- actual = JenkinsAdapter().get_fallback_value(FallbackFieldEnum.build_url)
- assert actual == expected
-
@pytest.mark.parametrize(
"env_dict,expected",
[
@@ -99,6 +87,6 @@ def test_service(self):
)
def test_none_values(self):
- JenkinsAdapter().get_fallback_value(FallbackFieldEnum.slug) == None
- JenkinsAdapter().get_fallback_value(FallbackFieldEnum.commit_sha) == None
- JenkinsAdapter().get_fallback_value(FallbackFieldEnum.job_code) == None
+ JenkinsAdapter().get_fallback_value(FallbackFieldEnum.slug) is None
+ JenkinsAdapter().get_fallback_value(FallbackFieldEnum.commit_sha) is None
+ JenkinsAdapter().get_fallback_value(FallbackFieldEnum.job_code) is None
diff --git a/tests/ci_adapters/test_local.py b/tests/ci_adapters/test_local.py
index d92f1057..36c8022f 100644
--- a/tests/ci_adapters/test_local.py
+++ b/tests/ci_adapters/test_local.py
@@ -27,7 +27,7 @@ def test_detect_git_not_installed(self, mocker):
"codecov_cli.helpers.ci_adapters.local.subprocess.run",
return_value=mocker.MagicMock(returncode=1),
)
- assert LocalAdapter().detect() == False
+ assert not LocalAdapter().detect()
mocked_subprocess.assert_called_once()
@pytest.mark.parametrize(
diff --git a/tests/commands/test_invoke_labelanalysis.py b/tests/commands/test_invoke_labelanalysis.py
index 22f29925..729f2965 100644
--- a/tests/commands/test_invoke_labelanalysis.py
+++ b/tests/commands/test_invoke_labelanalysis.py
@@ -13,6 +13,7 @@
_dry_run_json_output,
_dry_run_list_output,
_fallback_to_collected_labels,
+ _parse_runner_params,
_potentially_calculate_absent_labels,
_send_labelanalysis_request,
)
@@ -117,6 +118,7 @@ def test__dry_run_json_output(self):
labels_to_run=list_to_run,
labels_to_skip=list_to_skip,
runner_options=runner_options,
+ fallback_reason=None,
)
stdout = out.getvalue()
@@ -124,9 +126,32 @@ def test__dry_run_json_output(self):
"runner_options": ["--option=1", "--option=2"],
"ats_tests_to_skip": ["label_3", "label_4"],
"ats_tests_to_run": ["label_1", "label_2"],
+ "ats_fallback_reason": None,
}
- def test__dry_run_json_output(self):
+ def test__dry_run_json_output_fallback_reason(self):
+ list_to_run = ["label_1", "label_2", "label_3", "label_4"]
+ list_to_skip = []
+ runner_options = ["--option=1", "--option=2"]
+
+ with StringIO() as out:
+ with redirect_stdout(out):
+ _dry_run_json_output(
+ labels_to_run=list_to_run,
+ labels_to_skip=list_to_skip,
+ runner_options=runner_options,
+ fallback_reason="test_list_processing_errors",
+ )
+ stdout = out.getvalue()
+
+ assert json.loads(stdout) == {
+ "runner_options": ["--option=1", "--option=2"],
+ "ats_tests_to_skip": [],
+ "ats_tests_to_run": ["label_1", "label_2", "label_3", "label_4"],
+ "ats_fallback_reason": "test_list_processing_errors",
+ }
+
+ def test__dry_run_space_separated_list_output(self):
list_to_run = ["label_1", "label_2"]
list_to_skip = ["label_3", "label_4"]
runner_options = ["--option=1", "--option=2"]
@@ -145,40 +170,22 @@ def test__dry_run_json_output(self):
== "TESTS_TO_RUN='--option=1' '--option=2' 'label_1' 'label_2'\nTESTS_TO_SKIP='--option=1' '--option=2' 'label_3' 'label_4'\n"
)
-
-class TestLabelAnalysisCommand(object):
- def test_labelanalysis_help(self, mocker, fake_ci_provider):
- mocker.patch("codecov_cli.main.get_ci_adapter", return_value=fake_ci_provider)
- runner = CliRunner()
-
- result = runner.invoke(cli, ["label-analysis", "--help"], obj={})
- assert result.exit_code == 0
- print(result.output)
- assert result.output.split("\n") == [
- "Usage: cli label-analysis [OPTIONS]",
- "",
- "Options:",
- " --token TEXT The static analysis token (NOT the same token",
- " as upload) [required]",
- " --head-sha TEXT Commit SHA (with 40 chars) [required]",
- " --base-sha TEXT Commit SHA (with 40 chars) [required]",
- " --runner-name, --runner TEXT Runner to use",
- " --max-wait-time INTEGER Max time (in seconds) to wait for the label",
- " analysis result before falling back to running",
- " all tests. Default is to wait forever.",
- " --dry-run Print list of tests to run AND tests skipped",
- " (and options that need to be added to the test",
- " runner) to stdout. Also prints the same",
- " information in JSON format. JSON will have",
- " keys 'ats_tests_to_run', 'ats_tests_to_skip'",
- " and 'runner_options'. List of tests to run is",
- " prefixed with ATS_TESTS_TO_RUN= List of tests",
- " to skip is prefixed with ATS_TESTS_TO_SKIP=",
- " --dry-run-format [json|space-separated-list]",
- " -h, --help Show this message and exit.",
- "",
+ def test_parse_dynamic_runner_options(self):
+ params = [
+ "wrong_param",
+ "key=value",
+ "list_key=val1,val2,val3",
+ "point=somethingwith=sign",
]
+ assert _parse_runner_params(params) == {
+ "wrong_param": None,
+ "key": "value",
+ "list_key": ["val1", "val2", "val3"],
+ "point": "somethingwith=sign",
+ }
+
+class TestLabelAnalysisCommand(object):
def test_invoke_label_analysis_missing_token(self, mocker, fake_ci_provider):
mocker.patch("codecov_cli.main.get_ci_adapter", return_value=fake_ci_provider)
runner = CliRunner()
@@ -221,7 +228,7 @@ def test_invoke_label_analysis(
):
mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
fake_runner = get_labelanalysis_deps["fake_runner"]
- collected_labels = get_labelanalysis_deps["collected_labels"]
+ _ = get_labelanalysis_deps["collected_labels"]
label_analysis_result = {
"present_report_labels": ["test_present"],
@@ -271,7 +278,10 @@ def test_invoke_label_analysis(
)
print(result.output)
- def test_invoke_label_analysis_dry_run(self, get_labelanalysis_deps, mocker):
+ @pytest.mark.parametrize("processing_errors", [[], [{"error": "missing_data"}]])
+ def test_invoke_label_analysis_dry_run(
+ self, processing_errors, get_labelanalysis_deps, mocker
+ ):
mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
fake_runner = get_labelanalysis_deps["fake_runner"]
@@ -304,7 +314,11 @@ def test_invoke_label_analysis_dry_run(self, get_labelanalysis_deps, mocker):
rsps.add(
responses.GET,
"https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
- json={"state": "finished", "result": label_analysis_result},
+ json={
+ "state": "finished",
+ "result": label_analysis_result,
+ "errors": processing_errors,
+ },
)
cli_runner = CliRunner(mix_stderr=False)
with cli_runner.isolated_filesystem():
@@ -322,16 +336,20 @@ def test_invoke_label_analysis_dry_run(self, get_labelanalysis_deps, mocker):
fake_runner.process_labelanalysis_result.assert_not_called()
# Dry run format defaults to json
print(result.stdout)
+ ats_fallback_reason = (
+ "test_list_processing_errors" if processing_errors else None
+ )
assert json.loads(result.stdout) == {
"runner_options": ["--labels"],
"ats_tests_to_run": ["test_absent", "test_global", "test_in_diff"],
"ats_tests_to_skip": ["test_present"],
+ "ats_fallback_reason": ats_fallback_reason,
}
def test_invoke_label_analysis_dry_run_pytest_format(
self, get_labelanalysis_deps, mocker
):
- mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
+ _ = get_labelanalysis_deps["mock_get_runner"]
fake_runner = get_labelanalysis_deps["fake_runner"]
label_analysis_result = {
@@ -444,13 +462,12 @@ def test_fallback_collected_labels_covecov_500_error(
print(result.output)
assert result.exit_code == 0
- def test_fallback_dry_run(self, get_labelanalysis_deps, mocker, use_verbose_option):
+ def test_fallback_collected_labels_covecov_500_error_dry_run(
+ self, get_labelanalysis_deps, mocker
+ ):
mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
fake_runner = get_labelanalysis_deps["fake_runner"]
collected_labels = get_labelanalysis_deps["collected_labels"]
- mock_dry_run = mocker.patch(
- "codecov_cli.commands.labelanalysis._dry_run_output"
- )
with responses.RequestsMock() as rsps:
rsps.add(
responses.POST,
@@ -460,29 +477,27 @@ def test_fallback_dry_run(self, get_labelanalysis_deps, mocker, use_verbose_opti
matchers.header_matcher({"Authorization": "Repotoken STATIC_TOKEN"})
],
)
- cli_runner = CliRunner()
- result = cli_runner.invoke(
- cli,
- [
- "label-analysis",
- "--token=STATIC_TOKEN",
- f"--base-sha={FAKE_BASE_SHA}",
- "--dry-run",
- ],
- obj={},
- )
- mock_get_runner.assert_called()
- fake_runner.process_labelanalysis_result.assert_not_called()
- mock_dry_run.assert_called_with(
- {
- "present_report_labels": [],
- "absent_labels": collected_labels,
- "present_diff_labels": [],
- "global_level_labels": [],
- },
- fake_runner,
- "json",
- )
+ cli_runner = CliRunner(mix_stderr=False)
+ with cli_runner.isolated_filesystem():
+ result = cli_runner.invoke(
+ cli,
+ [
+ "label-analysis",
+ "--token=STATIC_TOKEN",
+ f"--base-sha={FAKE_BASE_SHA}",
+ "--dry-run",
+ ],
+ obj={},
+ )
+ mock_get_runner.assert_called()
+ fake_runner.process_labelanalysis_result.assert_not_called()
+ # Dry run format defaults to json
+ assert json.loads(result.stdout) == {
+ "runner_options": ["--labels"],
+ "ats_tests_to_run": sorted(collected_labels),
+ "ats_tests_to_skip": [],
+ "ats_fallback_reason": "codecov_unavailable",
+ }
assert result.exit_code == 0
def test_fallback_collected_labels_codecov_error_processing_label_analysis(
@@ -544,6 +559,65 @@ def test_fallback_collected_labels_codecov_error_processing_label_analysis(
print(result.output)
assert result.exit_code == 0
+ def test_fallback_collected_labels_codecov_error_processing_label_analysis_dry_run(
+ self, get_labelanalysis_deps, mocker, use_verbose_option
+ ):
+ mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
+ fake_runner = get_labelanalysis_deps["fake_runner"]
+ collected_labels = get_labelanalysis_deps["collected_labels"]
+
+ with responses.RequestsMock() as rsps:
+ rsps.add(
+ responses.POST,
+ "https://api.codecov.io/labels/labels-analysis",
+ json={"external_id": "label-analysis-request-id"},
+ status=201,
+ match=[
+ matchers.header_matcher({"Authorization": "Repotoken STATIC_TOKEN"})
+ ],
+ )
+ rsps.add(
+ responses.PATCH,
+ "https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
+ json={"external_id": "label-analysis-request-id"},
+ status=201,
+ match=[
+ matchers.header_matcher({"Authorization": "Repotoken STATIC_TOKEN"})
+ ],
+ )
+ rsps.add(
+ responses.GET,
+ "https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
+ json={
+ "state": "error",
+ "external_id": "uuid4-external-id",
+ "base_commit": "BASE_COMMIT_SHA",
+ "head_commit": "HEAD_COMMIT_SHA",
+ },
+ )
+ cli_runner = CliRunner(mix_stderr=False)
+ with cli_runner.isolated_filesystem():
+ result = cli_runner.invoke(
+ cli,
+ [
+ "label-analysis",
+ "--token=STATIC_TOKEN",
+ f"--base-sha={FAKE_BASE_SHA}",
+ "--dry-run",
+ ],
+ obj={},
+ )
+ mock_get_runner.assert_called()
+ fake_runner.process_labelanalysis_result.assert_not_called()
+ # Dry run format defaults to json
+ assert json.loads(result.stdout) == {
+ "runner_options": ["--labels"],
+ "ats_tests_to_run": sorted(collected_labels),
+ "ats_tests_to_skip": [],
+ "ats_fallback_reason": "test_list_processing_failed",
+ }
+ assert result.exit_code == 0
+
def test_fallback_collected_labels_codecov_max_wait_time_exceeded(
self, get_labelanalysis_deps, mocker, use_verbose_option
):
@@ -599,12 +673,67 @@ def test_fallback_collected_labels_codecov_max_wait_time_exceeded(
}
)
- def test_first_labelanalysis_request_fails_but_second_works(
+ def test_fallback_collected_labels_codecov_max_wait_time_exceeded_dry_run(
self, get_labelanalysis_deps, mocker, use_verbose_option
):
mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
fake_runner = get_labelanalysis_deps["fake_runner"]
collected_labels = get_labelanalysis_deps["collected_labels"]
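+        # Patched clock jumps from 0s to 6s so the 5s --max-wait-time below is exceeded, forcing the fallback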
+ mocker.patch.object(labelanalysis_time, "monotonic", side_effect=[0, 6])
+
+ with responses.RequestsMock() as rsps:
+ rsps.add(
+ responses.POST,
+ "https://api.codecov.io/labels/labels-analysis",
+ json={"external_id": "label-analysis-request-id"},
+ status=201,
+ match=[
+ matchers.header_matcher({"Authorization": "Repotoken STATIC_TOKEN"})
+ ],
+ )
+ rsps.add(
+ responses.PATCH,
+ "https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
+ json={"external_id": "label-analysis-request-id"},
+ status=201,
+ match=[
+ matchers.header_matcher({"Authorization": "Repotoken STATIC_TOKEN"})
+ ],
+ )
+ rsps.add(
+ responses.GET,
+ "https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
+ json={"state": "processing"},
+ )
+ cli_runner = CliRunner(mix_stderr=False)
+ result = cli_runner.invoke(
+ cli,
+ [
+ "label-analysis",
+ "--token=STATIC_TOKEN",
+ f"--base-sha={FAKE_BASE_SHA}",
+ "--max-wait-time=5",
+ "--dry-run",
+ ],
+ obj={},
+ )
+ mock_get_runner.assert_called()
+ fake_runner.process_labelanalysis_result.assert_not_called()
+ # Dry run format defaults to json
+ assert json.loads(result.stdout) == {
+ "runner_options": ["--labels"],
+ "ats_tests_to_run": sorted(collected_labels),
+ "ats_tests_to_skip": [],
+ "ats_fallback_reason": "max_wait_time_exceeded",
+ }
+ assert result.exit_code == 0
+
+ def test_first_labelanalysis_request_fails_but_second_works(
+ self, get_labelanalysis_deps, mocker, use_verbose_option
+ ):
+ mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
+ fake_runner = get_labelanalysis_deps["fake_runner"]
+ _ = get_labelanalysis_deps["collected_labels"]
label_analysis_result = {
"present_report_labels": ["test_present"],
diff --git a/tests/commands/test_invoke_upload_coverage.py b/tests/commands/test_invoke_upload_coverage.py
new file mode 100644
index 00000000..bb620ec6
--- /dev/null
+++ b/tests/commands/test_invoke_upload_coverage.py
@@ -0,0 +1,140 @@
+from unittest.mock import patch
+
+from click.testing import CliRunner
+
+from codecov_cli.fallbacks import FallbackFieldEnum
+from codecov_cli.main import cli
+from codecov_cli.types import RequestError, RequestResult
+from tests.factory import FakeProvider, FakeVersioningSystem
+
+
+def test_upload_coverage_missing_commit_sha(mocker):
+ fake_ci_provider = FakeProvider({FallbackFieldEnum.commit_sha: None})
+ fake_versioning_system = FakeVersioningSystem({FallbackFieldEnum.commit_sha: None})
+ mocker.patch(
+ "codecov_cli.main.get_versioning_system", return_value=fake_versioning_system
+ )
+ mocker.patch("codecov_cli.main.get_ci_adapter", return_value=fake_ci_provider)
+ runner = CliRunner()
+ with runner.isolated_filesystem():
+ result = runner.invoke(cli, ["upload-coverage"], obj={})
+ assert result.exit_code != 0
+
+
+def test_upload_coverage_raise_Z_option(mocker, use_verbose_option):
+ error = RequestError(
+ code=401, params={"some": "params"}, description="Unauthorized"
+ )
+ command_result = RequestResult(
+ error=error, warnings=[], status_code=401, text="Unauthorized"
+ )
+
+ runner = CliRunner()
+ with runner.isolated_filesystem():
+ with patch(
+ "codecov_cli.services.commit.send_commit_data"
+ ) as mocked_create_commit:
+ mocked_create_commit.return_value = command_result
+ result = runner.invoke(
+ cli,
+ [
+ "upload-coverage",
+ "--fail-on-error",
+ "-C",
+ "command-sha",
+ "--slug",
+ "owner/repo",
+ "--report-type",
+ "test_results",
+ ],
+ obj={},
+ )
+
+ assert result.exit_code != 0
+ assert "Commit creating failed: Unauthorized" in result.output
+    assert str(result) == "<Result SystemExit(1)>"
+
+
+def test_upload_coverage_options(mocker):
+ runner = CliRunner()
+ fake_ci_provider = FakeProvider({FallbackFieldEnum.commit_sha: None})
+ mocker.patch("codecov_cli.main.get_ci_adapter", return_value=fake_ci_provider)
+ with runner.isolated_filesystem():
+ runner = CliRunner()
+ result = runner.invoke(cli, ["upload-coverage", "-h"], obj={})
+ assert result.exit_code == 0
+ print(result.output)
+
+ assert result.output.split("\n")[1:] == [
+ "Usage: cli upload-coverage [OPTIONS]",
+ "",
+ "Options:",
+ " -C, --sha, --commit-sha TEXT Commit SHA (with 40 chars) [required]",
+ " -Z, --fail-on-error Exit with non-zero code in case of error",
+ " --git-service [github|gitlab|bitbucket|github_enterprise|gitlab_enterprise|bitbucket_server]",
+ " -t, --token TEXT Codecov upload token",
+ " -r, --slug TEXT owner/repo slug used instead of the private",
+ " repo token in Self-hosted",
+ " --code, --report-code TEXT The code of the report. If unsure, leave",
+ " default",
+ " --network-root-folder PATH Root folder from which to consider paths on",
+ " the network section [default: (Current",
+ " working directory)]",
+ " -s, --dir, --coverage-files-search-root-folder, --files-search-root-folder PATH",
+ " Folder where to search for coverage files",
+ " [default: (Current Working Directory)]",
+ " --exclude, --coverage-files-search-exclude-folder, --files-search-exclude-folder PATH",
+ " Folders to exclude from search",
+ " -f, --file, --coverage-files-search-direct-file, --files-search-direct-file PATH",
+ " Explicit files to upload. These will be added",
+ " to the coverage files found for upload. If you",
+ " wish to only upload the specified files,",
+ " please consider using --disable-search to",
+ " disable uploading other files.",
+ " --disable-search Disable search for coverage files. This is",
+ " helpful when specifying what files you want to",
+ " upload with the --file option.",
+ " --disable-file-fixes Disable file fixes to ignore common lines from",
+ " coverage (e.g. blank lines or empty brackets)",
+ " -b, --build, --build-code TEXT Specify the build number manually",
+ " --build-url TEXT The URL of the build where this is running",
+ " --job-code TEXT",
+ " -n, --name TEXT Custom defined name of the upload. Visible in",
+ " Codecov UI",
+ " -B, --branch TEXT Branch to which this commit belongs to",
+ " -P, --pr, --pull-request-number TEXT",
+ " Specify the pull request number mannually.",
+ " Used to override pre-existing CI environment",
+ " variables",
+ " -e, --env, --env-var TEXT Specify environment variables to be included",
+ " with this build.",
+ " -F, --flag TEXT Flag the upload to group coverage metrics.",
+ " Multiple flags allowed.",
+ " --plugin TEXT",
+ " -d, --dry-run Don't upload files to Codecov",
+ " --legacy, --use-legacy-uploader",
+ " Use the legacy upload endpoint",
+ " --handle-no-reports-found Raise no exceptions when no coverage reports",
+ " found.",
+ " --report-type [coverage|test_results]",
+ " The type of the file to upload, coverage by",
+ " default. Possible values are: testing,",
+ " coverage.",
+ " --network-filter TEXT Specify a filter on the files listed in the",
+ " network section of the Codecov report. This",
+ " will only add files whose path begin with the",
+ " specified filter. Useful for upload-specific",
+ " path fixing",
+ " --network-prefix TEXT Specify a prefix on files listed in the",
+ " network section of the Codecov report. Useful",
+ " to help resolve path fixing",
+ " --gcov-args TEXT Extra arguments to pass to gcov",
+ " --gcov-ignore TEXT Paths to ignore during gcov gathering",
+ " --gcov-include TEXT Paths to include during gcov gathering",
+ " --gcov-executable TEXT gcov executable to run. Defaults to 'gcov'",
+ " --swift-project TEXT Specify the swift project",
+ " --parent-sha TEXT SHA (with 40 chars) of what should be the",
+ " parent of this commit",
+ " -h, --help Show this message and exit.",
+ "",
+ ]
diff --git a/tests/commands/test_invoke_upload_process.py b/tests/commands/test_invoke_upload_process.py
index 026c128d..c5490e3b 100644
--- a/tests/commands/test_invoke_upload_process.py
+++ b/tests/commands/test_invoke_upload_process.py
@@ -44,6 +44,8 @@ def test_upload_process_raise_Z_option(mocker, use_verbose_option):
"command-sha",
"--slug",
"owner/repo",
+ "--report-type",
+ "test_results",
],
obj={},
)
@@ -70,20 +72,20 @@ def test_upload_process_options(mocker):
" -C, --sha, --commit-sha TEXT Commit SHA (with 40 chars) [required]",
" -Z, --fail-on-error Exit with non-zero code in case of error",
" --git-service [github|gitlab|bitbucket|github_enterprise|gitlab_enterprise|bitbucket_server]",
- " -t, --token UUID Codecov upload token",
+ " -t, --token TEXT Codecov upload token",
" -r, --slug TEXT owner/repo slug used instead of the private",
" repo token in Self-hosted",
- " --report-code TEXT The code of the report. If unsure, leave",
+ " --code, --report-code TEXT The code of the report. If unsure, leave",
" default",
" --network-root-folder PATH Root folder from which to consider paths on",
" the network section [default: (Current",
" working directory)]",
- " -s, --dir, --coverage-files-search-root-folder PATH",
+ " -s, --dir, --coverage-files-search-root-folder, --files-search-root-folder PATH",
" Folder where to search for coverage files",
" [default: (Current Working Directory)]",
- " --exclude, --coverage-files-search-exclude-folder PATH",
+ " --exclude, --coverage-files-search-exclude-folder, --files-search-exclude-folder PATH",
" Folders to exclude from search",
- " -f, --file, --coverage-files-search-direct-file PATH",
+ " -f, --file, --coverage-files-search-direct-file, --files-search-direct-file PATH",
" Explicit files to upload. These will be added",
" to the coverage files found for upload. If you",
" wish to only upload the specified files,",
@@ -91,7 +93,7 @@ def test_upload_process_options(mocker):
" disable uploading other files.",
" --disable-search Disable search for coverage files. This is",
" helpful when specifying what files you want to",
- " uload with the --file option.",
+ " upload with the --file option.",
" --disable-file-fixes Disable file fixes to ignore common lines from",
" coverage (e.g. blank lines or empty brackets)",
" -b, --build, --build-code TEXT Specify the build number manually",
@@ -112,8 +114,25 @@ def test_upload_process_options(mocker):
" -d, --dry-run Don't upload files to Codecov",
" --legacy, --use-legacy-uploader",
" Use the legacy upload endpoint",
- " --handle-no-reports-found Raise no excpetions when no coverage reports",
+ " --handle-no-reports-found Raise no exceptions when no coverage reports",
" found.",
+ " --report-type [coverage|test_results]",
+ " The type of the file to upload, coverage by",
+ " default. Possible values are: testing,",
+ " coverage.",
+ " --network-filter TEXT Specify a filter on the files listed in the",
+ " network section of the Codecov report. This",
+ " will only add files whose path begin with the",
+ " specified filter. Useful for upload-specific",
+ " path fixing",
+ " --network-prefix TEXT Specify a prefix on files listed in the",
+ " network section of the Codecov report. Useful",
+ " to help resolve path fixing",
+ " --gcov-args TEXT Extra arguments to pass to gcov",
+ " --gcov-ignore TEXT Paths to ignore during gcov gathering",
+ " --gcov-include TEXT Paths to include during gcov gathering",
+ " --gcov-executable TEXT gcov executable to run. Defaults to 'gcov'",
+ " --swift-project TEXT Specify the swift project",
" --parent-sha TEXT SHA (with 40 chars) of what should be the",
" parent of this commit",
" -h, --help Show this message and exit.",
diff --git a/tests/commands/test_process_test_results.py b/tests/commands/test_process_test_results.py
new file mode 100644
index 00000000..ea9faaba
--- /dev/null
+++ b/tests/commands/test_process_test_results.py
@@ -0,0 +1,324 @@
+import json
+import os
+
+from click.testing import CliRunner
+
+from codecov_cli.main import cli
+from codecov_cli.types import RequestResult
+
+
+def test_process_test_results(
+ mocker,
+ tmpdir,
+):
+ _ = tmpdir.mkdir("folder").join("summary.txt")
+
+ mocker.patch.dict(
+ os.environ,
+ {
+ "GITHUB_REPOSITORY": "fake/repo",
+ "GITHUB_REF": "pull/fake/pull",
+ },
+ )
+ _ = mocker.patch(
+ "codecov_cli.commands.process_test_results.send_post_request",
+ return_value=RequestResult(
+ status_code=200, error=None, warnings=[], text="yay it worked"
+ ),
+ )
+ runner = CliRunner()
+ result = runner.invoke(
+ cli,
+ [
+ "process-test-results",
+ "--file",
+ "samples/junit.xml",
+ "--disable-search",
+ ],
+ obj={},
+ )
+
+ assert result.exit_code == 0
+ # Ensure that there's an output
+ assert result.output
+
+
+def test_process_test_results_create_github_message(
+ mocker,
+ tmpdir,
+):
+ _ = tmpdir.mkdir("folder").join("summary.txt")
+
+ mocker.patch.dict(
+ os.environ,
+ {
+ "GITHUB_REPOSITORY": "fake/repo",
+ "GITHUB_REF": "pull/fake/123",
+ },
+ )
+
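+    # The comment listing returns "[]", so no existing bot comment is found and a new one should be created (create URL asserted below)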
+ mocker.patch(
+ "codecov_cli.commands.process_test_results.send_get_request",
+ return_value=RequestResult(status_code=200, error=None, warnings=[], text="[]"),
+ )
+
+ mocked_post = mocker.patch(
+ "codecov_cli.commands.process_test_results.send_post_request",
+ return_value=RequestResult(
+ status_code=200, error=None, warnings=[], text="yay it worked"
+ ),
+ )
+ runner = CliRunner()
+ result = runner.invoke(
+ cli,
+ [
+ "process-test-results",
+ "--github-token",
+ "fake-token",
+ "--file",
+ "samples/junit.xml",
+ "--disable-search",
+ ],
+ obj={},
+ )
+
+ assert result.exit_code == 0
+ assert (
+ mocked_post.call_args.kwargs["url"]
+ == "https://api.github.com/repos/fake/repo/issues/123/comments"
+ )
+
+
+def test_process_test_results_update_github_message(
+ mocker,
+ tmpdir,
+):
+ _ = tmpdir.mkdir("folder").join("summary.txt")
+
+ mocker.patch.dict(
+ os.environ,
+ {
+ "GITHUB_REPOSITORY": "fake/repo",
+ "GITHUB_REF": "pull/fake/123",
+ },
+ )
+
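+    # Two mocked comment-list responses: a regular user's comment, then the github-actions bot comment (id 12345) that the command is expected to update in place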
+ github_fake_comments1 = [
+ {"id": 54321, "user": {"login": "fake"}, "body": "some text"},
+ ]
+ github_fake_comments2 = [
+ {
+ "id": 12345,
+ "user": {"login": "github-actions[bot]"},
+ "body": " and some other fake body",
+ },
+ ]
+
+ mocker.patch(
+ "codecov_cli.commands.process_test_results.send_get_request",
+ side_effect=[
+ RequestResult(
+ status_code=200,
+ error=None,
+ warnings=[],
+ text=json.dumps(github_fake_comments1),
+ ),
+ RequestResult(
+ status_code=200,
+ error=None,
+ warnings=[],
+ text=json.dumps(github_fake_comments2),
+ ),
+ ],
+ )
+
+ mocked_post = mocker.patch(
+ "codecov_cli.commands.process_test_results.send_post_request",
+ return_value=RequestResult(
+ status_code=200, error=None, warnings=[], text="yay it worked"
+ ),
+ )
+ runner = CliRunner()
+ result = runner.invoke(
+ cli,
+ [
+ "process-test-results",
+ "--github-token",
+ "fake-token",
+ "--file",
+ "samples/junit.xml",
+ "--disable-search",
+ ],
+ obj={},
+ )
+
+ assert result.exit_code == 0
+ assert (
+ mocked_post.call_args.kwargs["url"]
+ == "https://api.github.com/repos/fake/repo/issues/comments/12345"
+ )
+
+
+def test_process_test_results_errors_getting_comments(
+ mocker,
+ tmpdir,
+):
+ _ = tmpdir.mkdir("folder").join("summary.txt")
+
+ mocker.patch.dict(
+ os.environ,
+ {
+ "GITHUB_REPOSITORY": "fake/repo",
+ "GITHUB_REF": "pull/fake/123",
+ },
+ )
+
+ mocker.patch(
+ "codecov_cli.commands.process_test_results.send_get_request",
+ return_value=RequestResult(
+ status_code=400,
+ error=None,
+ warnings=[],
+ text="",
+ ),
+ )
+
+ _ = mocker.patch(
+ "codecov_cli.commands.process_test_results.send_post_request",
+ return_value=RequestResult(
+ status_code=200, error=None, warnings=[], text="yay it worked"
+ ),
+ )
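+    # With the comment listing failing (400 above), the command is expected to exit with an error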
+ runner = CliRunner()
+ result = runner.invoke(
+ cli,
+ [
+ "process-test-results",
+ "--github-token",
+ "fake-token",
+ "--file",
+ "samples/junit.xml",
+ "--disable-search",
+ ],
+ obj={},
+ )
+
+ assert result.exit_code == 1
+
+
+def test_process_test_results_non_existent_file(mocker, tmpdir):
+ _ = tmpdir.mkdir("folder").join("summary.txt")
+
+ mocker.patch.dict(
+ os.environ,
+ {
+ "GITHUB_REPOSITORY": "fake/repo",
+ "GITHUB_REF": "pull/fake/pull",
+ },
+ )
+ _ = mocker.patch(
+ "codecov_cli.commands.process_test_results.send_post_request",
+ return_value=RequestResult(
+ status_code=200, error=None, warnings=[], text="yay it worked"
+ ),
+ )
+ runner = CliRunner()
+ result = runner.invoke(
+ cli,
+ [
+ "process-test-results",
+ "--file",
+ "samples/fake.xml",
+ "--disable-search",
+ ],
+ obj={},
+ )
+
+ assert result.exit_code == 1
+ expected_logs = [
+ "ci service found",
+ "Some files were not found",
+ ]
+ for log in expected_logs:
+ assert log in result.output
+
+
+def test_process_test_results_missing_repo(mocker, tmpdir):
+ _ = tmpdir.mkdir("folder").join("summary.txt")
+
+ mocker.patch.dict(
+ os.environ,
+ {
+ "GITHUB_REF": "pull/fake/pull",
+ },
+ )
+ if "GITHUB_REPOSITORY" in os.environ:
+ del os.environ["GITHUB_REPOSITORY"]
+ _ = mocker.patch(
+ "codecov_cli.commands.process_test_results.send_post_request",
+ return_value=RequestResult(
+ status_code=200, error=None, warnings=[], text="yay it worked"
+ ),
+ )
+ runner = CliRunner()
+ result = runner.invoke(
+ cli,
+ [
+ "process-test-results",
+ "--github-token",
+ "whatever",
+ "--file",
+ "samples/junit.xml",
+ "--disable-search",
+ ],
+ obj={},
+ )
+
+ assert result.exit_code == 1
+ expected_logs = [
+ "ci service found",
+ "Error: Error getting repo slug from environment. Can't find GITHUB_REPOSITORY environment variable.",
+ ]
+ for log in expected_logs:
+ assert log in result.output
+
+
+def test_process_test_results_missing_ref(mocker, tmpdir):
+ _ = tmpdir.mkdir("folder").join("summary.txt")
+
+ mocker.patch.dict(
+ os.environ,
+ {
+ "GITHUB_REPOSITORY": "fake/repo",
+ },
+ )
+
+ if "GITHUB_REF" in os.environ:
+ del os.environ["GITHUB_REF"]
+ _ = mocker.patch(
+ "codecov_cli.commands.process_test_results.send_post_request",
+ return_value=RequestResult(
+ status_code=200, error=None, warnings=[], text="yay it worked"
+ ),
+ )
+ runner = CliRunner()
+ result = runner.invoke(
+ cli,
+ [
+ "process-test-results",
+ "--github-token",
+ "whatever",
+ "--file",
+ "samples/junit.xml",
+ "--disable-search",
+ ],
+ obj={},
+ )
+
+ assert result.exit_code == 1
+ expected_logs = [
+ "ci service found",
+ "Error: Error getting PR number from environment. Can't find GITHUB_REF environment variable.",
+ ]
+ for log in expected_logs:
+ assert log in result.output
diff --git a/tests/commands/test_upload_token_discovery.py b/tests/commands/test_upload_token_discovery.py
new file mode 100644
index 00000000..5080d567
--- /dev/null
+++ b/tests/commands/test_upload_token_discovery.py
@@ -0,0 +1,46 @@
+"""Tests ensuring that an env-provided token can be found."""
+
+from pathlib import Path
+from textwrap import dedent as _dedent_text_block
+
+from click.testing import CliRunner
+from pytest import MonkeyPatch
+from pytest_mock import MockerFixture
+
+from codecov_cli.commands import upload
+from codecov_cli.main import cli
+
+
+def test_no_cli_token_config_fallback(
+ mocker: MockerFixture,
+ monkeypatch: MonkeyPatch,
+ tmp_path: Path,
+) -> None:
+ """Test that a config-stored token is used with no CLI argument."""
+    # NOTE: pytest's `caplog` fixture is not used in this test as it
+ # NOTE: doesn't play well with Click's testing CLI runner, and does
+ # NOTE: not capture any log entries for mysterious reasons.
+ #
+ # Refs:
+ # * https://github.com/pallets/click/issues/2573#issuecomment-1649773563
+ # * https://github.com/pallets/click/issues/1763#issuecomment-767687608
+ (tmp_path / ".codecov.yml").write_text(
+ _dedent_text_block(
+ """
+ ---
+
+ codecov:
+ token: sentinel-value
+
+ ...
+ """
+ )
+ )
+ monkeypatch.chdir(tmp_path)
+
+ mocker.patch.object(upload, "do_upload_logic")
+ do_upload_cmd_spy = mocker.spy(upload, "do_upload_logic")
+
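+    # Invoking without --token should fall back to the token stored in the .codecov.yml written above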
+ CliRunner().invoke(cli, ["do-upload", "--commit-sha=deadbeef"], obj={})
+
+ assert do_upload_cmd_spy.call_args[-1]["token"] == "sentinel-value"
diff --git a/tests/data/files_to_fix_examples/sample.kt b/tests/data/files_to_fix_examples/sample.kt
index df8d524f..6793cb59 100644
--- a/tests/data/files_to_fix_examples/sample.kt
+++ b/tests/data/files_to_fix_examples/sample.kt
@@ -13,6 +13,9 @@ secnod line
should fix previous
}
+data class Key(
+ val key: String? = null
+)
/*
diff --git a/tests/data/reports_examples.py b/tests/data/reports_examples.py
index 0450a1dd..9c638e43 100644
--- a/tests/data/reports_examples.py
+++ b/tests/data/reports_examples.py
@@ -2,7 +2,6 @@
# Avoid parsing and removing indentation from multiline strings by defining them in the top level of this file
-
coverage_file_section_simple = b"""# path=flagtwo.coverage.xml
diff --git a/tests/helpers/git_services/test_github.py b/tests/helpers/git_services/test_github.py
new file mode 100644
index 00000000..b77ad832
--- /dev/null
+++ b/tests/helpers/git_services/test_github.py
@@ -0,0 +1,74 @@
+import json
+
+import pytest
+import requests
+from requests import Response
+
+from codecov_cli.helpers import git
+from codecov_cli.helpers.git_services.github import Github
+
+
+def test_get_pull_request(mocker):
+ def mock_request(*args, headers={}, **kwargs):
+ assert headers["X-GitHub-Api-Version"] == "2022-11-28"
+ res = {
+ "url": "https://api.github.com/repos/codecov/codecov-cli/pulls/1",
+ "head": {
+ "sha": "123",
+ "label": "codecov-cli:branch",
+ "ref": "branch",
+ "repo": {"full_name": "user_forked_repo/codecov-cli"},
+ },
+ "base": {
+ "sha": "123",
+ "label": "codecov-cli:main",
+ "ref": "main",
+ "repo": {"full_name": "codecov/codecov-cli"},
+ },
+ }
+ response = Response()
+ response.status_code = 200
+ response._content = json.dumps(res).encode("utf-8")
+ return response
+
+ mocker.patch.object(
+ requests,
+ "get",
+ side_effect=mock_request,
+ )
+ slug = "codecov/codecov-cli"
+ response = Github().get_pull_request(slug, 1)
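+    # The nested repo "full_name" values are expected to be flattened into "slug" keys in the returned dict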
+ assert response == {
+ "url": "https://api.github.com/repos/codecov/codecov-cli/pulls/1",
+ "head": {
+ "sha": "123",
+ "label": "codecov-cli:branch",
+ "ref": "branch",
+ "slug": "user_forked_repo/codecov-cli",
+ },
+ "base": {
+ "sha": "123",
+ "label": "codecov-cli:main",
+ "ref": "main",
+ "slug": "codecov/codecov-cli",
+ },
+ }
+
+
+def test_get_pull_request_404(mocker):
+ def mock_request(*args, headers={}, **kwargs):
+ assert headers["X-GitHub-Api-Version"] == "2022-11-28"
+ res = {}
+ response = Response()
+ response.status_code = 404
+ response._content = json.dumps(res).encode("utf-8")
+ return response
+
+ mocker.patch.object(
+ requests,
+ "get",
+ side_effect=mock_request,
+ )
+ slug = "codecov/codecov-cli"
+ response = Github().get_pull_request(slug, 1)
+ assert response is None
diff --git a/tests/helpers/test_args.py b/tests/helpers/test_args.py
new file mode 100644
index 00000000..503b877e
--- /dev/null
+++ b/tests/helpers/test_args.py
@@ -0,0 +1,51 @@
+import os
+from pathlib import PosixPath
+
+import click
+
+from codecov_cli import __version__
+from codecov_cli.helpers.args import get_cli_args
+
+
+def test_get_cli_args():
+ ctx = click.Context(click.Command("do-upload"))
+ ctx.obj = {}
+ ctx.obj["cli_args"] = {
+ "verbose": True,
+ }
+ ctx.params = {
+ "branch": "fake_branch",
+ "token": "fakeTOKEN",
+ }
+
+ expected = {
+ "branch": "fake_branch",
+ "command": "do-upload",
+ "verbose": True,
+ "version": f"cli-{__version__}",
+ }
+
+ assert get_cli_args(ctx) == expected
+
+
+def test_get_cli_args_with_posix():
+ ctx = click.Context(click.Command("do-upload"))
+ ctx.obj = {}
+ ctx.obj["cli_args"] = {
+ "verbose": True,
+ }
+ ctx.params = {
+ "branch": "fake_branch",
+ "path": PosixPath(os.getcwd()),
+ "token": "fakeTOKEN",
+ }
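+    # PosixPath values are expected to be serialized to plain strings in the resulting payload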
+
+ expected = {
+ "branch": "fake_branch",
+ "command": "do-upload",
+ "path": str(PosixPath(os.getcwd())),
+ "verbose": True,
+ "version": f"cli-{__version__}",
+ }
+
+ assert get_cli_args(ctx) == expected
diff --git a/tests/helpers/test_config.py b/tests/helpers/test_config.py
index 487f3975..c3d8d329 100644
--- a/tests/helpers/test_config.py
+++ b/tests/helpers/test_config.py
@@ -1,6 +1,7 @@
import pathlib
+from unittest.mock import Mock
-from codecov_cli.helpers.config import load_cli_config
+from codecov_cli.helpers.config import _find_codecov_yamls, load_cli_config
def test_load_config(mocker):
@@ -14,10 +15,52 @@ def test_load_config(mocker):
def test_load_config_doesnt_exist(mocker):
path = pathlib.Path("doesnt/exist")
result = load_cli_config(path)
- assert result == None
+ assert result is None
def test_load_config_not_file(mocker):
path = pathlib.Path("samples/")
result = load_cli_config(path)
- assert result == None
+ assert result is None
+
+
+def test_find_codecov_yaml(mocker):
+ fake_project_root = pathlib.Path.cwd() / "samples" / "fake_project"
+
+ mock_vcs = Mock()
+ mock_vcs.get_network_root.return_value = fake_project_root
+ mocker.patch(
+ "codecov_cli.helpers.config.get_versioning_system", return_value=mock_vcs
+ )
+
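+    # Discovery should cover the project root plus its .github/ and dev/ subfolders, in all four codecov yaml filename variants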
+ expected_yamls = [
+ fake_project_root / "codecov.yaml",
+ fake_project_root / ".codecov.yaml",
+ fake_project_root / "codecov.yml",
+ fake_project_root / ".codecov.yml",
+ fake_project_root / ".github" / "codecov.yaml",
+ fake_project_root / ".github" / ".codecov.yaml",
+ fake_project_root / ".github" / "codecov.yml",
+ fake_project_root / ".github" / ".codecov.yml",
+ fake_project_root / "dev" / "codecov.yaml",
+ fake_project_root / "dev" / ".codecov.yaml",
+ fake_project_root / "dev" / "codecov.yml",
+ fake_project_root / "dev" / ".codecov.yml",
+ ]
+
+ assert sorted(_find_codecov_yamls()) == sorted(expected_yamls)
+
+
+def test_load_config_finds_yaml(mocker):
+ fake_project_root = pathlib.Path.cwd() / "samples" / "fake_project"
+
+ mock_vcs = Mock()
+ mock_vcs.get_network_root.return_value = fake_project_root
+ mocker.patch(
+ "codecov_cli.helpers.config.get_versioning_system", return_value=mock_vcs
+ )
+
+ result = load_cli_config(None)
+ assert result == {
+ "runners": {"python": {"collect_tests_options": ["--ignore", "batata"]}}
+ }
diff --git a/tests/helpers/test_encoder.py b/tests/helpers/test_encoder.py
index beb6c1a2..cc185487 100644
--- a/tests/helpers/test_encoder.py
+++ b/tests/helpers/test_encoder.py
@@ -1,6 +1,11 @@
import pytest
-from codecov_cli.helpers.encoder import encode_slug, slug_without_subgroups_is_invalid
+from codecov_cli.helpers.encoder import (
+ decode_slug,
+ encode_slug,
+ slug_encoded_incorrectly,
+ slug_without_subgroups_is_invalid,
+)
@pytest.mark.parametrize(
@@ -16,7 +21,7 @@
],
)
def test_encode_invalid_slug(slug):
- with pytest.raises(ValueError) as ex:
+ with pytest.raises(ValueError):
encode_slug(slug)
@@ -53,3 +58,47 @@ def test_invalid_slug(slug):
def test_valid_slug():
slug = "owner/repo"
assert not slug_without_subgroups_is_invalid(slug)
+
+
+@pytest.mark.parametrize(
+ "slug",
+ [
+ ("invalid_slug"),
+ (""),
+ (":"),
+ (":::"),
+ ("::::"),
+ ("random string"),
+ ("owner:::subgroup:::repo"),
+ ("owner:::repo"),
+ ("owner::::subgroup::::repo"),
+ (None),
+ ],
+)
+def test_invalid_encoded_slug(slug):
+ assert slug_encoded_incorrectly(slug)
+ with pytest.raises(ValueError):
+ decode_slug(slug)
+
+
+@pytest.mark.parametrize(
+ "encoded_slug",
+ [
+ ("owner::::repo"),
+ ("owner:::subgroup::::repo"),
+ ],
+)
+def test_valid_encoded_slug(encoded_slug):
+ assert not slug_encoded_incorrectly(encoded_slug)
+
+
+@pytest.mark.parametrize(
+ "encoded_slug, decoded_slug",
+ [
+ ("owner::::repo", "owner/repo"),
+ ("owner:::subgroup::::repo", "owner/subgroup/repo"),
+ ],
+)
+def test_decode_slug(encoded_slug, decoded_slug):
+ expected_encoded_slug = decode_slug(encoded_slug)
+ assert expected_encoded_slug == decoded_slug
diff --git a/tests/helpers/test_folder_searcher.py b/tests/helpers/test_folder_searcher.py
index 3523dbee..a6ab9895 100644
--- a/tests/helpers/test_folder_searcher.py
+++ b/tests/helpers/test_folder_searcher.py
@@ -43,6 +43,7 @@ def test_search_files_with_folder_exclusion(tmp_path):
"another/some/banana.py",
"from/some/banana.py",
"to/some/banana.py",
+ "path/folder with space/banana.py",
"apple.py",
"banana.py",
]
@@ -56,6 +57,7 @@ def test_search_files_with_folder_exclusion(tmp_path):
tmp_path / "banana.py",
tmp_path / "from/some/banana.py",
tmp_path / "another/some/banana.py",
+ tmp_path / "path/folder with space/banana.py",
]
)
assert expected_results == sorted(
diff --git a/tests/helpers/test_git.py b/tests/helpers/test_git.py
index ab3d0407..77213c19 100644
--- a/tests/helpers/test_git.py
+++ b/tests/helpers/test_git.py
@@ -1,6 +1,11 @@
+import json
+
import pytest
+import requests
+from requests import Response
from codecov_cli.helpers import git
+from codecov_cli.helpers.git_services.github import Github
@pytest.mark.parametrize(
@@ -48,6 +53,7 @@
("ssh://host.abc.xz/owner/repo.git", "owner/repo"),
("user-name@host.xz:owner/repo.git/", "owner/repo"),
("host.xz:owner/repo.git/", "owner/repo"),
+ ("ssh://git@github.com/gitcodecov/codecov-cli", "gitcodecov/codecov-cli"),
],
)
def test_parse_slug_valid_address(address, slug):
@@ -102,6 +108,8 @@ def test_parse_slug_invalid_address(address):
"bitbucket",
),
("git@bitbucket.org:name-codecov/abc.git.git", "bitbucket"),
+ ("ssh://git@github.com/gitcodecov/codecov-cli", "github"),
+ ("ssh://git@github.com:gitcodecov/codecov-cli", "github"),
],
)
def test_parse_git_service_valid_address(address, git_service):
@@ -119,3 +127,9 @@ def test_parse_git_service_valid_address(address, git_service):
)
def test_parse_git_service_invalid_service(url):
assert git.parse_git_service(url) is None
+
+
+def test_get_git_service_class():
+ assert isinstance(git.get_git_service("github"), Github)
+ assert git.get_git_service("gitlab") is None
+ assert git.get_git_service("bitbucket") is None
diff --git a/tests/helpers/test_legacy_upload_sender.py b/tests/helpers/test_legacy_upload_sender.py
index 405f87e0..beb4c79c 100644
--- a/tests/helpers/test_legacy_upload_sender.py
+++ b/tests/helpers/test_legacy_upload_sender.py
@@ -1,4 +1,3 @@
-import uuid
from urllib import parse
import pytest
@@ -11,7 +10,7 @@
from tests.data import reports_examples
upload_collection = UploadCollectionResult(["1", "apple.py", "3"], [], [])
-random_token = uuid.UUID("f359afb9-8a2a-42ab-a448-c3d267ff495b")
+random_token = "f359afb9-8a2a-42ab-a448-c3d267ff495b"
random_sha = "845548c6b95223f12e8317a1820705f64beaf69e"
named_upload_data = {
"name": "name",
@@ -68,7 +67,7 @@ class TestUploadSender(object):
def test_upload_sender_post_called_with_right_parameters(
self, mocked_responses, mocked_legacy_upload_endpoint, mocked_storage_server
):
- headers = {"X-Upload-Token": random_token.hex}
+ headers = {"X-Upload-Token": random_token}
params = {
"package": f"codecov-cli/{codecov_cli_version}",
"commit": random_sha,
@@ -167,7 +166,11 @@ def test_upload_sender_http_error_with_invalid_sha(
mocked_legacy_upload_endpoint.status = 400
sender = LegacyUploadSender().send_upload_data(
- upload_collection, random_sha, random_token, {}, **named_upload_data
+ upload_collection,
+ random_sha,
+ random_token,
+ {},
+ **named_upload_data,
)
assert sender.error is not None
@@ -280,7 +283,6 @@ def test_format_coverage_file(self, mocker):
)
def test_generate_coverage_files_section(self, mocker):
-
mocker.patch(
"codecov_cli.services.upload.LegacyUploadSender._format_coverage_file",
side_effect=lambda file_bytes: file_bytes,
diff --git a/tests/helpers/test_network_finder.py b/tests/helpers/test_network_finder.py
index b7881812..afbb08c2 100644
--- a/tests/helpers/test_network_finder.py
+++ b/tests/helpers/test_network_finder.py
@@ -6,11 +6,121 @@
def test_find_files(mocker, tmp_path):
+ filenames = ["a.txt", "b.txt"]
+ filtered_filenames = []
- expected_filenames = ["a.txt", "b.txt"]
+ mocked_vs = MagicMock()
+ mocked_vs.list_relevant_files.return_value = filenames
+
+ assert (
+ NetworkFinder(
+ versioning_system=mocked_vs,
+ network_filter=None,
+ network_prefix=None,
+ network_root_folder=tmp_path,
+ ).find_files()
+ == filenames
+ )
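+    # The "hello" filter matches none of the plain filenames, so find_files(False) is empty; passing True is expected to bypass the filter/prefix entirely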
+ assert (
+ NetworkFinder(
+ versioning_system=mocked_vs,
+ network_filter="hello",
+ network_prefix="bello",
+ network_root_folder=tmp_path,
+ ).find_files(False)
+ == filtered_filenames
+ )
+ assert (
+ NetworkFinder(
+ versioning_system=mocked_vs,
+ network_filter="hello",
+ network_prefix="bello",
+ network_root_folder=tmp_path,
+ ).find_files(True)
+ == filenames
+ )
+ mocked_vs.list_relevant_files.assert_called_with(tmp_path)
+
+
+def test_find_files_with_filter(mocker, tmp_path):
+ filenames = ["hello/a.txt", "hello/c.txt", "bello/b.txt"]
+ filtered_filenames = ["hello/a.txt", "hello/c.txt"]
+
+ mocked_vs = MagicMock()
+ mocked_vs.list_relevant_files.return_value = filenames
+
+ assert (
+ NetworkFinder(
+ versioning_system=mocked_vs,
+ network_filter="hello",
+ network_prefix=None,
+ network_root_folder=tmp_path,
+ ).find_files()
+ == filtered_filenames
+ )
+ assert (
+ NetworkFinder(
+ versioning_system=mocked_vs,
+ network_filter="hello",
+ network_prefix="bello",
+ network_root_folder=tmp_path,
+ ).find_files(True)
+ == filenames
+ )
+ mocked_vs.list_relevant_files.assert_called_with(tmp_path)
+
+
+def test_find_files_with_prefix(mocker, tmp_path):
+ filenames = ["hello/a.txt", "hello/c.txt", "bello/b.txt"]
+ filtered_filenames = ["hellohello/a.txt", "hellohello/c.txt", "hellobello/b.txt"]
+
+ mocked_vs = MagicMock()
+ mocked_vs.list_relevant_files.return_value = filenames
+
+ assert (
+ NetworkFinder(
+ versioning_system=mocked_vs,
+ network_filter=None,
+ network_prefix="hello",
+ network_root_folder=tmp_path,
+ ).find_files()
+ == filtered_filenames
+ )
+ assert (
+ NetworkFinder(
+ versioning_system=mocked_vs,
+ network_filter="hello",
+ network_prefix="bello",
+ network_root_folder=tmp_path,
+ ).find_files(True)
+ == filenames
+ )
+ mocked_vs.list_relevant_files.assert_called_with(tmp_path)
+
+
+def test_find_files_with_filter_and_prefix(mocker, tmp_path):
+ filenames = ["hello/a.txt", "hello/c.txt", "bello/b.txt"]
+ filtered_filenames = ["bellohello/a.txt", "bellohello/c.txt"]
mocked_vs = MagicMock()
- mocked_vs.list_relevant_files.return_value = expected_filenames
+ mocked_vs.list_relevant_files.return_value = filenames
- assert NetworkFinder(mocked_vs).find_files(tmp_path) == expected_filenames
+ assert (
+ NetworkFinder(
+ versioning_system=mocked_vs,
+ network_filter="hello",
+ network_prefix="bello",
+ network_root_folder=tmp_path,
+ ).find_files()
+ == filtered_filenames
+ )
+ assert (
+ NetworkFinder(
+ versioning_system=mocked_vs,
+ network_filter="hello",
+ network_prefix="bello",
+ network_root_folder=tmp_path,
+ ).find_files(True)
+ == filenames
+ )
mocked_vs.list_relevant_files.assert_called_with(tmp_path)
diff --git a/tests/helpers/test_request.py b/tests/helpers/test_request.py
index 516c29a3..7069068b 100644
--- a/tests/helpers/test_request.py
+++ b/tests/helpers/test_request.py
@@ -7,6 +7,7 @@
from codecov_cli import __version__
from codecov_cli.helpers.request import (
get,
+ get_token_header,
get_token_header_or_fail,
log_warnings_and_errors_if_any,
)
@@ -37,7 +38,7 @@ def test_log_error_no_raise(mocker):
error=error, warnings=[], status_code=401, text="Unauthorized"
)
log_warnings_and_errors_if_any(result, "Process", fail_on_error=False)
- mock_log_error.assert_called_with(f"Process failed: Unauthorized")
+ mock_log_error.assert_called_with("Process failed: Unauthorized")
def test_log_error_raise(mocker):
@@ -50,14 +51,50 @@ def test_log_error_raise(mocker):
)
with pytest.raises(SystemExit):
log_warnings_and_errors_if_any(result, "Process", fail_on_error=True)
- mock_log_error.assert_called_with(f"Process failed: Unauthorized")
+ mock_log_error.assert_called_with("Process failed: Unauthorized")
+
+
+def test_log_result_without_token(mocker):
+ mock_log_debug = mocker.patch.object(req_log, "debug")
+ result = RequestResult(
+ error=None,
+ warnings=[],
+ status_code=201,
+ text='{"message":"commit","timestamp":"2024-03-25T15:41:07Z","ci_passed":true,"state":"complete","repository":{"name":"repo","is_private":false,"active":true,"language":"python","yaml":null},"author":{"avatar_url":"https://example.com","service":"github","username":null,"name":"dependabot[bot]","ownerid":2780265},"commitid":"commit","parent_commit_id":"parent","pullid":1,"branch":"main"}',
+ )
+ log_warnings_and_errors_if_any(result, "Commit creating", False)
+ mock_log_debug.assert_called_with(
+ "Commit creating result", extra={"extra_log_attributes": {"result": result}}
+ )
+
+
+def test_log_result_with_token(mocker):
+ mock_log_debug = mocker.patch.object(req_log, "debug")
+ result = RequestResult(
+ error=None,
+ warnings=[],
+ status_code=201,
+ text='{"message": "commit", "timestamp": "2024-07-16T20:51:07Z", "ci_passed": true, "state": "complete", "repository": {"name": "repo", "is_private": false, "active": true, "language": "python", "yaml": {"codecov": {"token": "faketoken"}}, "author": {"avatar_url": "https://example.com", "service": "github", "username": "author", "name": "author", "ownerid": 3461769}, "commitid": "commit", "parent_commit_id": "parent_commit", "pullid": null, "branch": "main"}}',
+ )
+
+ expected_text = '{"message": "commit", "timestamp": "2024-07-16T20:51:07Z", "ci_passed": true, "state": "complete", "repository": {"name": "repo", "is_private": false, "active": true, "language": "python", "yaml": {"codecov": {"token": "f******************"}}, "author": {"avatar_url": "https://example.com", "service": "github", "username": "author", "name": "author", "ownerid": 3461769}, "commitid": "commit", "parent_commit_id": "parent_commit", "pullid": null, "branch": "main"}}'
+ expected = RequestResult(
+ error=None,
+ warnings=[],
+ status_code=201,
+ text=expected_text,
+ )
+ log_warnings_and_errors_if_any(result, "Commit creating", False)
+ mock_log_debug.assert_called_with(
+ "Commit creating result", extra={"extra_log_attributes": {"result": expected}}
+ )
def test_get_token_header_or_fail():
# Test with a valid UUID token
token = uuid.uuid4()
result = get_token_header_or_fail(token)
- assert result == {"Authorization": f"token {token.hex}"}
+ assert result == {"Authorization": f"token {str(token)}"}
# Test with a None token
token = None
@@ -69,12 +106,17 @@ def test_get_token_header_or_fail():
== "Codecov token not found. Please provide Codecov token with -t flag."
)
- # Test with an invalid token type
- token = "invalid_token"
- with pytest.raises(Exception) as e:
- get_token_header_or_fail(token)
- assert str(e.value) == f"Token must be UUID. Received {type(token)}"
+def test_get_token_header():
+ # Test with a valid UUID token
+ token = uuid.uuid4()
+ result = get_token_header(token)
+ assert result == {"Authorization": f"token {str(token)}"}
+
+ # Test with a None token
+ token = None
+ result = get_token_header(token)
+ assert result is None
def test_request_retry(mocker, valid_response):
@@ -95,7 +137,7 @@ def test_request_retry(mocker, valid_response):
def test_request_retry_too_many_errors(mocker):
- mock_sleep = mocker.patch("codecov_cli.helpers.request.sleep")
+ _ = mocker.patch("codecov_cli.helpers.request.sleep")
mocker.patch.object(
requests,
"post",
@@ -108,7 +150,7 @@ def test_request_retry_too_many_errors(mocker):
],
)
with pytest.raises(Exception) as exp:
- resp = send_post_request("my_url")
+ _ = send_post_request("my_url")
assert str(exp.value) == "Request failed after too many retries"
diff --git a/tests/helpers/test_upload_sender.py b/tests/helpers/test_upload_sender.py
index 26fa5511..1e164ef5 100644
--- a/tests/helpers/test_upload_sender.py
+++ b/tests/helpers/test_upload_sender.py
@@ -1,5 +1,5 @@
import json
-import uuid
+import re
from pathlib import Path
import pytest
@@ -13,9 +13,26 @@
from tests.data import reports_examples
upload_collection = UploadCollectionResult(["1", "apple.py", "3"], [], [])
-random_token = uuid.UUID("f359afb9-8a2a-42ab-a448-c3d267ff495b")
+random_token = "f359afb9-8a2a-42ab-a448-c3d267ff495b"
random_sha = "845548c6b95223f12e8317a1820705f64beaf69e"
named_upload_data = {
+ "args": None,
+ "upload_file_type": "coverage",
+ "report_code": "report_code",
+ "env_vars": {},
+ "name": "name",
+ "branch": "branch",
+ "slug": "org/repo",
+ "pull_request_number": "pr",
+ "build_code": "build_code",
+ "build_url": "build_url",
+ "job_code": "job_code",
+ "flags": "flags",
+ "ci_service": "ci_service",
+ "git_service": "github",
+}
+test_results_named_upload_data = {
+ "upload_file_type": "test_results",
"report_code": "report_code",
"env_vars": {},
"name": "name",
@@ -30,7 +47,9 @@
"git_service": "github",
}
request_data = {
+ "ci_service": "ci_service",
"ci_url": "build_url",
+ "cli_args": None,
"env": {},
"flags": "flags",
"job_code": "job_code",
@@ -50,7 +69,7 @@ def mocked_legacy_upload_endpoint(mocked_responses):
encoded_slug = encode_slug(named_upload_data["slug"])
resp = responses.Response(
responses.POST,
- f"https://api.codecov.io/upload/github/{encoded_slug}/commits/{random_sha}/reports/{named_upload_data['report_code']}/uploads",
+ f"https://ingest.codecov.io/upload/github/{encoded_slug}/commits/{random_sha}/reports/{named_upload_data['report_code']}/uploads",
status=200,
json={
"raw_upload_location": "https://puturl.com",
@@ -61,6 +80,36 @@ def mocked_legacy_upload_endpoint(mocked_responses):
yield resp
+@pytest.fixture
+def mocked_upload_coverage_endpoint(mocked_responses):
+ encoded_slug = encode_slug(named_upload_data["slug"])
+ resp = responses.Response(
+ responses.POST,
+ f"https://ingest.codecov.io/upload/github/{encoded_slug}/upload-coverage",
+ status=200,
+ json={
+ "raw_upload_location": "https://puturl.com",
+ "url": "https://app.codecov.io/commit-url",
+ },
+ )
+ mocked_responses.add(resp)
+ yield resp
+
+
+@pytest.fixture
+def mocked_test_results_endpoint(mocked_responses):
+ resp = responses.Response(
+ responses.POST,
+ "https://ingest.codecov.io/upload/test_results/v1",
+ status=200,
+ json={
+ "raw_upload_location": "https://puturl.com",
+ },
+ )
+ mocked_responses.add(resp)
+ yield resp
+
+
@pytest.fixture
def mocked_storage_server(mocked_responses):
resp = responses.Response(responses.PUT, "https://puturl.com", status=200)
@@ -133,7 +182,7 @@ class TestUploadSender(object):
def test_upload_sender_post_called_with_right_parameters(
self, mocked_responses, mocked_legacy_upload_endpoint, mocked_storage_server
):
- headers = {"Authorization": f"token {random_token.hex}"}
+ headers = {"Authorization": f"token {random_token}"}
mocked_legacy_upload_endpoint.match = [
matchers.json_params_matcher(request_data),
@@ -154,7 +203,99 @@ def test_upload_sender_post_called_with_right_parameters(
assert response.get("url") == "https://app.codecov.io/commit-url"
assert (
post_req_made.url
- == f"https://api.codecov.io/upload/github/{encoded_slug}/commits/{random_sha}/reports/{named_upload_data['report_code']}/uploads"
+ == f"https://ingest.codecov.io/upload/github/{encoded_slug}/commits/{random_sha}/reports/{named_upload_data['report_code']}/uploads"
+ )
+ assert (
+ post_req_made.headers.items() >= headers.items()
+ ) # test dict is a subset of the other
+
+ def test_upload_sender_post_called_with_right_parameters_and_upload_coverage(
+ self, mocked_responses, mocked_upload_coverage_endpoint, mocked_storage_server
+ ):
+ headers = {"Authorization": f"token {random_token}"}
+
+ sending_result = UploadSender().send_upload_data(
+            upload_collection,
+            random_sha,
+            random_token,
+            upload_coverage=True,
+            **named_upload_data,
+ )
+ assert sending_result.error is None
+ assert sending_result.warnings == []
+
+ assert len(mocked_responses.calls) == 2
+
+ post_req_made = mocked_responses.calls[0].request
+ encoded_slug = encode_slug(named_upload_data["slug"])
+ response = json.loads(mocked_responses.calls[0].response.text)
+ assert response.get("url") == "https://app.codecov.io/commit-url"
+ assert (
+ post_req_made.url
+ == f"https://ingest.codecov.io/upload/github/{encoded_slug}/upload-coverage"
+ )
+ assert (
+ post_req_made.headers.items() >= headers.items()
+ ) # test dict is a subset of the other
+
+ def test_upload_sender_post_called_with_right_parameters_test_results(
+ self, mocked_responses, mocked_test_results_endpoint, mocked_storage_server
+ ):
+ headers = {"Authorization": f"token {random_token}"}
+
+ mocked_legacy_upload_endpoint.match = [
+ matchers.json_params_matcher(request_data),
+ matchers.header_matcher(headers),
+ ]
+
+ sending_result = UploadSender().send_upload_data(
+ upload_collection,
+ random_sha,
+ random_token,
+ **test_results_named_upload_data,
+ )
+ assert sending_result.error is None
+ assert sending_result.warnings == []
+
+ assert len(mocked_responses.calls) == 2
+
+ post_req_made = mocked_responses.calls[0].request
+ response = json.loads(mocked_responses.calls[0].response.text)
+ assert response.get("raw_upload_location") == "https://puturl.com"
+ assert post_req_made.url == "https://ingest.codecov.io/upload/test_results/v1"
+ assert (
+ post_req_made.headers.items() >= headers.items()
+ ) # test dict is a subset of the other
+
+ put_req_made = mocked_responses.calls[1].request
+ assert put_req_made.url == "https://puturl.com/"
+ assert "test_results_files" in put_req_made.body.decode("utf-8")
+
+ def test_upload_sender_post_called_with_right_parameters_tokenless(
+ self,
+ mocked_responses,
+ mocked_legacy_upload_endpoint,
+ mocked_storage_server,
+ mocker,
+ ):
+ headers = {}
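+        # No token is passed below, so no Authorization header is expected on the request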
+
+ mocked_legacy_upload_endpoint.match = [
+ matchers.json_params_matcher(request_data),
+ matchers.header_matcher(headers),
+ ]
+
+ sending_result = UploadSender().send_upload_data(
+ upload_collection, random_sha, None, **named_upload_data
+ )
+ assert sending_result.error is None
+ assert sending_result.warnings == []
+
+ assert len(mocked_responses.calls) == 2
+
+ post_req_made = mocked_responses.calls[0].request
+ encoded_slug = encode_slug(named_upload_data["slug"])
+ response = json.loads(mocked_responses.calls[0].response.text)
+ assert response.get("url") == "https://app.codecov.io/commit-url"
+ assert (
+ post_req_made.url
+ == f"https://ingest.codecov.io/upload/github/{encoded_slug}/commits/{random_sha}/reports/{named_upload_data['report_code']}/uploads"
)
assert (
post_req_made.headers.items() >= headers.items()
@@ -201,6 +342,29 @@ def test_upload_sender_result_fail_post_400(
assert sender.warnings is not None
+ @pytest.mark.parametrize("error_code", [500, 502])
+ def test_upload_sender_result_fail_post_500s(
+ self,
+ mocker,
+ mocked_responses,
+ mocked_legacy_upload_endpoint,
+ capsys,
+ error_code,
+ ):
+ mocker.patch("codecov_cli.helpers.request.sleep")
+ mocked_legacy_upload_endpoint.status = error_code
+
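+        # Every attempt returns a 5xx, so the sender keeps retrying (six warnings matched below) and finally raises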
+ with pytest.raises(Exception, match="Request failed after too many retries"):
+ _ = UploadSender().send_upload_data(
+ upload_collection, random_sha, random_token, **named_upload_data
+ )
+
+ matcher = re.compile(
+ rf"(warning.*((Response status code was {error_code})|(Request failed\. Retrying)).*(\n)?){{6}}"
+ )
+
+ assert matcher.match(capsys.readouterr().err) is not None
+
def test_upload_sender_result_fail_put_400(
self, mocked_responses, mocked_legacy_upload_endpoint, mocked_storage_server
):
@@ -237,7 +401,7 @@ def test_generate_payload_overall(self, mocked_coverage_file):
get_fake_upload_collection_result(mocked_coverage_file), None
)
expected_report = {
- "path_fixes": {
+ "report_fixes": {
"format": "legacy",
"value": {
"SwiftExample/AppDelegate.swift": {
@@ -304,7 +468,7 @@ def test_generate_empty_payload_overall(self):
UploadCollectionResult([], [], []), None
)
expected_report = {
- "path_fixes": {
+ "report_fixes": {
"format": "legacy",
"value": {},
},
@@ -329,9 +493,7 @@ def test_coverage_file_format(self, mocker, mocked_coverage_file):
"codecov_cli.services.upload.upload_sender.UploadSender._get_format_info",
return_value=("base64+compressed", "encoded_file_data"),
)
- json_formatted_coverage_file = UploadSender()._format_coverage_file(
- mocked_coverage_file
- )
+ json_formatted_coverage_file = UploadSender()._format_file(mocked_coverage_file)
print(json_formatted_coverage_file["data"])
assert json_formatted_coverage_file == {
"filename": mocked_coverage_file.get_filename().decode(),
diff --git a/tests/helpers/test_versioning_systems.py b/tests/helpers/test_versioning_systems.py
index 70e0ad20..52d13793 100644
--- a/tests/helpers/test_versioning_systems.py
+++ b/tests/helpers/test_versioning_systems.py
@@ -8,17 +8,28 @@
class TestGitVersioningSystem(object):
@pytest.mark.parametrize(
- "commit_sha,expected", [("", None), (b" random_sha ", "random_sha")]
+ "runs_output,expected",
+ [
+ # No output for parents nor commit
+ ([b"", b""], None),
+ # No output for parents, commit has SHA
+ ([b"", b" random_sha"], "random_sha"),
+ # Commit is NOT a merge-commit
+ ([b" parent_sha", b" random_sha "], "random_sha"),
+ # Commit IS a merge-commit
+ ([b" parent_sha0\nparent_sha1", b" random_sha"], "parent_sha1"),
+ ],
)
- def test_commit_sha(self, mocker, commit_sha, expected):
- mocked_subprocess = MagicMock()
+ def test_commit_sha(self, mocker, runs_output, expected):
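+        # The two mocked subprocess runs return the parent SHAs and the commit SHA, respectively (see the cases above)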
+ mocked_subprocess = [
+ MagicMock(**{"stdout": runs_output[0]}),
+ MagicMock(**{"stdout": runs_output[1]}),
+ ]
mocker.patch(
"codecov_cli.helpers.versioning_systems.subprocess.run",
- return_value=mocked_subprocess,
+ side_effect=mocked_subprocess,
)
- mocked_subprocess.stdout = commit_sha
-
assert (
GitVersioningSystem().get_fallback_value(FallbackFieldEnum.commit_sha)
== expected
@@ -95,7 +106,7 @@ def test_list_relevant_files_returns_correct_network_files(self, mocker, tmp_pat
return_value=mocked_subprocess,
)
         # git ls-files displays a single \n as \\\\n
- mocked_subprocess.stdout = b'a.txt\nb.txt\n"a\\\\nb.txt"\nc.txt\nd.txt'
+ mocked_subprocess.stdout = b'a.txt\nb.txt\n"a\\\\nb.txt"\nc.txt\nd.txt\n.circleci/config.yml\nLICENSE\napp/advanced calculations/advanced_calculator.js\n'
vs = GitVersioningSystem()
@@ -105,6 +116,9 @@ def test_list_relevant_files_returns_correct_network_files(self, mocker, tmp_pat
"a\\nb.txt",
"c.txt",
"d.txt",
+ ".circleci/config.yml",
+ "LICENSE",
+ "app/advanced calculations/advanced_calculator.js",
]
def test_list_relevant_files_fails_if_no_root_is_found(self, mocker):
@@ -114,5 +128,5 @@ def test_list_relevant_files_fails_if_no_root_is_found(self, mocker):
)
vs = GitVersioningSystem()
- with pytest.raises(ValueError) as ex:
+ with pytest.raises(ValueError):
vs.list_relevant_files()
diff --git a/tests/plugins/test_compress_pycoverage_contexts.py b/tests/plugins/test_compress_pycoverage_contexts.py
index 1dc8fc86..fe89a99f 100644
--- a/tests/plugins/test_compress_pycoverage_contexts.py
+++ b/tests/plugins/test_compress_pycoverage_contexts.py
@@ -172,7 +172,7 @@ class TestCompressPycoverageContexts(object):
def test_default_options(self):
plugin = CompressPycoverageContexts()
assert plugin.config.file_to_compress == pathlib.Path("coverage.json")
- assert plugin.config.delete_uncompressed == True
+ assert plugin.config.delete_uncompressed
assert plugin.file_to_compress == pathlib.Path("coverage.json")
assert plugin.file_to_write == pathlib.Path("coverage.codecov.json")
@@ -183,7 +183,7 @@ def test_change_options(self):
}
plugin = CompressPycoverageContexts(config)
assert plugin.config.file_to_compress == pathlib.Path("label.coverage.json")
- assert plugin.config.delete_uncompressed == False
+ assert not plugin.config.delete_uncompressed
assert plugin.file_to_compress == pathlib.Path("label.coverage.json")
assert plugin.file_to_write == pathlib.Path("label.coverage.codecov.json")
@@ -192,7 +192,7 @@ def test_run_preparation_fail_fast_no_file(self):
res = plugin.run_preparation(None)
assert res == PreparationPluginReturn(
success=False,
- messages=[f"File to compress coverage.json not found."],
+ messages=["File to compress coverage.json not found."],
)
def test_run_preparation_fail_fast_path_not_file(self, tmp_path):
diff --git a/tests/plugins/test_instantiation.py b/tests/plugins/test_instantiation.py
index aa2a3a7d..fdf3a842 100644
--- a/tests/plugins/test_instantiation.py
+++ b/tests/plugins/test_instantiation.py
@@ -106,35 +106,50 @@ def __init__(self):
def test_get_plugin_gcov():
- res = _get_plugin({}, "gcov")
+ res = _get_plugin({}, "gcov", {})
+ assert isinstance(res, GcovPlugin)
+
+ res = _get_plugin(
+ {},
+ "gcov",
+ {
+ "gcov_executable": "lcov",
+ },
+ )
assert isinstance(res, GcovPlugin)
def test_get_plugin_xcode():
- res = _get_plugin({}, "xcode")
+ res = _get_plugin({}, "xcode", {})
assert isinstance(res, XcodePlugin)
+def test_get_plugin_noop():
+ res = _get_plugin({}, "noop", {})
+ assert isinstance(res, NoopPlugin)
+
+
def test_get_plugin_pycoverage():
- res = _get_plugin({}, "pycoverage")
+ res = _get_plugin({}, "pycoverage", {})
assert isinstance(res, Pycoverage)
assert res.config == PycoverageConfig()
assert res.config.report_type == "xml"
pycoverage_config = {"project_root": "project/root", "report_type": "json"}
- res = _get_plugin({"plugins": {"pycoverage": pycoverage_config}}, "pycoverage")
+ res = _get_plugin({"plugins": {"pycoverage": pycoverage_config}}, "pycoverage", {})
assert isinstance(res, Pycoverage)
assert res.config == PycoverageConfig(pycoverage_config)
assert res.config.report_type == "json"
def test_get_plugin_compress_pycoverage():
- res = _get_plugin({}, "compress-pycoverage")
+ res = _get_plugin({}, "compress-pycoverage", {})
assert isinstance(res, CompressPycoverageContexts)
res = _get_plugin(
{"plugins": {"compress-pycoverage": {"file_to_compress": "something.json"}}},
"compress-pycoverage",
+ {},
)
assert isinstance(res, CompressPycoverageContexts)
assert str(res.file_to_compress) == "something.json"
@@ -175,6 +190,7 @@ def __init__(self, banana=None):
}
},
["gcov", "something", "otherthing", "second", "lalalala"],
+ {},
)
assert len(res) == 5
assert isinstance(res[0], GcovPlugin)
diff --git a/tests/runners/test_pytest_standard_runner.py b/tests/runners/test_pytest_standard_runner.py
index 2ac6780a..489a0e0c 100644
--- a/tests/runners/test_pytest_standard_runner.py
+++ b/tests/runners/test_pytest_standard_runner.py
@@ -5,7 +5,10 @@
import pytest
from pytest import ExitCode
-from codecov_cli.runners.pytest_standard_runner import PytestStandardRunner
+from codecov_cli.runners.pytest_standard_runner import (
+ PytestStandardRunner,
+ PytestStandardRunnerConfigParams,
+)
from codecov_cli.runners.pytest_standard_runner import logger as runner_logger
from codecov_cli.runners.pytest_standard_runner import stdout as pyrunner_stdout
from codecov_cli.runners.types import LabelAnalysisRequestResult
@@ -39,6 +42,48 @@ def test_execute_pytest(self, mock_subprocess):
)
assert result == output
+ @patch("codecov_cli.runners.pytest_standard_runner.logger.warning")
+ def test_warning_bad_config(self, mock_warning):
+ available_config = PytestStandardRunnerConfigParams.get_available_params()
+ assert "python_path" in available_config
+ assert "collect_tests_options" in available_config
+ assert "some_missing_option" not in available_config
+ params = dict(
+ python_path="path_to_python",
+ collect_tests_options=["option1", "option2"],
+ some_missing_option="option",
+ )
+ runner = PytestStandardRunner(params)
+ # Adding invalid config options emits a warning
+ mock_warning.assert_called_with(
+ "Config parameter 'some_missing_option' is unknonw."
+ )
+ # Warnings don't change the config
+ assert runner.params == {**params, "some_missing_option": "option"}
+ # And we can still access the config as usual
+ assert runner.params.python_path == "path_to_python"
+ assert runner.params.collect_tests_options == ["option1", "option2"]
+
+ @pytest.mark.parametrize("python_path", ["/usr/bin/python", "venv/bin/python"])
+ @patch("codecov_cli.runners.pytest_standard_runner.subprocess")
+ def test_execute_pytest_user_provided_python_path(
+ self, mock_subprocess, python_path
+ ):
+ output = "Output in stdout"
+ return_value = MagicMock(stdout=output.encode("utf-8"))
+ mock_subprocess.run.return_value = return_value
+
+ runner = PytestStandardRunner(dict(python_path=python_path))
+
+ result = runner._execute_pytest(["--option", "--ignore=batata"])
+ mock_subprocess.run.assert_called_with(
+ [python_path, "-m", "pytest", "--option", "--ignore=batata"],
+ capture_output=True,
+ check=True,
+ stdout=None,
+ )
+ assert result == output
+
@patch("codecov_cli.runners.pytest_standard_runner.subprocess")
def test_execute_pytest_fail_collection(self, mock_subprocess):
def side_effect(command, *args, **kwargs):
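These tests pin down two behaviours of the pytest runner's config handling: unknown keys only log a warning (the params mapping is left intact), and a configured `python_path` replaces the default interpreter in the generated `python -m pytest ...` command. A short usage sketch built on the imports shown above:

from codecov_cli.runners.pytest_standard_runner import (
    PytestStandardRunner,
    PytestStandardRunnerConfigParams,
)

# The set of recognised parameter names can be inspected up front.
available = PytestStandardRunnerConfigParams.get_available_params()
assert "python_path" in available

# Unknown keys are kept and merely warned about; known keys drive execution.
runner = PytestStandardRunner({"python_path": "venv/bin/python", "typo_key": 1})
# Internally the runner builds a command like:
#   ["venv/bin/python", "-m", "pytest", <collect/run options>]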
diff --git a/tests/runners/test_runners.py b/tests/runners/test_runners.py
index 5e869bf6..8a813d7e 100644
--- a/tests/runners/test_runners.py
+++ b/tests/runners/test_runners.py
@@ -14,7 +14,7 @@ def test_get_standard_runners(self):
assert isinstance(get_runner({}, "dan"), DoAnythingNowRunner)
# TODO: Extend with other standard runners once we create them (e.g. JS)
- def test_pytest_standard_runner_with_options_backwards_compatible(self):
+ def test_pytest_standard_runner_with_options(self):
config_params = dict(
collect_tests_options=["--option=value", "-option"],
)
@@ -26,6 +26,23 @@ def test_pytest_standard_runner_with_options_backwards_compatible(self):
)
assert runner_instance.params.coverage_root == "./"
+ def test_pytest_standard_runner_with_options_and_dynamic_options(self):
+ config_params = dict(
+ collect_tests_options=["--option=value", "-option"],
+ )
+ runner_instance = get_runner(
+ {"runners": {"pytest": config_params}},
+ "pytest",
+ {"python_path": "path/to/python"},
+ )
+ assert isinstance(runner_instance, PytestStandardRunner)
+ assert (
+ runner_instance.params.collect_tests_options
+ == config_params["collect_tests_options"]
+ )
+ assert runner_instance.params.python_path == "path/to/python"
+ assert runner_instance.params.coverage_root == "./"
+
def test_pytest_standard_runner_with_options_backwards_compatible(self):
config_params = dict(
collect_tests_options=["--option=value", "-option"],
@@ -56,7 +73,9 @@ def test_get_runner_from_yaml(self, mock_load_runner):
config = {"runners": {"my_runner": {"path": "path_to_my_runner"}}}
mock_load_runner.return_value = "MyRunner()"
assert get_runner(config, "my_runner") == "MyRunner()"
- mock_load_runner.assert_called_with({"path": "path_to_my_runner"})
+ mock_load_runner.assert_called_with(
+ {"path": "path_to_my_runner"}, dynamic_params={}
+ )
def test_load_runner_from_yaml(self, mocker):
fake_module = mocker.MagicMock(FakeRunner=FakeRunner)
@@ -66,7 +85,8 @@ def test_load_runner_from_yaml(self, mocker):
"module": "mymodule.runner",
"class": "FakeRunner",
"params": {"collect_tests_response": ["list", "of", "labels"]},
- }
+ },
+ {},
)
assert isinstance(res, FakeRunner)
assert res.collect_tests() == ["list", "of", "labels"]
@@ -83,7 +103,8 @@ def side_effect(*args, **kwargs):
"module": "mymodule.runner",
"class": "FakeRunner",
"params": {"collect_tests_response": ["list", "of", "labels"]},
- }
+ },
+ {},
)
def test_load_runner_from_yaml_class_not_found(self, mocker):
@@ -97,7 +118,8 @@ def test_load_runner_from_yaml_class_not_found(self, mocker):
"module": "mymodule.runner",
"class": "WrongClassName",
"params": {"collect_tests_response": ["list", "of", "labels"]},
- }
+ },
+ {},
)
def test_load_runner_from_yaml_fail_instantiate_class(self, mocker):
@@ -109,5 +131,6 @@ def test_load_runner_from_yaml_fail_instantiate_class(self, mocker):
"module": "mymodule.runner",
"class": "FakeRunner",
"params": {"wrong_params": ["list", "of", "labels"]},
- }
+ },
+ {},
)
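The runner-selection tests now pass a third argument to `get_runner` (and a second one to the YAML loader) for options supplied at invocation time. A sketch of that call shape, assuming `get_runner` is importable from `codecov_cli.runners` as the test module name suggests:

from codecov_cli.runners import get_runner  # assumed import path

config = {"runners": {"pytest": {"collect_tests_options": ["--option=value"]}}}

# YAML config plus dynamic, per-run options; the dynamic dict is merged
# into the runner params, per the assertions in the tests above.
runner = get_runner(config, "pytest", {"python_path": "path/to/python"})
assert runner.params.python_path == "path/to/python"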
diff --git a/tests/services/commit/test_base_picking.py b/tests/services/commit/test_base_picking.py
index 9b560a51..2c295fe2 100644
--- a/tests/services/commit/test_base_picking.py
+++ b/tests/services/commit/test_base_picking.py
@@ -139,3 +139,27 @@ def test_base_picking_command_error(mocker):
"error",
"Base picking failed: Unauthorized",
) in parse_outstreams_into_log_lines(result.output)
+
+
+def test_base_picking_no_token(mocker):
+ mocked_response = mocker.patch(
+ "codecov_cli.services.commit.base_picking.send_put_request",
+ return_value=RequestResult(status_code=200, error=None, warnings=[], text=""),
+ )
+ runner = CliRunner()
+ result = runner.invoke(
+ pr_base_picking,
+ [
+ "--pr",
+ "11",
+ "--base-sha",
+ "9a6902ee94c18e8e27561ce316b16d75a02c7bc1",
+ "--service",
+ "github",
+ "--slug",
+ "owner/repo",
+ ],
+ obj=mocker.MagicMock(), # context object
+ )
+ assert result.exit_code == 0
+ mocked_response.assert_called_once()
diff --git a/tests/services/commit/test_commit_service.py b/tests/services/commit/test_commit_service.py
index 082b2ab8..362cb48e 100644
--- a/tests/services/commit/test_commit_service.py
+++ b/tests/services/commit/test_commit_service.py
@@ -45,6 +45,7 @@ def test_commit_command_with_warnings(mocker):
token="token",
service="service",
enterprise_url=None,
+ args=None,
)
@@ -73,6 +74,7 @@ def test_commit_command_with_error(mocker):
token="token",
service="service",
enterprise_url=None,
+ args={},
)
out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
@@ -93,6 +95,7 @@ def test_commit_command_with_error(mocker):
token="token",
service="service",
enterprise_url=None,
+ args={},
)
@@ -103,7 +106,15 @@ def test_commit_sender_200(mocker):
)
token = uuid.uuid4()
res = send_commit_data(
- "commit_sha", "parent_sha", "pr", "branch", "slug", token, "service", None
+ "commit_sha",
+ "parent_sha",
+ "pr",
+ "branch",
+ "owner::::repo",
+ token,
+ "service",
+ None,
+ None,
)
assert res.error is None
assert res.warnings == []
@@ -117,7 +128,15 @@ def test_commit_sender_403(mocker):
)
token = uuid.uuid4()
res = send_commit_data(
- "commit_sha", "parent_sha", "pr", "branch", "slug", token, "service", None
+ "commit_sha",
+ "parent_sha",
+ "pr",
+ "branch",
+ "owner::::repo",
+ token,
+ "service",
+ None,
+ None,
)
assert res.error == RequestError(
code="HTTP Error 403",
@@ -125,3 +144,94 @@ def test_commit_sender_403(mocker):
params={},
)
mocked_response.assert_called_once()
+
+
+def test_commit_sender_with_forked_repo(mocker):
+ mocked_response = mocker.patch(
+ "codecov_cli.services.commit.send_post_request",
+ return_value=mocker.MagicMock(status_code=200, text="success"),
+ )
+
+ _ = send_commit_data(
+ "commit_sha",
+ "parent_sha",
+ "1",
+ "user_forked_repo/codecov-cli:branch",
+ "codecov::::codecov-cli",
+ None,
+ "github",
+ None,
+ None,
+ )
+ mocked_response.assert_called_with(
+ url="https://ingest.codecov.io/upload/github/codecov::::codecov-cli/commits",
+ data={
+ "branch": "user_forked_repo/codecov-cli:branch",
+ "cli_args": None,
+ "commitid": "commit_sha",
+ "parent_commit_id": "parent_sha",
+ "pullid": "1",
+ },
+ headers=None,
+ )
+
+
+def test_commit_without_token(mocker):
+ mocked_response = mocker.patch(
+ "codecov_cli.services.commit.send_post_request",
+ return_value=mocker.MagicMock(status_code=200, text="success"),
+ )
+
+ send_commit_data(
+ "commit_sha",
+ "parent_sha",
+ "1",
+ "branch",
+ "codecov::::codecov-cli",
+ None,
+ "github",
+ None,
+ None,
+ )
+ mocked_response.assert_called_with(
+ url="https://ingest.codecov.io/upload/github/codecov::::codecov-cli/commits",
+ data={
+ "branch": "branch",
+ "cli_args": None,
+ "commitid": "commit_sha",
+ "parent_commit_id": "parent_sha",
+ "pullid": "1",
+ },
+ headers=None,
+ )
+
+
+def test_commit_sender_with_forked_repo_bad_branch(mocker):
+ mocked_response = mocker.patch(
+ "codecov_cli.services.commit.send_post_request",
+ return_value=mocker.MagicMock(status_code=200, text="success"),
+ )
+ mocker.patch("os.environ", dict(TOKENLESS="user_forked_repo/codecov-cli:branch"))
+ _res = send_commit_data(
+ "commit_sha",
+ "parent_sha",
+ "1",
+ "branch",
+ "codecov::::codecov-cli",
+ None,
+ "github",
+ None,
+ None,
+ )
+
+ mocked_response.assert_called_with(
+ url="https://ingest.codecov.io/upload/github/codecov::::codecov-cli/commits",
+ data={
+ "branch": "user_forked_repo/codecov-cli:branch",
+ "cli_args": None,
+ "commitid": "commit_sha",
+ "parent_commit_id": "parent_sha",
+ "pullid": "1",
+ },
+ headers=None,
+ )
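The new commit-service tests exercise tokenless commit creation: `send_commit_data` accepts `None` for the token, takes the `owner::::repo` encoded slug, and gained a trailing argument that is echoed back to the API as `cli_args`. A sketch of the positional call shape; the names in the comments are inferred from the command-level tests rather than a shown signature:

from codecov_cli.services.commit import send_commit_data

res = send_commit_data(
    "commit_sha",
    "parent_sha",
    "1",                                    # pull request id
    "user_forked_repo/codecov-cli:branch",  # fork-style branch used for tokenless PRs
    "codecov::::codecov-cli",               # encoded owner::::repo slug
    None,                                   # token omitted (tokenless upload)
    "github",                               # git service
    None,                                   # enterprise_url (inferred name)
    None,                                   # CLI args, sent to the API as "cli_args"
)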
diff --git a/tests/services/empty_upload/test_empty_upload.py b/tests/services/empty_upload/test_empty_upload.py
index 7ad75933..1e354d50 100644
--- a/tests/services/empty_upload/test_empty_upload.py
+++ b/tests/services/empty_upload/test_empty_upload.py
@@ -1,6 +1,8 @@
import json
import uuid
+import click
+import pytest
from click.testing import CliRunner
from codecov_cli.services.empty_upload import empty_upload_logic
@@ -21,7 +23,14 @@ def test_empty_upload_with_warnings(mocker):
runner = CliRunner()
with runner.isolation() as outstreams:
res = empty_upload_logic(
- "commit_sha", "owner/repo", uuid.uuid4(), "service", None, False
+ "commit_sha",
+ "owner/repo",
+ uuid.uuid4(),
+ "service",
+ None,
+ False,
+ False,
+ None,
)
out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
assert out_bytes == [
@@ -50,7 +59,14 @@ def test_empty_upload_with_error(mocker):
runner = CliRunner()
with runner.isolation() as outstreams:
res = empty_upload_logic(
- "commit_sha", "owner/repo", uuid.uuid4(), "service", None, False
+ "commit_sha",
+ "owner/repo",
+ uuid.uuid4(),
+ "service",
+ None,
+ False,
+ False,
+ None,
)
out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
@@ -77,7 +93,7 @@ def test_empty_upload_200(mocker):
runner = CliRunner()
with runner.isolation() as outstreams:
res = empty_upload_logic(
- "commit_sha", "owner/repo", token, "service", None, False
+ "commit_sha", "owner/repo", token, "service", None, False, False, None
)
out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
assert out_bytes == [
@@ -96,10 +112,68 @@ def test_empty_upload_403(mocker):
return_value=mocker.MagicMock(status_code=403, text="Permission denied"),
)
token = uuid.uuid4()
- res = empty_upload_logic("commit_sha", "owner/repo", token, "service", None, False)
+ res = empty_upload_logic(
+ "commit_sha", "owner/repo", token, "service", None, False, False, None
+ )
assert res.error == RequestError(
code="HTTP Error 403",
description="Permission denied",
params={},
)
mocked_response.assert_called_once()
+
+
+def test_empty_upload_force(mocker):
+ res = {
+ "result": "Force option was enabled. Triggering passing notifications.",
+ "non_ignored_files": [],
+ }
+ mocked_response = mocker.patch(
+ "codecov_cli.helpers.request.requests.post",
+ return_value=RequestResult(
+ status_code=200, error=None, warnings=[], text=json.dumps(res)
+ ),
+ )
+ token = uuid.uuid4()
+ runner = CliRunner()
+ with runner.isolation() as outstreams:
+ res = empty_upload_logic(
+ "commit_sha", "owner/repo", token, "service", None, False, True, None
+ )
+ out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
+ assert out_bytes == [
+ ("info", "Process Empty Upload complete"),
+ ("info", "Force option was enabled. Triggering passing notifications."),
+ ("info", "Non ignored files []"),
+ ]
+ assert res.error is None
+ assert res.warnings == []
+ mocked_response.assert_called_once()
+
+
+def test_empty_upload_no_token(mocker):
+ res = {
+ "result": "All changed files are ignored. Triggering passing notifications.",
+ "non_ignored_files": [],
+ }
+ mocked_response = mocker.patch(
+ "codecov_cli.helpers.request.requests.post",
+ return_value=RequestResult(
+ status_code=200, error=None, warnings=[], text=json.dumps(res)
+ ),
+ )
+ runner = CliRunner()
+ with runner.isolation() as outstreams:
+ res = empty_upload_logic(
+ "commit_sha", "owner/repo", None, "service", None, False, False, None
+ )
+
+ out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
+ assert out_bytes == [
+ ("info", "Process Empty Upload complete"),
+ ("info", "All changed files are ignored. Triggering passing notifications."),
+ ("info", "Non ignored files []"),
+ ]
+ assert res.error is None
+ assert res.warnings == []
+ mocked_response.assert_called_once()
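`empty_upload_logic` now takes two extra trailing parameters, used here for the force behaviour and the CLI args payload, and it also accepts a missing token. A sketch of the call shape used throughout these tests; the commented parameter names are assumptions inferred from the surrounding assertions, not a documented signature:

import uuid
from codecov_cli.services.empty_upload import empty_upload_logic

res = empty_upload_logic(
    "commit_sha",
    "owner/repo",
    uuid.uuid4(),  # token; None is also accepted (tokenless)
    "service",
    None,          # enterprise_url (assumed name)
    False,         # fail-on-error flag (assumed name)
    True,          # force: trigger passing notifications unconditionally
    None,          # CLI args
)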
diff --git a/tests/services/report/test_report_results.py b/tests/services/report/test_report_results.py
index 3b22cc0d..f713ce6b 100644
--- a/tests/services/report/test_report_results.py
+++ b/tests/services/report/test_report_results.py
@@ -31,6 +31,7 @@ def test_report_results_command_with_warnings(mocker):
slug="owner/repo",
token="token",
enterprise_url=None,
+ args=None,
)
out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
@@ -42,6 +43,7 @@ def test_report_results_command_with_warnings(mocker):
assert res == mock_send_reports_result_request.return_value
mock_send_reports_result_request.assert_called_with(
+ args=None,
commit_sha="commit_sha",
report_code="code",
service="service",
@@ -74,6 +76,7 @@ def test_report_results_command_with_error(mocker):
slug="owner/repo",
token="token",
enterprise_url=None,
+ args=None,
)
out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
@@ -83,6 +86,7 @@ def test_report_results_command_with_error(mocker):
]
assert res == mock_send_reports_result_request.return_value
mock_send_reports_result_request.assert_called_with(
+ args=None,
commit_sha="commit_sha",
report_code="code",
service="service",
@@ -99,7 +103,20 @@ def test_report_results_request_200(mocker):
)
token = uuid.uuid4()
res = send_reports_result_request(
- "commit_sha", "report_code", "encoded_slug", "service", token, None
+ "commit_sha", "report_code", "encoded_slug", "service", token, None, None
+ )
+ assert res.error is None
+ assert res.warnings == []
+ mocked_response.assert_called_once()
+
+
+def test_report_results_request_no_token(mocker):
+ mocked_response = mocker.patch(
+ "codecov_cli.helpers.request.requests.post",
+ return_value=mocker.MagicMock(status_code=200),
+ )
+ res = send_reports_result_request(
+ "commit_sha", "report_code", "encoded_slug", "service", None, None, None
)
assert res.error is None
assert res.warnings == []
@@ -113,7 +130,7 @@ def test_report_results_403(mocker):
)
token = uuid.uuid4()
res = send_reports_result_request(
- "commit_sha", "report_code", "encoded_slug", "service", token, None
+ "commit_sha", "report_code", "encoded_slug", "service", token, None, None
)
assert res.error == RequestError(
code="HTTP Error 403",
@@ -125,7 +142,7 @@ def test_report_results_403(mocker):
def test_get_report_results_200_completed(mocker, capsys):
mocked_response = mocker.patch(
- "codecov_cli.services.report.requests.get",
+ "codecov_cli.helpers.request.requests.get",
return_value=mocker.MagicMock(
status_code=200,
text='{"state": "completed", "result": {"state": "failure","message": "33.33% of diff hit (target 77.77%)"}}',
@@ -145,11 +162,27 @@ def test_get_report_results_200_completed(mocker, capsys):
) in output
+def test_get_report_results_no_token(mocker, capsys):
+ mocked_response = mocker.patch(
+ "codecov_cli.helpers.request.requests.get",
+ return_value=mocker.MagicMock(
+ status_code=200,
+ text='{"state": "completed", "result": {"state": "failure","message": "33.33% of diff hit (target 77.77%)"}}',
+ ),
+ )
+ res = send_reports_result_get_request(
+ "commit_sha", "report_code", "encoded_slug", "service", None, None
+ )
+ assert res.error is None
+ assert res.warnings == []
+ mocked_response.assert_called_once()
+
+
@patch("codecov_cli.services.report.MAX_NUMBER_TRIES", 1)
def test_get_report_results_200_pending(mocker, capsys):
mocker.patch("codecov_cli.services.report.time.sleep")
mocked_response = mocker.patch(
- "codecov_cli.services.report.requests.get",
+ "codecov_cli.helpers.request.requests.get",
return_value=mocker.MagicMock(
status_code=200, text='{"state": "pending", "result": {}}'
),
@@ -167,7 +200,7 @@ def test_get_report_results_200_pending(mocker, capsys):
def test_get_report_results_200_error(mocker, capsys):
mocked_response = mocker.patch(
- "codecov_cli.services.report.requests.get",
+ "codecov_cli.helpers.request.requests.get",
return_value=mocker.MagicMock(
status_code=200, text='{"state": "error", "result": {}}'
),
@@ -188,7 +221,7 @@ def test_get_report_results_200_error(mocker, capsys):
def test_get_report_results_200_undefined_state(mocker, capsys):
mocked_response = mocker.patch(
- "codecov_cli.services.report.requests.get",
+ "codecov_cli.helpers.request.requests.get",
return_value=mocker.MagicMock(
status_code=200, text='{"state": "undefined_state", "result": {}}'
),
@@ -206,7 +239,7 @@ def test_get_report_results_200_undefined_state(mocker, capsys):
def test_get_report_results_401(mocker, capsys):
mocked_response = mocker.patch(
- "codecov_cli.services.report.requests.get",
+ "codecov_cli.helpers.request.requests.get",
return_value=mocker.MagicMock(
status_code=401, text='{"detail": "Invalid token."}'
),
diff --git a/tests/services/report/test_report_service.py b/tests/services/report/test_report_service.py
index 2a4dc2e8..153e803a 100644
--- a/tests/services/report/test_report_service.py
+++ b/tests/services/report/test_report_service.py
@@ -9,11 +9,38 @@
def test_send_create_report_request_200(mocker):
mocked_response = mocker.patch(
- "codecov_cli.services.report.requests.post",
+ "codecov_cli.helpers.request.requests.post",
return_value=mocker.MagicMock(status_code=200),
)
res = send_create_report_request(
- "commit_sha", "code", "github", uuid.uuid4(), "slug", "enterprise_url"
+ "commit_sha",
+ "code",
+ "github",
+ uuid.uuid4(),
+ "owner::::repo",
+ "enterprise_url",
+ 1,
+ None,
+ )
+ assert res.error is None
+ assert res.warnings == []
+ mocked_response.assert_called_once()
+
+
+def test_send_create_report_request_no_token(mocker):
+ mocked_response = mocker.patch(
+ "codecov_cli.helpers.request.requests.post",
+ return_value=mocker.MagicMock(status_code=200),
+ )
+ res = send_create_report_request(
+ "commit_sha",
+ "code",
+ "github",
+ None,
+ "owner::::repo",
+ "enterprise_url",
+ 1,
+ None,
)
assert res.error is None
assert res.warnings == []
@@ -22,11 +49,11 @@ def test_send_create_report_request_200(mocker):
def test_send_create_report_request_403(mocker):
mocked_response = mocker.patch(
- "codecov_cli.services.report.requests.post",
+ "codecov_cli.helpers.request.requests.post",
return_value=mocker.MagicMock(status_code=403, text="Permission denied"),
)
res = send_create_report_request(
- "commit_sha", "code", "github", uuid.uuid4(), "slug", None
+ "commit_sha", "code", "github", uuid.uuid4(), "owner::::repo", None, 1, None
)
assert res.error == RequestError(
code="HTTP Error 403",
@@ -55,6 +82,7 @@ def test_create_report_command_with_warnings(mocker):
service="github",
token="token",
enterprise_url=None,
+ pull_request_number=1,
)
out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
@@ -70,7 +98,7 @@ def test_create_report_command_with_warnings(mocker):
text="",
)
mocked_send_request.assert_called_with(
- "commit_sha", "code", "github", "token", "owner::::repo", None
+ "commit_sha", "code", "github", "token", "owner::::repo", None, 1, None
)
@@ -96,6 +124,7 @@ def test_create_report_command_with_error(mocker):
slug="owner/repo",
service="github",
token="token",
+ pull_request_number=1,
enterprise_url="enterprise_url",
)
@@ -115,5 +144,12 @@ def test_create_report_command_with_error(mocker):
warnings=[],
)
mock_send_report_data.assert_called_with(
- "commit_sha", "code", "github", "token", "owner::::repo", "enterprise_url"
+ "commit_sha",
+ "code",
+ "github",
+ "token",
+ "owner::::repo",
+ "enterprise_url",
+ 1,
+ None,
)
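`send_create_report_request` picked up two trailing arguments, which these tests pass as the pull request number and the CLI args payload, and it now tolerates a missing token. The call shape, copied from the 200-response tests above:

from codecov_cli.services.report import send_create_report_request

res = send_create_report_request(
    "commit_sha",
    "report_code",
    "github",         # git service
    None,             # token omitted (tokenless)
    "owner::::repo",  # encoded slug
    None,             # enterprise_url
    1,                # pull request number
    None,             # CLI args
)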
diff --git a/tests/services/static_analysis/test_analyse_file.py b/tests/services/static_analysis/test_analyse_file.py
index 2b28397e..43269b16 100644
--- a/tests/services/static_analysis/test_analyse_file.py
+++ b/tests/services/static_analysis/test_analyse_file.py
@@ -47,12 +47,12 @@ def test_sample_analysis(input_filename, output_filename):
@patch("builtins.open")
@patch("codecov_cli.services.staticanalysis.get_best_analyzer", return_value=None)
-def test_analyse_file_no_analyser(mock_get_analyser, mock_open):
- fake_contents = MagicMock()
+def test_analyse_file_no_analyzer(mock_get_analyzer, mock_open):
+ fake_contents = MagicMock(name="fake_file_contents")
file_name = MagicMock(actual_filepath="filepath")
- mock_open.return_value.read.return_value = fake_contents
+ mock_open.return_value.__enter__.return_value.read.return_value = fake_contents
config = {}
res = analyze_file(config, file_name)
- assert res == None
- assert mock_open.called_with("filepath", "rb")
- assert mock_get_analyser.called_with(file_name, fake_contents)
+ assert res is None
+ mock_open.assert_called_with("filepath", "rb")
+ mock_get_analyzer.assert_called_with(file_name, fake_contents)
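The rewrite above replaces `assert mock.called_with(...)` with `mock.assert_called_with(...)`. The difference matters: `called_with` is not a real `Mock` method, so accessing it just auto-creates a child mock, which is always truthy, meaning the old assertions could never fail. A minimal illustration:

from unittest.mock import MagicMock

m = MagicMock()
m("expected")

# Always passes: "called_with" is an auto-created attribute that returns a
# (truthy) child mock, regardless of how m was actually called.
assert m.called_with("something else")

# Actually checks the recorded call and raises AssertionError on mismatch.
m.assert_called_with("expected")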
diff --git a/tests/services/static_analysis/test_static_analysis_service.py b/tests/services/static_analysis/test_static_analysis_service.py
index 08876634..635eecf9 100644
--- a/tests/services/static_analysis/test_static_analysis_service.py
+++ b/tests/services/static_analysis/test_static_analysis_service.py
@@ -32,9 +32,7 @@ async def test_process_files_with_error(self, mocker):
],
)
)
- mock_get_context = mocker.patch(
- "codecov_cli.services.staticanalysis.get_context"
- )
+ mock_pool = mocker.patch("codecov_cli.services.staticanalysis.Pool")
def side_effect(config, filename: FileAnalysisRequest):
if filename.result_filename == "correct_file.py":
@@ -59,12 +57,12 @@ def imap_side_effect(mapped_func, files):
results.append(mapped_func(file))
return results
- mock_get_context.return_value.Pool.return_value.__enter__.return_value.imap_unordered.side_effect = (
+ mock_pool.return_value.__enter__.return_value.imap_unordered.side_effect = (
imap_side_effect
)
results = await process_files(files_found, 1, {})
- mock_get_context.return_value.Pool.return_value.__enter__.return_value.imap_unordered.assert_called()
+ mock_pool.return_value.__enter__.return_value.imap_unordered.assert_called()
assert mock_analyze_function.call_count == 2
assert results == dict(
all_data={"correct_file.py": {"hash": "abc123"}},
@@ -138,6 +136,7 @@ async def side_effect(*args, **kwargs):
should_force=False,
folders_to_exclude=[],
enterprise_url=None,
+ args=None,
)
mock_file_finder.assert_called_with({})
mock_file_finder.return_value.find_files.assert_called()
@@ -213,6 +212,7 @@ async def side_effect(client, all_data, el):
should_force=False,
folders_to_exclude=[],
enterprise_url=None,
+ args=None,
)
assert "Unknown error cancelled the upload tasks." in str(exp.value)
mock_file_finder.assert_called_with({})
@@ -386,6 +386,7 @@ async def side_effect(*args, **kwargs):
should_force=False,
folders_to_exclude=[],
enterprise_url=None,
+ args=None,
)
mock_file_finder.assert_called_with({})
mock_file_finder.return_value.find_files.assert_called()
@@ -460,6 +461,7 @@ async def side_effect(*args, **kwargs):
should_force=False,
folders_to_exclude=[],
enterprise_url=None,
+ args=None,
)
mock_file_finder.assert_called_with({})
mock_file_finder.return_value.find_files.assert_called()
@@ -536,6 +538,7 @@ async def side_effect(*args, **kwargs):
should_force=True,
folders_to_exclude=[],
enterprise_url=None,
+ args=None,
)
mock_file_finder.assert_called_with({})
mock_file_finder.return_value.find_files.assert_called()
@@ -607,6 +610,7 @@ async def side_effect(*args, **kwargs):
should_force=False,
folders_to_exclude=[],
enterprise_url=None,
+ args=None,
)
mock_file_finder.assert_called_with({})
mock_file_finder.return_value.find_files.assert_called()
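The static-analysis tests now patch `Pool` directly where it is used, instead of going through `get_context`, and drive `imap_unordered` synchronously so the worker results stay deterministic. A sketch of that pattern inside a test that receives the pytest-mock `mocker` fixture:

def test_process_files_inline(mocker):
    # Patch Pool in the module that uses it, not in multiprocessing itself.
    mock_pool = mocker.patch("codecov_cli.services.staticanalysis.Pool")

    def imap_side_effect(mapped_func, files):
        # Run the mapped function inline instead of in worker processes.
        return [mapped_func(f) for f in files]

    mock_pool.return_value.__enter__.return_value.imap_unordered.side_effect = (
        imap_side_effect
    )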
diff --git a/tests/services/upload/test_coverage_file_finder.py b/tests/services/upload/test_coverage_file_finder.py
index dd83a8a4..201701c8 100644
--- a/tests/services/upload/test_coverage_file_finder.py
+++ b/tests/services/upload/test_coverage_file_finder.py
@@ -1,18 +1,19 @@
import tempfile
-import unittest
from pathlib import Path
-from codecov_cli.services.upload.coverage_file_finder import CoverageFileFinder
+import pytest
+
+from codecov_cli.services.upload.file_finder import FileFinder
from codecov_cli.types import UploadCollectionResultFile
class TestCoverageFileFinder(object):
def test_find_coverage_files_mocked_search_files(self, mocker):
mocker.patch(
- "codecov_cli.services.upload.coverage_file_finder.search_files",
+ "codecov_cli.services.upload.file_finder.search_files",
return_value=[],
)
- assert CoverageFileFinder().find_coverage_files() == []
+ assert FileFinder().find_files() == []
coverage_files_paths = [
Path("a/b.txt"),
@@ -20,7 +21,7 @@ def test_find_coverage_files_mocked_search_files(self, mocker):
]
mocker.patch(
- "codecov_cli.services.upload.coverage_file_finder.search_files",
+ "codecov_cli.services.upload.file_finder.search_files",
return_value=coverage_files_paths,
)
@@ -32,7 +33,7 @@ def test_find_coverage_files_mocked_search_files(self, mocker):
expected_paths = sorted([file.get_filename() for file in expected])
actual_paths = sorted(
- [file.get_filename() for file in CoverageFileFinder().find_coverage_files()]
+ [file.get_filename() for file in FileFinder().find_files()]
)
assert expected_paths == actual_paths
@@ -87,157 +88,358 @@ def test_find_coverage_files(self, tmp_path):
expected = {
UploadCollectionResultFile((tmp_path / file)) for file in should_find
}
- actual = set(CoverageFileFinder(tmp_path).find_coverage_files())
+ actual = set(FileFinder(tmp_path).find_files())
assert actual == expected
extra = tmp_path / "sub" / "nosetests.xml"
extra.touch()
- actual = set(CoverageFileFinder(tmp_path).find_coverage_files())
+ actual = set(FileFinder(tmp_path).find_files())
assert actual - expected == {UploadCollectionResultFile(extra)}
+ def test_find_coverage_files_test_results(self, tmp_path):
+ (tmp_path / "sub").mkdir()
+ (tmp_path / "sub" / "subsub").mkdir()
+ (tmp_path / "node_modules").mkdir()
-class TestCoverageFileFinderUserInput(unittest.TestCase):
- def setUp(self):
- self.temp_dir = tempfile.TemporaryDirectory() # Create a temporary directory
- self.project_root = Path(self.temp_dir.name)
- self.folders_to_ignore = []
- self.explicitly_listed_files = [
- self.project_root / "test_file.abc",
- self.project_root / "subdirectory" / "another_file.abc",
- ]
- self.disable_search = False
- self.coverage_file_finder = CoverageFileFinder(
- self.project_root,
- self.folders_to_ignore,
- self.explicitly_listed_files,
- self.disable_search,
- )
+ should_find = ["junit.xml", "abc.junit.xml", "sub/junit.xml"]
- def tearDown(self):
- self.temp_dir.cleanup() # Clean up the temporary directory
+ should_ignore = [
+ "abc.codecov.exe",
+ "sub/abc.codecov.exe",
+ "codecov.exe",
+ "__pycache__",
+ "sub/subsub/__pycache__",
+ ".gitignore",
+ "a.sql",
+ "a.csv",
+ ".abc-coveragerc",
+ ".coverage-xyz",
+ "sub/scoverage.measurements.xyz",
+ "sub/test_abcd_coverage.txt",
+ "test-result-ff-codecoverage.json",
+ "node_modules/abc-coverage.cov",
+ "abc-coverage.cov",
+ "coverage-abc.abc",
+ "sub/coverage-abc.abc",
+ "sub/subsub/coverage-abc.abc",
+ "coverage.abc",
+ "jacocoxyz.xml",
+ "sub/jacocoxyz.xml",
+ "codecov.abc",
+ "sub/subsub/codecov.abc",
+ "xyz.codecov.abc",
+ "sub/xyz.codecov.abc",
+ "sub/subsub/xyz.codecov.abc",
+ "cover.out",
+ "abc.gcov",
+ "sub/abc.gcov",
+ "sub/subsub/abc.gcov",
+ ]
- def test_find_coverage_files_with_existing_files(self):
- # Create some sample coverage files
+ for filename in should_find:
+ (tmp_path / filename).touch()
+
+ for filename in should_ignore:
+ (tmp_path / filename).touch()
+
+ expected = {
+ UploadCollectionResultFile((tmp_path / file)) for file in should_find
+ }
+ actual = set(FileFinder(tmp_path, report_type="test_results").find_files())
+ assert actual == expected
+
+ extra = tmp_path / "sub" / "nosetests.junit.xml"
+ extra.touch()
+ actual = set(FileFinder(tmp_path, report_type="test_results").find_files())
+ assert actual - expected == {UploadCollectionResultFile(extra)}
+
+
+@pytest.fixture()
+def coverage_file_finder_fixture():
+ temp_dir = tempfile.TemporaryDirectory() # Create a temporary directory
+ project_root = Path(temp_dir.name)
+ folders_to_ignore = []
+ explicitly_listed_files = [
+ project_root / "test_file.abc",
+ project_root / "subdirectory" / "another_file.abc",
+ ]
+ disable_search = False
+ coverage_file_finder = FileFinder(
+ project_root,
+ folders_to_ignore,
+ explicitly_listed_files,
+ disable_search,
+ )
+ yield project_root, coverage_file_finder
+ temp_dir.cleanup()
+
+
+class TestCoverageFileFinderUserInput:
+ def test_find_coverage_files_with_existing_files(
+ self, coverage_file_finder_fixture
+ ):
+ # Create some sample coverage files
+ # Create some sample coverage files
+ (
+ project_root,
+ coverage_file_finder,
+ ) = coverage_file_finder_fixture
coverage_files = [
- self.project_root / "coverage.xml",
- self.project_root / "subdirectory" / "test_coverage.xml",
- self.project_root / "other_file.txt",
+ project_root / "coverage.xml",
+ project_root / "subdirectory" / "test_coverage.xml",
+ project_root / "other_file.txt",
+ project_root / ".tox" / "another_file.abc",
]
- (self.project_root / "subdirectory").mkdir()
+ (project_root / "subdirectory").mkdir()
+ (project_root / ".tox").mkdir()
for file in coverage_files:
file.touch()
result = sorted(
- [
- file.get_filename()
- for file in self.coverage_file_finder.find_coverage_files()
- ]
+ [file.get_filename() for file in coverage_file_finder.find_files()]
)
expected = [
- UploadCollectionResultFile(Path(f"{self.project_root}/coverage.xml")),
+ UploadCollectionResultFile(Path(f"{project_root}/coverage.xml")),
UploadCollectionResultFile(
- Path(f"{self.project_root}/subdirectory/test_coverage.xml")
+ Path(f"{project_root}/subdirectory/test_coverage.xml")
),
]
expected_paths = sorted([file.get_filename() for file in expected])
- self.assertEqual(result, expected_paths)
+ assert result == expected_paths
+
+ def test_find_coverage_files_with_file_in_parent(
+ self, coverage_file_finder_fixture
+ ):
+ # Create some sample coverage files
+ (
+ project_root,
+ coverage_file_finder,
+ ) = coverage_file_finder_fixture
+ coverage_files = [
+ project_root.parent / "coverage.xml",
+ ]
+ for file in coverage_files:
+ file.touch()
- def test_find_coverage_files_with_no_files(self):
- result = self.coverage_file_finder.find_coverage_files()
- self.assertEqual(result, [])
+ coverage_file_finder.explicitly_listed_files = [
+ project_root.parent / "coverage.xml"
+ ]
- def test_find_coverage_files_with_disabled_search(self):
- # Create some sample coverage files
- print("project root", self.project_root)
+ result = sorted(
+ [file.get_filename() for file in coverage_file_finder.find_files()]
+ )
+ expected = [
+ UploadCollectionResultFile(Path(f"{project_root.parent}/coverage.xml"))
+ ]
+ expected_paths = sorted([file.get_filename() for file in expected])
+ assert result == expected_paths
+
+ def test_find_coverage_files_with_no_files(self, coverage_file_finder_fixture):
+ (
+ _,
+ coverage_file_finder,
+ ) = coverage_file_finder_fixture
+ result = coverage_file_finder.find_files()
+ assert result == []
+
+ def test_find_coverage_files_with_disabled_search(
+ self, coverage_file_finder_fixture
+ ):
+ (
+ project_root,
+ coverage_file_finder,
+ ) = coverage_file_finder_fixture
+ # Create some sample coverage files
+ print("project root", project_root)
coverage_files = [
- self.project_root / "test_file.abc",
- self.project_root / "subdirectory" / "another_file.abc",
- self.project_root / "subdirectory" / "test_coverage.xml",
- self.project_root / "other_file.txt",
+ project_root / "test_file.abc",
+ project_root / "subdirectory" / "another_file.abc",
+ project_root / "subdirectory" / "test_coverage.xml",
+ project_root / "other_file.txt",
+ project_root / ".tox" / "another_file.abc",
]
- (self.project_root / "subdirectory").mkdir()
+ (project_root / "subdirectory").mkdir()
+ (project_root / ".tox").mkdir()
for file in coverage_files:
file.touch()
# Disable search
- self.coverage_file_finder.disable_search = True
+ coverage_file_finder.disable_search = True
result = sorted(
- [
- file.get_filename()
- for file in self.coverage_file_finder.find_coverage_files()
- ]
+ [file.get_filename() for file in coverage_file_finder.find_files()]
)
expected = [
- UploadCollectionResultFile(Path(f"{self.project_root}/test_file.abc")),
+ UploadCollectionResultFile(Path(f"{project_root}/test_file.abc")),
UploadCollectionResultFile(
- Path(f"{self.project_root}/subdirectory/another_file.abc")
+ Path(f"{project_root}/subdirectory/another_file.abc")
),
]
expected_paths = sorted([file.get_filename() for file in expected])
- self.assertEqual(result, expected_paths)
+ assert result == expected_paths
- def test_find_coverage_files_with_user_specified_files(self):
- # Create some sample coverage files
+ def test_find_coverage_files_with_user_specified_files(
+ self, coverage_file_finder_fixture
+ ):
+ (
+ project_root,
+ coverage_file_finder,
+ ) = coverage_file_finder_fixture
+
+ # Create some sample coverage files
coverage_files = [
- self.project_root / "coverage.xml",
- self.project_root / "subdirectory" / "test_coverage.xml",
- self.project_root / "test_file.abc",
- self.project_root / "subdirectory" / "another_file.abc",
+ project_root / "coverage.xml",
+ project_root / "subdirectory" / "test_coverage.xml",
+ project_root / "test_file.abc",
+ project_root / "subdirectory" / "another_file.abc",
+ project_root / ".tox" / "another_file.abc",
]
- (self.project_root / "subdirectory").mkdir()
+ (project_root / "subdirectory").mkdir()
+ (project_root / ".tox").mkdir()
for file in coverage_files:
file.touch()
result = sorted(
- [
- file.get_filename()
- for file in self.coverage_file_finder.find_coverage_files()
- ]
+ [file.get_filename() for file in coverage_file_finder.find_files()]
)
expected = [
- UploadCollectionResultFile(Path(f"{self.project_root}/coverage.xml")),
+ UploadCollectionResultFile(Path(f"{project_root}/coverage.xml")),
UploadCollectionResultFile(
- Path(f"{self.project_root}/subdirectory/test_coverage.xml")
+ Path(f"{project_root}/subdirectory/test_coverage.xml")
),
- UploadCollectionResultFile(Path(f"{self.project_root}/test_file.abc")),
+ UploadCollectionResultFile(Path(f"{project_root}/test_file.abc")),
UploadCollectionResultFile(
- Path(f"{self.project_root}/subdirectory/another_file.abc")
+ Path(f"{project_root}/subdirectory/another_file.abc")
),
]
expected_paths = sorted([file.get_filename() for file in expected])
- self.assertEqual(result, expected_paths)
+ assert result == expected_paths
- def test_find_coverage_files_with_user_specified_files_not_found(self):
- # Create some sample coverage files
+ def test_find_coverage_files_with_user_specified_files_not_found(
+ self, coverage_file_finder_fixture
+ ):
+ (
+ project_root,
+ coverage_file_finder,
+ ) = coverage_file_finder_fixture
+
+ # Create some sample coverage files
coverage_files = [
- self.project_root / "coverage.xml",
- self.project_root / "subdirectory" / "test_coverage.xml",
+ project_root / "coverage.xml",
+ project_root / "subdirectory" / "test_coverage.xml",
+ project_root / ".tox" / "another_file.abc",
]
- (self.project_root / "subdirectory").mkdir()
+ (project_root / "subdirectory").mkdir()
+ (project_root / ".tox").mkdir()
for file in coverage_files:
file.touch()
# Add a non-existent file to explicitly_listed_files
- self.coverage_file_finder.explicitly_listed_files.append(
- self.project_root / "non_existent.xml"
+ coverage_file_finder.explicitly_listed_files.append(
+ project_root / "non_existent.xml"
+ )
+
+ result = sorted(
+ [file.get_filename() for file in coverage_file_finder.find_files()]
+ )
+
+ expected = [
+ UploadCollectionResultFile(Path(f"{project_root}/coverage.xml")),
+ UploadCollectionResultFile(
+ Path(f"{project_root}/subdirectory/test_coverage.xml")
+ ),
+ ]
+ expected_paths = sorted([file.get_filename() for file in expected])
+ assert result == expected_paths
+
+ def test_find_coverage_files_with_user_specified_files_in_default_ignored_folder(
+ self, coverage_file_finder_fixture
+ ):
+ (
+ project_root,
+ coverage_file_finder,
+ ) = coverage_file_finder_fixture
+
+ # Create some sample coverage files
+ coverage_files = [
+ project_root / "coverage.xml",
+ project_root / "subdirectory" / "test_coverage.xml",
+ project_root / "test_file.abc",
+ project_root / "subdirectory" / "another_file.abc",
+ project_root / ".tox" / "another_file.abc",
+ ]
+ (project_root / "subdirectory").mkdir()
+ (project_root / ".tox").mkdir()
+ for file in coverage_files:
+ file.touch()
+
+ coverage_file_finder.explicitly_listed_files = [
+ project_root / ".tox" / "another_file.abc",
+ ]
+ result = sorted(
+ [file.get_filename() for file in coverage_file_finder.find_files()]
)
+ expected = [
+ UploadCollectionResultFile(Path(f"{project_root}/coverage.xml")),
+ UploadCollectionResultFile(
+ Path(f"{project_root}/subdirectory/test_coverage.xml")
+ ),
+ UploadCollectionResultFile(Path(f"{project_root}/.tox/another_file.abc")),
+ ]
+ expected_paths = sorted([file.get_filename() for file in expected])
+
+ assert result == expected_paths
+
+ def test_find_coverage_files_with_user_specified_files_in_excluded(
+ self, capsys, coverage_file_finder_fixture
+ ):
+ (
+ project_root,
+ coverage_file_finder,
+ ) = coverage_file_finder_fixture
+
+ # Create some sample coverage files
+ coverage_files = [
+ project_root / "coverage.xml",
+ project_root / "subdirectory" / "test_coverage.xml",
+ project_root / "test_file.abc",
+ project_root / "subdirectory" / "another_file.abc",
+ project_root / "subdirectory" / "another_file.bash",
+ project_root / ".tox" / "another_file.abc",
+ ]
+ (project_root / "subdirectory").mkdir()
+ (project_root / ".tox").mkdir()
+ for file in coverage_files:
+ file.touch()
+
+ coverage_file_finder.explicitly_listed_files.append(
+ project_root / "subdirectory" / "another_file.bash"
+ )
result = sorted(
- [
- file.get_filename()
- for file in self.coverage_file_finder.find_coverage_files()
- ]
+ [file.get_filename() for file in coverage_file_finder.find_files()]
)
expected = [
- UploadCollectionResultFile(Path(f"{self.project_root}/coverage.xml")),
+ UploadCollectionResultFile(Path(f"{project_root}/coverage.xml")),
UploadCollectionResultFile(
- Path(f"{self.project_root}/subdirectory/test_coverage.xml")
+ Path(f"{project_root}/subdirectory/test_coverage.xml")
+ ),
+ UploadCollectionResultFile(Path(f"{project_root}/test_file.abc")),
+ UploadCollectionResultFile(
+ Path(f"{project_root}/subdirectory/another_file.abc")
+ ),
+ UploadCollectionResultFile(
+ Path(f"{project_root}/subdirectory/another_file.bash")
),
]
expected_paths = sorted([file.get_filename() for file in expected])
- self.assertEqual(result, expected_paths)
+
+ assert result == expected_paths
+
+ assert (
+ "Some files being explicitly added are found in the list of excluded files for upload. We are still going to search for the explicitly added files."
+ in capsys.readouterr().err
+ )
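`CoverageFileFinder.find_coverage_files()` has become `FileFinder.find_files()`, and the finder now takes a `report_type` so the same search machinery can target JUnit-style test-result files. A short usage sketch based on the constructors exercised above:

from pathlib import Path
from codecov_cli.services.upload.file_finder import FileFinder

project_root = Path(".")

# Default mode: search for coverage reports under project_root.
coverage_files = FileFinder(project_root).find_files()

# Test-results mode picks up JUnit-style files such as junit.xml instead.
test_result_files = FileFinder(project_root, report_type="test_results").find_files()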
diff --git a/tests/services/upload/test_upload_collector.py b/tests/services/upload/test_upload_collector.py
index 15279275..39124d0e 100644
--- a/tests/services/upload/test_upload_collector.py
+++ b/tests/services/upload/test_upload_collector.py
@@ -1,25 +1,29 @@
from pathlib import Path
from unittest.mock import patch
+from codecov_cli.helpers.versioning_systems import GitVersioningSystem
+from codecov_cli.services.upload.file_finder import FileFinder
+from codecov_cli.services.upload.network_finder import NetworkFinder
from codecov_cli.services.upload.upload_collector import UploadCollector
+from codecov_cli.types import UploadCollectionResultFile
def test_fix_kt_files():
kt_file = Path("tests/data/files_to_fix_examples/sample.kt")
- col = UploadCollector(None, None, None)
+ col = UploadCollector(None, None, None, None)
- fixes = col._produce_file_fixes_for_network([str(kt_file)])
+ fixes = col._produce_file_fixes([kt_file])
assert len(fixes) == 1
fixes_for_kt_file = fixes[0]
- assert fixes_for_kt_file.eof == 30
- assert fixes_for_kt_file.fixed_lines_without_reason == set([1, 3, 7, 9, 12, 14])
+ assert fixes_for_kt_file.eof == 33
+ assert fixes_for_kt_file.fixed_lines_without_reason == set([1, 3, 7, 9, 12, 14, 18])
assert fixes_for_kt_file.fixed_lines_with_reason == set(
[
- (17, " /*\n"),
- (22, "*/\n"),
+ (20, " /*\n"),
+ (25, "*/\n"),
]
)
@@ -27,9 +31,9 @@ def test_fix_kt_files():
def test_fix_go_files():
go_file = Path("tests/data/files_to_fix_examples/sample.go")
- col = UploadCollector(None, None, None)
+ col = UploadCollector(None, None, None, None)
- fixes = col._produce_file_fixes_for_network([str(go_file)])
+ fixes = col._produce_file_fixes([go_file])
assert len(fixes) == 1
fixes_for_go_file = fixes[0]
@@ -53,9 +57,9 @@ def test_fix_bad_encoding_files(mock_open):
mock_open.side_effect = UnicodeDecodeError("", bytes(), 0, 0, "")
go_file = Path("tests/data/files_to_fix_examples/bad_encoding.go")
- col = UploadCollector(None, None, None)
+ col = UploadCollector(None, None, None, None)
- fixes = col._produce_file_fixes_for_network([str(go_file)])
+ fixes = col._produce_file_fixes([go_file])
assert len(fixes) == 1
fixes_for_go_file = fixes[0]
assert fixes_for_go_file.eof is None
@@ -66,9 +70,9 @@ def test_fix_bad_encoding_files(mock_open):
def test_fix_php_files():
php_file = Path("tests/data/files_to_fix_examples/sample.php")
- col = UploadCollector(None, None, None)
+ col = UploadCollector(None, None, None, None)
- fixes = col._produce_file_fixes_for_network([str(php_file)])
+ fixes = col._produce_file_fixes([php_file])
assert len(fixes) == 1
fixes_for_php_file = fixes[0]
@@ -81,9 +85,9 @@ def test_fix_php_files():
def test_fix_for_cpp_swift_vala(tmp_path):
cpp_file = Path("tests/data/files_to_fix_examples/sample.cpp")
- col = UploadCollector(None, None, None)
+ col = UploadCollector(None, None, None, None)
- fixes = col._produce_file_fixes_for_network([str(cpp_file)])
+ fixes = col._produce_file_fixes([cpp_file])
assert len(fixes) == 1
fixes_for_cpp_file = fixes[0]
@@ -103,9 +107,70 @@ def test_fix_for_cpp_swift_vala(tmp_path):
def test_fix_when_disabled_fixes(tmp_path):
cpp_file = Path("tests/data/files_to_fix_examples/sample.cpp")
- col = UploadCollector(None, None, None, True)
+ col = UploadCollector(None, None, None, None, True)
- fixes = col._produce_file_fixes_for_network([str(cpp_file)])
+ fixes = col._produce_file_fixes([cpp_file])
assert len(fixes) == 0
assert fixes == []
+
+
+def test_generate_upload_data(tmp_path):
+ (tmp_path / "sub").mkdir()
+ (tmp_path / "sub" / "subsub").mkdir()
+ (tmp_path / "node_modules").mkdir()
+
+ should_find = [
+ "abc-coverage.cov",
+ "coverage-abc.abc",
+ "sub/coverage-abc.abc",
+ "sub/subsub/coverage-abc.abc",
+ "coverage.abc",
+ "jacocoxyz.xml",
+ "sub/jacocoxyz.xml",
+ "codecov.abc",
+ "sub/subsub/codecov.abc",
+ "xyz.codecov.abc",
+ "sub/xyz.codecov.abc",
+ "sub/subsub/xyz.codecov.abc",
+ "cover.out",
+ "abc.gcov",
+ "sub/abc.gcov",
+ "sub/subsub/abc.gcov",
+ ]
+
+ should_ignore = [
+ "abc.codecov.exe",
+ "sub/abc.codecov.exe",
+ "codecov.exe",
+ "__pycache__",
+ "sub/subsub/__pycache__",
+ ".gitignore",
+ "a.sql",
+ "a.csv",
+ ".abc-coveragerc",
+ ".coverage-xyz",
+ "sub/scoverage.measurements.xyz",
+ "sub/test_abcd_coverage.txt",
+ "test-result-ff-codecoverage.json",
+ "node_modules/abc-coverage.cov",
+ ]
+
+ for filename in should_find:
+ (tmp_path / filename).touch()
+
+ for filename in should_ignore:
+ (tmp_path / filename).touch()
+
+ file_finder = FileFinder(tmp_path)
+
+ network_finder = NetworkFinder(GitVersioningSystem(), None, None, None)
+
+ collector = UploadCollector([], network_finder, file_finder, None)
+
+ res = collector.generate_upload_data()
+
+ expected = {UploadCollectionResultFile(tmp_path / file) for file in should_find}
+
+ for file in expected:
+ assert file in res.files
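`test_generate_upload_data` above wires the real finders into an `UploadCollector` and checks that every expected coverage file ends up in the generated upload data. A condensed sketch of that wiring, using the same constructor shapes as the test (the `None` positions are optional pieces the test leaves unset):

from pathlib import Path

from codecov_cli.helpers.versioning_systems import GitVersioningSystem
from codecov_cli.services.upload.file_finder import FileFinder
from codecov_cli.services.upload.network_finder import NetworkFinder
from codecov_cli.services.upload.upload_collector import UploadCollector

file_finder = FileFinder(Path("."))
network_finder = NetworkFinder(GitVersioningSystem(), None, None, None)

# Preparation plugins list left empty; the trailing None mirrors the test.
collector = UploadCollector([], network_finder, file_finder, None)
upload_data = collector.generate_upload_data()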
diff --git a/tests/services/upload/test_upload_service.py b/tests/services/upload/test_upload_service.py
index dfb6c4da..2cf216d1 100644
--- a/tests/services/upload/test_upload_service.py
+++ b/tests/services/upload/test_upload_service.py
@@ -20,8 +20,8 @@ def test_do_upload_logic_happy_path_legacy_uploader(mocker):
mock_select_preparation_plugins = mocker.patch(
"codecov_cli.services.upload.select_preparation_plugins"
)
- mock_select_coverage_file_finder = mocker.patch(
- "codecov_cli.services.upload.select_coverage_file_finder"
+ mock_select_file_finder = mocker.patch(
+ "codecov_cli.services.upload.select_file_finder"
)
mock_select_network_finder = mocker.patch(
"codecov_cli.services.upload.select_network_finder"
@@ -47,6 +47,7 @@ def test_do_upload_logic_happy_path_legacy_uploader(mocker):
cli_config,
versioning_system,
ci_adapter,
+ upload_file_type="coverage",
commit_sha="commit_sha",
report_code="report_code",
build_code="build_code",
@@ -54,19 +55,27 @@ def test_do_upload_logic_happy_path_legacy_uploader(mocker):
job_code="job_code",
env_vars=None,
flags=None,
+ gcov_args=None,
+ gcov_executable=None,
+ gcov_ignore=None,
+ gcov_include=None,
name="name",
+ network_filter=None,
+ network_prefix=None,
network_root_folder=None,
- coverage_files_search_root_folder=None,
- coverage_files_search_exclude_folders=None,
- coverage_files_search_explicitly_listed_files=None,
+ files_search_root_folder=None,
+ files_search_exclude_folders=None,
+ files_search_explicitly_listed_files=None,
plugin_names=["first_plugin", "another", "forth"],
token="token",
branch="branch",
use_legacy_uploader=True,
slug="slug",
+ swift_project="App",
pull_request_number="pr",
git_service="git_service",
enterprise_url=None,
+ args=None,
)
out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
assert out_bytes == [
@@ -77,17 +86,33 @@ def test_do_upload_logic_happy_path_legacy_uploader(mocker):
assert res == LegacyUploadSender.send_upload_data.return_value
mock_select_preparation_plugins.assert_called_with(
- cli_config, ["first_plugin", "another", "forth"]
- )
- mock_select_coverage_file_finder.assert_called_with(None, None, None, False)
- mock_select_network_finder.assert_called_with(versioning_system)
- mock_generate_upload_data.assert_called_with()
+ cli_config,
+ ["first_plugin", "another", "forth"],
+ {
+ "folders_to_ignore": None,
+ "gcov_args": None,
+ "gcov_executable": None,
+ "gcov_ignore": None,
+ "gcov_include": None,
+ "project_root": None,
+ "swift_project": "App",
+ },
+ )
+ mock_select_file_finder.assert_called_with(None, None, None, False, "coverage")
+ mock_select_network_finder.assert_called_with(
+ versioning_system,
+ network_filter=None,
+ network_prefix=None,
+ network_root_folder=None,
+ )
+ mock_generate_upload_data.assert_called_with("coverage")
mock_send_upload_data.assert_called_with(
mock_generate_upload_data.return_value,
"commit_sha",
"token",
None,
"report_code",
+ "coverage",
"name",
"branch",
"slug",
@@ -99,6 +124,9 @@ def test_do_upload_logic_happy_path_legacy_uploader(mocker):
"service",
"git_service",
None,
+ None,
+ False,
+ None,
)
@@ -106,8 +134,8 @@ def test_do_upload_logic_happy_path(mocker):
mock_select_preparation_plugins = mocker.patch(
"codecov_cli.services.upload.select_preparation_plugins"
)
- mock_select_coverage_file_finder = mocker.patch(
- "codecov_cli.services.upload.select_coverage_file_finder"
+ mock_select_file_finder = mocker.patch(
+ "codecov_cli.services.upload.select_file_finder"
)
mock_select_network_finder = mocker.patch(
"codecov_cli.services.upload.select_network_finder"
@@ -133,6 +161,7 @@ def test_do_upload_logic_happy_path(mocker):
cli_config,
versioning_system,
ci_adapter,
+ upload_file_type="coverage",
commit_sha="commit_sha",
report_code="report_code",
build_code="build_code",
@@ -140,15 +169,22 @@ def test_do_upload_logic_happy_path(mocker):
job_code="job_code",
env_vars=None,
flags=None,
+ gcov_args=None,
+ gcov_executable=None,
+ gcov_ignore=None,
+ gcov_include=None,
name="name",
+ network_filter=None,
+ network_prefix=None,
network_root_folder=None,
- coverage_files_search_root_folder=None,
- coverage_files_search_exclude_folders=None,
- coverage_files_search_explicitly_listed_files=None,
+ files_search_root_folder=None,
+ files_search_exclude_folders=None,
+ files_search_explicitly_listed_files=None,
plugin_names=["first_plugin", "another", "forth"],
token="token",
branch="branch",
slug="slug",
+ swift_project="App",
pull_request_number="pr",
git_service="git_service",
enterprise_url=None,
@@ -162,17 +198,33 @@ def test_do_upload_logic_happy_path(mocker):
assert res == UploadSender.send_upload_data.return_value
mock_select_preparation_plugins.assert_called_with(
- cli_config, ["first_plugin", "another", "forth"]
- )
- mock_select_coverage_file_finder.assert_called_with(None, None, None, False)
- mock_select_network_finder.assert_called_with(versioning_system)
- mock_generate_upload_data.assert_called_with()
+ cli_config,
+ ["first_plugin", "another", "forth"],
+ {
+ "folders_to_ignore": None,
+ "gcov_args": None,
+ "gcov_executable": None,
+ "gcov_ignore": None,
+ "gcov_include": None,
+ "project_root": None,
+ "swift_project": "App",
+ },
+ )
+ mock_select_file_finder.assert_called_with(None, None, None, False, "coverage")
+ mock_select_network_finder.assert_called_with(
+ versioning_system,
+ network_filter=None,
+ network_prefix=None,
+ network_root_folder=None,
+ )
+ mock_generate_upload_data.assert_called_with("coverage")
mock_send_upload_data.assert_called_with(
mock_generate_upload_data.return_value,
"commit_sha",
"token",
None,
"report_code",
+ "coverage",
"name",
"branch",
"slug",
@@ -184,6 +236,9 @@ def test_do_upload_logic_happy_path(mocker):
"service",
"git_service",
None,
+ None,
+ False,
+ None,
)
@@ -191,8 +246,8 @@ def test_do_upload_logic_dry_run(mocker):
mock_select_preparation_plugins = mocker.patch(
"codecov_cli.services.upload.select_preparation_plugins"
)
- mock_select_coverage_file_finder = mocker.patch(
- "codecov_cli.services.upload.select_coverage_file_finder"
+ mock_select_file_finder = mocker.patch(
+ "codecov_cli.services.upload.select_file_finder"
)
mock_select_network_finder = mocker.patch(
"codecov_cli.services.upload.select_network_finder"
@@ -214,6 +269,7 @@ def test_do_upload_logic_dry_run(mocker):
cli_config,
versioning_system,
ci_adapter,
+ upload_file_type="coverage",
commit_sha="commit_sha",
report_code="report_code",
build_code="build_code",
@@ -221,27 +277,49 @@ def test_do_upload_logic_dry_run(mocker):
job_code="job_code",
env_vars=None,
flags=None,
+ gcov_args=None,
+ gcov_executable=None,
+ gcov_ignore=None,
+ gcov_include=None,
name="name",
+ network_filter=None,
+ network_prefix=None,
network_root_folder=None,
- coverage_files_search_root_folder=None,
- coverage_files_search_exclude_folders=None,
- coverage_files_search_explicitly_listed_files=None,
+ files_search_root_folder=None,
+ files_search_exclude_folders=None,
+ files_search_explicitly_listed_files=None,
plugin_names=["first_plugin", "another", "forth"],
token="token",
branch="branch",
slug="slug",
+ swift_project="App",
pull_request_number="pr",
dry_run=True,
git_service="git_service",
enterprise_url=None,
)
out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
- mock_select_coverage_file_finder.assert_called_with(None, None, None, False)
- mock_select_network_finder.assert_called_with(versioning_system)
+ mock_select_file_finder.assert_called_with(None, None, None, False, "coverage")
+ mock_select_network_finder.assert_called_with(
+ versioning_system,
+ network_filter=None,
+ network_prefix=None,
+ network_root_folder=None,
+ )
assert mock_generate_upload_data.call_count == 1
assert mock_send_upload_data.call_count == 0
mock_select_preparation_plugins.assert_called_with(
- cli_config, ["first_plugin", "another", "forth"]
+ cli_config,
+ ["first_plugin", "another", "forth"],
+ {
+ "folders_to_ignore": None,
+ "gcov_args": None,
+ "gcov_executable": None,
+ "gcov_ignore": None,
+ "gcov_include": None,
+ "project_root": None,
+ "swift_project": "App",
+ },
)
assert out_bytes == [
("info", "dry-run option activated. NOT sending data to Codecov."),
@@ -257,7 +335,7 @@ def test_do_upload_logic_dry_run(mocker):
def test_do_upload_logic_verbose(mocker, use_verbose_option):
mocker.patch("codecov_cli.services.upload.select_preparation_plugins")
- mocker.patch("codecov_cli.services.upload.select_coverage_file_finder")
+ mocker.patch("codecov_cli.services.upload.select_file_finder")
mocker.patch("codecov_cli.services.upload.select_network_finder")
mocker.patch.object(UploadCollector, "generate_upload_data")
mocker.patch.object(
@@ -274,27 +352,35 @@ def test_do_upload_logic_verbose(mocker, use_verbose_option):
cli_config,
versioning_system,
ci_adapter,
- commit_sha="commit_sha",
- report_code="report_code",
+ branch="branch",
build_code="build_code",
build_url="build_url",
- job_code="job_code",
+ commit_sha="commit_sha",
+ dry_run=True,
+ enterprise_url=None,
env_vars=None,
+ files_search_exclude_folders=None,
+ files_search_explicitly_listed_files=None,
+ files_search_root_folder=None,
flags=None,
+ gcov_args=None,
+ gcov_executable=None,
+ gcov_ignore=None,
+ gcov_include=None,
+ git_service="git_service",
+ job_code="job_code",
name="name",
+ network_filter=None,
+ network_prefix=None,
network_root_folder=None,
- coverage_files_search_root_folder=None,
- coverage_files_search_exclude_folders=None,
- coverage_files_search_explicitly_listed_files=None,
plugin_names=["first_plugin", "another", "forth"],
- token="token",
- branch="branch",
+ pull_request_number="pr",
+ report_code="report_code",
slug="slug",
+ swift_project="App",
+ token="token",
+ upload_file_type="coverage",
use_legacy_uploader=True,
- pull_request_number="pr",
- dry_run=True,
- git_service="git_service",
- enterprise_url=None,
)
out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
assert out_bytes == [
@@ -321,8 +407,8 @@ def test_do_upload_no_cov_reports_found(mocker):
mock_select_preparation_plugins = mocker.patch(
"codecov_cli.services.upload.select_preparation_plugins"
)
- mock_select_coverage_file_finder = mocker.patch(
- "codecov_cli.services.upload.select_coverage_file_finder",
+ mock_select_file_finder = mocker.patch(
+ "codecov_cli.services.upload.select_file_finder",
)
mock_select_network_finder = mocker.patch(
"codecov_cli.services.upload.select_network_finder"
@@ -348,6 +434,7 @@ def side_effect(*args, **kwargs):
cli_config,
versioning_system,
ci_adapter,
+ upload_file_type="coverage",
commit_sha="commit_sha",
report_code="report_code",
build_code="build_code",
@@ -355,15 +442,22 @@ def side_effect(*args, **kwargs):
job_code="job_code",
env_vars=None,
flags=None,
+ gcov_args=None,
+ gcov_executable=None,
+ gcov_ignore=None,
+ gcov_include=None,
name="name",
+ network_filter=None,
+ network_prefix=None,
network_root_folder=None,
- coverage_files_search_root_folder=None,
- coverage_files_search_exclude_folders=None,
- coverage_files_search_explicitly_listed_files=None,
+ files_search_root_folder=None,
+ files_search_exclude_folders=None,
+ files_search_explicitly_listed_files=None,
plugin_names=["first_plugin", "another", "forth"],
token="token",
branch="branch",
slug="slug",
+ swift_project="App",
pull_request_number="pr",
git_service="git_service",
enterprise_url=None,
@@ -383,11 +477,26 @@ def side_effect(*args, **kwargs):
text="No coverage reports found. Triggering notificaions without uploading.",
)
mock_select_preparation_plugins.assert_called_with(
- cli_config, ["first_plugin", "another", "forth"]
- )
- mock_select_coverage_file_finder.assert_called_with(None, None, None, False)
- mock_select_network_finder.assert_called_with(versioning_system)
- mock_generate_upload_data.assert_called_with()
+ cli_config,
+ ["first_plugin", "another", "forth"],
+ {
+ "folders_to_ignore": None,
+ "gcov_args": None,
+ "gcov_executable": None,
+ "gcov_ignore": None,
+ "gcov_include": None,
+ "project_root": None,
+ "swift_project": "App",
+ },
+ )
+ mock_select_file_finder.assert_called_with(None, None, None, False, "coverage")
+ mock_select_network_finder.assert_called_with(
+ versioning_system,
+ network_filter=None,
+ network_prefix=None,
+ network_root_folder=None,
+ )
+ mock_generate_upload_data.assert_called_with("coverage")
mock_upload_completion_call.assert_called_with(
commit_sha="commit_sha",
slug="slug",
@@ -402,8 +511,8 @@ def test_do_upload_rase_no_cov_reports_found_error(mocker):
mock_select_preparation_plugins = mocker.patch(
"codecov_cli.services.upload.select_preparation_plugins"
)
- mock_select_coverage_file_finder = mocker.patch(
- "codecov_cli.services.upload.select_coverage_file_finder",
+ mock_select_file_finder = mocker.patch(
+ "codecov_cli.services.upload.select_file_finder",
)
mock_select_network_finder = mocker.patch(
"codecov_cli.services.upload.select_network_finder"
@@ -424,10 +533,11 @@ def side_effect(*args, **kwargs):
ci_adapter.get_fallback_value.return_value = "service"
with pytest.raises(click.ClickException) as exp:
- res = do_upload_logic(
+ _ = do_upload_logic(
cli_config,
versioning_system,
ci_adapter,
+ upload_file_type="coverage",
commit_sha="commit_sha",
report_code="report_code",
build_code="build_code",
@@ -435,15 +545,22 @@ def side_effect(*args, **kwargs):
job_code="job_code",
env_vars=None,
flags=None,
+ gcov_args=None,
+ gcov_executable=None,
+ gcov_ignore=None,
+ gcov_include=None,
name="name",
+ network_filter=None,
+ network_prefix=None,
network_root_folder=None,
- coverage_files_search_root_folder=None,
- coverage_files_search_exclude_folders=None,
- coverage_files_search_explicitly_listed_files=None,
+ files_search_root_folder=None,
+ files_search_exclude_folders=None,
+ files_search_explicitly_listed_files=None,
plugin_names=["first_plugin", "another", "forth"],
token="token",
branch="branch",
slug="slug",
+ swift_project="App",
pull_request_number="pr",
git_service="git_service",
enterprise_url=None,
@@ -454,8 +571,124 @@ def side_effect(*args, **kwargs):
== "No coverage reports found. Please make sure you're generating reports successfully."
)
mock_select_preparation_plugins.assert_called_with(
- cli_config, ["first_plugin", "another", "forth"]
+ cli_config,
+ ["first_plugin", "another", "forth"],
+ {
+ "folders_to_ignore": None,
+ "gcov_args": None,
+ "gcov_executable": None,
+ "gcov_ignore": None,
+ "gcov_include": None,
+ "project_root": None,
+ "swift_project": "App",
+ },
+ )
+ mock_select_file_finder.assert_called_with(None, None, None, False, "coverage")
+ mock_select_network_finder.assert_called_with(
+ versioning_system,
+ network_filter=None,
+ network_prefix=None,
+ network_root_folder=None,
+ )
+ mock_generate_upload_data.assert_called_with("coverage")
+
+
+def test_do_upload_logic_happy_path_test_results(mocker):
+ mock_select_preparation_plugins = mocker.patch(
+ "codecov_cli.services.upload.select_preparation_plugins"
+ )
+ mock_select_file_finder = mocker.patch(
+ "codecov_cli.services.upload.select_file_finder"
+ )
+ mock_select_network_finder = mocker.patch(
+ "codecov_cli.services.upload.select_network_finder"
+ )
+ mock_generate_upload_data = mocker.patch.object(
+ UploadCollector, "generate_upload_data"
+ )
+ mock_send_upload_data = mocker.patch.object(
+ UploadSender,
+ "send_upload_data",
+ return_value=UploadSendingResult(
+ error=None,
+ warnings=[UploadSendingResultWarning(message="somewarningmessage")],
+ ),
+ )
+ cli_config = {}
+ versioning_system = mocker.MagicMock()
+ ci_adapter = mocker.MagicMock()
+ ci_adapter.get_fallback_value.return_value = "service"
+ runner = CliRunner()
+ with runner.isolation() as outstreams:
+ res = do_upload_logic(
+ cli_config,
+ versioning_system,
+ ci_adapter,
+ args={"args": "fake_args"},
+ branch="branch",
+ build_code="build_code",
+ build_url="build_url",
+ commit_sha="commit_sha",
+ enterprise_url=None,
+ env_vars=None,
+ files_search_exclude_folders=None,
+ files_search_explicitly_listed_files=None,
+ files_search_root_folder=None,
+ flags=None,
+ gcov_args=None,
+ gcov_executable=None,
+ gcov_ignore=None,
+ gcov_include=None,
+ git_service="git_service",
+ job_code="job_code",
+ name="name",
+ network_filter="some_dir",
+ network_prefix="hello/",
+ network_root_folder="root/",
+ plugin_names=["first_plugin", "another", "forth"],
+ pull_request_number="pr",
+ report_code="report_code",
+ slug="slug",
+ swift_project="App",
+ token="token",
+ upload_file_type="test_results",
+ )
+ out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
+ assert out_bytes == [
+ ("info", "Process Upload complete"),
+ ("info", "Upload process had 1 warning"),
+ ("warning", "Warning 1: somewarningmessage"),
+ ]
+
+ assert res == UploadSender.send_upload_data.return_value
+    mock_select_preparation_plugins.assert_not_called()
+ mock_select_file_finder.assert_called_with(None, None, None, False, "test_results")
+ mock_select_network_finder.assert_called_with(
+ versioning_system,
+ network_filter="some_dir",
+ network_prefix="hello/",
+ network_root_folder="root/",
+ )
+ mock_generate_upload_data.assert_called_with("test_results")
+ mock_send_upload_data.assert_called_with(
+ mock_generate_upload_data.return_value,
+ "commit_sha",
+ "token",
+ None,
+ "report_code",
+ "test_results",
+ "name",
+ "branch",
+ "slug",
+ "pr",
+ "build_code",
+ "build_url",
+ "job_code",
+ None,
+ "service",
+ "git_service",
+ None,
+ None,
+ False,
+ {"args": "fake_args"},
)
- mock_select_coverage_file_finder.assert_called_with(None, None, None, False)
- mock_select_network_finder.assert_called_with(versioning_system)
- mock_generate_upload_data.assert_called_with()
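
The hunks above replace the positional select_network_finder(versioning_system) assertions with keyword-argument forms. As a quick reference for the pattern, here is a minimal, self-contained sketch (stand-in mocks and values, not codecov_cli's code) showing why assert_called_with now has to spell out the keywords:

from unittest.mock import MagicMock


def test_network_finder_asserted_with_keywords():
    # Stand-in for the patched codecov_cli.services.upload.select_network_finder.
    mock_select_network_finder = MagicMock()
    versioning_system = MagicMock()

    # After the refactor, the code under test passes the filter options
    # as keyword arguments rather than a single positional argument.
    mock_select_network_finder(
        versioning_system,
        network_filter="some_dir",
        network_prefix="hello/",
        network_root_folder="root/",
    )

    # The assertion must mirror the call shape exactly; the older
    # assert_called_with(versioning_system) form would fail here.
    mock_select_network_finder.assert_called_with(
        versioning_system,
        network_filter="some_dir",
        network_prefix="hello/",
        network_root_folder="root/",
    )
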
diff --git a/tests/services/upload_completion/test_upload_completion.py b/tests/services/upload_completion/test_upload_completion.py
index 6ab5ea3b..6a6d5494 100644
--- a/tests/services/upload_completion/test_upload_completion.py
+++ b/tests/services/upload_completion/test_upload_completion.py
@@ -1,6 +1,8 @@
import json
import uuid
+import click
+import pytest
from click.testing import CliRunner
from codecov_cli.services.upload_completion import upload_completion_logic
@@ -93,6 +95,35 @@ def test_upload_completion_200(mocker):
mocked_response.assert_called_once()
+def test_upload_completion_no_token(mocker):
+ res = {
+ "uploads_total": 2,
+ "uploads_success": 2,
+ "uploads_processing": 0,
+ "uploads_error": 0,
+ }
+ mocked_response = mocker.patch(
+ "codecov_cli.helpers.request.requests.post",
+ return_value=RequestResult(
+ status_code=200, error=None, warnings=[], text=json.dumps(res)
+ ),
+ )
+ runner = CliRunner()
+ with runner.isolation() as outstreams:
+ res = upload_completion_logic("commit_sha", "owner/repo", None, "service", None)
+ out_bytes = parse_outstreams_into_log_lines(outstreams[0].getvalue())
+ assert out_bytes == [
+ ("info", "Process Upload Completion complete"),
+ (
+ "info",
+ "{'uploads_total': 2, 'uploads_success': 2, 'uploads_processing': 0, 'uploads_error': 0}",
+ ),
+ ]
+ assert res.error is None
+ assert res.warnings == []
+ mocked_response.assert_called_once()
+
+
def test_upload_completion_403(mocker):
mocked_response = mocker.patch(
"codecov_cli.helpers.request.requests.post",
diff --git a/tests/test_codecov_cli.py b/tests/test_codecov_cli.py
index 5bf650db..6d3a81c3 100644
--- a/tests/test_codecov_cli.py
+++ b/tests/test_codecov_cli.py
@@ -11,7 +11,9 @@ def test_existing_commands():
"get-report-results",
"label-analysis",
"pr-base-picking",
+ "process-test-results",
"send-notifications",
"static-analysis",
+ "upload-coverage",
"upload-process",
]
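
The last hunk adds process-test-results and upload-coverage to the expected-commands list. For context, a hypothetical sketch (stand-in group and a trimmed command list, not the repository's actual test body) of how a registered-command check like test_existing_commands can be written against a Click group:

import click


@click.group()
def cli():
    """Stand-in for the real codecov_cli entry point."""


# Register a few stand-in subcommands; the real group gains
# process-test-results and upload-coverage in this change.
for name in ["process-test-results", "upload-coverage", "upload-process"]:
    cli.add_command(click.Command(name))


def test_existing_commands():
    # Sorting keeps the assertion stable as new subcommands are added.
    assert sorted(cli.commands) == [
        "process-test-results",
        "upload-coverage",
        "upload-process",
    ]

Comparing sorted names is what keeps this test cheap to update: registering a new command only requires appending one entry to the expected list.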