diff --git a/.github/workflows/reusable-build-test-release.yml b/.github/workflows/reusable-build-test-release.yml
index 5c6042038..b896e8bc1 100644
--- a/.github/workflows/reusable-build-test-release.yml
+++ b/.github/workflows/reusable-build-test-release.yml
@@ -230,6 +230,8 @@ jobs:
       matrix_supportedSC4S: ${{ steps.matrix.outputs.supportedSC4S }}
       matrix_supportedModinputFunctionalVendors: ${{ steps.matrix.outputs.supportedModinputFunctionalVendors }}
       matrix_supportedUIVendors: ${{ steps.matrix.outputs.supportedUIVendors }}
+      python39_splunk: ${{steps.python39_splunk.outputs.splunk}}
+      python39_sc4s: ${{steps.python39_splunk.outputs.sc4s}}
     permissions:
       contents: write
       packages: read
@@ -265,7 +267,12 @@ jobs:
             type=ref,event=pr
       - name: matrix
         id: matrix
-        uses: splunk/addonfactory-test-matrix-action@v1.11
+        uses: splunk/addonfactory-test-matrix-action@v1.10
+      - name: python39_Splunk
+        id: python39_splunk
+        run: |
+          echo "splunk={\"version\":\"unreleased-python3_9-a076ce4c50aa\", \"build\":\"a076ce4c50aa\", \"islatest\":false, \"isoldest\":false}" >> "$GITHUB_OUTPUT"
+          echo "sc4s={\"version\":\"2.49.5\", \"docker_registry\":\"ghcr.io/splunk/splunk-connect-for-syslog/container2\"}" >> "$GITHUB_OUTPUT"
 
   fossa-scan:
     runs-on: ubuntu-latest
@@ -704,7 +711,7 @@ jobs:
           SemVer: ${{ steps.semantic.outputs.new_release_version }}
           PrNumber: ${{ github.event.number }}
       - id: uccgen
-        uses: splunk/addonfactory-ucc-generator-action@v1
+        uses: splunk/addonfactory-ucc-generator-action@v2
         with:
           version: ${{ steps.BuildVersion.outputs.VERSION }}
 
@@ -1017,11 +1024,17 @@ jobs:
       - meta
       - setup-workflow
     runs-on: ubuntu-latest
+    continue-on-error: ${{ matrix.python39 }}
     strategy:
       fail-fast: false
       matrix:
         splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }}
         sc4s: ${{ fromJson(needs.meta.outputs.matrix_supportedSC4S) }}
+        python39: [false]
+        include:
+          - splunk: ${{ fromJson(needs.meta.outputs.python39_splunk) }}
+            sc4s: ${{ fromJson(needs.meta.outputs.python39_sc4s) }}
+            python39: true
     container:
       image: ghcr.io/splunk/workflow-engine-base:2.0.3
     env:
@@ -1044,6 +1057,10 @@ jobs:
       - uses: actions/checkout@v3
         with:
           submodules: recursive
+      - name: capture start time
+        id: capture-start-time
+        run: |
+          echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT"
       - name: Configure AWS credentials
         uses: aws-actions/configure-aws-credentials@v2
         with:
@@ -1077,6 +1094,7 @@ jobs:
           echo "Splunk password is available in SecretServer shared folder: Shared Splunk - GDI - Lab Credentials under SPLUNK_DEPLOYMENT_PASSWORD"
       - name: run-tests
         id: run-tests
+        timeout-minutes: 340
         continue-on-error: true
         env:
           ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
@@ -1095,28 +1113,22 @@ jobs:
           sc4s-version: ${{ matrix.sc4s.version }}
           sc4s-docker-registry: ${{ matrix.sc4s.docker_registry }}
           k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }}
-      - name: Cancel workflow
-        env:
-          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
-        if: cancelled()
-        run: |
-          cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }})
-          cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' )
-          cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows)
-          if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then
-            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"
-          else
-            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
-            exit 1
-          fi
       - name: Read secrets from AWS Secrets Manager again into environment variables in case credential rotation
         id: update-argo-token
         if: ${{ !cancelled() }}
         run: |
           ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id ta-github-workflow-automation-token | jq -r '.SecretString')
           echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT"
+      - name: calculate timeout
+        id: calculate-timeout
+        run: |
+          start_time=${{ steps.capture-start-time.outputs.start_time }}
+          current_time=$(date +%s)
+          remaining_time_minutes=$(( 360-((current_time-start_time)/60) ))
+          echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT"
       - name: Check if pod was deleted
         id: is-pod-deleted
+        timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }}
         if: ${{ !cancelled() }}
         shell: bash
         env:
@@ -1126,6 +1138,20 @@ jobs:
           if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then
             echo "retry-workflow=true" >> "$GITHUB_OUTPUT"
           fi
+      - name: Cancel workflow
+        env:
+          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
+        if: cancelled() || ${{ steps.is-pod-deleted.outcome }} != 'success'
+        run: |
+          cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }})
+          cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' )
+          cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows)
+          if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then
+            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"
+          else
+            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
+            exit 1
+          fi
       - name: Retrying workflow
         id: retry-wf
         shell: bash
@@ -1213,16 +1239,7 @@ jobs:
       - name: Test Report
         id: test_report
         uses: dorny/test-reporter@v1
-        if: ${{ !cancelled() && !contains(matrix.splunk.version, 'unreleased-python3_9') }}
-        with:
-          name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} test report
-          path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml"
-          reporter: java-junit
-      - name: Test Report Python 3.9
-        continue-on-error: true
-        id: test_report_python_3_9
-        uses: dorny/test-reporter@v1
-        if: ${{ !cancelled() && contains(matrix.splunk.version, 'unreleased-python3_9') }}
+        if: ${{ !cancelled() }}
         with:
           name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} test report
           path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml"
@@ -1249,11 +1266,17 @@ jobs:
       - meta
       - setup-workflow
     runs-on: ubuntu-latest
+    continue-on-error: ${{ matrix.python39 }}
     strategy:
       fail-fast: false
      matrix:
         splunk: ${{ fromJson(needs.meta.outputs.matrix_latestSplunk) }}
         sc4s: ${{ fromJson(needs.meta.outputs.matrix_supportedSC4S) }}
+        python39: [false]
+        include:
+          - splunk: ${{ fromJson(needs.meta.outputs.python39_splunk) }}
+            sc4s: ${{ fromJson(needs.meta.outputs.python39_sc4s) }}
+            python39: true
     container:
       image: ghcr.io/splunk/workflow-engine-base:2.0.3
     env:
@@ -1275,6 +1298,10 @@ jobs:
      - uses: actions/checkout@v3
         with:
           submodules: recursive
+      - name: capture start time
+        id: capture-start-time
+        run: |
+          echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT"
       - name: Configure AWS credentials
         uses: aws-actions/configure-aws-credentials@v2
         with:
@@ -1308,6 +1335,7 @@ jobs:
           echo "Splunk password is available in SecretServer shared folder: Shared Splunk - GDI - Lab Credentials under SPLUNK_DEPLOYMENT_PASSWORD"
       - name: run-tests
         id: run-tests
+        timeout-minutes: 340
         continue-on-error: true
         env:
           ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
@@ -1326,22 +1354,16 @@ jobs:
           sc4s-version: ${{ matrix.sc4s.version }}
           sc4s-docker-registry: ${{ matrix.sc4s.docker_registry }}
           k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }}
-      - name: Cancel workflow
-        env:
-          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
-        if: cancelled()
+      - name: calculate timeout
+        id: calculate-timeout
         run: |
-          cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }})
-          cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' )
-          cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows)
-          if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then
-            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"
-          else
-            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
-            exit 1
-          fi
+          start_time=${{ steps.capture-start-time.outputs.start_time }}
+          current_time=$(date +%s)
+          remaining_time_minutes=$(( 360-((current_time-start_time)/60) ))
+          echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT"
       - name: Check if pod was deleted
         id: is-pod-deleted
+        timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }}
         if: ${{ !cancelled() }}
         shell: bash
         env:
@@ -1351,6 +1373,20 @@ jobs:
           if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then
             echo "retry-workflow=true" >> "$GITHUB_OUTPUT"
           fi
+      - name: Cancel workflow
+        env:
+          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
+        if: cancelled() || ${{ steps.is-pod-deleted.outcome }} != 'success'
+        run: |
+          cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }})
+          cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' )
+          cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows)
+          if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then
+            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"
+          else
+            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
+            exit 1
+          fi
       - name: Retrying workflow
         id: retry-wf
         shell: bash
@@ -1424,16 +1460,7 @@ jobs:
       - name: Test Report
         id: test_report
         uses: dorny/test-reporter@v1
-        if: ${{ !cancelled() && !contains(matrix.splunk.version, 'unreleased-python3_9') }}
-        with:
-          name: splunk ${{ matrix.splunk.version }} ${{ env.TEST_TYPE }} test report
-          path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml"
-          reporter: java-junit
-      - name: Test Report Python 3.9
-        continue-on-error: true
-        id: test_report_python_3_9
-        uses: dorny/test-reporter@v1
-        if: ${{ !cancelled() && contains(matrix.splunk.version, 'unreleased-python3_9') }}
+        if: ${{ !cancelled() }}
         with:
           name: splunk ${{ matrix.splunk.version }} ${{ env.TEST_TYPE }} test report
           path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml"
@@ -1459,12 +1486,18 @@ jobs:
       - meta
       - setup-workflow
     runs-on: ubuntu-latest
+    continue-on-error: ${{ matrix.python39 }}
     strategy:
       fail-fast: false
       matrix:
         splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }}
         browser: [ "chrome","firefox" ]
         vendor-version: ${{ fromJson(needs.meta.outputs.matrix_supportedUIVendors) }}
+        python39: [false]
+        include:
+          - splunk: ${{ fromJson(needs.meta.outputs.python39_splunk) }}
+            browser: "chrome"
+            python39: true
     container:
       image: ghcr.io/splunk/workflow-engine-base:2.0.3
     env:
@@ -1487,6 +1520,10 @@ jobs:
       - uses: actions/checkout@v3
         with:
           submodules: recursive
+      - name: capture start time
+        id: capture-start-time
+        run: |
+          echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT"
       - name: Configure AWS credentials
         uses: aws-actions/configure-aws-credentials@v2
         with:
@@ -1520,6 +1557,7 @@ jobs:
           echo "Splunk password is available in SecretServer shared folder: Shared Splunk - GDI - Lab Credentials under SPLUNK_DEPLOYMENT_PASSWORD"
       - name: run-tests
         id: run-tests
+        timeout-minutes: 340
         continue-on-error: true
         env:
           ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
@@ -1538,28 +1576,22 @@ jobs:
           vendor-version: ${{ matrix.vendor-version.image }}
           sc4s-version: "No"
           k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }}
-      - name: Cancel workflow
-        env:
-          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
-        if: cancelled()
-        run: |
-          cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }})
-          cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' )
-          cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows)
-          if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then
-            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"
-          else
-            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
-            exit 1
-          fi
       - name: Read secrets from AWS Secrets Manager again into environment variables in case credential rotation
         id: update-argo-token
         if: ${{ !cancelled() }}
         run: |
           ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id ta-github-workflow-automation-token | jq -r '.SecretString')
           echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT"
+      - name: calculate timeout
+        id: calculate-timeout
+        run: |
+          start_time=${{ steps.capture-start-time.outputs.start_time }}
+          current_time=$(date +%s)
+          remaining_time_minutes=$(( 360-((current_time-start_time)/60) ))
+          echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT"
       - name: Check if pod was deleted
         id: is-pod-deleted
+        timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }}
         if: ${{ !cancelled() }}
         shell: bash
         env:
@@ -1569,6 +1601,20 @@ jobs:
           if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted" ; then
             echo "retry-workflow=true" >> "$GITHUB_OUTPUT"
           fi
+      - name: Cancel workflow
+        env:
+          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
+        if: cancelled() || ${{ steps.is-pod-deleted.outcome }} != 'success'
+        run: |
+          cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }})
+          cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' )
+          cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows)
+          if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then
+            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"
+          else
+            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
+            exit 1
+          fi
       - name: Retrying workflow
         id: retry-wf
         shell: bash
@@ -1642,16 +1688,7 @@ jobs:
       - name: Test Report
         id: test_report
         uses: dorny/test-reporter@v1
-        if: ${{ !cancelled() && !contains(matrix.splunk.version, 'unreleased-python3_9') }}
-        with:
-          name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} test report
-          path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml"
-          reporter: java-junit
-      - name: Test Report Python 3.9
-        continue-on-error: true
-        id: test_report_python_3_9
-        uses: dorny/test-reporter@v1
-        if: ${{ !cancelled() && contains(matrix.splunk.version, 'unreleased-python3_9') }}
+        if: ${{ !cancelled() }}
         with:
           name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} test report
           path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml"
@@ -1677,6 +1714,7 @@ jobs:
       - meta
       - setup-workflow
     runs-on: ubuntu-latest
+    continue-on-error: ${{ matrix.python39 }}
     strategy:
       fail-fast: false
       matrix:
@@ -1684,6 +1722,11 @@ jobs:
         modinput-type: [ "modinput_functional" ]
         vendor-version: ${{ fromJson(needs.meta.outputs.matrix_supportedModinputFunctionalVendors) }}
         marker: ${{ fromJson(inputs.marker) }}
+        python39: [false]
+        include:
+          - splunk: ${{ fromJson(needs.meta.outputs.python39_splunk) }}
+            modinput-type: [ "modinput_functional" ]
+            python39: true
     container:
       image: ghcr.io/splunk/workflow-engine-base:2.0.3
     env:
@@ -1706,6 +1749,10 @@ jobs:
       - uses: actions/checkout@v3
         with:
           submodules: recursive
+      - name: capture start time
+        id: capture-start-time
+        run: |
+          echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT"
       - name: Configure AWS credentials
         uses: aws-actions/configure-aws-credentials@v2
         with:
@@ -1751,6 +1798,7 @@ jobs:
           echo "test-arg=$TEST_ARG_M" >> "$GITHUB_OUTPUT"
       - name: run-tests
         id: run-tests
+        timeout-minutes: 340
         continue-on-error: true
         env:
           ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
@@ -1769,28 +1817,22 @@ jobs:
           vendor-version: ${{ matrix.vendor-version.image }}
           sc4s-version: "No"
           k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }}
-      - name: Cancel workflow
-        env:
-          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
-        if: cancelled()
-        run: |
-          cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }})
-          cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' )
-          cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows)
-          if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then
-            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"
-          else
-            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
-            exit 1
-          fi
       - name: Read secrets from AWS Secrets Manager again into environment variables in case credential rotation
         id: update-argo-token
         if: ${{ !cancelled() }}
         run: |
           ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id ta-github-workflow-automation-token | jq -r '.SecretString')
           echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT"
+      - name: calculate timeout
+        id: calculate-timeout
+        run: |
+          start_time=${{ steps.capture-start-time.outputs.start_time }}
+          current_time=$(date +%s)
+          remaining_time_minutes=$(( 360-((current_time-start_time)/60) ))
+          echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT"
       - name: Check if pod was deleted
         id: is-pod-deleted
+        timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }}
         if: ${{ !cancelled() }}
         shell: bash
         env:
@@ -1800,6 +1842,20 @@ jobs:
           if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then
             echo "retry-workflow=true" >> "$GITHUB_OUTPUT"
           fi
+      - name: Cancel workflow
+        env:
+          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
+        if: cancelled() || ${{ steps.is-pod-deleted.outcome }} != 'success'
+        run: |
+          cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }})
+          cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' )
+          cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows)
+          if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then
+            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"
+          else
+            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
+            exit 1
+          fi
       - name: Retrying workflow
         id: retry-wf
         shell: bash
@@ -1873,16 +1929,7 @@ jobs:
       - name: Test Report
         id: test_report
         uses: dorny/test-reporter@v1
-        if: ${{ !cancelled() && !contains(matrix.splunk.version, 'unreleased-python3_9') }}
-        with:
-          name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} test report
-          path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml"
-          reporter: java-junit
-      - name: Test Report Python 3.9
-        continue-on-error: true
-        id: test_report_python_3_9
-        uses: dorny/test-reporter@v1
-        if: ${{ !cancelled() && contains(matrix.splunk.version, 'unreleased-python3_9') }}
+        if: ${{ !cancelled() }}
         with:
           name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} test report
           path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml"
@@ -1908,11 +1955,17 @@ jobs:
       - meta
       - setup-workflow
     runs-on: ubuntu-latest
+    continue-on-error: ${{ matrix.python39 }}
     strategy:
       fail-fast: false
       matrix:
         splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }}
         os: [ "ubuntu:14.04", "ubuntu:16.04","ubuntu:18.04","ubuntu:22.04", "centos:7", "redhat:8.0", "redhat:8.2", "redhat:8.3", "redhat:8.4", "redhat:8.5" ]
+        python39: [false]
+        include:
+          - splunk: ${{ fromJson(needs.meta.outputs.python39_splunk) }}
+            os: "ubuntu:22.04"
+            python39: true
     container:
       image: ghcr.io/splunk/workflow-engine-base:2.0.3
     env:
@@ -1934,6 +1987,10 @@ jobs:
       - uses: actions/checkout@v3
         with:
           submodules: recursive
+      - name: capture start time
+        id: capture-start-time
+        run: |
+          echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT"
       - name: Configure AWS credentials
         uses: aws-actions/configure-aws-credentials@v2
         with:
@@ -1980,6 +2037,7 @@ jobs:
           } >> "$GITHUB_OUTPUT"
       - name: run-tests
         id: run-tests
+        timeout-minutes: 340
         continue-on-error: true
         env:
           ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
@@ -2000,10 +2058,29 @@ jobs:
           os-name: ${{ steps.os-name-version.outputs.os-name }}
           os-version: ${{ steps.os-name-version.outputs.os-version }}
           k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }}
+      - name: calculate timeout
+        id: calculate-timeout
+        run: |
+          start_time=${{ steps.capture-start-time.outputs.start_time }}
+          current_time=$(date +%s)
+          remaining_time_minutes=$(( 360-((current_time-start_time)/60) ))
+          echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT"
+      - name: Check if pod was deleted
+        id: is-pod-deleted
+        timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }}
+        if: ${{ !cancelled() }}
+        shell: bash
+        env:
+          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
+        run: |
+          set -o xtrace
+          if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then
+            echo "retry-workflow=true" >> "$GITHUB_OUTPUT"
+          fi
       - name: Cancel workflow
         env:
           ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
-        if: cancelled()
+        if: cancelled() || ${{ steps.is-pod-deleted.outcome }} != 'success'
         run: |
           cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }})
           cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' )
@@ -2014,17 +2091,6 @@ jobs:
             echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
             exit 1
           fi
-      - name: Check if pod was deleted
-        id: is-pod-deleted
-        if: ${{ !cancelled() }}
-        shell: bash
-        env:
-          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
-        run: |
-          set -o xtrace
-          if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then
-            echo "retry-workflow=true" >> "$GITHUB_OUTPUT"
-          fi
       - name: Retrying workflow
         id: retry-wf
         shell: bash
@@ -2098,16 +2164,7 @@ jobs:
       - name: Test Report
         id: test_report
         uses: dorny/test-reporter@v1
-        if: ${{ !cancelled() && !contains(matrix.splunk.version, 'unreleased-python3_9') }}
-        with:
-          name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ steps.os-name-version.outputs.os-name }} ${{ steps.os-name-version.outputs.os-version }} test report
-          path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml"
-          reporter: java-junit
-      - name: Test Report Python 3.9
-        continue-on-error: true
-        id: test_report_python_3_9
-        uses: dorny/test-reporter@v1
-        if: ${{ !cancelled() && contains(matrix.splunk.version, 'unreleased-python3_9') }}
+        if: ${{ !cancelled() }}
         with:
           name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ steps.os-name-version.outputs.os-name }} ${{ steps.os-name-version.outputs.os-version }} test report
           path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml"
@@ -2159,6 +2216,10 @@ jobs:
       - uses: actions/checkout@v3
         with:
           submodules: recursive
+      - name: capture start time
+        id: capture-start-time
+        run: |
+          echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT"
       - name: Configure AWS credentials
         uses: aws-actions/configure-aws-credentials@v2
         with:
@@ -2204,6 +2265,7 @@ jobs:
           } >> "$GITHUB_OUTPUT"
       - name: run-tests
         id: run-tests
+        timeout-minutes: 340
         continue-on-error: true
         env:
           ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
@@ -2224,10 +2286,29 @@ jobs:
           os-name: ${{ steps.os-name-version.outputs.os-name }}
           os-version: ${{ steps.os-name-version.outputs.os-version }}
           k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }}
+      - name: calculate timeout
+        id: calculate-timeout
+        run: |
+          start_time=${{ steps.capture-start-time.outputs.start_time }}
+          current_time=$(date +%s)
+          remaining_time_minutes=$(( 360-((current_time-start_time)/60) ))
+          echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT"
+      - name: Check if pod was deleted
+        id: is-pod-deleted
+        timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }}
+        if: ${{ !cancelled() }}
+        shell: bash
+        env:
+          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
+        run: |
+          set -o xtrace
+          if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then
+            echo "retry-workflow=true" >> "$GITHUB_OUTPUT"
+          fi
       - name: Cancel workflow
         env:
           ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
-        if: cancelled()
+        if: cancelled() || ${{ steps.is-pod-deleted.outcome }} != 'success'
         run: |
           cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }})
           cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' )
@@ -2238,17 +2319,6 @@ jobs:
             echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
             exit 1
           fi
-      - name: Check if pod was deleted
-        id: is-pod-deleted
-        if: ${{ !cancelled() }}
-        shell: bash
-        env:
-          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
-        run: |
-          set -o xtrace
-          if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then
-            echo "retry-workflow=true" >> "$GITHUB_OUTPUT"
-          fi
       - name: Retrying workflow
         id: retry-wf
         shell: bash
@@ -2382,6 +2452,10 @@ jobs:
       - uses: actions/checkout@v3
         with:
           submodules: recursive
+      - name: capture start time
+        id: capture-start-time
+        run: |
+          echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT"
       - name: Configure AWS credentials
         uses: aws-actions/configure-aws-credentials@v2
         with:
@@ -2432,6 +2506,7 @@ jobs:
           } >> "$GITHUB_OUTPUT"
       - name: run-tests
         id: run-tests
+        timeout-minutes: 340
         continue-on-error: true
         if: ${{ steps.get-escu-detections.outputs.escu-test-run == 'true' }}
         env:
           ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
@@ -2451,6 +2526,25 @@ jobs:
           vendor-version: ${{ matrix.vendor-version.image }}
           sc4s-version: "No"
           k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }}
+      - name: calculate timeout
+        id: calculate-timeout
+        run: |
+          start_time=${{ steps.capture-start-time.outputs.start_time }}
+          current_time=$(date +%s)
+          remaining_time_minutes=$(( 360-((current_time-start_time)/60) ))
+          echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT"
+      - name: Check if pod was deleted
+        id: is-pod-deleted
+        timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }}
+        if: ${{ steps.get-escu-detections.outputs.escu-test-run == 'true' }}
+        shell: bash
+        env:
+          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
+        run: |
+          set -o xtrace
+          if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then
+            echo "retry-workflow=true" >> "$GITHUB_OUTPUT"
+          fi
       - name: Cancel workflow
         env:
           ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
@@ -2465,17 +2559,6 @@ jobs:
             echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
             exit 1
           fi
-      - name: Check if pod was deleted
-        id: is-pod-deleted
-        if: ${{ steps.get-escu-detections.outputs.escu-test-run == 'true' }}
-        shell: bash
-        env:
-          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
-        run: |
-          set -o xtrace
-          if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then
-            echo "retry-workflow=true" >> "$GITHUB_OUTPUT"
-          fi
       - name: Retrying workflow
         id: retry-wf
         shell: bash