name: Cilium E2E Upgrade (ci-e2e-upgrade)
# Any change in triggers needs to be reflected in the concurrency group.
on:
workflow_dispatch:
inputs:
PR-number:
description: "Pull request number."
required: true
context-ref:
description: "Context in which the workflow runs. If PR is from a fork, will be the PR target branch (general case). If PR is NOT from a fork, will be the PR branch itself (this allows committers to test changes to workflows directly from PRs)."
required: true
SHA:
description: "SHA under test (head of the PR branch)."
required: true
extra-args:
description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow."
required: false
default: '{}'
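# Illustrative only (assuming this file is named ci-e2e-upgrade.yaml): a manual run
# can be dispatched with the GitHub CLI, e.g.
#   gh workflow run ci-e2e-upgrade.yaml -f PR-number=12345 -f context-ref=main \
#     -f SHA=<full commit SHA>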
push:
branches:
- 'renovate/main-**'
# Run every 8 hours
schedule:
- cron: '0 5/8 * * *'
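# ('0 5/8 * * *' fires at minute 0 of hours 5, 13 and 21 UTC.)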
# When access is specified for any one scope, all unspecified scopes are set
# to 'none'.
permissions:
# To read actions state with catchpoint/workflow-telemetry-action
actions: read
# To be able to access the repository with actions/checkout
contents: read
# To allow retrieving information from the PR API
pull-requests: read
# To be able to set commit status
statuses: write
concurrency:
# Structure:
# - Workflow name
# - Event type
# - A unique identifier depending on event type:
# - push/schedule: SHA
# - workflow_dispatch: PR number
#
# This structure ensures a unique concurrency group name is generated for each
# type of testing, such that re-runs will cancel the previous run.
group: |
${{ github.workflow }}
${{ github.event_name }}
${{
(github.event_name == 'push' && github.sha) ||
(github.event_name == 'schedule' && github.sha) ||
(github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number)
}}
cancel-in-progress: true
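# For example, a scheduled run builds its group from the workflow name, "schedule"
# and the commit SHA, so a re-run for the same commit cancels the one in progress.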
env:
test_concurrency: 5
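# Passed as --test-concurrency to the concurrent `cilium connectivity test` runs below.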
jobs:
echo-inputs:
if: ${{ github.event_name == 'workflow_dispatch' }}
name: Echo Workflow Dispatch Inputs
runs-on: ubuntu-24.04
steps:
- name: Echo Workflow Dispatch Inputs
run: |
echo '${{ tojson(inputs) }}'
commit-status-start:
name: Commit Status Start
runs-on: ubuntu-latest
steps:
- name: Set initial commit status
uses: myrotvorets/set-commit-status-action@3730c0a348a2ace3c110851bed53331bc6406e9f # v2.0.1
with:
sha: ${{ inputs.SHA || github.sha }}
wait-for-images:
name: Wait for images
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- name: Checkout context ref (trusted)
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.context-ref || github.sha }}
persist-credentials: false
- name: Wait for images
uses: ./.github/actions/wait-for-images
with:
SHA: ${{ inputs.SHA }}
setup-and-test:
needs: [wait-for-images]
runs-on: ${{ vars.GH_RUNNER_EXTRA_POWER_UBUNTU_LATEST || 'ubuntu-latest' }}
name: 'Setup & Test'
env:
job_name: 'Setup & Test'
strategy:
fail-fast: false
max-parallel: 25
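# Each entry below pins an LVH kernel image (quay.io/lvh-images/kind), a kube-proxy
# mode and a set of datapath features (tunnel, encryption, egress gateway, ...).
# Entries with skip-upgrade: 'true' install and test only the PR build, skipping the
# upgrade/downgrade cycle against the stable release.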
matrix:
include:
- name: '1'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'rhel8.6-20241031.113911'
kube-proxy: 'iptables'
kpr: 'false'
tunnel: 'vxlan'
host-fw: 'true'
- name: '2'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '5.4-20241104.124130'
kube-proxy: 'iptables'
kpr: 'false'
tunnel: 'disabled'
host-fw: 'true'
- name: '3'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '5.10-20241104.124130'
kube-proxy: 'iptables'
kpr: 'false'
tunnel: 'disabled'
endpoint-routes: 'true'
- name: '4'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '5.10-20241104.124130'
kube-proxy: 'iptables'
kpr: 'true'
devices: '{eth0,eth1}'
secondary-network: 'true'
tunnel: 'vxlan'
lb-mode: 'snat'
endpoint-routes: 'true'
egress-gateway: 'true'
- name: '5'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '5.15-20241104.124130'
kube-proxy: 'iptables'
kpr: 'true'
devices: '{eth0,eth1}'
secondary-network: 'true'
tunnel: 'disabled'
lb-mode: 'dsr'
endpoint-routes: 'true'
egress-gateway: 'true'
host-fw: 'false' # enabling breaks downgrading (missed tail calls)
- name: '6'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '6.1-20241104.124130'
kube-proxy: 'none'
kpr: 'true'
devices: '{eth0,eth1}'
tunnel: 'vxlan'
lb-mode: 'snat'
egress-gateway: 'true'
host-fw: 'true'
lb-acceleration: 'testing-only'
ingress-controller: 'true'
bgp-control-plane: 'true'
- name: '7'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-next-20241105.013305'
kube-proxy: 'none'
kpr: 'true'
devices: '{eth0,eth1}'
secondary-network: 'true'
tunnel: 'disabled'
lb-mode: 'snat'
egress-gateway: 'true'
lb-acceleration: 'testing-only'
ingress-controller: 'true'
local-redirect-policy: 'true'
node-local-dns: 'true'
- name: '8'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-next-20241105.013305'
kube-proxy: 'iptables'
kpr: 'false'
tunnel: 'geneve'
endpoint-routes: 'true'
misc: 'socketLB.enabled=false,nodePort.enabled=true,bpf.masquerade=true'
local-redirect-policy: 'true'
node-local-dns: 'true'
- name: '9'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '5.10-20241104.124130'
kube-proxy: 'iptables'
kpr: 'true'
devices: '{eth0,eth1}'
secondary-network: 'true'
tunnel: 'vxlan'
encryption: 'wireguard'
encryption-node: 'false'
lb-mode: 'snat'
endpoint-routes: 'true'
egress-gateway: 'true'
- name: '10'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '5.15-20241104.124130'
kube-proxy: 'iptables'
kpr: 'false'
tunnel: 'disabled'
encryption: 'wireguard'
encryption-node: 'false'
encryption-strict-mode: 'true'
misc: 'socketLB.enabled=true'
local-redirect-policy: 'true'
node-local-dns: 'true'
- name: '11'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '6.1-20241104.124130'
kube-proxy: 'none'
kpr: 'true'
devices: '{eth0,eth1}'
secondary-network: 'true'
tunnel: 'vxlan'
encryption: 'wireguard'
encryption-node: 'true'
lb-mode: 'snat'
egress-gateway: 'true'
ingress-controller: 'true'
misc: 'socketLB.hostNamespaceOnly=true'
local-redirect-policy: 'true'
node-local-dns: 'true'
- name: '12'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-next-20241105.013305'
kube-proxy: 'none'
kpr: 'true'
devices: '{eth0,eth1}'
secondary-network: 'true'
tunnel: 'disabled'
encryption: 'wireguard'
encryption-node: 'true'
encryption-strict-mode: 'true'
lb-mode: 'snat'
egress-gateway: 'true'
ingress-controller: 'true'
- name: '13'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'rhel8.6-20241031.113911'
kube-proxy: 'iptables'
kpr: 'false'
tunnel: 'vxlan'
misc: 'policyCIDRMatchMode=nodes'
- name: '14'
# Switch to 5.15 until https://github.com/cilium/cilium/issues/27642
# has been resolved. https://github.com/cilium/cilium/pull/30837#issuecomment-1960897445
# explains why 5.4 might cause north-south-loadbalancing tests to
# fail.
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '5.15-20241104.124130'
kube-proxy: 'iptables'
kpr: 'true'
devices: '{eth0,eth1}'
secondary-network: 'true'
tunnel: 'vxlan'
lb-mode: 'snat'
egress-gateway: 'true'
lb-acceleration: 'testing-only'
ingress-controller: 'true'
- name: '15'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-next-20241105.013305'
kube-proxy: 'none'
kpr: 'true'
devices: '{eth0,eth1}'
secondary-network: 'true'
tunnel: 'disabled'
ingress-controller: 'true'
# Disable bpf.tproxy=true until https://github.com/cilium/cilium/issues/31918
# has been resolved.
misc: 'bpfClockProbe=false,cni.uninstall=false'
ciliumendpointslice: 'true'
- name: '16'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '5.15-20241104.124130'
kube-proxy: 'none'
kpr: 'true'
devices: '{eth0,eth1}'
secondary-network: 'true'
tunnel: 'vxlan'
lb-mode: 'snat'
encryption: 'wireguard'
encryption-node: 'false'
host-fw: 'true'
ciliumendpointslice: 'true'
ingress-controller: 'true'
- name: '17'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-20241105.013305'
misc: 'bpf.datapathMode=netkit,bpf.masquerade=true,enableIPv4BIGTCP=true,enableIPv6BIGTCP=true'
kube-proxy: 'none'
kpr: 'true'
ipv6: 'true'
tunnel: 'disabled'
devices: '{eth0,eth1}'
secondary-network: 'true'
ingress-controller: 'true'
- name: '18'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-20241105.013305'
misc: 'bpf.datapathMode=netkit-l2,bpf.masquerade=true,enableIPv4BIGTCP=true,enableIPv6BIGTCP=true'
kube-proxy: 'none'
kpr: 'true'
ipv6: 'true'
tunnel: 'disabled'
devices: '{eth0,eth1}'
secondary-network: 'true'
ingress-controller: 'true'
- name: '19'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-20241105.013305'
misc: 'bpf.datapathMode=netkit,bpf.masquerade=true'
kube-proxy: 'none'
kpr: 'true'
tunnel: 'vxlan'
devices: '{eth0,eth1}'
secondary-network: 'true'
ingress-controller: 'true'
- name: '20'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-20241105.013305'
misc: 'bpf.datapathMode=netkit-l2,bpf.masquerade=true'
kube-proxy: 'none'
kpr: 'true'
tunnel: 'vxlan'
devices: '{eth0,eth1}'
secondary-network: 'true'
ingress-controller: 'true'
- name: '21'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-20241105.013305'
misc: 'bpf.datapathMode=netkit,bpf.masquerade=true'
kube-proxy: 'none'
kpr: 'true'
tunnel: 'geneve'
devices: '{eth0,eth1}'
secondary-network: 'true'
ingress-controller: 'true'
- name: '22'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-20241105.013305'
misc: 'bpf.datapathMode=netkit-l2,bpf.masquerade=true'
kube-proxy: 'none'
kpr: 'true'
tunnel: 'geneve'
devices: '{eth0,eth1}'
secondary-network: 'true'
ingress-controller: 'true'
- name: '23'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-20241105.013305'
misc: 'bpf.datapathMode=netkit,bpf.masquerade=true'
kube-proxy: 'none'
kpr: 'true'
ipv6: 'true'
tunnel: 'disabled'
devices: '{eth0,eth1}'
secondary-network: 'true'
ingress-controller: 'true'
host-fw: 'true'
skip-upgrade: 'true'
- name: '24'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-net-20241105.013305'
misc: 'bpf.datapathMode=netkit,bpf.masquerade=true,enableIPv4BIGTCP=true,enableIPv6BIGTCP=true'
kube-proxy: 'none'
kpr: 'true'
ipv6: 'true'
tunnel: 'disabled'
ciliumendpointslice: 'true'
endpoint-routes: 'true'
skip-upgrade: 'true'
- name: '25'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-net-20241105.013305'
misc: 'bpf.datapathMode=netkit-l2,bpf.masquerade=true,enableIPv4BIGTCP=true,enableIPv6BIGTCP=true'
kube-proxy: 'none'
kpr: 'true'
ipv6: 'true'
tunnel: 'disabled'
ciliumendpointslice: 'true'
endpoint-routes: 'true'
skip-upgrade: 'true'
# Example: to test a feature that is being introduced without performing an
# upgrade, set skip-upgrade: 'true', as in the commented entry below.
# - name: '23'
# # renovate: datasource=docker depName=quay.io/lvh-images/kind
# kernel: 'bpf-20241105.013305'
# misc: 'bpf.datapathMode=netkit-l2,bpf.masquerade=true'
# skip-upgrade: 'true'
timeout-minutes: 55
steps:
- name: Collect Workflow Telemetry
uses: catchpoint/workflow-telemetry-action@94c3c3d9567a0205de6da68a76c428ce4e769af1 # v2.0.0
with:
comment_on_pr: false
- name: Checkout context ref (trusted)
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.context-ref || github.sha }}
persist-credentials: false
- name: Clean up disk space in runner
if: runner.name == 'ubuntu-latest'
uses: ./.github/actions/disk-cleanup
- name: Set Environment Variables
uses: ./.github/actions/set-env-variables
- name: Set up job variables
id: vars
run: |
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
SHA="${{ inputs.SHA }}"
else
SHA="${{ github.sha }}"
fi
echo sha=${SHA} >> $GITHUB_OUTPUT
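# print-downgrade-version.sh resolves the stable Cilium release to test against:
# it is installed first, upgraded to the PR build, and used again as the downgrade
# target in the steps below (unless skip-upgrade is set).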
CILIUM_DOWNGRADE_VERSION=$(contrib/scripts/print-downgrade-version.sh stable)
echo downgrade_version=${CILIUM_DOWNGRADE_VERSION} >> $GITHUB_OUTPUT
- name: Derive stable Cilium installation config
if: ${{ matrix.skip-upgrade != 'true' }}
id: cilium-stable-config
uses: ./.github/actions/cilium-config
with:
image-tag: ${{ steps.vars.outputs.downgrade_version }}
chart-dir: './untrusted/cilium-downgrade/install/kubernetes/cilium/'
tunnel: ${{ matrix.tunnel }}
devices: ${{ matrix.devices }}
endpoint-routes: ${{ matrix.endpoint-routes }}
ipv6: ${{ matrix.ipv6 }}
kpr: ${{ matrix.kpr }}
lb-mode: ${{ matrix.lb-mode }}
lb-acceleration: ${{ matrix.lb-acceleration }}
encryption: ${{ matrix.encryption }}
encryption-node: ${{ matrix.encryption-node }}
encryption-strict-mode: ${{ matrix.encryption-strict-mode }}
egress-gateway: ${{ matrix.egress-gateway }}
host-fw: ${{ matrix.host-fw }}
mutual-auth: false
ingress-controller: ${{ matrix.ingress-controller }}
misc: ${{ matrix.misc || 'bpfClockProbe=false,cni.uninstall=false' }}
ciliumendpointslice: ${{ matrix.ciliumendpointslice }}
local-redirect-policy: ${{ matrix.local-redirect-policy }}
bgp-control-plane: ${{ matrix.bgp-control-plane }}
- name: Derive newest Cilium installation config
id: cilium-newest-config
uses: ./.github/actions/cilium-config
with:
image-tag: ${{ steps.vars.outputs.sha }}
chart-dir: './untrusted/cilium-newest/install/kubernetes/cilium'
tunnel: ${{ matrix.tunnel }}
devices: ${{ matrix.devices }}
endpoint-routes: ${{ matrix.endpoint-routes }}
ipv6: ${{ matrix.ipv6 }}
kpr: ${{ matrix.kpr }}
lb-mode: ${{ matrix.lb-mode }}
lb-acceleration: ${{ matrix.lb-acceleration }}
encryption: ${{ matrix.encryption }}
encryption-node: ${{ matrix.encryption-node }}
encryption-strict-mode: ${{ matrix.encryption-strict-mode }}
egress-gateway: ${{ matrix.egress-gateway }}
host-fw: ${{ matrix.host-fw }}
mutual-auth: false
ingress-controller: ${{ matrix.ingress-controller }}
misc: ${{ matrix.misc || 'bpfClockProbe=false,cni.uninstall=false' }}
ciliumendpointslice: ${{ matrix.ciliumendpointslice }}
local-redirect-policy: ${{ matrix.local-redirect-policy }}
bgp-control-plane: ${{ matrix.bgp-control-plane }}
- name: Set Kind params
id: kind-params
shell: bash
run: |
IP_FAM="dual"
if [ "${{ matrix.ipv6 }}" == "false" ]; then
IP_FAM="ipv4"
fi
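# The positional values below are forwarded to the lvh-kind action as kind setup
# parameters; what each position means is defined by that action, and the empty
# strings are assumed to leave optional parameters at their defaults.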
echo params="--xdp --secondary-network \"\" 3 \"\" \"\" ${{ matrix.kube-proxy }} $IP_FAM" >> $GITHUB_OUTPUT
- name: Provision K8s on LVH VM
uses: ./.github/actions/lvh-kind
with:
test-name: e2e-conformance
kernel: ${{ matrix.kernel }}
kind-params: "${{ steps.kind-params.outputs.params }}"
kind-image: ${{ env.KIND_K8S_IMAGE }}
- name: Install Cilium CLI
uses: cilium/cilium-cli@3286926bbf80fdd0103a372256459e577224f9f6 # v0.16.20
with:
skip-build: ${{ env.CILIUM_CLI_SKIP_BUILD }}
image-repo: ${{ env.CILIUM_CLI_IMAGE_REPO }}
image-tag: ${{ steps.vars.outputs.sha }}
# Warning: since this is a privileged workflow, subsequent workflow job
# steps must take care not to execute untrusted code.
- name: Checkout pull request branch (NOT TRUSTED)
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ steps.vars.outputs.sha }}
persist-credentials: false
path: untrusted/cilium-newest
sparse-checkout: |
install/kubernetes/cilium
examples
- name: Checkout ${{ steps.vars.outputs.downgrade_version }} branch to get the Helm chart
if: ${{ matrix.skip-upgrade != 'true' }}
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ steps.vars.outputs.downgrade_version }}
persist-credentials: false
path: untrusted/cilium-downgrade
sparse-checkout: |
install/kubernetes/cilium
- name: Install Cilium ${{ matrix.skip-upgrade == 'true' && 'from main' || steps.vars.outputs.downgrade_version }}
shell: bash
run: |
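# "~1" is the JSON-Pointer escape for "/", so this adds the label
# cilium.io/no-schedule=true to kind-worker3, presumably to keep
# connectivity-test workloads off that node.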
kubectl patch node kind-worker3 --type=json -p='[{"op":"add","path":"/metadata/labels/cilium.io~1no-schedule","value":"true"}]'
if ${{ matrix.skip-upgrade != 'true' }}; then
cilium install ${{ steps.cilium-stable-config.outputs.config }}
else
cilium install ${{ steps.cilium-newest-config.outputs.config }}
fi
cilium status --wait
kubectl get pods --all-namespaces -o wide
kubectl -n kube-system exec daemonset/cilium -- cilium status
mkdir -p cilium-junits
- name: Install node local DNS
if: ${{ matrix.node-local-dns == 'true' }}
shell: bash
run: |
kubedns=$(kubectl get svc kube-dns -n kube-system -o jsonpath={.spec.clusterIP}) && sed -i "s/__PILLAR__DNS__SERVER__/$kubedns/g;" untrusted/cilium-newest/examples/kubernetes-local-redirect/node-local-dns.yaml
sed -i "s/__PILLAR__UPSTREAM__SERVERS__/1.1.1.1/g;" untrusted/cilium-newest/examples/kubernetes-local-redirect/node-local-dns.yaml
kubectl apply -k untrusted/cilium-newest/examples/kubernetes-local-redirect
kubectl rollout status -n kube-system ds/node-local-dns
- name: Start conn-disrupt-test
shell: bash
run: |
# Create pods which establish long-lived connections. They will be used by
# subsequent connectivity tests with --include-conn-disrupt-test to catch any
# interruption in such flows.
cilium connectivity test --include-conn-disrupt-test --conn-disrupt-test-setup \
--conn-disrupt-test-restarts-path "./cilium-conn-disrupt-restarts" \
--conn-disrupt-dispatch-interval 0ms
- name: Upgrade Cilium
if: ${{ matrix.skip-upgrade != 'true' }}
shell: bash
run: |
cilium upgrade \
${{ steps.cilium-newest-config.outputs.config }}
cilium status --wait --wait-duration=10m
kubectl get pods --all-namespaces -o wide
kubectl -n kube-system exec daemonset/cilium -- cilium status
- name: Setup Cilium CLI flags
id: cli-flags
run: |
EXTRA=()
if [ "${{ matrix.secondary-network }}" = "true" ]; then
EXTRA+=("\"--secondary-network-iface=eth1\"")
fi
if [ "${{ matrix.encryption-strict-mode }}" = "true" ]; then
# "Test Cilium after upgrade" ran strict-mode-encryption test which caused temporary drops.
EXTRA+=("\"--expected-drop-reasons=+Traffic is unencrypted\"")
fi
echo flags="--include-unsafe-tests \
--collect-sysdump-on-failure \
--flush-ct \
--sysdump-hubble-flows-count=1000000 \
--sysdump-hubble-flows-timeout=5m \
--sysdump-output-filename \"cilium-sysdump-${{ matrix.name }}-<ts>\" \
--junit-file \"cilium-junits/${{ env.job_name }} (${{ matrix.name }}).xml\" \
--junit-property github_job_step=\"Run tests upgrade 2 (${{ matrix.name }})\" \
${EXTRA[@]}" >> $GITHUB_OUTPUT
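# The flags assembled above are appended to every `cilium connectivity test`
# invocation in the steps that follow.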
- name: Test Cilium ${{ matrix.skip-upgrade != 'true' && 'after upgrade' }}
shell: bash
run: |
cilium connectivity test \
--include-conn-disrupt-test \
--test "no-interrupted-connections" \
--conn-disrupt-test-restarts-path "./cilium-conn-disrupt-restarts" \
${{ steps.cli-flags.outputs.flags }}
- name: Run sequential Cilium tests
shell: bash
run: |
cilium connectivity test \
--test "seq-.*" \
${{ steps.cli-flags.outputs.flags }}
- name: Run concurrent Cilium tests
shell: bash
run: |
cilium connectivity test \
--test-concurrency=${{ env.test_concurrency }} \
--test "!seq-.*" \
${{ steps.cli-flags.outputs.flags }}
- name: Set up conn disrupt tests after flush-ct Cilium tests
shell: bash
run: |
cilium connectivity test \
--include-conn-disrupt-test \
--conn-disrupt-test-setup \
--conn-disrupt-test-restarts-path "./cilium-conn-disrupt-restarts" \
--conn-disrupt-dispatch-interval 0ms
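# This re-creates the long-lived connections so that the downgrade below can be
# verified for connection interruptions ("no-interrupted-connections").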
- name: Downgrade Cilium to ${{ steps.vars.outputs.downgrade_version }}
if: ${{ matrix.skip-upgrade != 'true' }}
shell: bash
run: |
cilium upgrade \
${{ steps.cilium-stable-config.outputs.config }}
cilium status --wait --wait-duration=10m
kubectl get pods --all-namespaces -o wide
kubectl -n kube-system exec daemonset/cilium -- cilium status
- name: Test Cilium after downgrade to ${{ steps.vars.outputs.downgrade_version }}
if: ${{ matrix.skip-upgrade != 'true' }}
shell: bash
run: |
cilium connectivity test \
--include-conn-disrupt-test \
--test "no-interrupted-connections" \
--conn-disrupt-test-restarts-path "./cilium-conn-disrupt-restarts" \
${{ steps.cli-flags.outputs.flags }}
- name: Run sequential Cilium tests
if: ${{ matrix.skip-upgrade != 'true' }}
shell: bash
run: |
cilium connectivity test \
--test "seq-.*" \
${{ steps.cli-flags.outputs.flags }}
- name: Run concurrent Cilium tests
if: ${{ matrix.skip-upgrade != 'true' }}
shell: bash
run: |
cilium connectivity test \
--test-concurrency=${{ env.test_concurrency }} \
--test "!seq-.*" \
${{ steps.cli-flags.outputs.flags }}
- name: Fetch artifacts
if: ${{ !success() }}
shell: bash
run: |
kubectl get pods --all-namespaces -o wide
cilium status
mkdir -p cilium-sysdumps
cilium sysdump --output-filename cilium-sysdump-${{ matrix.name }}-final
- name: Upload artifacts
if: ${{ !success() }}
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
with:
name: cilium-sysdumps-${{ matrix.name }}
path: cilium-sysdump-*.zip
- name: Upload JUnits [junit]
if: ${{ always() }}
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
with:
name: cilium-junits-${{ matrix.name }}
path: cilium-junits/*.xml
- name: Publish Test Results As GitHub Summary
if: ${{ always() }}
uses: aanm/junit2md@332ebf0fddd34e91b03a832cfafaa826306558f9 # v0.0.3
with:
junit-directory: "cilium-junits"
merge-upload:
if: ${{ always() }}
name: Merge and Upload Artifacts
runs-on: ubuntu-latest
needs: setup-and-test
steps:
- name: Checkout context ref (trusted)
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ inputs.context-ref || github.sha }}
persist-credentials: false
- name: Merge Sysdumps
uses: ./.github/actions/merge-artifacts
with:
name: cilium-sysdumps
pattern: cilium-sysdumps-*
token: ${{ secrets.GITHUB_TOKEN }}
- name: Merge JUnits
uses: ./.github/actions/merge-artifacts
with:
name: cilium-junits
pattern: cilium-junits-*
token: ${{ secrets.GITHUB_TOKEN }}
commit-status-final:
if: ${{ always() }}
name: Commit Status Final
needs: setup-and-test
runs-on: ubuntu-latest
steps:
- name: Set final commit status
uses: myrotvorets/set-commit-status-action@3730c0a348a2ace3c110851bed53331bc6406e9f # v2.0.1
with:
sha: ${{ inputs.SHA || github.sha }}
status: ${{ needs.setup-and-test.result }}