e2e: Move tests to gh action using azure workers #260

Merged · 3 commits · Dec 11, 2023
Changes from 2 commits
30 changes: 30 additions & 0 deletions .github/workflows/ccruntime_e2e.yaml
@@ -0,0 +1,30 @@
name: ccruntime e2e tests
on:
  pull_request:
    branches:
      - main

permissions:
  contents: read

jobs:
  e2e:
    name: operator tests
    strategy:
      matrix:
        runtimeclass: ["kata-qemu", "kata-clh"]
        instance: ["az-ubuntu-2004", "az-ubuntu-2204"]
    runs-on: ${{ matrix.instance }}
    steps:
      - uses: actions/checkout@v4

      - name: Install deps
        run: |
          sudo apt-get update -y
          sudo apt-get install -y ansible python-is-python3

      - name: Run e2e tests
        run: |
          cd tests/e2e
          export PATH="$PATH:/usr/local/bin"
          ./run-local.sh -r "${{ matrix.runtimeclass }}" -u
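The matrix above fans out to four jobs: two runtime classes (kata-qemu, kata-clh) crossed with two Azure runner images (az-ubuntu-2004, az-ubuntu-2204). Each job ends up performing the same invocation as the final step; for one cell of the matrix it is equivalent to:

    # What the runtimeclass=kata-qemu cell runs on its runner
    cd tests/e2e
    export PATH="$PATH:/usr/local/bin"
    ./run-local.sh -r kata-qemu -u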
4 changes: 2 additions & 2 deletions tests/e2e/cluster/up.sh
@@ -57,11 +57,11 @@ main() {
 
     # Untaint the node so that pods can be scheduled on it.
     for role in master control-plane; do
-        kubectl taint nodes "$(hostname)" \
+        kubectl taint nodes "$SAFE_HOST_NAME" \
             "node-role.kubernetes.io/$role:NoSchedule-"
     done
 
-    kubectl label node "$(hostname)" node.kubernetes.io/worker=
+    kubectl label node "$SAFE_HOST_NAME" node.kubernetes.io/worker=
 }
 
 main "$@"
3 changes: 3 additions & 0 deletions tests/e2e/lib.sh
@@ -5,6 +5,9 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
+# Normalize system hostname to the usual kubectl node name
+export SAFE_HOST_NAME=$(hostname | tr '[:upper:]' '[:lower:]')
+
 # Wait until the node is ready. It is set a timeout of 180 seconds.
 #
 check_node_is_ready() {
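lib.sh is presumably sourced by the other e2e scripts, so SAFE_HOST_NAME is computed once and exported for all of them. A minimal sketch of a consumer script, assuming it lives in tests/e2e next to lib.sh:

    #!/usr/bin/env bash
    # Hypothetical consumer; sources lib.sh from its own directory.
    source "$(dirname "${BASH_SOURCE[0]}")/lib.sh"

    # Node lookups use the normalized name instead of a raw $(hostname).
    kubectl get node "$SAFE_HOST_NAME" -o jsonpath='{.metadata.labels}'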
12 changes: 6 additions & 6 deletions tests/e2e/operator.sh
@@ -87,9 +87,9 @@ install_operator() {
 
     # The node should be 'worker' labeled
     local label="node.kubernetes.io/worker"
-    if ! kubectl get node "$(hostname)" -o jsonpath='{.metadata.labels}' \
+    if ! kubectl get node "$SAFE_HOST_NAME" -o jsonpath='{.metadata.labels}' \
         | grep -q "$label"; then
-        kubectl label node "$(hostname)" "$label="
+        kubectl label node "$SAFE_HOST_NAME" "$label="
     fi
 
     handle_older_containerd
@@ -169,7 +169,7 @@ uninstall_ccruntime() {
     local cmd="! sudo -E kubectl get pods -n confidential-containers-system|"
     cmd+="grep -q -e cc-operator-daemon-install"
     cmd+=" -e cc-operator-pre-install-daemon"
-    if ! wait_for_process 180 30 "$cmd"; then
+    if ! wait_for_process 720 30 "$cmd"; then
         echo "ERROR: there are ccruntime pods still running"
         echo "::group::Describe pods from $op_ns namespace"
         kubectl -n "$op_ns" describe pods || true
@@ -182,10 +182,10 @@ uninstall_ccruntime() {
     ! kubectl get --no-headers runtimeclass 2>/dev/null | grep -q kata
 
     # Labels should be gone
-    if kubectl get nodes "$(hostname)" -o jsonpath='{.metadata.labels}' | \
+    if kubectl get nodes "$SAFE_HOST_NAME" -o jsonpath='{.metadata.labels}' | \
         grep -q -e cc-preinstall -e katacontainers.io; then
         echo "ERROR: there are labels left behind"
-        kubectl get nodes "$(hostname)" -o jsonpath='{.metadata.labels}'
+        kubectl get nodes "$SAFE_HOST_NAME" -o jsonpath='{.metadata.labels}'
 
         return 1
     fi
@@ -247,7 +247,7 @@ uninstall_operator() {
     local pod="cc-operator-controller-manager"
     local cmd="! kubectl get pods -n confidential-containers-system |"
     cmd+="grep -q $pod"
-    if ! wait_for_process 90 30 "$cmd"; then
+    if ! wait_for_process 180 30 "$cmd"; then
         echo "ERROR: the controller manager is still running"
 
         local pod_id="$(get_pods_regex $pod $op_ns)"
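Both timeout bumps (180 to 720 seconds for the ccruntime pods, 90 to 180 for the controller manager) reuse the existing wait_for_process helper, which is not part of this diff. Judging by the calls above, it takes (timeout, polling interval, command); a plausible sketch, assuming that shape:

    # Hypothetical wait_for_process-style poller; the real helper lives in
    # the repo's test libraries and may differ.
    wait_for_process() {
        local wait_time="$1" sleep_time="$2" cmd="$3"
        while (( wait_time > 0 )); do
            eval "$cmd" && return 0                # condition satisfied
            sleep "$sleep_time"
            wait_time=$((wait_time - sleep_time))
        done
        return 1                                   # timed out
    }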