Test Samples (k3d and EKS) #1817

Workflow file for this run
name: Test Samples
on:
  workflow_dispatch:
    inputs:
      version:
        description: 'Radius version number to use (e.g. 0.1.0, 0.1.0-rc1, edge). Defaults to edge.'
        required: false
        default: 'edge'
        type: string
  push:
    branches:
      - v*.*
      - edge
    paths:
      - "samples/**"
      - ".github/workflows/**"
  pull_request:
    types: [opened, synchronize, reopened]
    branches:
      - v*.*
      - edge
  schedule: # 7:45 AM Pacific Time
    - cron: "45 15 * * *"
env:
  RAD_CLI_URL: https://get.radapp.dev/tools/rad/install.sh
  RUN_IDENTIFIER: samplestest-${{ github.run_id }}-${{ github.run_attempt }}
jobs:
  test:
    name: Sample tests
    runs-on: [self-hosted, 1ES.Pool=1ES-Radius-Samples]
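    # Each matrix entry deploys and tests one sample app; runOnPullRequest
    # controls whether the entry also runs for pull requests.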
    strategy:
      fail-fast: false
      matrix:
        include:
          - name: demo
            runOnPullRequest: true
            app: demo
            path: ./samples/demo/app.bicep
            args: --application demo
            uiTestFile: tests/demo/demo.app.spec.ts
            port: 3000
            container: demo
            enableDapr: false
          - name: dapr
            runOnPullRequest: true
            app: dapr
            path: ./samples/dapr/dapr.bicep
            enableDapr: true
          - name: volumes
            runOnPullRequest: true
            app: myapp
            path: ./samples/volumes/app.bicep
            enableDapr: false
          - name: eshop
            runOnPullRequest: true
            app: eshop
            path: ./samples/eshop/iac/eshop.bicep
            uiTestFile: tests/eshop/container.app.spec.ts
            enableDapr: false
          - name: eshop-azure
            runOnPullRequest: false
            app: eshop-azure
            path: ./samples/eshop/iac/eshop.bicep
            args: -p platform=azure -p appName=eshop-azure
            uiTestFile: tests/eshop/container.app.spec.ts
            credential: azure
            enableDapr: false
          - name: eshop-aws
            runOnPullRequest: false
            app: eshop-aws-${{ github.run_id }}-${{ github.run_attempt }}
            path: ./samples/eshop/iac/eshop.bicep
            args: -p platform=aws -p eksClusterName=eks-samplestest-${{ github.run_id }}-${{ github.run_attempt }}-eshop-aws -p appName=eshop-aws-${{ github.run_id }}-${{ github.run_attempt }}
            uiTestFile: tests/eshop/container.app.spec.ts
            credential: aws
            enableDapr: false
    env:
      BRANCH: ${{ github.base_ref || github.ref_name }}
      GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      AZURE_LOCATION: westus3
      AWS_REGION: us-west-2
      AWS_ZONES: us-west-2a,us-west-2b,us-west-2c
    steps:
      # Setup the test assets and configuration
      - name: Generate output variables
        id: gen-id
        run: |
          RUN_IDENTIFIER=${{ env.RUN_IDENTIFIER }}-${{ matrix.name }}
          if [[ "${{ github.event_name }}" == "pull_request" && "${{ matrix.runOnPullRequest }}" == "false" ]]; then
            RUN_TEST=false
          else
            RUN_TEST=true
          fi
          if [[ "${{ matrix.enableDapr }}" == "true" ]]; then
            ENABLE_DAPR=true
          else
            ENABLE_DAPR=false
          fi
          # Set output variables to be used by the later steps
          echo "RUN_IDENTIFIER=${RUN_IDENTIFIER}" >> $GITHUB_OUTPUT
          echo "TEST_AZURE_RESOURCE_GROUP=rg-${RUN_IDENTIFIER}" >> $GITHUB_OUTPUT
          echo "TEST_EKS_CLUSTER_NAME=eks-${RUN_IDENTIFIER}" >> $GITHUB_OUTPUT
          echo "RUN_TEST=${RUN_TEST}" >> $GITHUB_OUTPUT
          echo "ENABLE_DAPR=${ENABLE_DAPR}" >> $GITHUB_OUTPUT
      - name: Checkout code
        if: steps.gen-id.outputs.RUN_TEST == 'true'
        uses: actions/checkout@v3
      - name: Ensure inputs.version is valid semver
        if: steps.gen-id.outputs.RUN_TEST == 'true' && inputs.version != ''
        run: |
          python ./.github/scripts/validate_semver.py ${{ inputs.version }}
      - name: Setup Node
        if: steps.gen-id.outputs.RUN_TEST == 'true'
        uses: actions/setup-node@v3
        with:
          node-version: 16
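      # Log in to Azure with the test service principal so the workflow can
      # create and clean up the resource group used by the eshop-azure sample.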
      - name: az CLI login
        if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential == 'azure'
        run: |
          az login --service-principal \
            --username ${{ secrets.AZURE_SP_TESTS_APPID }} \
            --password ${{ secrets.AZURE_SP_TESTS_PASSWORD }} \
            --tenant ${{ secrets.AZURE_SP_TESTS_TENANTID }}
      # Create and install test environment
      - name: Create Azure resource group
        if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential == 'azure'
        id: create-azure-resource-group
        run: |
          current_time=$(date +%s)
          az group create \
            --location ${{ env.AZURE_LOCATION }} \
            --name ${{ steps.gen-id.outputs.TEST_AZURE_RESOURCE_GROUP }} \
            --subscription ${{ secrets.AZURE_SUBSCRIPTIONID_TESTS }} \
            --tags creationTime=$current_time
          while [ $(az group exists --name ${{ steps.gen-id.outputs.TEST_AZURE_RESOURCE_GROUP }} --subscription ${{ secrets.AZURE_SUBSCRIPTIONID_TESTS }}) = false ]; do
            echo "Waiting for resource group ${{ steps.gen-id.outputs.TEST_AZURE_RESOURCE_GROUP }} to be created..."
            sleep 5
          done
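      # The eshop-aws sample runs against a freshly created EKS cluster instead
      # of the local k3d cluster used by the other samples.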
      - name: Configure AWS
        if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential == 'aws'
        run: |
          aws configure set aws_access_key_id ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws configure set aws_secret_access_key ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws configure set region ${{ env.AWS_REGION }}
          aws configure set output json
      - name: Create EKS Cluster
        if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential == 'aws'
        id: create-eks
        run: |
          curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
          sudo mv /tmp/eksctl /usr/local/bin
          eksctl create cluster \
            --name ${{ steps.gen-id.outputs.TEST_EKS_CLUSTER_NAME }} \
            --nodes-min 1 --nodes-max 2 --node-type t3.large \
            --zones ${{ env.AWS_ZONES }} \
            --managed \
            --region ${{ env.AWS_REGION }}
          while [[ "$(eksctl get cluster ${{ steps.gen-id.outputs.TEST_EKS_CLUSTER_NAME }} --region ${{ env.AWS_REGION }} -o json | jq -r .[0].Status)" != "ACTIVE" ]]; do
            echo "Waiting for EKS cluster to be created..."
            sleep 60
          done
        timeout-minutes: 60
        continue-on-error: false
      - name: Update kubeconfig for EKS
        if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential == 'aws'
        run: |
          aws eks update-kubeconfig --region ${{ env.AWS_REGION }} --name ${{ steps.gen-id.outputs.TEST_EKS_CLUSTER_NAME }}
      - name: Download k3d
        if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential != 'aws'
        run: wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash
      - name: Create k3d cluster
        if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential != 'aws'
        run: k3d cluster create --agents 2 -p "80:80@loadbalancer" --k3s-arg "--disable=traefik@server:0"
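      # The dapr sample needs the Dapr control plane on the cluster before the
      # app is deployed; install it with the official Helm chart.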
      - name: Install Dapr
        if: steps.gen-id.outputs.RUN_TEST == 'true' && steps.gen-id.outputs.ENABLE_DAPR == 'true'
        run: |
          helm repo add dapr https://dapr.github.io/helm-charts/
          helm install dapr dapr/dapr --version=1.6 --namespace dapr-system --create-namespace --wait
      # TODO: Remove authentication once GHCR is public
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: rad-ci-bot
          password: ${{ secrets.GH_RAD_CI_BOT_PAT }}
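      # Retry the CLI download up to five times. For a full release version the
      # installer is given the major.minor channel; otherwise the literal
      # version (or edge) is passed through.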
      - name: Download rad CLI
        if: steps.gen-id.outputs.RUN_TEST == 'true'
        run: |
          for attempt in 1 2 3 4 5; do
            if [[ -n "${{ inputs.version }}" && "${{ inputs.version }}" != "edge" && "${{ inputs.version }}" != *"rc"* ]]; then
              INPUT_CHANNEL=$(echo "${{ inputs.version }}" | cut -d '.' -f 1,2)
              echo "Downloading rad CLI version $INPUT_CHANNEL"
              wget -q "${{ env.RAD_CLI_URL }}" -O - | /bin/bash -s $INPUT_CHANNEL
            elif [[ -n "${{ inputs.version }}" ]]; then
              echo "Downloading rad CLI version ${{ inputs.version }}"
              wget -q "${{ env.RAD_CLI_URL }}" -O - | /bin/bash -s ${{ inputs.version }}
            else
              echo "Downloading edge rad CLI"
              wget -q "${{ env.RAD_CLI_URL }}" -O - | /bin/bash -s edge
            fi
            if [ $? -eq 0 ]; then
              break
            fi
          done
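      # Install Radius on the cluster, create the default workspace, group, and
      # environment, register the dev recipes, and wire up Azure or AWS
      # credentials when the matrix entry needs a cloud provider.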
      - name: Initialize local environment
        if: steps.gen-id.outputs.RUN_TEST == 'true'
        run: |
          if [[ "${{ matrix.credential }}" == "aws" ]]; then
            rad install kubernetes
          else
            rad install kubernetes --set rp.publicEndpointOverride=localhost
          fi
          rad group create default
          rad workspace create kubernetes default --group default
          rad group switch default
          rad env create default
          rad env switch default
          rad recipe register default -e default -w default --template-kind bicep --template-path radius.azurecr.io/recipes/dev/rediscaches:latest --resource-type Applications.Datastores/redisCaches
          rad recipe register default -e default -w default --template-kind bicep --template-path radius.azurecr.io/recipes/dev/mongodatabases:latest --resource-type Applications.Datastores/mongoDatabases
          if [[ "${{ matrix.credential }}" == "azure" ]]; then
            rad env update default --azure-subscription-id ${{ secrets.AZURE_SUBSCRIPTIONID_TESTS }} --azure-resource-group ${{ steps.gen-id.outputs.TEST_AZURE_RESOURCE_GROUP }}
            rad credential register azure --client-id ${{ secrets.AZURE_SP_TESTS_APPID }} --client-secret ${{ secrets.AZURE_SP_TESTS_PASSWORD }} --tenant-id ${{ secrets.AZURE_SP_TESTS_TENANTID }}
          fi
          if [[ "${{ matrix.credential }}" == "aws" ]]; then
            rad env update default --aws-region ${{ env.AWS_REGION }} --aws-account-id ${{ secrets.AWS_ACCOUNT_ID }}
            rad credential register aws --access-key-id ${{ secrets.AWS_ACCESS_KEY_ID }} --secret-access-key ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          fi
      # Deploy application and run tests
      - name: Deploy app
        if: steps.gen-id.outputs.RUN_TEST == 'true'
        run: rad deploy ${{ matrix.path }} ${{ matrix.args }}
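      # The sample's pods land in the default-<app> namespace; wait until they
      # are all Ready before running the UI tests.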
      - name: Wait for all pods to be ready
        if: steps.gen-id.outputs.RUN_TEST == 'true'
        run: |
          namespace="default-${{ matrix.app }}"
          label="radapp.io/application=${{ matrix.app }}"
          kubectl wait --for=condition=Ready pod -l $label -n $namespace --timeout=5m
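      # For samples that declare a container and port, expose the container
      # locally with `rad resource expose`; otherwise pass the app's public
      # endpoint to Playwright through the ENDPOINT environment variable.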
      - name: Run Playwright Test
        if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.uiTestFile != ''
        id: run-playwright-test
        run: |
          if [[ "${{ matrix.container }}" != "" ]]; then
            rad resource expose containers ${{ matrix.container }} ${{ matrix.args }} --port ${{ matrix.port }} &
          else
            endpoint="$(rad app status -a ${{ matrix.app }} | sed 's/ /\n/g' | grep http)"
            echo "Endpoint: $endpoint"
            export ENDPOINT=$endpoint
          fi
          cd playwright/
          npm ci
          npx playwright install --with-deps
          npx playwright test ${{ matrix.uiTestFile }} --retries 3
      - name: Upload Playwright Results
        uses: actions/upload-artifact@v3
        if: always() && ( steps.run-playwright-test.outcome == 'success' || steps.run-playwright-test.outcome == 'failure' )
        with:
          name: playwright-report-${{ matrix.name }}
          path: playwright/playwright-report/
          retention-days: 30
          if-no-files-found: error
      # Handle failures
      - name: Get Pod logs for failed tests
        id: get-pod-logs
        if: failure() && steps.run-playwright-test.outcome == 'failure'
        run: |
          # Create pod-logs directory
          mkdir -p playwright/pod-logs/${{ matrix.name }}
          # Get pod logs and save to file
          namespace="default-${{ matrix.app }}"
label="radius.dev/application=${{ matrix.app }}"
          pod_names=($(kubectl get pods -l $label -n $namespace -o jsonpath='{.items[*].metadata.name}'))
          for pod_name in "${pod_names[@]}"; do
            kubectl logs $pod_name -n $namespace > playwright/pod-logs/${{ matrix.name }}/${pod_name}.txt
          done
          echo "Pod logs saved to playwright/pod-logs/${{ matrix.name }}/"
          # Get kubernetes events and save to file
          kubectl get events -n $namespace > playwright/pod-logs/${{ matrix.name }}/events.txt
      - name: Upload Pod logs for failed tests
        uses: actions/upload-artifact@v3
        if: failure() && steps.get-pod-logs.outcome == 'success'
        with:
          name: ${{ matrix.name }}-pod-logs
          path: playwright/pod-logs/${{ matrix.name }}
          retention-days: 30
          if-no-files-found: error
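      # Open a tracking issue for failures on scheduled and push runs only, not
      # for pull requests or manual (workflow_dispatch) runs.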
      - name: Create GitHub issue on failure
        if: failure() && github.event_name != 'pull_request' && github.event_name != 'workflow_dispatch'
        run: gh issue create --title "Samples deployment failed for ${{ matrix.app }}" --body "Test failed on ${{ github.repository }}. See [workflow logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for more details." --repo ${{ github.repository }}
      # Cleanup
      - name: Delete app
        if: steps.gen-id.outputs.RUN_TEST == 'true'
        run: |
          rad app delete ${{ matrix.app }} -y
      - name: Delete Azure resource group
        if: always() && steps.gen-id.outputs.RUN_TEST == 'true' && steps.create-azure-resource-group.outcome == 'success'
        run: |
          # Delete Azure resources created by the test.
          # If deletion fails, the purge workflow will purge the resource group and its resources later.
          az group delete \
            --subscription ${{ secrets.AZURE_SUBSCRIPTIONID_TESTS }} \
            --name ${{ steps.gen-id.outputs.TEST_AZURE_RESOURCE_GROUP }} \
            --yes
      - name: Delete AWS Resources
        if: always() && steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential == 'aws'
        run: |
          # Delete all AWS resources created by the test
          ./.github/scripts/delete-aws-resources.sh ${{ matrix.app }}
      - name: Delete EKS Cluster
        if: always() && steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential == 'aws'
        run: |
          # Uninstall Radius from the EKS cluster
          rad uninstall kubernetes
          # Delete the EKS cluster
          echo "Deleting EKS cluster: ${{ steps.gen-id.outputs.TEST_EKS_CLUSTER_NAME }}"
          eksctl delete cluster --name ${{ steps.gen-id.outputs.TEST_EKS_CLUSTER_NAME }} --region ${{ env.AWS_REGION }} --wait