From fb42ce75d6d8710ad244f8c7ed4ccefa96b32970 Mon Sep 17 00:00:00 2001 From: Yetkin Timocin Date: Wed, 1 May 2024 15:07:04 -0700 Subject: [PATCH] Adding a job that will purge AWS EKS clusters every 6 hours (#1462) Signed-off-by: ytimocin Signed-off-by: sk593 --- .github/scripts/purge-aws-eks-clusters.sh | 47 +++ .github/scripts/purge-aws-rds-snapshots.sh | 7 +- .github/workflows/purge-aws-eks-clusters.yaml | 42 ++ .../workflows/purge-aws-rds-snapshots.yaml | 4 +- .github/workflows/test-aks.yaml | 381 ------------------ .github/workflows/test.yaml | 4 +- 6 files changed, 96 insertions(+), 389 deletions(-) create mode 100755 .github/scripts/purge-aws-eks-clusters.sh create mode 100644 .github/workflows/purge-aws-eks-clusters.yaml delete mode 100644 .github/workflows/test-aks.yaml diff --git a/.github/scripts/purge-aws-eks-clusters.sh b/.github/scripts/purge-aws-eks-clusters.sh new file mode 100755 index 00000000..a6582183 --- /dev/null +++ b/.github/scripts/purge-aws-eks-clusters.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# ------------------------------------------------------------ +# Copyright 2023 The Radius Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ------------------------------------------------------------ + +# Current time in seconds since epoch +current_time=$(date +%s) + +# Age limit in seconds (6 hours * 3600 seconds/hour) +age_limit=$((6 * 3600)) + +echo "Starting cluster purge script." 
+ +# List clusters and their creation times, filter and delete those older than 6 hours and starting with 'eks-samplestest-' +aws eks list-clusters --query "clusters[]" --output text | xargs -I {} aws eks describe-cluster --name {} --query "cluster.{name: name, createdAt: createdAt}" --output text | while read -r created_at name; do + # Convert creation time to seconds since the epoch. + # GNU date parses the ISO-8601 createdAt value directly, including + # the fractional seconds and the "-07:00" style UTC offset, so the + # raw value needs no reformatting before being passed to date(1). + + # Convert creation time to seconds + created_at_seconds=$(date -d "$created_at" +%s) + + # Calculate age in seconds + age=$((current_time - created_at_seconds)) + + # Check if age is greater than age limit and name starts with 'eks-samplestest-' + if [ "$age" -gt "$age_limit" ] && [[ "$name" == eks-samplestest-* ]]; then + echo "Deleting cluster $name older than 6 hours." + eksctl delete cluster --name "$name" --wait --force + else + echo "Cluster $name is not older than 6 hours or does not meet naming criteria." + fi +done diff --git a/.github/scripts/purge-aws-rds-snapshots.sh b/.github/scripts/purge-aws-rds-snapshots.sh index f9efcc03..8068770f 100755 --- a/.github/scripts/purge-aws-rds-snapshots.sh +++ b/.github/scripts/purge-aws-rds-snapshots.sh @@ -4,7 +4,7 @@ # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software @@ -16,8 +16,7 @@ set -xe -aws rds describe-db-snapshots --query 'DBSnapshots[].DBSnapshotIdentifier' --output text > snapshots.txt -for rds_snapshot_identifier in $(cat ./snapshots.txt) -do +aws rds describe-db-snapshots --query 'DBSnapshots[].DBSnapshotIdentifier' --output text >snapshots.txt +for rds_snapshot_identifier in $(cat ./snapshots.txt); do aws rds delete-db-snapshot --db-snapshot-identifier $rds_snapshot_identifier done diff --git a/.github/workflows/purge-aws-eks-clusters.yaml b/.github/workflows/purge-aws-eks-clusters.yaml new file mode 100644 index 00000000..83eede03 --- /dev/null +++ b/.github/workflows/purge-aws-eks-clusters.yaml @@ -0,0 +1,42 @@ +name: Purge AWS EKS Clusters + +on: + schedule: + # Runs every day at 7 AM + - cron: "0 7 * * *" + +env: + GH_TOKEN: ${{ github.token }} + AWS_REGION: us-west-2 + +jobs: + purge_eks_clusters: + name: Purge AWS EKS Clusters + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install AWS CLI + run: | + sudo apt-get update + sudo apt-get install -y awscli + + - name: Install eksctl + run: | + curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp + sudo mv /tmp/eksctl /usr/local/bin + + - name: Delete old EKS clusters + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_REGION: ${{ env.AWS_REGION }} + run: bash .github/scripts/purge-aws-eks-clusters.sh + + - name: Create GitHub issue on failure + if: failure() && github.event_name != 'pull_request' + run: | + gh issue create --title "Purge AWS EKS Clusters workflow failed" \ + --body "Test failed on ${{ github.repository }}. 
See [workflow logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for more details." \ + --repo ${{ github.repository }} diff --git a/.github/workflows/purge-aws-rds-snapshots.yaml b/.github/workflows/purge-aws-rds-snapshots.yaml index 257dd7ba..53a8ddce 100644 --- a/.github/workflows/purge-aws-rds-snapshots.yaml +++ b/.github/workflows/purge-aws-rds-snapshots.yaml @@ -4,8 +4,8 @@ on: # Runs at 00:30 and 12:30 - cron: "30 0,12 * * *" env: - GH_TOKEN: ${{ github.token }} - AWS_REGION: us-west-2 + GH_TOKEN: ${{ github.token }} + AWS_REGION: us-west-2 jobs: purge_rds_snapshots: name: Purge AWS RDS DBInstance snapshots diff --git a/.github/workflows/test-aks.yaml b/.github/workflows/test-aks.yaml deleted file mode 100644 index f6a30b77..00000000 --- a/.github/workflows/test-aks.yaml +++ /dev/null @@ -1,381 +0,0 @@ -name: Test Samples (AKS and EKS) - -on: - workflow_dispatch: - inputs: - version: - description: "Radius version number to use (e.g. 0.1.0, 0.1.0-rc1, edge). Defaults to edge." 
- required: false - default: "edge" - type: string - push: - branches: - - v*.* - - edge - paths: - - "samples/**" - - ".github/workflows/**" - pull_request: - types: [opened, synchronize, reopened] - branches: - - v*.* - - edge - schedule: # Run every 2 hours - - cron: "0 */2 * * *" -env: - RUN_IDENTIFIER: samplestest-${{ github.run_id }}-${{ github.run_attempt }} -jobs: - # setup the test environment - setup: - name: Setup - runs-on: ubuntu-latest - env: - BRANCH: ${{ github.base_ref || github.ref_name }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AZURE_LOCATION: eastus - AKS_RESOURCE_GROUP: samples-test-rg - AKS_CLUSTER_NAME: samples-test-aks - AWS_REGION: us-west-2 - AWS_ZONES: us-west-2a,us-west-2b,us-west-2c - steps: - - name: Checkout code - uses: actions/checkout@v3 - - name: Setup Node - uses: actions/setup-node@v3 - with: - node-version: 20 - - name: az CLI login - run: | - az login --service-principal \ - --username ${{ secrets.AZURE_SANDBOX_APP_ID }} \ - --password ${{ secrets.AZURE_SANDBOX_PASSWORD }} \ - --tenant ${{ secrets.AZURE_SANDBOX_TENANT_ID }} - - name: Get kubeconf credential for AKS cluster - run: | - az aks get-credentials \ - --subscription ${{ secrets.AZURE_SANDBOX_SUBSCRIPTION_ID }} \ - --resource-group ${{ env.AKS_RESOURCE_GROUP }} \ - --name ${{ env.AKS_CLUSTER_NAME }} - - name: Download rad CLI - run: | - RADIUS_VERSION="${{ inputs.version }}" - if [[ -z "${{ inputs.version }}" ]]; then - RADIUS_VERSION=edge - fi - ./.github/scripts/install-radius.sh $RADIUS_VERSION - - name: Clean up cluster - run: ./.github/scripts/cleanup-cluster.sh - - name: Reinstall Radius after cleanup - run: | - rad install kubernetes --reinstall - test: - name: Sample tests - runs-on: ${{ matrix.os }} - needs: setup - strategy: - fail-fast: false - matrix: - include: - - name: demo - os: ubuntu-latest - runOnPullRequest: true - app: demo - env: demo - path: ./samples/demo/app.bicep - deployArgs: --application demo -p 
image=ghcr.io/radius-project/samples/demo:latest - exposeArgs: --application demo - uiTestFile: tests/demo/demo.app.spec.ts - port: 3000 - container: demo - - name: dapr - os: ubuntu-latest-m - runOnPullRequest: true - app: dapr - env: dapr - path: ./samples/dapr/dapr.bicep - deployArgs: -p environment='/planes/radius/local/resourceGroups/dapr/providers/Applications.Core/environments/dapr' -p frontendImage=ghcr.io/radius-project/samples/dapr-frontend:latest -p backendImage=ghcr.io/radius-project/samples/dapr-backend:latest - - name: volumes - os: ubuntu-latest - runOnPullRequest: true - app: myapp - env: volumes - path: ./samples/volumes/app.bicep - deployArgs: -p image=ghcr.io/radius-project/samples/volumes:latest - - name: eshop-containers - os: ubuntu-latest-m - runOnPullRequest: true - app: eshop - env: containers - path: ./samples/eshop/eshop.bicep - uiTestFile: tests/eshop/eshop.app.spec.ts - deployArgs: -p environment='/planes/radius/local/resourceGroups/eshop-containers/providers/Applications.Core/environments/containers' - - name: eshop-azure - os: ubuntu-latest-m - runOnPullRequest: false - app: eshop-azure - env: azure - path: ./samples/eshop/eshop.bicep - uiTestFile: tests/eshop/eshop.app.spec.ts - deployArgs: -p environment='/planes/radius/local/resourceGroups/eshop-azure/providers/Applications.Core/environments/azure' -p applicationName=eshop-azure - credential: azure - - name: eshop-aws - os: ubuntu-latest-m - runOnPullRequest: false - app: eshop-aws - env: aws - path: ./samples/eshop/eshop.bicep - uiTestFile: tests/eshop/eshop.app.spec.ts - deployArgs: -p environment='/planes/radius/local/resourceGroups/eshop-aws/providers/Applications.Core/environments/aws' -p applicationName=eshop-aws - credential: aws - env: - BRANCH: ${{ github.base_ref || github.ref_name }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AZURE_LOCATION: eastus - AKS_RESOURCE_GROUP: samples-test-rg - AKS_CLUSTER_NAME: samples-test-aks - AWS_REGION: us-west-2 - AWS_ZONES: 
us-west-2a,us-west-2b,us-west-2c - steps: - # Setup the test assets and configuration - - name: Generate output variables - id: gen-id - run: | - RUN_IDENTIFIER=${{ env.RUN_IDENTIFIER }}-${{ matrix.name }} - - if [[ "${{ github.event_name }}" == "pull_request" && "${{ matrix.runOnPullRequest }}" == "false" ]]; then - RUN_TEST=false - else - RUN_TEST=true - fi - - # Set output variables to be used in the other jobs - echo "RUN_IDENTIFIER=${RUN_IDENTIFIER}" >> $GITHUB_OUTPUT - echo "TEST_AZURE_RESOURCE_GROUP=rg-${RUN_IDENTIFIER}" >> $GITHUB_OUTPUT - echo "TEST_EKS_CLUSTER_NAME=eks-${RUN_IDENTIFIER}" >> $GITHUB_OUTPUT - echo "RUN_TEST=${RUN_TEST}" >> $GITHUB_OUTPUT - - name: Checkout code - if: steps.gen-id.outputs.RUN_TEST == 'true' - uses: actions/checkout@v3 - - name: Ensure inputs.version is valid semver - if: steps.gen-id.outputs.RUN_TEST == 'true' && inputs.version != '' - run: | - python ./.github/scripts/validate_semver.py ${{ inputs.version }} - - name: Setup Node - if: steps.gen-id.outputs.RUN_TEST == 'true' - uses: actions/setup-node@v3 - with: - node-version: 20 - - name: az CLI login - if: steps.gen-id.outputs.RUN_TEST == 'true' - run: | - az login --service-principal \ - --username ${{ secrets.AZURE_SANDBOX_APP_ID }} \ - --password ${{ secrets.AZURE_SANDBOX_PASSWORD }} \ - --tenant ${{ secrets.AZURE_SANDBOX_TENANT_ID }} - - name: Configure AWS - if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential == 'aws' - run: | - aws configure set aws_access_key_id ${{ secrets.AWS_ACCESS_KEY_ID }} - aws configure set aws_secret_access_key ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws configure set region ${{ env.AWS_REGION }} - aws configure set output json - - name: Get kubeconf credential for AKS cluster - if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.name != 'eshop-aws' - run: | - az aks get-credentials \ - --subscription ${{ secrets.AZURE_SANDBOX_SUBSCRIPTION_ID }} \ - --resource-group ${{ env.AKS_RESOURCE_GROUP }} \ - --name ${{ 
env.AKS_CLUSTER_NAME }} - - name: Login to GitHub Container Registry - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - # Create and install test environment - - name: Create Azure resource group - if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential == 'azure' - id: create-azure-resource-group - run: | - current_time=$(date +%s) - az group create \ - --location ${{ env.AZURE_LOCATION }} \ - --name ${{ steps.gen-id.outputs.TEST_AZURE_RESOURCE_GROUP }} \ - --subscription ${{ secrets.AZURE_SANDBOX_SUBSCRIPTION_ID }} \ - --tags creationTime=$current_time - while [ $(az group exists --name ${{ steps.gen-id.outputs.TEST_AZURE_RESOURCE_GROUP }} --subscription ${{ secrets.AZURE_SANDBOX_SUBSCRIPTION_ID }}) = false ]; do - echo "Waiting for resource group ${{ steps.gen-id.outputs.TEST_AZURE_RESOURCE_GROUP }} to be created..." - sleep 5 - done - - name: Create EKS Cluster - if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential == 'aws' - id: create-eks - run: | - curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp - sudo mv /tmp/eksctl /usr/local/bin - eksctl create cluster \ - --name ${{ steps.gen-id.outputs.TEST_EKS_CLUSTER_NAME }} \ - --nodes-min 1 --nodes-max 2 --node-type t3.large \ - --zones ${{ env.AWS_ZONES }} \ - --managed \ - --region ${{ env.AWS_REGION }} - while [[ "$(eksctl get cluster ${{ steps.gen-id.outputs.TEST_EKS_CLUSTER_NAME }} --region ${{ env.AWS_REGION }} -o json | jq -r .[0].Status)" != "ACTIVE" ]]; do - echo "Waiting for EKS cluster to be created..." 
- sleep 60 - done - aws eks update-kubeconfig --region ${{ env.AWS_REGION }} --name ${{ steps.gen-id.outputs.TEST_EKS_CLUSTER_NAME }} - timeout-minutes: 60 - continue-on-error: false - - name: Download rad CLI - if: steps.gen-id.outputs.RUN_TEST == 'true' - run: | - RADIUS_VERSION="${{ inputs.version }}" - if [[ -z "${{ inputs.version }}" ]]; then - RADIUS_VERSION=edge - fi - ./.github/scripts/install-radius.sh $RADIUS_VERSION - ## This step is temporary until we have Recipe Packs for Azure & AWS and update the eShop sample - - name: Configure Radius test workspace - if: steps.gen-id.outputs.RUN_TEST == 'true' - run: | - set -x - - export PATH=$GITHUB_WORKSPACE/bin:$PATH - which rad || { echo "cannot find rad"; exit 1; } - - # Install Radius for AWS - if [[ "${{ matrix.credential }}" == "aws" ]]; then - rad install kubernetes - fi - - echo "*** Create workspace, group and environment for test ***" - rad workspace create kubernetes --force - rad group create ${{ matrix.name }} - rad group switch ${{ matrix.name }} - rad env create ${{ matrix.env }} - rad env switch ${{ matrix.env }} - - if [[ "${{ matrix.credential }}" == "azure" ]]; then - rad deploy ./samples/eshop/environments/azure.bicep -p azureResourceGroup=${{ steps.gen-id.outputs.TEST_AZURE_RESOURCE_GROUP }} -p azureSubscriptionId=${{ secrets.AZURE_SANDBOX_SUBSCRIPTION_ID }} - rad env switch ${{ matrix.env }} - elif [[ "${{ matrix.credential }}" == "aws" ]]; then - rad deploy ./samples/eshop/environments/aws.bicep -p awsAccountId=${{ secrets.AWS_ACCOUNT_ID }} -p awsRegion=${{ env.AWS_REGION }} -p eksClusterName=${{ steps.gen-id.outputs.TEST_EKS_CLUSTER_NAME }} - rad env switch ${{ matrix.env }} - else - echo "Registering recipes for ${{ matrix.env }} environment..." 
- rad recipe register default -e ${{ matrix.env }} --template-kind bicep --template-path ghcr.io/radius-project/recipes/local-dev/rediscaches:latest --resource-type Applications.Datastores/redisCaches - rad recipe register default -e ${{ matrix.env }} --template-kind bicep --template-path ghcr.io/radius-project/recipes/local-dev/mongodatabases:latest --resource-type Applications.Datastores/mongoDatabases - rad recipe register default -e ${{ matrix.env }} --template-kind bicep --template-path ghcr.io/radius-project/recipes/local-dev/sqldatabases:latest --resource-type Applications.Datastores/sqlDatabases - rad recipe register default -e ${{ matrix.env }} --template-kind bicep --template-path ghcr.io/radius-project/recipes/local-dev/rabbitmqqueues:latest --resource-type Applications.Messaging/rabbitMQQueues - fi - - name: Configure cloud credentials - if: steps.gen-id.outputs.RUN_TEST == 'true' && ( matrix.credential == 'azure' || matrix.credential == 'aws') - run: | - if [[ "${{ matrix.credential }}" == "azure" ]]; then - rad env update ${{ matrix.env }} --azure-subscription-id ${{ secrets.AZURE_SANDBOX_SUBSCRIPTION_ID }} --azure-resource-group ${{ steps.gen-id.outputs.TEST_AZURE_RESOURCE_GROUP }} - rad credential register azure --client-id ${{ secrets.AZURE_SANDBOX_APP_ID }} --client-secret ${{ secrets.AZURE_SANDBOX_PASSWORD }} --tenant-id ${{ secrets.AZURE_SANDBOX_TENANT_ID }} - fi - if [[ "${{ matrix.credential }}" == "aws" ]]; then - rad env update ${{ matrix.env }} --aws-region ${{ env.AWS_REGION }} --aws-account-id ${{ secrets.AWS_ACCOUNT_ID }} - rad credential register aws --access-key-id ${{ secrets.AWS_ACCESS_KEY_ID }} --secret-access-key ${{ secrets.AWS_SECRET_ACCESS_KEY }} - fi - # Deploy application and run tests - - name: Deploy app - if: steps.gen-id.outputs.RUN_TEST == 'true' - id: deploy-app - uses: nick-fields/retry@v3 - with: - timeout_minutes: 30 - max_attempts: 3 - retry_wait_seconds: 30 - command: rad deploy ${{ matrix.path }} ${{ 
matrix.deployArgs }} -e ${{ matrix.env }} - - name: Run Playwright Test - if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.uiTestFile != '' - id: run-playwright-test - run: | - if [[ "${{ matrix.container }}" != "" ]]; then - rad resource expose containers ${{ matrix.container }} ${{ matrix.exposeArgs }} --port ${{ matrix.port }} & - export ENDPOINT="http://localhost:3000/" - else - endpoint="$(rad app status -a ${{ matrix.app }} | sed 's/ /\n/g' | grep http)" - echo "Endpoint: $endpoint" - export ENDPOINT=$endpoint - fi - - cd playwright/ - npm ci - npx playwright install --with-deps - npx playwright test ${{ matrix.uiTestFile }} --retries 3 - # Upload Playwright test results even if the workflow is cancelled. - - name: Upload Playwright Results - uses: actions/upload-artifact@v3 - if: always() && (steps.run-playwright-test.outcome == 'success' || steps.run-playwright-test.outcome == 'failure') - with: - name: playwright-report-${{ matrix.name }} - path: playwright/playwright-report/ - retention-days: 30 - if-no-files-found: error - # Upload Playwright test videos in case of test failure even if the workflow is cancelled. 
- - name: Upload Playwright Videos - uses: actions/upload-artifact@v4 - if: always() && steps.run-playwright-test.outcome == 'failure' - with: - name: playwright-video-${{ matrix.name }} - path: playwright/test-results/ - retention-days: 30 - if-no-files-found: error - # Handle failures - - name: Get Pod logs for failed tests - id: get-pod-logs - if: failure() && (steps.run-playwright-test.outcome == 'failure' || steps.deploy-app.outcome == 'failure') - run: | - # Create pod-logs directory - mkdir -p playwright/pod-logs/${{ matrix.name }} - # Get pod logs and save to file - namespace="${{ matrix.env }}-${{ matrix.app }}" - label="radapp.io/application=${{ matrix.app }}" - pod_names=($(kubectl get pods -l $label -n $namespace -o jsonpath='{.items[*].metadata.name}')) - for pod_name in "${pod_names[@]}"; do - kubectl logs $pod_name -n $namespace > playwright/pod-logs/${{ matrix.name }}/${pod_name}.txt - done - echo "Pod logs saved to playwright/pod-logs/${{ matrix.name }}/" - # Get kubernetes events and save to file - kubectl get events -n $namespace > playwright/pod-logs/${{ matrix.name }}/events.txt - - name: Upload Pod logs for failed tests - uses: actions/upload-artifact@v3 - if: failure() && steps.get-pod-logs.outcome == 'success' - with: - name: ${{ matrix.name }}-pod-logs - path: playwright/pod-logs/${{ matrix.name }} - retention-days: 30 - if-no-files-found: error - - name: Create GitHub issue on failure - if: failure() && github.event_name == 'schedule' - run: gh issue create --title "Samples deployment failed for ${{ matrix.name }}" --body "Test failed on ${{ github.repository }}. See [workflow logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for more details." 
--repo ${{ github.repository }} --label test-failure - # Cleanup - - name: Delete app and environment - if: steps.gen-id.outputs.RUN_TEST == 'true' && steps.deploy-app.outcome == 'success' - run: | - if command -v rad &> /dev/null; then - rad app delete ${{ matrix.app }} -y - rad env delete ${{ matrix.env }} -y - fi - - name: Delete Azure resource group - if: steps.gen-id.outputs.RUN_TEST == 'true' && steps.create-azure-resource-group.outcome == 'success' - run: | - # Delete Azure resources created by the test - # if deletion fails, purge workflow will purge the resource group and its resources later - az group delete \ - --subscription ${{ secrets.AZURE_SANDBOX_SUBSCRIPTION_ID }} \ - --name ${{ steps.gen-id.outputs.TEST_AZURE_RESOURCE_GROUP }} \ - --yes - - name: Delete AWS Resources - if: steps.gen-id.outputs.RUN_TEST == 'true' && matrix.credential == 'aws' && steps.deploy-app.outcome == 'success' - run: | - # Delete all AWS resources created by the test - ./.github/scripts/delete-aws-resources.sh '/planes/radius/local/resourcegroups/${{ matrix.env }}/providers/Applications.Core/applications/${{ matrix.app }}' - - name: Delete EKS Cluster ${{ steps.gen-id.outputs.TEST_EKS_CLUSTER_NAME }} - if: steps.create-eks.outcome == 'success' - run: | - echo "Deleting EKS cluster: ${{ steps.gen-id.outputs.TEST_EKS_CLUSTER_NAME }}" - eksctl delete cluster --name ${{ steps.gen-id.outputs.TEST_EKS_CLUSTER_NAME }} --region ${{ env.AWS_REGION }} --wait --force diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index da9fc36c..66d9b61b 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -20,8 +20,8 @@ on: branches: - v*.* - edge - schedule: # Run every 2 hours - - cron: "0 */2 * * *" + schedule: # Run every day at 12 PM + - cron: "0 12 * * *" env: RUN_IDENTIFIER: samplestest-${{ github.run_id }}-${{ github.run_attempt }} jobs: