diff --git a/.github/workflows/int-test-azure-workflow.yml b/.github/workflows/int-test-azure-workflow.yml index 97b2e7da7..577a71d07 100644 --- a/.github/workflows/int-test-azure-workflow.yml +++ b/.github/workflows/int-test-azure-workflow.yml @@ -38,11 +38,7 @@ jobs: password: ${{ secrets.AZURE_ACR_DOCKER_PASSWORD }} - name: Make Splunk Operator Image run: | - make docker-build IMG=${{ secrets.AZURE_ACR_LOGIN_SERVER }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA - - name: Push Splunk Operator Image to the Container Registry - run: | - echo "Uploading Image to the Container Registry :: ${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA" - make docker-push IMG=${{ secrets.AZURE_ACR_LOGIN_SERVER }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA + make docker-buildx IMG=${{ secrets.AZURE_ACR_LOGIN_SERVER }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA setup-aks-cluster: runs-on: ubuntu-latest needs: build-operator-image diff --git a/.github/workflows/int-test-gcp-workflow.yml b/.github/workflows/int-test-gcp-workflow.yml new file mode 100644 index 000000000..21e0e1448 --- /dev/null +++ b/.github/workflows/int-test-gcp-workflow.yml @@ -0,0 +1,277 @@ +name: Integration Test on GCP Workflow + +on: + push: + branches: + - develop + - main + +jobs: + build-operator-image: + runs-on: ubuntu-latest + env: + SPLUNK_ENTERPRISE_IMAGE: ${{ secrets.SPLUNK_ENTERPRISE_IMAGE }} + SPLUNK_OPERATOR_IMAGE_NAME: splunk/splunk-operator + ARTIFACT_REGISTRY: ${{ secrets.GCP_ARTIFACT_REGISTRY }} # Updated for Artifact Registry + steps: + - name: Checkout Code + uses: actions/checkout@v2 + + - name: Load Environment Variables + id: dotenv + uses: falti/dotenv-action@d4d12eaa0e1dd06d5bdc3d7af3bf4c8c93cb5359 + with: + path: .env # Adjust the path if your dotenv file is located elsewhere + + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ steps.dotenv.outputs.GO_VERSION }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2.5.0 + + - name: Install Operator SDK + run: | + ARCH=$(case $(uname -m) in + x86_64) echo -n amd64 ;; + aarch64) echo -n arm64 ;; + *) echo -n $(uname -m) ;; + esac) + OS=$(uname | awk '{print tolower($0)}') + OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/${{ steps.dotenv.outputs.OPERATOR_SDK_VERSION }} + curl -LO ${OPERATOR_SDK_DL_URL}/operator-sdk_${OS}_${ARCH} + chmod +x operator-sdk_${OS}_${ARCH} + sudo mv operator-sdk_${OS}_${ARCH} /usr/local/bin/operator-sdk + + - name: Authenticate to GCP + uses: google-github-actions/auth@v1 + with: + credentials_json: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }} + + - name: Login to GCR + uses: docker/login-action@v3 + with: + registry: ${{ secrets.GCP_ARTIFACT_REGISTRY }} + username: _json_key + password: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }} + + - name: Build Splunk Operator Image + run: | + make docker-buildx IMG=${{ secrets.GCP_ARTIFACT_REGISTRY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA + + create-cluster-and-run-tests: + strategy: + matrix: + test_focus: + - { order: 1, name: "c3_gcp_sanity" } + - { order: 2, name: "c3_mgr_gcp_sanity" } + - { order: 3, name: "m4_gcp_sanity" } + - { order: 4, name: "m4_mgr_gcp_sanity" } + - { order: 5, name: "s1_gcp_sanity" } + runs-on: ubuntu-latest + needs: build-operator-image + env: + CLUSTER_WORKERS: 5 + TEST_CLUSTER_PLATFORM: gcp + CLUSTER_PROVIDER: gcp + ARTIFACT_REGISTRY: ${{ secrets.GCP_ARTIFACT_REGISTRY }} + GCP_PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }} + GCP_REGION: ${{ secrets.GCP_REGION }} + 
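The build job above logs Docker in to Artifact Registry with the `_json_key` username convention and then runs `make docker-buildx`. A minimal local sketch of the same flow, useful when reproducing the job outside CI, is shown below; the registry host, repository, key-file path, and image tag are assumptions, not values taken from this workflow.

```bash
# Hypothetical values; adjust to your Artifact Registry location, project and repository.
REGISTRY_HOST=us-central1-docker.pkg.dev
IMAGE_REPO=${REGISTRY_HOST}/my-project/my-repo

# Authenticate Docker to Artifact Registry with a service-account JSON key
# (the username is literally "_json_key").
cat key.json | docker login -u _json_key --password-stdin https://${REGISTRY_HOST}

# Build and push the multi-platform operator image the same way the workflow step does.
make docker-buildx IMG=${IMAGE_REPO}/splunk/splunk-operator:$(git rev-parse HEAD)
```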
AWS_S3_REGION: ${{ secrets.GCP_REGION }} + GCP_ZONE: ${{ secrets.GCP_ZONE }} + GCP_NETWORK: default # Adjust if using a custom network + GCP_SUBNETWORK: default # Adjust if using a custom subnetwork + TEST_FOCUS: ${{ matrix.test_focus.name }} + CLUSTER_NODES: 2 + SPLUNK_ENTERPRISE_IMAGE: ${{ secrets.SPLUNK_ENTERPRISE_IMAGE }} + SPLUNK_ENTERPRISE_RELEASE_IMAGE: ${{ secrets.SPLUNK_ENTERPRISE_RELEASE_IMAGE }} + SPLUNK_OPERATOR_IMAGE_NAME: splunk/splunk-operator + SPLUNK_OPERATOR_IMAGE_FILENAME: splunk-operator + TEST_TO_SKIP: "^(?:[^s]+|s(?:$|[^m]|m(?:$|[^o]|o(?:$|[^k]|k(?:$|[^e])))))*$" + TEST_BUCKET: ${{ secrets.TEST_BUCKET }} + TEST_S3_BUCKET: ${{ secrets.TEST_BUCKET }} + TEST_INDEXES_S3_BUCKET: ${{ secrets.TEST_INDEXES_S3_BUCKET }} + INDEXES_S3_BUCKET: ${{ secrets.TEST_INDEXES_S3_BUCKET }} + GCP_ENTERPRISE_LICENSE_LOCATION: "test_licenses" + ENTERPRISE_LICENSE_LOCATION: "test_licenses" + ENTERPRISE_LICENSE_S3_PATH: "test_licenses" + REGISTRY_REPOSITORY: ${{ secrets.GCP_ARTIFACT_REGISTRY }} + CLUSTER_WIDE: "true" + GCP_SERVICE_ACCOUNT_ENABLED: "false" + PRIVATE_REGISTRY: ${{ secrets.GCP_ARTIFACT_REGISTRY }} + GCP_STORAGE_ACCOUNT: ${{ secrets.GCP_STORAGE_ACCOUNT }} + GCP_STORAGE_ACCOUNT_KEY: ${{ secrets.GCP_STORAGE_ACCOUNT_KEY }} + GCP_TEST_CONTAINER: ${{ secrets.GCP_TEST_CONTAINER}} + GCP_INDEXES_CONTAINER: ${{ secrets.GCP_INDEXES_CONTAINER}} + ECR_REPOSITORY: ${{ secrets.GCP_ARTIFACT_REGISTRY }} + GCP_CONTAINER_REGISTRY_LOGIN_SERVER: ${{ secrets.AZURE_ACR_LOGIN_SERVER }} + steps: + - name: Set Test Cluster Name + run: | + echo "CLUSTER_NAME=gke-${{ matrix.test_focus.order }}-$GITHUB_RUN_ID" >> $GITHUB_ENV + echo "TEST_CLUSTER_NAME=gke-${{ matrix.test_focus.order }}-$GITHUB_RUN_ID" >> $GITHUB_ENV + - name: Checkout Code + uses: actions/checkout@v2 + + - name: Load Environment Variables + id: dotenv + uses: falti/dotenv-action@d4d12eaa0e1dd06d5bdc3d7af3bf4c8c93cb5359 + with: + path: .env + + - name: Authenticate to GCP + uses: google-github-actions/auth@v1 + with: + credentials_json: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }} + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1 + with: + project_id: ${{ secrets.GCP_PROJECT_ID }} + install_components: 'kubectl' + + - name: Set GCP Project + run: | + gcloud config set project ${{ env.GCP_PROJECT_ID }} + + - name: Create GKE Cluster + run: | + export EKS_CLUSTER_K8_VERSION=${{ steps.dotenv.outputs.EKS_CLUSTER_K8_VERSION }} + export GKE_CLUSTER_K8_VERSION=${{ steps.dotenv.outputs.EKS_CLUSTER_K8_VERSION }} + make cluster-up + + - name: Get Kubernetes Credentials + run: | + gcloud container clusters get-credentials ${{ env.CLUSTER_NAME }} --zone ${{ env.GCP_ZONE }} --project ${{ env.GCP_PROJECT_ID }} + + - name: Allow Pulling from Artifact Registry + run: | + gcloud auth configure-docker ${{ secrets.GCP_ARTIFACT_REGISTRY }} + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1 + with: + project_id: ${{ secrets.GCP_PROJECT_ID }} + install_components: 'kubectl' + + - name: Change Splunk Enterprise Image on Main Branches + if: github.ref == 'refs/heads/main' + run: | + echo "SPLUNK_ENTERPRISE_IMAGE=${{ steps.dotenv.outputs.SPLUNK_ENTERPRISE_RELEASE_IMAGE }}" >> $GITHUB_ENV + + - name: Authenticate to GCP + uses: google-github-actions/auth@v1 + with: + credentials_json: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }} + + - name: Set GCP Project + run: | + gcloud config set project ${{ env.GCP_PROJECT_ID }} + + - name: Install Kubectl + uses: azure/setup-kubectl@v3 + with: + version: ${{ 
steps.dotenv.outputs.KUBECTL_VERSION }} + + - name: Install Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' # Specify the Python version if needed + + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ steps.dotenv.outputs.GO_VERSION }} + + - name: Install Go Lint + run: | + go version + go install golang.org/x/lint/golint@latest + + - name: Install Ginkgo + run: | + make setup/ginkgo + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2.5.0 + + - name: Login to GCR + uses: docker/login-action@v3 + with: + registry: ${{ secrets.GCP_ARTIFACT_REGISTRY }} + username: _json_key + password: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }} + + - name: Pull Splunk Enterprise Image + run: docker pull ${{ env.SPLUNK_ENTERPRISE_IMAGE }} + + - name: Pull Splunk Operator Image Locally + run: | + docker pull ${{ secrets.GCP_ARTIFACT_REGISTRY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA + docker tag ${{ secrets.GCP_ARTIFACT_REGISTRY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA ${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA + echo "SPLUNK_OPERATOR_IMAGE=${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA" >> $GITHUB_ENV + + - name: Tag and Push Splunk Enterprise Image to Artifact Registry + run: | + docker tag ${{ env.SPLUNK_ENTERPRISE_IMAGE }} ${{ secrets.GCP_ARTIFACT_REGISTRY }}/${{ env.SPLUNK_ENTERPRISE_IMAGE }} + docker push ${{ secrets.GCP_ARTIFACT_REGISTRY }}/${{ env.SPLUNK_ENTERPRISE_IMAGE }} + + - name: Get Kubernetes Credentials + run: | + gcloud container clusters get-credentials ${{ env.CLUSTER_NAME }} --zone ${{ env.GCP_ZONE }} --project ${{ env.GCP_PROJECT_ID }} + + - name: Get GKE Credentials + uses: google-github-actions/get-gke-credentials@v1 + with: + cluster_name: ${{ env.CLUSTER_NAME }} + location: ${{ env.GCP_ZONE }} + + - name: Install Metrics Server + run: | + kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml + + - name: Install Kubernetes Dashboard + run: | + kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.5/aio/deploy/recommended.yaml + + - name: Setup Kustomize + run: | + curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash + sudo mv kustomize /usr/local/bin/ + + - name: Verify kubectl Configuration + run: | + kubectl config current-context + + - name: Apply StorageClass + run: | + kubectl apply -f test/gcp-storageclass.yaml + + - name: Run Integration Tests + run: | + export GCP_SERVICE_ACCOUNT_KEY=${{ secrets.GCP_SERVICE_ACCOUNT_KEY_BASE64 }} + make int-test + + - name: Collect Test Logs + if: ${{ always() }} + run: | + mkdir -p /tmp/pod_logs + find ./test -name "*.log" -exec cp {} /tmp/pod_logs \; + + - name: Archive Pod Logs + if: ${{ always() }} + uses: actions/upload-artifact@v4 + with: + name: splunk-pods-logs-artifacts-${{ matrix.test_focus.name }} + path: /tmp/pod_logs/** + - name: Cleanup Test Case Artifacts + if: ${{ always() }} + run: | + export EKS_CLUSTER_K8_VERSION=${{ steps.dotenv.outputs.EKS_CLUSTER_K8_VERSION }} + export GKE_CLUSTER_K8_VERSION=${{ steps.dotenv.outputs.EKS_CLUSTER_K8_VERSION }} + tools/cleanup.sh + - name: Cleanup up EKS cluster + if: ${{ always() }} + run: | + make cluster-down diff --git a/Makefile b/Makefile index 94853d581..1e4e1e23c 100644 --- a/Makefile +++ b/Makefile @@ -156,7 +156,7 @@ docker-buildx: test ## Build and push docker image for the manager for cross-pla sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e 
' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross - docker buildx create --name project-v3-builder docker buildx use project-v3-builder - - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross + - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . - docker buildx rm project-v3-builder rm Dockerfile.cross diff --git a/api/v4/common_types.go b/api/v4/common_types.go index 968ecd8ed..5bba9c0cd 100644 --- a/api/v4/common_types.go +++ b/api/v4/common_types.go @@ -308,10 +308,10 @@ type VolumeSpec struct { // Secret object name SecretRef string `json:"secretRef"` - // Remote Storage type. Supported values: s3, blob. s3 works with aws or minio providers, whereas blob works with azure provider. + // Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp. Type string `json:"storageType"` - // App Package Remote Store provider. Supported values: aws, minio, azure. + // App Package Remote Store provider. Supported values: aws, minio, azure, gcp. Provider string `json:"provider"` // Region of the remote storage volume where apps reside. Used for aws, if provided. Not used for minio and azure. diff --git a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml index 4a8bb045f..da20f98ca 100644 --- a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml +++ b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: (devel) name: clustermanagers.enterprise.splunk.com spec: group: enterprise.splunk.com @@ -969,7 +969,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -981,8 +981,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -2089,7 +2089,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -2101,8 +2101,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -4083,7 +4083,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where @@ -4095,8 +4095,9 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: - s3, blob. s3 works with aws or minio providers, whereas - blob works with azure provider.' + s3, blob, gcs. 
s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' type: string type: object type: array @@ -4352,7 +4353,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -4364,8 +4365,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array diff --git a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml index 418fd8e46..f9098524f 100644 --- a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml +++ b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: (devel) name: clustermasters.enterprise.splunk.com spec: group: enterprise.splunk.com @@ -965,7 +965,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -977,8 +977,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -2085,7 +2085,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -2097,8 +2097,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -4079,7 +4079,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where @@ -4091,8 +4091,9 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: - s3, blob. s3 works with aws or minio providers, whereas - blob works with azure provider.' + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' type: string type: object type: array @@ -4345,7 +4346,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -4357,8 +4358,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. 
s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 571efa7a1..88ada520e 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: (devel) name: indexerclusters.enterprise.splunk.com spec: group: enterprise.splunk.com diff --git a/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml b/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml index bcdd2bbfb..6d232fcf9 100644 --- a/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml +++ b/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: (devel) name: licensemanagers.enterprise.splunk.com spec: group: enterprise.splunk.com @@ -959,7 +959,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -971,8 +971,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -3957,7 +3957,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where @@ -3969,8 +3969,9 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: - s3, blob. s3 works with aws or minio providers, whereas - blob works with azure provider.' + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' type: string type: object type: array diff --git a/config/crd/bases/enterprise.splunk.com_licensemasters.yaml b/config/crd/bases/enterprise.splunk.com_licensemasters.yaml index 3371844b9..fc3c568d4 100644 --- a/config/crd/bases/enterprise.splunk.com_licensemasters.yaml +++ b/config/crd/bases/enterprise.splunk.com_licensemasters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: (devel) name: licensemasters.enterprise.splunk.com spec: group: enterprise.splunk.com @@ -954,7 +954,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -966,8 +966,8 @@ spec: type: string storageType: description: 'Remote Storage type. 
Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -3952,7 +3952,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where @@ -3964,8 +3964,9 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: - s3, blob. s3 works with aws or minio providers, whereas - blob works with azure provider.' + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' type: string type: object type: array diff --git a/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml b/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml index e97e022fc..820f80b10 100644 --- a/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml +++ b/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: (devel) name: monitoringconsoles.enterprise.splunk.com spec: group: enterprise.splunk.com @@ -961,7 +961,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -973,8 +973,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -3958,7 +3958,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where @@ -3970,8 +3970,9 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: - s3, blob. s3 works with aws or minio providers, whereas - blob works with azure provider.' + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' type: string type: object type: array @@ -5080,7 +5081,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -5092,8 +5093,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -8077,7 +8078,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' 
type: string region: description: Region of the remote storage volume where @@ -8089,8 +8090,9 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: - s3, blob. s3 works with aws or minio providers, whereas - blob works with azure provider.' + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' type: string type: object type: array diff --git a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml index b165366e0..ff65337c9 100644 --- a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: (devel) name: searchheadclusters.enterprise.splunk.com spec: group: enterprise.splunk.com @@ -967,7 +967,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -979,8 +979,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -3981,7 +3981,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where @@ -3993,8 +3993,9 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: - s3, blob. s3 works with aws or minio providers, whereas - blob works with azure provider.' + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' type: string type: object type: array @@ -5173,7 +5174,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -5185,8 +5186,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -8187,7 +8188,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where @@ -8199,8 +8200,9 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: - s3, blob. s3 works with aws or minio providers, whereas - blob works with azure provider.' + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' 
type: string type: object type: array diff --git a/config/crd/bases/enterprise.splunk.com_standalones.yaml b/config/crd/bases/enterprise.splunk.com_standalones.yaml index 99f02ab66..c715ee12d 100644 --- a/config/crd/bases/enterprise.splunk.com_standalones.yaml +++ b/config/crd/bases/enterprise.splunk.com_standalones.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: (devel) name: standalones.enterprise.splunk.com spec: group: enterprise.splunk.com @@ -962,7 +962,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -974,8 +974,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -2086,7 +2086,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -2098,8 +2098,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -4081,7 +4081,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where @@ -4093,8 +4093,9 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: - s3, blob. s3 works with aws or minio providers, whereas - blob works with azure provider.' + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' type: string type: object type: array @@ -4344,7 +4345,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -4356,8 +4357,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -5325,7 +5326,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -5337,8 +5338,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. 
s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -6449,7 +6450,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -6461,8 +6462,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array @@ -8444,7 +8445,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where @@ -8456,8 +8457,9 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: - s3, blob. s3 works with aws or minio providers, whereas - blob works with azure provider.' + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' type: string type: object type: array @@ -8710,7 +8712,7 @@ spec: type: string provider: description: 'App Package Remote Store provider. Supported - values: aws, minio, azure.' + values: aws, minio, azure, gcp.' type: string region: description: Region of the remote storage volume where apps @@ -8722,8 +8724,8 @@ spec: type: string storageType: description: 'Remote Storage type. Supported values: s3, - blob. s3 works with aws or minio providers, whereas blob - works with azure provider.' + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' type: string type: object type: array diff --git a/docs/AppFramework.md b/docs/AppFramework.md index f7fad5910..7f9c61144 100644 --- a/docs/AppFramework.md +++ b/docs/AppFramework.md @@ -7,9 +7,10 @@ The Splunk Operator provides support for Splunk app and add-on deployment using Utilizing the App Framework requires one of the following remote storage providers: * An Amazon S3 or S3-API-compliant remote object storage location * Azure blob storage + * GCP Cloud Storage ### Prerequisites common to both remote storage providers -* The App framework requires read-only access to the path used to host the apps. DO NOT give any other access to the operator to maintain the integrity of data in S3 bucket or Azure blob container. +* The App framework requires read-only access to the path used to host the apps. DO NOT give any other access to the operator to maintain the integrity of data in S3 bucket , Azure blob container or GCP bucket. * Splunk apps and add-ons in a .tgz or .spl archive format. * Connections to the remote object storage endpoint need to be secured using a minimum version of TLS 1.2. * A persistent storage volume and path for the Operator Pod. See [Add a persistent storage volume to the Operator pod](#add-a-persistent-storage-volume-to-the-operator-pod). @@ -23,6 +24,24 @@ Utilizing the App Framework requires one of the following remote storage provide * The remote object storage credentials provided as a kubernetes secret. * OR, Use "Managed Indentity" role assigment to the Azure blob container. 
See [Setup Azure bob access with Managed Indentity](#setup-azure-bob-access-with-managed-indentity) +### Prerequisites for GCP bucket based remote object storage +To use GCP storage in the App Framework, follow these setup requirements: + +### Role & Role Binding for Access: +Create a role and role-binding for the splunk-operator service account. This allows read-only access to the GCP bucket to retrieve Splunk apps. Access should be limited to read-only for the security of data within the GCP bucket. + +### Credentials via Kubernetes Secret or Workload Identity: +Configure credentials through either a Kubernetes secret (e.g., storing a GCP service account key in key.json) or use Workload Identity for secure access: + +* **Kubernetes Secret**: Create a Kubernetes secret using the service account JSON key file for GCP access. +* **Workload Identity**: Use Workload Identity to associate the Kubernetes service account used by the Splunk Operator with a GCP service account that has the Storage Object Viewer IAM role for the required bucket. + +## Example for creating the secret + +```shell +kubectl create secret generic gcs-secret --from-file=key.json=path/to/your-service-account-key.json +``` + Splunk apps and add-ons deployed or installed outside of the App Framework are not managed, and are unsupported. Note: For the App Framework to detect that an app or add-on had changed, the updated app must use the same archive file name as the previously deployed one. @@ -47,12 +66,16 @@ In this example, you'll deploy a Standalone CR with a remote storage volume, the * Configuring an IAM through "Managed Indentity" role assigment to give read access for your bucket (azure blob container). For more details see [Setup Azure bob access with Managed Indentity](#setup-azure-bob-access-with-managed-indentity) * Or, create a Kubernetes Secret Object with the static storage credentials. * Example: `kubectl create secret generic azureblob-secret --from-literal=azure_sa_name=mystorageaccount --from-literal=azure_sa_secret_key=wJalrXUtnFEMI/K7MDENG/EXAMPLE_AZURE_SHARED_ACCESS_KEY` - + * GCP bucket: + * Configure credentials through either a Kubernetes secret (e.g., storing a GCP service account key in key.json) or use Workload Identity for secure access: + * Kubernetes Secret: Create a Kubernetes secret using the service account JSON key file for GCP access. + * Example: `kubectl create secret generic gcs-secret --from-file=key.json=path/to/your-service-account-key.json` + * Workload Identity: Use Workload Identity to associate the Kubernetes service account used by the Splunk Operator with a GCP service account that has the Storage Object Viewer IAM role for the required bucket. 3. Create unique folders on the remote storage volume to use as App Source locations. * An App Source is a folder on the remote storage volume containing a select subset of Splunk apps and add-ons. In this example, the network and authentication Splunk Apps are split into different folders and named `networkApps` and `authApps`. 4. Copy your Splunk App or Add-on archive files to the App Source. - * In this example, the Splunk Apps are located at `bucket-app-framework/Standalone-us/networkAppsLoc/` and `bucket-app-framework/Standalone-us/authAppsLoc/`, and are both accessible through the end point `https://s3-us-west-2.amazonaws.com` for s3 and https://mystorageaccount.blob.core.windows.net for azure blob. 
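For the Workload Identity option mentioned above, the link between the operator's Kubernetes service account and a GCP service account is typically established with two IAM bindings plus an annotation on the Kubernetes service account. The sketch below is illustrative only; the project, bucket, namespace, and service-account names are assumptions and should be replaced with your own.

```bash
# Assumed names: GCP project "my-gcp-project", app bucket "bucket-app-framework",
# and the operator running as service account "splunk-operator-controller-manager"
# in namespace "splunk-operator". Replace all of these with your actual values.
PROJECT=my-gcp-project
GSA=splunk-app-reader@${PROJECT}.iam.gserviceaccount.com

# Give the GCP service account read-only (Storage Object Viewer) access to the app bucket.
gcloud storage buckets add-iam-policy-binding gs://bucket-app-framework \
  --member="serviceAccount:${GSA}" --role="roles/storage.objectViewer"

# Allow the Kubernetes service account to impersonate the GCP service account via Workload Identity.
gcloud iam service-accounts add-iam-policy-binding ${GSA} \
  --role="roles/iam.workloadIdentityUser" \
  --member="serviceAccount:${PROJECT}.svc.id.goog[splunk-operator/splunk-operator-controller-manager]"

# Annotate the Kubernetes service account so GKE maps it to the GCP service account.
kubectl annotate serviceaccount splunk-operator-controller-manager -n splunk-operator \
  iam.gke.io/gcp-service-account=${GSA}
```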
+ * In this example, the Splunk Apps are located at `bucket-app-framework/Standalone-us/networkAppsLoc/` and `bucket-app-framework/Standalone-us/authAppsLoc/`, and are both accessible through the end point `https://s3-us-west-2.amazonaws.com` for s3, https://mystorageaccount.blob.core.windows.net for azure blob and https://storage.googleapis.com for GCP bucket. 5. Update the standalone CR specification and append the volume, App Source configuration, and scope. * The scope determines where the apps and add-ons are placed into the Splunk Enterprise instance. For CRs where the Splunk Enterprise instance will run the apps locally, set the `scope: local ` The Standalone, Monitoring Console and License Manager CRs always use a local scope. @@ -118,6 +141,36 @@ spec: secretRef: azureblob-secret ``` +Example using GCP blob: Standalone.yaml + +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: Standalone +metadata: + name: stdln + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + replicas: 1 + appRepo: + appsRepoPollIntervalSeconds: 600 + defaults: + volumeName: volume_app_repo + scope: local + appSources: + - name: networkApps + location: networkAppsLoc/ + - name: authApps + location: authAppsLoc/ + volumes: + - name: volume_app_repo + storageType: gcs + provider: gcp + path: bucket-app-framework/Standalone-us/ + endpoint: https://storage.googleapis.com + secretRef: gcs-secret +``` + 6. Apply the Custom Resource specification: `kubectl apply -f Standalone.yaml` The App Framework detects the Splunk app or add-on archive files available in the App Source locations, and deploys them to the standalone instance path for local use. @@ -143,13 +196,19 @@ This example describes the installation of apps on an Indexer Cluster and Cluste * Configuring an IAM through "Managed Indentity" role assigment to give read access for your bucket (azure blob container). For more details see [Setup Azure bob access with Managed Indentity](#setup-azure-bob-access-with-managed-indentity) * Or, create a Kubernetes Secret Object with the static storage credentials. * Example: `kubectl create secret generic azureblob-secret --from-literal=azure_sa_name=mystorageaccount --from-literal=azure_sa_secret_key=wJalrXUtnFEMI/K7MDENG/EXAMPLE_AZURE_SHARED_ACCESS_KEY` + * GCP bucket: + * Configure credentials through either a Kubernetes secret (e.g., storing a GCP service account key in key.json) or use Workload Identity for secure access: + * Kubernetes Secret: Create a Kubernetes secret using the service account JSON key file for GCP access. + * Example: `kubectl create secret generic gcs-secret --from-file=key.json=path/to/your-service-account-key.json` + * Workload Identity: Use Workload Identity to associate the Kubernetes service account used by the Splunk Operator with a GCP service account that has the Storage Object Viewer IAM role for the required bucket. 3. Create unique folders on the remote storage volume to use as App Source locations. * An App Source is a folder on the remote storage volume containing a select subset of Splunk apps and add-ons. In this example, there are Splunk apps installed and run locally on the cluster manager, and select apps that will be distributed to all cluster peers by the cluster manager. * The apps are split across three folders named `networkApps`, `clusterBase`, and `adminApps`. The apps placed into `networkApps` and `clusterBase` are distributed to the cluster peers, but the apps in `adminApps` are for local use on the cluster manager instance only. 4. 
Copy your Splunk app or add-on archive files to the App Source. - * In this example, the Splunk apps for the cluster peers are located at `bucket-app-framework/idxcAndCmApps/networkAppsLoc/`, `bucket-app-framework/idxcAndCmApps/clusterBaseLoc/`, and the apps for the cluster manager are located at`bucket-app-framework/idxcAndCmApps/adminAppsLoc/`. They are all accessible through the end point `https://s3-us-west-2.amazonaws.com` for s3 and https://mystorageaccount.blob.core.windows.net for azure blob. + * In this example, the Splunk apps for the cluster peers are located at `bucket-app-framework/idxcAndCmApps/networkAppsLoc/`, `bucket-app-framework/idxcAndCmApps/clusterBaseLoc/`, and the apps for the cluster manager are located at`bucket-app-framework/idxcAndCmApps/adminAppsLoc/`. They are all accessible through the end point `https://s3-us-west-2.amazonaws.com` for s3, https://mystorageaccount.blob.core.windows.net for azure blob and https://storage.googleapis.com for GCP bucket. + 5. Update the ClusterManager CR specification and append the volume, App Source configuration, and scope. * The scope determines where the apps and add-ons are placed into the Splunk Enterprise instance. For CRs where the Splunk Enterprise instance will deploy the apps to cluster peers, set the `scope: cluster`. The ClusterManager and SearchHeadCluster CRs support both cluster and local scopes. @@ -219,6 +278,38 @@ spec: endpoint: https://mystorageaccount.blob.core.windows.net secretRef: azureblob-secret ``` + +Example using GCP Bucket: ClusterManager.yaml +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + appRepo: + appsRepoPollIntervalSeconds: 900 + defaults: + volumeName: volume_app_repo_us + scope: cluster + appSources: + - name: networkApps + location: networkAppsLoc/ + - name: clusterBase + location: clusterBaseLoc/ + - name: adminApps + location: adminAppsLoc/ + scope: local + volumes: + - name: volume_app_repo_us + storageType: gcs + provider: gcp + path: bucket-app-framework/idxcAndCmApps/ + endpoint: https://storage.googleapis.com + secretRef: gcs-secret +``` + 6. Apply the Custom Resource specification: `kubectl apply -f ClusterManager.yaml` The App Framework detects the Splunk app or add-on archive files available in the App Source locations, and deploys the apps from the `adminApps` folder to the cluster manager instance for local use. @@ -248,6 +339,11 @@ This example describes the installation of apps on the Deployer and the Search H * Configuring an IAM through "Managed Indentity" role assigment to give read access for your bucket (azure blob container). For more details see [Setup Azure bob access with Managed Indentity](#setup-azure-bob-access-with-managed-indentity) * Or, create a Kubernetes Secret Object with the static storage credentials. * Example: `kubectl create secret generic azureblob-secret --from-literal=azure_sa_name=mystorageaccount --from-literal=azure_sa_secret_key=wJalrXUtnFEMI/K7MDENG/EXAMPLE_AZURE_SHARED_ACCESS_KEY` + * GCP bucket: + * Configure credentials through either a Kubernetes secret (e.g., storing a GCP service account key in key.json) or use Workload Identity for secure access: + * Kubernetes Secret: Create a Kubernetes secret using the service account JSON key file for GCP access. 
+ * Example: `kubectl create secret generic gcs-secret --from-file=key.json=path/to/your-service-account-key.json` + * Workload Identity: Use Workload Identity to associate the Kubernetes service account used by the Splunk Operator with a GCP service account that has the Storage Object Viewer IAM role for the required bucket. 3. Create unique folders on the remote storage volume to use as App Source locations. @@ -255,7 +351,7 @@ This example describes the installation of apps on the Deployer and the Search H * The apps are split across three folders named `searchApps`, `machineLearningApps` and `adminApps`. The apps placed into `searchApps` and `machineLearningApps` are distributed to the search heads, but the apps in `adminApps` are for local use on the Deployer instance only. 4. Copy your Splunk app or add-on archive files to the App Source. - * In this example, the Splunk apps for the search heads are located at `bucket-app-framework/shcLoc-us/searchAppsLoc/`, `bucket-app-framework/shcLoc-us/machineLearningAppsLoc/`, and the apps for the Deployer are located at `bucket-app-framework/shcLoc-us/adminAppsLoc/`. They are all accessible through the end point `https://s3-us-west-2.amazonaws.com` for s3 and https://mystorageaccount.blob.core.windows.net for azure blob. + * In this example, the Splunk apps for the search heads are located at `bucket-app-framework/shcLoc-us/searchAppsLoc/`, `bucket-app-framework/shcLoc-us/machineLearningAppsLoc/`, and the apps for the Deployer are located at `bucket-app-framework/shcLoc-us/adminAppsLoc/`. They are all accessible through the end point `https://s3-us-west-2.amazonaws.com` for s3, https://mystorageaccount.blob.core.windows.net for azure blob and and https://storage.googleapis.com for GCP bucket. 5. Update the SearchHeadCluster CR specification, and append the volume, App Source configuration, and scope. * The scope determines where the apps and add-ons are placed into the Splunk Enterprise instance. @@ -328,6 +424,40 @@ spec: endpoint: https://mystorageaccount.blob.core.windows.net secretRef: azureblob-secret ``` + +Example using GCP bucket: SearchHeadCluster.yaml + +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + appRepo: + appsRepoPollIntervalSeconds: 900 + defaults: + volumeName: volume_app_repo_us + scope: cluster + appSources: + - name: networkApps + location: networkAppsLoc/ + - name: clusterBase + location: clusterBaseLoc/ + - name: adminApps + location: adminAppsLoc/ + scope: local + volumes: + - name: volume_app_repo_us + storageType: gcs + provider: gcp + path: bucket-app-framework/idxcAndCmApps/ + endpoint: https://storage.googleapis.com + secretRef: gcs-secret + +``` + 6. Apply the Custom Resource specification: `kubectl apply -f SearchHeadCluster.yaml` The App Framework detects the Splunk app or add-on archive files available in the App Source locations, and deploys the apps from the `adminApps` folder to the Deployer instance for local use. @@ -438,7 +568,7 @@ Here is a typical App framework configuration in a Custom Resource definition: * `name` uniquely identifies the remote storage volume name within a CR. This is used by the Operator to identify the local volume. * `storageType` describes the type of remote storage. Currently, `s3`, `blob` are the supported storage type. -* `provider` describes the remote storage provider. Currently, `aws`, `minio` and `azure` are the supported providers. 
Use `s3` with `aws` or `minio` and use `blob` with `azure`. +* `provider` describes the remote storage provider. Currently, `aws`, `minio` `gcp` and `azure` are the supported providers. Use `s3` with `aws` or `minio`, use `blob` with `azure` or `gcp` * `endpoint` describes the URI/URL of the remote storage endpoint that hosts the apps. * `secretRef` refers to the K8s secret object containing the static remote storage access key. This parameter is not required if using IAM role based credentials. * `path` describes the path (including the folder) of one or more app sources on the remote store. @@ -622,91 +752,843 @@ The App Framework does not preview, analyze, verify versions, or enable Splunk A 2. The App Framework defines one worker per CR type. For example, if you have multiple clusters receiveing app updates, a delay while managing one cluster will delay the app updates to the other cluster. -## Setup Azure bob access with Managed Indentity +## Setup Azure Blob Access with Managed Identity -Azure Managed identities can be used to provide IAM access to the blobs. With managed identities, the AKS nodes, that host the pods, can retrieve a OAuth token that provides authorization for the Splunk operator pod to read the app packages stored in the Azure Storage account. The key point here is that the AKS node is associated with a Managed Identity and this managed identity is given a `role` for read access called `Storage Blob Data Reader` to the azure storage account. +Azure Managed Identities can be used to provide IAM access to the blobs. With managed identities, the AKS nodes that host the pods can retrieve an OAuth token that provides authorization for the Splunk Operator pod to read the app packages stored in the Azure Storage account. The key point here is that the AKS node is associated with a Managed Identity, and this managed identity is given a `role` for read access called `Storage Blob Data Reader` to the Azure Storage account. -Here are the steps showing an example of assiging managed identity: +### **Assumptions:** -*Assumptions:* +- Familiarize yourself with [AKS managed identity concepts](https://learn.microsoft.com/en-us/azure/aks/use-managed-identity) +- The names used below, such as resource-group name and AKS cluster name, are for example purposes only. Please change them to the values as per your setup. +- These steps cover creating a resource group and AKS cluster; you can skip them if you already have them created. -Familiarize yourself with [AKS managed identity concepts](https://learn.microsoft.com/en-us/azure/aks/use-managed-identity) +### **Steps to Assign Managed Identity:** -The names used below, such as resource-group name and AKS cluster name, are for examples purpose, please change them to the values as per your setup. +1. **Create an Azure Resource Group** -These steps cover creating resource group and AKS cluster also but you can skip them if you already have them created. + ```bash + az group create --name splunkOperatorResourceGroup --location westus2 + ``` -1. Create an Azure resource group +2. **Create AKS Cluster with Managed Identity Enabled** -``` -az group create --name splunkOperatorResourceGroup --location westus2 -``` + ```bash + az aks create -g splunkOperatorResourceGroup -n splunkOperatorCluster --enable-managed-identity + ``` -2. Create AKS Cluster +3. 
**Get Credentials to Access Cluster** -``` -az aks create -g splunkOperatorResourceGroup -n splunkOperatorCluster --enable-managed-identity -``` + ```bash + az aks get-credentials --resource-group splunkOperatorResourceGroup --name splunkOperatorCluster + ``` -3. Get credentials to access cluster -``` -az aks get-credentials --resource-group splunkOperatorResourceGroup --name splunkOperatorCluster -``` -4. Get the Kubelet user managed identity +4. **Get the Kubelet User Managed Identity** -Run -``` -$ az identity list -``` + Run: -Find the section that has -agentpool under name + ```bash + az identity list + ``` -That is look for the block that contains "name": "splunkOperatorCluster-agentpool" + Find the section that has `-agentpool` under `name`. For example, look for the block that contains: -``` -{ -"clientId": "a5890776-24e6-4f5b-9b6c-**************", -"id": "/subscriptions/f428689e-c379-4712--**************",/resourcegroups/MC_splunkOperatorResourceGroup_splunkOperatorCluster_westus2/providers/Microsoft.ManagedIdentity/userAssignedIdentities/splunkOperatorCluster-agentpool", -"location": "westus2", -"name": "splunkOperatorCluster-agentpool", -"principalId": "f0f04120-6a36-49bc--**************",", -"resourceGroup": "MC_splunkOperatorResourceGroup_splunkOperatorCluster_westus2", -"tags": {}, -"tenantId": "8add7810-b62a--**************",", -"type": "Microsoft.ManagedIdentity/userAssignedIdentities" -} -``` + ```json + { + "clientId": "a5890776-24e6-4f5b-9b6c-**************", + "id": "/subscriptions//resourceGroups/MC_splunkOperatorResourceGroup_splunkOperatorCluster_westus2/providers/Microsoft.ManagedIdentity/userAssignedIdentities/splunkOperatorCluster-agentpool", + "location": "westus2", + "name": "splunkOperatorCluster-agentpool", + "principalId": "f0f04120-6a36-49bc--**************", + "resourceGroup": "MC_splunkOperatorResourceGroup_splunkOperatorCluster_westus2", + "tags": {}, + "tenantId": "8add7810-b62a--**************", + "type": "Microsoft.ManagedIdentity/userAssignedIdentities" + } + ``` -Extract the principalId value from the outout above. Or you can use the following command to get the principalId -``` -$ az identity show --name --resource-group "" --query 'principalId' --output tsv -``` -Example: -``` -$ principalId=$(az identity show --name splunkOperatorCluster-agentpool --resource-group "MC_splunkOperatorResourceGroup_splunkOperatorCluster_westus2" --query 'principalId' --output tsv) -$ echo $principalId -``` -f0f04120-6a36-49bc--************** + Extract the `principalId` value from the output above. Alternatively, use the following command to get the `principalId`: -5. Assign read access for Kubelet user managed identity to the storage account + ```bash + az identity show --name --resource-group "" --query 'principalId' --output tsv + ``` -Use the `principalId` from the above section and assign it to the storage account -``` -az role assignment create --assignee "" --role 'Storage Blob Data Reader' --scope /subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts/ -``` -For my example, if is splunkOperatorResourceGroup + **Example:** -and is mystorageaccount then the command would be: -``` -$ az role assignment create --assignee "f0f04120-6a36-49bc--**************" --role 'Storage Blob Data Reader' --scope /subscriptions/f428689e-c379-4712--**************/resourceGroups/splunkOperatorResourceGroup/providers/Microsoft.Storage/storageAccounts/mystorageaccount -``` -After this command, you can use App framework for Azure blob without secrets. 
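As an optional check that is not part of the original steps, the role assignment can be confirmed before configuring the App Framework; `$principalId` here is the kubelet identity value retrieved earlier.

```bash
# Optional verification: list the role assignments held by the kubelet managed identity.
az role assignment list --assignee "$principalId" --output table
```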
+ ```bash + principalId=$(az identity show --name splunkOperatorCluster-agentpool --resource-group "MC_splunkOperatorResourceGroup_splunkOperatorCluster_westus2" --query 'principalId' --output tsv) + echo $principalId + ``` + + Output: + + ``` + f0f04120-6a36-49bc--************** + ``` + +5. **Assign Read Access for Kubelet User Managed Identity to the Storage Account** + + Use the `principalId` from the above section and assign it to the storage account: + + ```bash + az role assignment create --assignee "" --role 'Storage Blob Data Reader' --scope /subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts/ + ``` + + **For Example:** + + If `` is `splunkOperatorResourceGroup` and `` is `mystorageaccount`, the command would be: + + ```bash + az role assignment create --assignee "f0f04120-6a36-49bc--**************" --role 'Storage Blob Data Reader' --scope /subscriptions/f428689e-c379-4712--**************/resourceGroups/splunkOperatorResourceGroup/providers/Microsoft.Storage/storageAccounts/mystorageaccount + ``` + + After this command, you can use the App Framework for Azure Blob without secrets. + +### **Azure Blob Authorization Recommendations:** + +- **Granular Access:** Azure allows **"Managed Identities"** assignment at the **"storage accounts"** level as well as at specific containers (buckets) levels. A managed identity assigned read permissions at a storage account level will have read access for all containers within that storage account. As a good security practice, assign the managed identity to only the specific containers it needs to access, rather than the entire storage account. + +- **Avoid Shared Access Keys:** In contrast to **"Managed Identities"**, Azure allows **"shared access keys"** configurable only at the storage accounts level. When using the `secretRef` configuration in the CRD, the underlying secret key will allow both read and write access to the storage account (and all containers within it). Based on your security needs, consider using "Managed Identities" instead of secrets. Additionally, there's no automated way to rotate the secret key, so if you're using these keys, rotate them regularly (e.g., every 90 days). + +--- -Azure Blob Authorization Recommendations: +## Setup Azure Blob Access with Azure Workload Identity -Azure allows "Managed Identities" assignment at the "storage accounts" level as well as at specific buckets levels. A managed identity that is assigned read permissions at a storage account level will have read access for all the buckets within that storage account. As a good security practice, you should assign the managed identity to only the specific buckets and not to the whole storage account. +Azure Workload Identity provides a Kubernetes-native approach to authenticate workloads running in your cluster to Azure services, such as Azure Blob Storage, without managing credentials manually. This section outlines how to set up Azure Workload Identity to securely access Azure Blob Storage from the Splunk Operator running on AKS. -In contrast to "Managed Identities", Azure allows the "shared access keys" configurable only at the storage accounts level. When using the "secretRef" configuration in the CRD, the underlying secret key will allow both read and write access to the storage account (and all the buckets within it). So, based on your security needs, you may want to consider using "Managed Identities" instead of secrets. 
Also note that there isn't an automated way of rotating the secret key, so in case you are using these keys, please rotate them at regular intervals of times such as 90 days interval. +### **Assumptions:** + +- Familiarize yourself with [Azure AD Workload Identity concepts](https://learn.microsoft.com/en-us/azure/active-directory/workload-identity/overview) +- The names used below, such as resource-group name and AKS cluster name, are for example purposes only. Please change them to the values as per your setup. +- These steps cover creating a resource group and AKS cluster with Azure Workload Identity enabled; skip if already created. + +### **Steps to Assign Azure Workload Identity:** + +1. **Create an Azure Resource Group** + + ```bash + az group create --name splunkOperatorWorkloadIdentityRG --location westus2 + ``` + +2. **Create AKS Cluster with Azure Workload Identity Enabled** + + ```bash + az aks create -g splunkOperatorWorkloadIdentityRG -n splunkOperatorWICluster --enable-oidc-issuer --enable-managed-identity + ``` + + **Parameters:** + - `--enable-oidc-issuer`: Enables the OIDC issuer required for Workload Identity. + - `--enable-managed-identity`: Enables Managed Identity for the cluster. + +3. **Get Credentials to Access Cluster** + + ```bash + az aks get-credentials --resource-group splunkOperatorWorkloadIdentityRG --name splunkOperatorWICluster + ``` + +4. **Install Azure AD Workload Identity in Kubernetes** + + Azure AD Workload Identity requires installing specific components into your Kubernetes cluster. + + **Using Helm:** + + ```bash + helm repo add azure-workload-identity https://azure.github.io/azure-workload-identity/charts + helm repo update + + # Create a namespace for workload identity (optional but recommended) + kubectl create namespace workload-identity-system + + # Install the Azure Workload Identity Helm chart + helm install azure-workload-identity azure-workload-identity/azure-workload-identity \ + --namespace workload-identity-system \ + --set azureIdentityBindingSelector="splunk-operator" + ``` + + **Parameters:** + - `azureIdentityBindingSelector`: Selector used to bind `AzureIdentityBinding` resources to specific Kubernetes service accounts. In this case, it's set to `"splunk-operator"`. + +5. **Create a User-Assigned Managed Identity** + + ```bash + az identity create \ + --name splunkOperatorWIIdentity \ + --resource-group splunkOperatorWorkloadIdentityRG \ + --location westus2 + ``` + + **Retrieve Managed Identity Details:** + + ```bash + az identity show \ + --name splunkOperatorWIIdentity \ + --resource-group splunkOperatorWorkloadIdentityRG \ + --query "{clientId: clientId, principalId: principalId, id: id}" \ + --output json + ``` + + **Sample Output:** + + ```json + { + "clientId": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", + "principalId": "yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy", + "id": "/subscriptions//resourceGroups/splunkOperatorWorkloadIdentityRG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/splunkOperatorWIIdentity" + } + ``` + +6. 
**Assign the `Storage Blob Data Contributor` Role to the Managed Identity** + + ```bash + az role assignment create \ + --assignee \ + --role "Storage Blob Data Contributor" \ + --scope /subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts/ + ``` + + **Example:** + + ```bash + az role assignment create \ + --assignee "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" \ + --role "Storage Blob Data Contributor" \ + --scope /subscriptions/f428689e-c379-4712--**************/resourceGroups/splunkOperatorResourceGroup/providers/Microsoft.Storage/storageAccounts/mystorageaccount + ``` + +7. **Create Kubernetes Service Account for Splunk Operator** + + Create a Kubernetes Service Account annotated to use Azure Workload Identity. + + ```yaml + # splunk-operator-wi-serviceaccount.yaml + + apiVersion: v1 + kind: ServiceAccount + metadata: + name: bucket-admin-test-wi + namespace: your-splunk-operator-namespace + labels: + azure.workload.identity/client-id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx # clientId from the Managed Identity + ``` + + **Apply the Service Account:** + + ```bash + kubectl apply -f splunk-operator-wi-serviceaccount.yaml + ``` + +8. **Create AzureIdentity and AzureIdentityBinding Resources** + + These resources link the Kubernetes Service Account to the Azure Managed Identity. + + ```yaml + # azureidentity-wi.yaml + + apiVersion: workloadidentity.azure.com/v1alpha1 + kind: AzureIdentity + metadata: + name: splunkOperatorWIIdentity + namespace: workload-identity-system + spec: + type: 0 # 0 for User Assigned Managed Identity + resourceID: /subscriptions//resourceGroups/splunkOperatorWorkloadIdentityRG/providers/Microsoft.ManagedIdentity/userAssignedIdentities/splunkOperatorWIIdentity + clientID: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx # clientId from the Managed Identity + ``` + + ```yaml + # azureidentitybinding-wi.yaml + + apiVersion: workloadidentity.azure.com/v1alpha1 + kind: AzureIdentityBinding + metadata: + name: splunkOperatorWIIdentityBinding + namespace: workload-identity-system + spec: + azureIdentity: splunkOperatorWIIdentity + selector: splunk-operator-wi + ``` + + **Apply the Resources:** + + ```bash + kubectl apply -f azureidentity-wi.yaml + kubectl apply -f azureidentitybinding-wi.yaml + ``` + +9. **Annotate Kubernetes Service Account to Use Workload Identity** + + Update the Splunk Operator Deployment to use the annotated Service Account. + + ```yaml + # splunk-operator-deployment-wi.yaml + + apiVersion: apps/v1 + kind: Deployment + metadata: + name: splunk-operator + namespace: your-splunk-operator-namespace + labels: + app: splunk-operator + spec: + replicas: 1 + selector: + matchLabels: + app: splunk-operator + template: + metadata: + labels: + app: splunk-operator + annotations: + azure.workload.identity/use: "true" + spec: + serviceAccountName: bucket-admin-test-wi + containers: + - name: splunk-operator + image: your-splunk-operator-image + # ... other configurations + ``` + + **Apply the Updated Deployment:** + + ```bash + kubectl apply -f splunk-operator-deployment-wi.yaml + ``` + +10. 
**Verify the Setup** + + - **Check Pod Annotations:** + + ```bash + kubectl get pods -n your-splunk-operator-namespace -o jsonpath='{.items[*].metadata.annotations}' + ``` + + You should see an annotation similar to: + + ```json + { + "azure.workload.identity/use": "true" + } + ``` + + - **Test Azure Blob Storage Access from the Pod:** + + ```bash + kubectl exec -it <splunk-operator-pod-name> -n your-splunk-operator-namespace -- /bin/bash + ``` + + Inside the pod, use the Azure CLI or Azure SDK to list blobs: + + ```bash + az storage blob list --account-name mystorageaccount --container-name mycontainer --output table + ``` + + **Note:** Ensure that the Azure CLI is installed in the pod or use appropriate Azure SDK commands within your application code. + + - **Check Logs for Authentication Success:** + + ```bash + kubectl logs deployment/splunk-operator -n your-splunk-operator-namespace + ``` + + Look for log entries indicating successful authentication and blob storage access. + +### **Azure Workload Identity Authorization Recommendations:** + +Azure Workload Identity allows you to assign IAM roles at more granular levels, enhancing security by limiting access only to the necessary resources. + +- **Granular Role Assignments:** Assign the Managed Identity the least privilege necessary. Prefer roles like `Storage Blob Data Reader` at the container level instead of the entire storage account to minimize exposure. + +- **Avoid Shared Access Keys:** Similar to Managed Identities, avoid using shared access keys when possible. They grant broader access and require manual rotation. + +- **Secure Service Accounts:** Ensure that Kubernetes Service Accounts used with Workload Identity are restricted to only the necessary namespaces and roles. + +### **Benefits of Using Azure Workload Identity:** + +- **Kubernetes-Native:** Seamlessly integrates with Kubernetes Service Accounts, allowing workloads to authenticate without managing secrets. + +- **Enhanced Security:** Eliminates the need to store credentials in pods or Kubernetes secrets, reducing the attack surface. + +- **Scalability:** Easily assign the same identity to multiple pods or workloads, simplifying management.
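The blob-access check above can also be done programmatically. Below is a minimal Go sketch (Go is the language of the operator repository), assuming it runs in a pod that uses the workload-identity-annotated service account from the previous steps; the storage account `mystorageaccount` and container `mycontainer` are the example names used above, and the file name is illustrative only. `DefaultAzureCredential` resolves the federated token injected by the workload identity webhook, so no shared key or secret is involved.

```go
// list_blobs_wi.go - hedged sketch: verifies blob read access from a pod that
// runs under the workload-identity-enabled service account described above.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
	// Picks up the federated token and client ID injected by the
	// Azure Workload Identity webhook; no account key or SAS is used.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("failed to obtain credential: %v", err)
	}

	// "mystorageaccount" and "mycontainer" are the example names from above.
	client, err := azblob.NewClient("https://mystorageaccount.blob.core.windows.net", cred, nil)
	if err != nil {
		log.Fatalf("failed to create blob client: %v", err)
	}

	// Listing the container succeeds only if the role assignment is in place.
	pager := client.NewListBlobsFlatPager("mycontainer", nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatalf("listing failed: %v", err)
		}
		for _, item := range page.Segment.BlobItems {
			fmt.Println(*item.Name)
		}
	}
}
```

If the listing fails with an authorization error, re-check the role assignment on the storage account (or container) and the client ID referenced by the service account.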
+ +### **Comparison Between Managed Identity and Workload Identity:** + +| Feature | Managed Identity | Workload Identity | +|-----------------------------|--------------------------------------------------|-----------------------------------------------------| +| **Scope** | Tied to the Azure resource (e.g., AKS node) | Tied to Kubernetes Service Accounts | +| **Credential Management** | Azure manages credentials | Kubernetes manages Service Account credentials | +| **Flexibility** | Limited to Azure resources | More flexible, integrates with Kubernetes-native identities | +| **Granularity** | Role assignments at Azure resource level | Role assignments at Kubernetes namespace or service account level | +| **Use Cases** | Simple scenarios where workloads share identities | Complex scenarios requiring granular access controls | + +### **When to Use Which:** + +- **Managed Identity:** Suitable for scenarios where workloads are tightly coupled with specific Azure resources and require straightforward IAM access. + +- **Workload Identity:** Ideal for Kubernetes-native environments where fine-grained access control and integration with Kubernetes Service Accounts are essential. + +## Setup Google Cloud Storage Access for App Framework + +The Splunk Operator requires access to Google Cloud Storage (GCS) buckets to retrieve app packages and add-ons. You can configure this access using one of the following two methods: + +1. **Using a Kubernetes Secret with a GCP Service Account JSON Key File** +2. **Using Workload Identity for Secure Access Without Service Account Keys** + +### **Prerequisites** + +Before proceeding, ensure you have the following: + +- **Google Cloud Platform (GCP) Account**: Access to a GCP project with permissions to create and manage service accounts and IAM roles. +- **Kubernetes Cluster**: A running Kubernetes cluster (e.g., GKE) with `kubectl` configured. +- **Splunk Operator Installed**: The Splunk Operator should be installed and running in your Kubernetes cluster. +- **Google Cloud SDK (`gcloud`)**: Installed and authenticated with your GCP account. [Install Google Cloud SDK](https://cloud.google.com/sdk/docs/install) + +--- + +## Option 1: Using a Kubernetes Secret for GCP Access + +This method involves creating a Kubernetes Secret that stores a GCP service account JSON key file. The Splunk Operator will use this secret to authenticate and access the GCS bucket. + +#### **Steps to Configure Access Using a Kubernetes Secret** + +1.
**Create a GCP Service Account** + + - **Navigate to GCP Console**: + - Go to the [Google Cloud Console](https://console.cloud.google.com/). + + - **Create Service Account**: + - Navigate to **IAM & Admin > Service Accounts**. + - Click **Create Service Account**. + - **Service Account Details**: + - **Name**: `splunk-app-framework-sa` + - **Description**: (Optional) e.g., `Service account for Splunk Operator to access GCS buckets` + - Click **Create and Continue**. + + - **Grant Service Account Permissions**: + - Assign the **Storage Object Viewer** role to grant read access to the required GCS buckets. + - Click **Done**. + +2. **Download the Service Account Key** + + - **Locate the Service Account**: + - In the **Service Accounts** page, find `splunk-app-framework-sa`. + + - **Generate Key**: + - Click on **Actions (⋮) > Manage Keys**. + - Click **Add Key > Create New Key**. + - **Key Type**: Select **JSON**. + - Click **Create**. + - A JSON key file (`splunk-app-framework-sa-key.json`) will be downloaded. **Store this file securely**, as it contains sensitive credentials. + +3. **Create a Kubernetes Secret** + + - **Upload the Service Account Key as a Secret**: + - Use the downloaded JSON key file to create a Kubernetes Secret in the namespace where the Splunk Operator is installed (e.g., `splunk-operator`). + + ```bash + kubectl create secret generic gcs-secret \ + --from-file=key.json=/path/to/splunk-app-framework-sa-key.json \ + -n splunk-operator + ``` + + - **Parameters**: + - `gcs-secret`: Name of the Kubernetes Secret. + - `/path/to/splunk-app-framework-sa-key.json`: Path to your downloaded JSON key file. + - `-n splunk-operator`: Namespace where the Splunk Operator is deployed. + +4. **Configure Splunk Operator to Use the Kubernetes Secret** + + - **Update Custom Resource Definition (CRD)**: + - Ensure that your Splunk Operator's CRD references the `gcs-secret` for GCS access. + + ```yaml + apiVersion: enterprise.splunk.com/v3 + kind: Standalone + metadata: + name: example-splunk-app + namespace: splunk-operator + spec: + appRepo: + appInstallPeriodSeconds: 90 + appSources: + - location: c3appfw-idxc-mj00 + name: appframework-idxc-clusterypt + premiumAppsProps: + esDefaults: {} + scope: cluster + volumeName: appframework-test-volume-idxc-k3r + appsRepoPollIntervalSeconds: 60 + defaults: + premiumAppsProps: + esDefaults: {} + scope: cluster + volumeName: appframework-test-volume-idxc-k3r + installMaxRetries: 2 + volumes: + - endpoint: https://storage.googleapis.com + name: appframework-test-volume-idxc-k3r + path: splk-integration-test-bucket + provider: gcp + region: "" + secretRef: gcs-secret + storageType: gcs + # ... other configurations + ``` + + - **Explanation of Key Fields**: + - **`secretRef`**: References the Kubernetes Secret (`gcs-secret`) created earlier, allowing the Splunk Operator to access the GCS bucket securely without embedding credentials directly in the CRD. + - **`endpoint`**: Specifies the GCS endpoint. + - **`path`**: Path to the GCS bucket (`splk-integration-test-bucket` in this example). + - **`provider`**: Specifies the cloud provider (`gcp` for Google Cloud Platform). + - **`storageType`**: Indicates the type of storage (`gcs` for Google Cloud Storage). + +5. **Deploy or Update Splunk Operator Resources** + + - **Apply the Updated CRD**: + + ```bash + kubectl apply -f splunk-app-crd.yaml + ``` + + - Replace `splunk-app-crd.yaml` with the path to your updated CRD file.
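Before moving on to the verification step, it can be useful to confirm that the downloaded key actually grants read access to the bucket, independent of the operator. The following is a minimal Go sketch using the `cloud.google.com/go/storage` client; the bucket name and key path are the example values from the steps above, and the file name is illustrative only.

```go
// check_gcs_key.go - hedged sketch: lists objects with the same JSON key that
// was stored in the gcs-secret Kubernetes Secret above.
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// Authenticate with the downloaded service account key file.
	client, err := storage.NewClient(ctx, option.WithCredentialsFile("/path/to/splunk-app-framework-sa-key.json"))
	if err != nil {
		log.Fatalf("failed to create GCS client: %v", err)
	}
	defer client.Close()

	// List objects to confirm the Storage Object Viewer role is effective.
	it := client.Bucket("splk-integration-test-bucket").Objects(ctx, nil)
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatalf("listing failed: %v", err)
		}
		fmt.Println(attrs.Name)
	}
}
```

A permission error here points at the IAM role on the service account rather than at the operator or the custom resource configuration.

6.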
**Verify the Configuration** + + - **Check Pods**: + + ```bash + kubectl get pods -n splunk-operator + ``` + + - Ensure that the Splunk Operator pods are running without errors. + + - **Inspect Logs**: + + ```bash + kubectl logs <splunk-operator-pod-name> -n splunk-operator + ``` + + - Look for logs indicating successful access to the GCS bucket. + +#### **Security Recommendations** + +- **Least Privilege Principle**: + - Assign only the necessary roles to the service account. In this case, `Storage Object Viewer` grants read access. If write access is required, consider `Storage Object Admin`. + +- **Secure Storage of Keys**: + - Protect the JSON key file and the Kubernetes Secret to prevent unauthorized access. + +- **Regular Rotation of Keys**: + - Periodically rotate the service account keys to enhance security. + +--- + +### Option 2: Using Workload Identity for GCP Access + +Workload Identity allows Kubernetes workloads to authenticate to GCP services without the need for managing service account keys. This method leverages GCP's Workload Identity to securely bind Kubernetes service accounts to GCP service accounts. + +#### **Advantages of Using Workload Identity** + +- **Enhanced Security**: Eliminates the need to handle service account keys, reducing the risk of key leakage. +- **Simplified Management**: Simplifies the authentication process by integrating directly with Kubernetes service accounts. +- **Automatic Key Rotation**: GCP manages the credentials, including rotation, ensuring up-to-date security practices. + +#### **Steps to Configure Access Using Workload Identity** + +1. **Enable Workload Identity on Your GKE Cluster** + + - **Prerequisite**: Ensure your GKE cluster is created with Workload Identity enabled. If not, enable it during cluster creation or update an existing cluster. + + - **During Cluster Creation**: + + ```bash + gcloud container clusters create splunkOperatorWICluster \ + --zone <COMPUTE_ZONE> \ + --workload-pool=<PROJECT_ID>.svc.id.goog + ``` + + - Replace `<PROJECT_ID>` with your GCP project ID and `<COMPUTE_ZONE>` with the zone for the cluster; the `--workload-pool` flag is what enables Workload Identity. + + - **For Existing Clusters**: + + ```bash + gcloud container clusters update splunkOperatorWICluster \ + --zone <COMPUTE_ZONE> \ + --workload-pool=<PROJECT_ID>.svc.id.goog + ``` + + - **Note**: Enabling Workload Identity on an existing cluster might require cluster reconfiguration and could lead to temporary downtime. + +2. **Create a GCP Service Account and Assign Permissions** + + - **Create Service Account**: + + ```bash + gcloud iam service-accounts create splunk-app-framework-sa \ + --display-name "Splunk App Framework Service Account" + ``` + + - **Grant Required Roles**: + + ```bash + gcloud projects add-iam-policy-binding <PROJECT_ID> \ + --member "serviceAccount:splunk-app-framework-sa@<PROJECT_ID>.iam.gserviceaccount.com" \ + --role "roles/storage.objectViewer" + ``` + + - Replace `<PROJECT_ID>` with your GCP project ID. + +3. **Create a Kubernetes Service Account** + + - **Define Service Account**: + + ```bash + kubectl create serviceaccount splunk-operator-sa \ + -n splunk-operator + ``` + + - **Parameters**: + - `splunk-operator-sa`: Name of the Kubernetes Service Account. + - `-n splunk-operator`: Namespace where the Splunk Operator is deployed. + +4.
**Associate the GCP Service Account with the Kubernetes Service Account** + + - **Establish IAM Policy Binding**: + + ```bash + gcloud iam service-accounts add-iam-policy-binding splunk-app-framework-sa@.iam.gserviceaccount.com \ + --role roles/iam.workloadIdentityUser \ + --member "serviceAccount:.svc.id.goog[splunk-operator/splunk-operator-sa]" + ``` + + - **Parameters**: + - ``: Your GCP project ID. + - `splunk-operator`: Kubernetes namespace. + - `splunk-operator-sa`: Kubernetes Service Account name. + +5. **Annotate the Kubernetes Service Account** + + - **Add Annotation to Link Service Accounts**: + + ```bash + kubectl annotate serviceaccount splunk-operator-sa \ + --namespace splunk-operator \ + iam.gke.io/gcp-service-account=splunk-app-framework-sa@.iam.gserviceaccount.com + ``` + + - **Parameters**: + - `splunk-operator-sa`: Kubernetes Service Account name. + - `splunk-operator`: Kubernetes namespace. + - ``: Your GCP project ID. + +6. **Update Splunk Operator Deployment to Use the Annotated Service Account** + + - **Modify Deployment YAML**: + + Replace the existing deployment configuration with the following YAML to use the annotated Kubernetes Service Account (`splunk-operator-sa`): + + ```yaml + # splunk-operator-deployment-wi.yaml + + apiVersion: enterprise.splunk.com/v3 + kind: Standalone + metadata: + name: example-splunk-app + namespace: splunk-operator + spec: + serviceAccount: splunk-operator-sa + appRepo: + appInstallPeriodSeconds: 90 + appSources: + - location: c3appfw-idxc-mj00 + name: appframework-idxc-clusterypt + premiumAppsProps: + esDefaults: {} + scope: cluster + volumeName: appframework-test-volume-idxc-k3r + appsRepoPollIntervalSeconds: 60 + defaults: + premiumAppsProps: + esDefaults: {} + scope: cluster + volumeName: appframework-test-volume-idxc-k3r + installMaxRetries: 2 + volumes: + - endpoint: https://storage.googleapis.com + name: appframework-test-volume-idxc-k3r + path: splk-integration-test-bucket + provider: gcp + region: "" + storageType: gcs + # ... other configurations + ``` + + - **Explanation of Key Fields**: + - **`serviceAccount`**: References the Kubernetes Service Account (`splunk-operator-sa`) that is associated with the GCP Service Account via Workload Identity. + - **`endpoint`**: Specifies the GCS endpoint. + - **`path`**: Path to the GCS bucket (`splk-integration-test-bucket` in this example). + - **`provider`**: Specifies the cloud provider (`gcp` for Google Cloud Platform). + - **`storageType`**: Indicates the type of storage (`gcs` for Google Cloud Storage). + + - **Apply the Updated Deployment**: + + ```bash + kubectl apply -f splunk-operator-deployment-wi.yaml + ``` + +7. **Configure Splunk Operator to Use Workload Identity** + + - **Update Custom Resource Definition (CRD)**: + - Ensure that your Splunk Operator's CRD is configured to utilize the Kubernetes Service Account `splunk-operator-sa` for GCS access. 
+ + ```yaml + apiVersion: enterprise.splunk.com/v3 + kind: Standalone + metadata: + name: example-splunk-app + namespace: splunk-operator + spec: + appRepo: + appInstallPeriodSeconds: 90 + appSources: + - location: c3appfw-idxc-mj00 + name: appframework-idxc-clusterypt + premiumAppsProps: + esDefaults: {} + scope: cluster + volumeName: appframework-test-volume-idxc-k3r + appsRepoPollIntervalSeconds: 60 + defaults: + premiumAppsProps: + esDefaults: {} + scope: cluster + volumeName: appframework-test-volume-idxc-k3r + installMaxRetries: 2 + volumes: + - endpoint: https://storage.googleapis.com + name: appframework-test-volume-idxc-k3r + path: splk-integration-test-bucket + provider: gcp + region: "" + serviceAccount: splunk-operator-sa + storageType: gcs + # ... other configurations + ``` + + - **Parameters**: + - `serviceAccount`: Name of the Kubernetes Service Account (`splunk-operator-sa`). + +8. **Verify the Configuration** + + - **Check Pods**: + + ```bash + kubectl get pods -n splunk-operator + ``` + + - Ensure that the Splunk Operator pods are running without errors. + + - **Inspect Logs**: + + ```bash + kubectl logs -n splunk-operator + ``` + + - Look for logs indicating successful access to the GCS bucket via Workload Identity. + +#### **Security Recommendations** + +- **Least Privilege Principle**: + - Assign only the necessary roles to the GCP Service Account. Here, `Storage Object Viewer` grants read access. If write access is required, consider `Storage Object Admin`. + +- **Secure Namespace Configuration**: + - Ensure that the Kubernetes Service Account (`splunk-operator-sa`) is restricted to the `splunk-operator` namespace to prevent unauthorized access. + +- **Regular Auditing**: + - Periodically review IAM roles and permissions to ensure that they adhere to the least privilege principle. + +- **Avoid Hardcoding Credentials**: + - With Workload Identity, there's no need to manage or store service account keys, enhancing security. + +--- + +### Comparison Between Service Account Keys and Workload Identity + +| Feature | Service Account Keys | Workload Identity | +|-----------------------------|-------------------------------------------------|-----------------------------------------------------| +| **Credential Management** | Requires handling and securely storing JSON keys.| Eliminates the need to manage credentials manually. | +| **Security** | Higher risk due to potential key leakage. | Enhanced security by using Kubernetes-native identities. | +| **Ease of Rotation** | Manual rotation of keys is necessary. | GCP manages credential rotation automatically. | +| **Granularity** | Access is tied to the service account key. | Fine-grained access control via Kubernetes Service Accounts. | +| **Integration Complexity** | Simpler to set up initially but harder to manage.| Requires additional setup but offers better security and manageability. | +| **Use Cases** | Suitable for simpler setups or legacy systems. | Ideal for Kubernetes-native environments requiring enhanced security. | + +#### **When to Use Which:** + +- **Service Account Keys**: + - Use when simplicity is a priority, and the security implications are manageable. + - Suitable for environments where Workload Identity is not supported or feasible. + +- **Workload Identity**: + - Preferable for Kubernetes-native deployments requiring robust security. + - Ideal for scenarios where automatic credential management and rotation are beneficial. + +--- + +### Best Practices for Google Cloud Storage Access + +1. 
**Adhere to the Least Privilege Principle**: + - Assign only the necessary roles to service accounts or Managed Identities to minimize security risks. + +2. **Use Workload Identity Where Possible**: + - Leverage Workload Identity for Kubernetes deployments to enhance security and simplify credential management. + +3. **Secure Namespace Configuration**: + - Limit Service Accounts to specific namespaces to prevent unauthorized access across the cluster. + +4. **Regularly Audit IAM Roles and Permissions**: + - Periodically review and adjust roles to ensure they align with current access requirements. + +5. **Monitor Access Logs**: + - Utilize GCP's logging and monitoring tools to track access patterns and detect any anomalies. + +6. **Automate Infrastructure as Code (IaC)**: + - Use tools like Terraform or Helm to manage service accounts, IAM roles, and Kubernetes configurations for consistency and repeatability. + +7. **Implement Network Security Controls**: + - Configure VPC Service Controls or firewall rules to restrict access to GCS buckets from authorized sources only. + +--- ## App Framework Troubleshooting diff --git a/go.mod b/go.mod index 641eeb05b..966cf2f57 100644 --- a/go.mod +++ b/go.mod @@ -3,18 +3,24 @@ module github.com/splunk/splunk-operator go 1.23.0 require ( + cloud.google.com/go/storage v1.10.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1 github.com/aws/aws-sdk-go v1.47.11 github.com/go-logr/logr v1.4.2 github.com/google/go-cmp v0.6.0 + github.com/google/uuid v1.6.0 github.com/joho/godotenv v1.5.1 github.com/minio/minio-go/v7 v7.0.16 github.com/onsi/ginkgo/v2 v2.21.0 github.com/onsi/gomega v1.35.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/wk8/go-ordered-map/v2 v2.1.7 go.uber.org/zap v1.24.0 + google.golang.org/api v0.30.0 k8s.io/api v0.26.2 k8s.io/apiextensions-apiserver v0.26.2 k8s.io/apimachinery v0.26.2 @@ -24,6 +30,9 @@ require ( ) require ( + cloud.google.com/go v0.65.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/buger/jsonparser v1.1.1 // indirect @@ -40,18 +49,21 @@ require ( github.com/go-openapi/swag v0.19.14 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/gax-go/v2 v2.0.5 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/klauspost/compress v1.13.5 // indirect github.com/klauspost/cpuid v1.3.1 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect 
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect github.com/minio/md5-simd v1.1.0 // indirect @@ -61,6 +73,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect @@ -68,11 +81,16 @@ require ( github.com/rs/xid v1.2.1 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.5.2 // indirect + go.opencensus.io v0.22.4 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.28.0 // indirect + golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect + golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.30.0 // indirect golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect + golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/term v0.25.0 // indirect golang.org/x/text v0.19.0 // indirect @@ -80,6 +98,8 @@ require ( golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect + google.golang.org/grpc v1.49.0 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect diff --git a/go.sum b/go.sum index a6732a725..edf4e3250 100644 --- a/go.sum +++ b/go.sum @@ -12,6 +12,7 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= @@ -29,8 +30,21 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod 
h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1 h1:cf+OIKbkmMHBaC3u78AXomweqM0oxQSgBXRZf3WH4yM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1/go.mod h1:ap1dmS6vQKJxSMNiGJcq4QuUQkOynyD93gLw6MDF7ek= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -38,6 +52,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.47.11 h1:Dol+MA+hQblbnXUI3Vk9qvoekU6O1uDEuAItezjiWNQ= @@ -61,6 +76,11 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -75,6 +95,8 @@ github.com/emicklei/go-restful/v3 v3.10.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane 
v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= @@ -83,6 +105,7 @@ github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJ github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -114,6 +137,8 @@ github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -158,12 +183,15 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian 
v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -172,18 +200,19 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= -github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -205,6 +234,7 @@ github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -221,10 +251,14 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -254,16 +288,13 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= -github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= -github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -296,7 +327,10 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 
h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -311,14 +345,16 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/wk8/go-ordered-map/v2 v2.1.7 h1:aUZ1xBMdbvY8wnNt77qqo4nyT3y0pX4Usat48Vm+hik= github.com/wk8/go-ordered-map/v2 v2.1.7/go.mod h1:9Xvgm2mV2kSq2SAm0Y608tBmu8akTzI7c2bz7/G7ZN4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -329,7 +365,9 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= @@ -347,8 +385,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -372,6 +408,7 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -381,6 +418,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -410,11 +449,10 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 
@@ -435,6 +473,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -469,22 +509,22 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -492,10 +532,9 @@ 
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -547,8 +586,6 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -572,6 +609,7 @@ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -604,6 +642,7 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -611,6 +650,8 @@ google.golang.org/genproto 
v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -623,6 +664,11 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -635,16 +681,17 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -653,6 +700,7 @@ gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/pkg/splunk/client/azureblobclient.go b/pkg/splunk/client/azureblobclient.go index f308e607c..679a113f4 100644 --- a/pkg/splunk/client/azureblobclient.go +++ b/pkg/splunk/client/azureblobclient.go @@ -16,517 +16,333 @@ package client import ( - "bytes" "context" - "crypto/hmac" - "crypto/sha256" - "crypto/tls" - "encoding/base64" - "encoding/json" - "encoding/xml" - "errors" "fmt" "io" - "net/http" - "net/url" "os" - "sort" - "strconv" - "strings" - "time" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "sigs.k8s.io/controller-runtime/pkg/log" ) -// blank assignment to verify that AzureBlobClient implements RemoteDataClient var _ RemoteDataClient = &AzureBlobClient{} -// AzureBlobClient is a client to implement Azure Blob specific APIs -type AzureBlobClient struct { - BucketName string - StorageAccountName string - SecretAccessKey string - Prefix string - StartAfter string - Endpoint string - HTTPClient SplunkHTTPClient +// ContainerClientInterface abstracts the methods used from the Azure SDK's ContainerClient. +type ContainerClientInterface interface { + NewListBlobsFlatPager(options *container.ListBlobsFlatOptions) *runtime.Pager[azblob.ListBlobsFlatResponse] + NewBlobClient(blobName string) BlobClientInterface } -// ContainerProperties represents blob properties -type ContainerProperties struct { - CreationTime string `xml:"Creation-Time"` - LastModified string `xml:"Last-Modified"` - ETag string `xml:"Etag"` - ContentLength string `xml:"Content-Length"` +// BlobClientInterface abstracts the methods used from the Azure SDK's BlobClient. 
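// Keeping these interfaces down to the handful of SDK calls the operator actually
// uses is what lets the unit tests further below substitute lightweight testify
// mocks for a live storage account.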
+type BlobClientInterface interface { + DownloadStream(ctx context.Context, options *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) } -// Blob represents a single blob -type Blob struct { - XMLName xml.Name `xml:"Blob"` - Name string `xml:"Name"` - Properties ContainerProperties `xml:"Properties"` +func (c *ContainerClientWrapper) NewListBlobsFlatPager(options *azblob.ListBlobsFlatOptions) *runtime.Pager[azblob.ListBlobsFlatResponse] { + return c.Client.NewListBlobsFlatPager(options) } -// Blobs represents a slice of blobs -type Blobs struct { - XMLName xml.Name `xml:"Blobs"` - Blob []Blob `xml:"Blob"` +// ContainerClientWrapper wraps the Azure SDK's ContainerClient and implements ContainerClientInterface. +type ContainerClientWrapper struct { + *container.Client } -// EnumerationResults holds unmarshaled data from listing APIs -type EnumerationResults struct { - XMLName xml.Name `xml:"EnumerationResults"` - Blobs Blobs `xml:"Blobs"` +// NewBlobClient wraps the Azure SDK's NewBlobClient method to return BlobClientInterface. +func (w *ContainerClientWrapper) NewBlobClient(blobName string) BlobClientInterface { + return &BlobClientWrapper{w.Client.NewBlobClient(blobName)} } -// TokenResponse holds the unmarshaled oauth token -type TokenResponse struct { - AccessToken string `json:"access_token"` - ClientID string `json:"client_id"` +// BlobClientWrapper wraps the Azure SDK's BlobClient and implements BlobClientInterface. +type BlobClientWrapper struct { + *blob.Client } -// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. -func ComputeHMACSHA256(message string, base64DecodedAccountKey []byte) (base64String string) { - // Signature=Base64(HMAC-SHA256(UTF8(StringToSign), Base64.decode())) - h := hmac.New(sha256.New, base64DecodedAccountKey) - h.Write([]byte(message)) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) +// DownloadStream wraps the Azure SDK's DownloadStream method. +func (w *BlobClientWrapper) DownloadStream(ctx context.Context, options *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { + return w.Client.DownloadStream(ctx, options) } -// buildStringToSign is a helper API for adding auth signature to HTTP headers -func buildStringToSign(request http.Request, accountName string) (string, error) { - // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services - headers := request.Header - contentLength := headers.Get(headerContentLength) - if contentLength == "0" { - contentLength = "" - } +// CredentialType defines the type of credential used for authentication. +type CredentialType int - canonicalizedResource, err := buildCanonicalizedResource(request.URL, accountName) - if err != nil { - return "", err - } +const ( + // CredentialTypeSharedKey indicates Shared Key authentication. + CredentialTypeSharedKey CredentialType = iota + // CredentialTypeAzureAD indicates Azure AD authentication. 
+ CredentialTypeAzureAD +) - stringToSign := strings.Join([]string{ - request.Method, - headers.Get(headerContentEncoding), - headers.Get(headerContentLanguage), - contentLength, - headers.Get(headerContentMD5), - headers.Get(headerContentType), - "", // Empty date because x-ms-date is expected (as per web page above) - headers.Get(headerIfModifiedSince), - headers.Get(headerIfMatch), - headers.Get(headerIfNoneMatch), - headers.Get(headerIfUnmodifiedSince), - headers.Get(headerRange), - buildCanonicalizedHeader(headers), - canonicalizedResource, - }, "\n") - return stringToSign, nil +// AzureBlobClient implements the RemoteDataClient interface for Azure Blob Storage. +type AzureBlobClient struct { + BucketName string + StorageAccountName string + Prefix string + StartAfter string + Endpoint string + ContainerClient ContainerClientInterface + CredentialType CredentialType } -// buildCanonicalizedHeader is a helper API for adding auth signature to HTTP headers -func buildCanonicalizedHeader(headers http.Header) string { - cm := map[string][]string{} - for k, v := range headers { - headerName := strings.TrimSpace(strings.ToLower(k)) - if strings.HasPrefix(headerName, "x-ms-") { - cm[headerName] = v // NOTE: the value must not have any whitespace around it. - } - } - if len(cm) == 0 { - return "" - } +// NewAzureBlobClient initializes and returns an AzureBlobClient. +// It supports both Shared Key and Azure AD authentication based on provided credentials. +// NewAzureBlobClient initializes a new AzureBlobClient with the provided parameters. +// It supports both Shared Key and Azure AD authentication methods. +// +// Parameters: +// - ctx: The context for the operation. +// - bucketName: The name of the Azure Blob container. +// - storageAccountName: The name of the Azure Storage account. +// - secretAccessKey: The shared key for authentication (optional; leave empty to use Azure AD). +// - prefix: The prefix for blob listing (optional). +// - startAfter: The marker for blob listing (optional). +// - region: The Azure region (e.g., "eastus"). +// - endpoint: A custom endpoint (optional). +// - initFunc: An initialization function to be executed (optional). +// +// Returns: +// - RemoteDataClient: An interface representing the remote data client. +// - error: An error object if the initialization fails. +// +// The function logs the initialization process and selects the appropriate +// authentication method based on the presence of the secretAccessKey. If the +// secretAccessKey is provided, Shared Key authentication is used; otherwise, +// Azure AD authentication is used. 
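// A minimal, hypothetical call with illustrative values (NoOpInitFunc is defined
// later in this file) could look like:
//
//	client, err := NewAzureBlobClient(ctx,
//		"appscontainer1",   // container name
//		"mystorageaccount", // storage account name
//		"",                 // empty secret selects Azure AD via DefaultAzureCredential
//		"adminAppsRepo",    // prefix
//		"",                 // startAfter
//		"",                 // region (optional)
//		"",                 // endpoint (optional)
//		NoOpInitFunc,
//	)
//
// The returned value satisfies RemoteDataClient, so after checking err a caller
// uses the GetAppsList and DownloadApp methods defined below.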
+func NewAzureBlobClient( + ctx context.Context, + bucketName string, // Azure Blob container name + storageAccountName string, // Azure Storage account name + secretAccessKey string, // Shared Key (optional; leave empty to use Azure AD) + prefix string, // Prefix for blob listing (optional) + startAfter string, // Marker for blob listing (optional) + region string, // Azure region (e.g., "eastus") + endpoint string, // Custom endpoint (optional) + initFunc GetInitFunc, // Initialization function +) (RemoteDataClient, error) { // Matches GetRemoteDataClient signature + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("NewAzureBlobClient") - keys := make([]string, 0, len(cm)) - for key := range cm { - keys = append(keys, key) - } - sort.Strings(keys) - ch := bytes.NewBufferString("") - for i, key := range keys { - if i > 0 { - ch.WriteRune('\n') - } - ch.WriteString(key) - ch.WriteRune(':') - ch.WriteString(strings.Join(cm[key], ",")) + scopedLog.Info("Initializing AzureBlobClient") + + // Execute the initialization function if provided. + if initFunc != nil { + initResult := initFunc(ctx, endpoint, storageAccountName, secretAccessKey) + // Currently, no action is taken with initResult. Modify if needed. + _ = initResult } - return ch.String() -} -// buildCanonicalizedResource is a helper API for adding auth signature to HTTP headers -func buildCanonicalizedResource(u *url.URL, accountName string) (string, error) { - // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services - cr := bytes.NewBufferString("/") - cr.WriteString(accountName) - - if len(u.Path) > 0 { - // Any portion of the CanonicalizedResource string that is derived from - // the resource's URI should be encoded exactly as it is in the URI. - // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx - cr.WriteString(u.EscapedPath()) + // Construct the service URL. + var serviceURL string + if endpoint != "" { + serviceURL = endpoint + } else if region != "" { + serviceURL = fmt.Sprintf("https://%s.blob.%s.core.windows.net", storageAccountName, region) } else { - // a slash is required to indicate the root path - cr.WriteString("/") + serviceURL = fmt.Sprintf("https://%s.blob.core.windows.net", storageAccountName) } - // params is a map[string][]string; param name is key; params values is []string - params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values - if err != nil { - return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code") - } + var containerClient ContainerClientInterface + var credentialType CredentialType + + if secretAccessKey != "" { + // Use Shared Key authentication. + scopedLog.Info("Using Shared Key authentication") - if len(params) > 0 { // There is at least 1 query parameter - paramNames := []string{} // We use this to sort the parameter key names - for paramName := range params { - paramNames = append(paramNames, paramName) // paramNames must be lowercase + // Create a Shared Key Credential. + sharedKeyCredential, err := azblob.NewSharedKeyCredential(storageAccountName, secretAccessKey) + if err != nil { + scopedLog.Error(err, "Failed to create SharedKeyCredential") + return nil, fmt.Errorf("failed to create SharedKeyCredential: %w", err) } - sort.Strings(paramNames) - for _, paramName := range paramNames { - paramValues := params[paramName] - sort.Strings(paramValues) + // Initialize the container client with Shared Key Credential. 
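	// With the default endpoint, the container URL assembled below takes the form
	//   https://<storageAccountName>.blob.core.windows.net/<bucketName>
	// for example https://mystorageaccount.blob.core.windows.net/appscontainer1
	// (account and container names here are illustrative).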
+ rawContainerClient, err := container.NewClientWithSharedKeyCredential( + fmt.Sprintf("%s/%s", serviceURL, bucketName), + sharedKeyCredential, + nil, + ) + if err != nil { + scopedLog.Error(err, "Failed to create ContainerClient with SharedKeyCredential") + return nil, fmt.Errorf("failed to create ContainerClient with SharedKeyCredential: %w", err) + } + + // Wrap the container client. + containerClient = &ContainerClientWrapper{rawContainerClient} + + credentialType = CredentialTypeSharedKey + } else { + // Use Azure AD authentication. + scopedLog.Info("Using Azure AD authentication") + + // Create a Token Credential using DefaultAzureCredential. + // The Azure SDK uses environment variables to configure authentication when using DefaultAzureCredential. + // For Workload Identity, by adding annotations to the pod's service account: + // azure.workload.identity/client-id: + // the following environment variables are typically used: + // AZURE_AUTHORITY_HOST: The Azure Active Directory endpoint (default is https://login.microsoftonline.com/). + // AZURE_CLIENT_ID: The client ID of the Azure AD application linked to the pod's service account. + // AZURE_TENANT_ID: The tenant ID of the Azure Active Directory where the Azure AD application resides. + // AZURE_FEDERATED_TOKEN_FILE: The path to the file containing the token issued by Kubernetes, usually mounted as a volume. + // when using Azure AD Pod Identity (deprecated), the following environment variables are typically used: + // AZURE_POD_IDENTITY_AUTHORITY_HOST: The Azure Active Directory endpoint (default is https://login.microsoftonline.com/). + // AZURE_POD_IDENTITY_CLIENT_ID: The client ID of the Azure AD application linked to the pod's service account. + // AZURE_POD_IDENTITY_TENANT_ID: The tenant ID of the Azure Active Directory where the Azure AD application resides. + // AZURE_POD_IDENTITY_TOKEN_FILE: The path to the file containing the token issued by Kubernetes, usually mounted as a volume. + // AZURE_POD_IDENTITY_RESOURCE_ID: The resource ID of the Azure resource to access. + // AZURE_POD_IDENTITY_USE_MSI: Set to "true" to use Managed Service Identity (MSI) for authentication. + // AZURE_POD_IDENTITY_USER_ASSIGNED_ID + + tokenCredential, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + scopedLog.Error(err, "Failed to create DefaultAzureCredential") + return nil, fmt.Errorf("failed to create DefaultAzureCredential: %w", err) + } - // Join the sorted key values separated by ',' - // Then prepend "keyName:"; then add this string to the buffer - cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + // Initialize the container client with Token Credential. + rawContainerClient, err := container.NewClient( + fmt.Sprintf("%s%s", serviceURL, bucketName), + tokenCredential, + nil, + ) + if err != nil { + scopedLog.Error(err, "Failed to create ContainerClient with TokenCredential") + return nil, fmt.Errorf("failed to create ContainerClient with TokenCredential: %w", err) } + + // Wrap the container client. 
+ containerClient = &ContainerClientWrapper{rawContainerClient} + + credentialType = CredentialTypeAzureAD } - return cr.String(), nil -} -// NewAzureBlobClient returns an AzureBlob client -func NewAzureBlobClient(ctx context.Context, bucketName string, storageAccountName string, secretAccessKey string, prefix string, startAfter string, region string, endpoint string, fn GetInitFunc) (RemoteDataClient, error) { - // Get http client - azureHTTPClient := fn(ctx, endpoint, storageAccountName, secretAccessKey) + scopedLog.Info("AzureBlobClient initialized successfully", + "CredentialType", credentialType, + "BucketName", bucketName, + "StorageAccountName", storageAccountName, + ) return &AzureBlobClient{ BucketName: bucketName, StorageAccountName: storageAccountName, - SecretAccessKey: secretAccessKey, Prefix: prefix, StartAfter: startAfter, Endpoint: endpoint, - HTTPClient: azureHTTPClient.(SplunkHTTPClient), + ContainerClient: containerClient, + CredentialType: credentialType, }, nil } -// InitAzureBlobClientWrapper is a wrapper around InitAzureBlobClientSession -func InitAzureBlobClientWrapper(ctx context.Context, appAzureBlobEndPoint string, storageAccountName string, secretAccessKey string) interface{} { - return InitAzureBlobClientSession(ctx) -} - -// InitAzureBlobClientSession initializes and returns a client session object -func InitAzureBlobClientSession(ctx context.Context) SplunkHTTPClient { - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("InitAzureBlobClientSession") - - // Enforcing minimum version TLS1.2 - tr := &http.Transport{ - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - }, - } - tr.ForceAttemptHTTP2 = true - - httpClient := http.Client{ - Transport: tr, - Timeout: appFrameworkHttpclientTimeout * time.Second, - } - - // Validate transport - tlsVersion := "Unknown" - if tr, ok := httpClient.Transport.(*http.Transport); ok { - tlsVersion = getTLSVersion(tr) - } - - scopedLog.Info("Azure Blob Client Session initialization successful.", "TLS Version", tlsVersion) - - return &httpClient -} - -// Update http request header with secrets info -func updateAzureHTTPRequestHeaderWithSecrets(ctx context.Context, client *AzureBlobClient, httpRequest *http.Request) error { - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("updateHttpRequestHeaderWithSecrets") - - scopedLog.Info("Updating Azure Http Request with secrets") - - // Update httpRequest header with data and version - httpRequest.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)} - httpRequest.Header[headerXmsVersion] = []string{azureHTTPHeaderXmsVersion} - - // Get HMAC signature using storage account name and secret access key - stringToSign, err := buildStringToSign(*httpRequest, client.StorageAccountName) - if err != nil { - scopedLog.Error(err, "Azure Blob with secrets Failed to build string to sign") - return err - } - decodedAccountKey, err := base64.StdEncoding.DecodeString(client.SecretAccessKey) - if err != nil { - // failed to decode - scopedLog.Error(err, "Azure Blob with secrets failed to decode accountKey") - return err - } - signature := ComputeHMACSHA256(stringToSign, decodedAccountKey) - authHeader := strings.Join([]string{"SharedKey ", client.StorageAccountName, ":", signature}, "") - - // Update httpRequest header with the HMAC256 signature - httpRequest.Header[headerAuthorization] = []string{authHeader} - - return nil -} - -// Update http request header with IAM info -func updateAzureHTTPRequestHeaderWithIAM(ctx 
context.Context, client *AzureBlobClient, httpRequest *http.Request) error { - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("updateHttpRequestHeaderWithIAM") - - scopedLog.Info("Updating Azure Http Request with IAM") - - // Create http request to retrive IAM oauth token from metadata URL - oauthRequest, err := http.NewRequest("GET", azureTokenFetchURL, nil) - if err != nil { - scopedLog.Error(err, "Azure Blob Failed to create new token request") - return err - } - - // Mark metadata flag - oauthRequest.Header.Set("Metadata", "true") - - // Create raw query for http request - values := oauthRequest.URL.Query() - values.Add("api-version", azureIMDSApiVersion) - values.Add("resource", "https://storage.azure.com/") - oauthRequest.URL.RawQuery = values.Encode() - - // Retrieve oauth token - resp, err := client.HTTPClient.Do(oauthRequest) - if err != nil { - scopedLog.Error(err, "Azure blob,Errored when sending request to the server") - return err - } - - defer resp.Body.Close() - - // A response code other than 200 usually means that no managed indentity is - // configured with the aks cluster. - if resp.StatusCode != 200 { - return errors.New("please validate that your cluster is configured to use managed identity") - } - - // Read http response - responseBody, err := io.ReadAll(resp.Body) - if err != nil { - scopedLog.Error(err, "Azure blob,Errored when reading resp body") - return err - } - - // Extract the token from the http response - var azureOauthTokenResponse TokenResponse - err = json.Unmarshal(responseBody, &azureOauthTokenResponse) - if err != nil { - scopedLog.Error(err, "Unable to unmarshal response to token", "Response:", string(responseBody)) - return err - } - - // Update http request header with IAM access token - httpRequest.Header.Set(headerXmsVersion, azureHTTPHeaderXmsVersion) - httpRequest.Header.Set(headerAuthorization, "Bearer "+azureOauthTokenResponse.AccessToken) - - return nil -} - -// GetAppsList gets the list of apps from remote storage +// GetAppsList retrieves a list of blobs (apps) from the Azure Blob container. func (client *AzureBlobClient) GetAppsList(ctx context.Context) (RemoteDataListResponse, error) { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("AzureBlob:GetAppsList").WithValues("Endpoint", client.Endpoint, "Bucket", client.BucketName, - "Prefix", client.Prefix) + scopedLog := reqLogger.WithName("AzureBlob:GetAppsList").WithValues("Bucket", client.BucketName) - scopedLog.Info("Getting Apps list") + scopedLog.Info("Fetching list of apps") - // create rest request URL with storage account name, container, prefix - appsListFetchURL := fmt.Sprintf(azureBlobListAppFetchURL, client.Endpoint, client.BucketName, client.Prefix) - - // Create a http request with the URL - httpRequest, err := http.NewRequest("GET", appsListFetchURL, nil) - if err != nil { - scopedLog.Error(err, "Azure Blob Failed to create request for App fetch URL") - return RemoteDataListResponse{}, err + // Define options for listing blobs. 
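	// Only Prefix is set: it scopes the listing to the configured app source
	// location. Marker/StartAfter-based continuation is left commented out below.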
+ options := &container.ListBlobsFlatOptions{ + Prefix: &client.Prefix, } - // Setup the httpRequest with required authentication - if client.StorageAccountName != "" && client.SecretAccessKey != "" { - // Use Secrets - err = updateAzureHTTPRequestHeaderWithSecrets(ctx, client, httpRequest) - } else { - // No Secret provided, try using IAM - err = updateAzureHTTPRequestHeaderWithIAM(ctx, client, httpRequest) - } - if err != nil { - scopedLog.Error(err, "Failed to get http request authenticated") - return RemoteDataListResponse{}, err - } + // Set the Marker if StartAfter is provided. + //if client.StartAfter != "" { + // options.Marker = &client.StartAfter + //} - // List the apps - httpResponse, err := client.HTTPClient.Do(httpRequest) - if err != nil { - scopedLog.Error(err, "Azure blob, unable to execute list apps http request") - return RemoteDataListResponse{}, err - } - - defer httpResponse.Body.Close() - - // Authorization unsuccessul - if httpResponse.StatusCode != 200 { - err = errors.New("error authorizing the rest call. check your IAM/secret configuration") - return RemoteDataListResponse{}, err - } - - // Extract response - azureRemoteDataResponse, err := extractResponse(ctx, httpResponse) - if err != nil { - scopedLog.Error(err, "unable to extract app packages list from http response") - return azureRemoteDataResponse, err - } + // Create a pager to iterate through blobs. + pager := client.ContainerClient.NewListBlobsFlatPager(options) - // Successfully listed apps - scopedLog.Info("Listing apps successful") - - return azureRemoteDataResponse, err -} - -// Extract data from httpResponse and fill it in RemoteDataListResponse structs -func extractResponse(ctx context.Context, httpResponse *http.Response) (RemoteDataListResponse, error) { - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("AzureBlob:extractResponse") - - azureAppsRemoteData := RemoteDataListResponse{} - - // Read response body - responseBody, err := io.ReadAll(httpResponse.Body) - if err != nil { - scopedLog.Error(err, "Errored when reading resp body for app packages list rest call") - return azureAppsRemoteData, err - } - - // Variable to hold unmarshaled data - data := &EnumerationResults{} - - // Unmarshal http response - err = xml.Unmarshal(responseBody, data) - if err != nil { - scopedLog.Error(err, "Errored unmarshalling app packages list", "rest call response:", string(responseBody)) - return azureAppsRemoteData, err - } - - // Extract data from all blobs - for count := 0; count < len(data.Blobs.Blob); count++ { - // Extract blob - blob := data.Blobs.Blob[count] - - scopedLog.Info("Listing App package details", "Count:", count, "App package name", blob.Name, - "Etag", blob.Properties.ETag, "Created on", blob.Properties.CreationTime, - "Modified on", blob.Properties.LastModified, "Content Size", blob.Properties) - - // Extract properties - newETag := blob.Properties.ETag - newKey := blob.Name - newLastModified, errTime := time.Parse(http.TimeFormat, blob.Properties.LastModified) - if errTime != nil { - scopedLog.Error(err, "Unable to get lastModifiedTime, not adding to list", "App Package", newKey, "name", blob.Properties.LastModified) - continue + var blobs []*RemoteObject + for pager.More() { + resp, err := pager.NextPage(ctx) + if err != nil { + scopedLog.Error(err, "Error listing blobs") + return RemoteDataListResponse{}, fmt.Errorf("error listing blobs: %w", err) } - newSize, errInt := strconv.ParseInt(blob.Properties.ContentLength, 10, 64) - if errInt != nil { - scopedLog.Error(err, 
"Unable to get newSize, not adding to list", "App package", newKey, "name", blob.Properties.ContentLength) - continue - } - newStorageClass := "standard" //TODO : map to a azure blob field - // Create new object and append - newRemoteObject := RemoteObject{Etag: &newETag, Key: &newKey, LastModified: &newLastModified, Size: &newSize, StorageClass: &newStorageClass} - azureAppsRemoteData.Objects = append(azureAppsRemoteData.Objects, &newRemoteObject) + for _, blob := range resp.Segment.BlobItems { + etag := string(*blob.Properties.ETag) + name := *blob.Name + lastModified := blob.Properties.LastModified + size := blob.Properties.ContentLength + + remoteObject := &RemoteObject{ + Etag: &etag, + Key: &name, + LastModified: lastModified, + Size: size, + } + blobs = append(blobs, remoteObject) + } } - return azureAppsRemoteData, nil + scopedLog.Info("Successfully fetched list of apps", "TotalBlobs", len(blobs)) + + return RemoteDataListResponse{Objects: blobs}, nil } -// DownloadApp downloads an app package from remote storage +// DownloadApp downloads a specific blob from Azure Blob Storage to a local file. func (client *AzureBlobClient) DownloadApp(ctx context.Context, downloadRequest RemoteDataDownloadRequest) (bool, error) { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("AzureBlob:DownloadApp").WithValues("Endpoint", client.Endpoint, "Bucket", client.BucketName, - "Prefix", client.Prefix, "downloadRequest", downloadRequest) + scopedLog := reqLogger.WithName("AzureBlob:DownloadApp").WithValues( + "Bucket", client.BucketName, + "RemoteFile", downloadRequest.RemoteFile, + "LocalFile", downloadRequest.LocalFile, + ) - scopedLog.Info("Download App package") + scopedLog.Info("Initiating blob download") - // create rest request URL with storage account name, container, prefix - appPackageFetchURL := fmt.Sprintf(azureBlobDownloadAppFetchURL, client.Endpoint, client.BucketName, downloadRequest.RemoteFile) + // Create a blob client for the specific blob. + blobClient := client.ContainerClient.NewBlobClient(downloadRequest.RemoteFile) - // Create a http request with the URL - httpRequest, err := http.NewRequest("GET", appPackageFetchURL, nil) + // Download the blob content. + get, err := blobClient.DownloadStream(ctx, nil) if err != nil { - scopedLog.Error(err, "Azure Blob Failed to create request for App package fetch URL") - return false, err + scopedLog.Error(err, "Failed to download blob") + return false, fmt.Errorf("failed to download blob: %w", err) } + defer get.Body.Close() - // Setup the httpRequest with required authentication - if client.StorageAccountName != "" && client.SecretAccessKey != "" { - // Use Secrets - err = updateAzureHTTPRequestHeaderWithSecrets(ctx, client, httpRequest) - } else { - // No Secret provided, try using IAM - err = updateAzureHTTPRequestHeaderWithIAM(ctx, client, httpRequest) - } - if err != nil { - scopedLog.Error(err, "Failed to get http request authenticated") - return false, err - } - - scopedLog.Info("Calling the download rest request") - - // Download the app - httpResponse, err := client.HTTPClient.Do(httpRequest) - if err != nil { - scopedLog.Error(err, "Azure blob, unable to execute download apps http request") - return false, err - } - - defer httpResponse.Body.Close() - - // Authorization unsuccessul for download rest call - if httpResponse.StatusCode != 200 { - err = errors.New("error authorizing the rest call. 
check your IAM/secret configuration") - return false, err - } - - // Create local file on operator + // Create or truncate the local file. localFile, err := os.Create(downloadRequest.LocalFile) if err != nil { - scopedLog.Error(err, "Unable to open local file") - return false, err + scopedLog.Error(err, "Failed to create local file") + return false, fmt.Errorf("failed to create local file: %w", err) } defer localFile.Close() - scopedLog.Info("Copying the download response to localFile") - - // Copy the http response (app packages to the local file path) - _, err = io.Copy(localFile, httpResponse.Body) + // Write the content to the local file. + _, err = io.Copy(localFile, get.Body) if err != nil { - fmt.Println(err.Error(), "Failed when copying resp body for app download") - return false, err + scopedLog.Error(err, "Failed to write blob content to local file") + return false, fmt.Errorf("failed to write blob content to local file: %w", err) } - // Successfully downloaded app package - scopedLog.Info("Download app package successful") + scopedLog.Info("Blob downloaded successfully") - return true, err + return true, nil } -// RegisterAzureBlobClient will add the corresponding function pointer to the map +// NoOpInitFunc performs no additional initialization. +// It satisfies the GetInitFunc type and can be used when no extra setup is needed. +func NoOpInitFunc( + ctx context.Context, + appAzureBlobEndPoint string, + storageAccountName string, + secretAccessKey string, // Optional: can be empty +) interface{} { + // No additional initialization required. + return nil +} + +// RegisterAzureBlobClient registers the AzureBlobClient in the RemoteDataClientsMap. func RegisterAzureBlobClient() { - wrapperObject := GetRemoteDataClientWrapper{GetRemoteDataClient: NewAzureBlobClient, GetInitFunc: InitAzureBlobClientWrapper} + wrapperObject := GetRemoteDataClientWrapper{ + GetRemoteDataClient: NewAzureBlobClient, + GetInitFunc: NoOpInitFunc, // Use CustomInitFunc if additional initialization is needed + } RemoteDataClientsMap["azure"] = wrapperObject } diff --git a/pkg/splunk/client/azureblobclient_test.go b/pkg/splunk/client/azureblobclient_test.go index cd73f4160..e53a0193d 100644 --- a/pkg/splunk/client/azureblobclient_test.go +++ b/pkg/splunk/client/azureblobclient_test.go @@ -17,1188 +17,410 @@ package client import ( "context" - "encoding/json" - "encoding/xml" - "errors" "fmt" - "net/http" - "net/http/httptest" - "net/url" + "io" "os" "strings" "testing" "time" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - spltest "github.com/splunk/splunk-operator/pkg/splunk/test" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" ) -// Helpers for faulty http request/response -type errReader int - -func (errReader) Read(p []byte) (n int, err error) { - return 0, errors.New("test error") +// MockContainerClient is a mock implementation of ContainerClientInterface. 
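For orientation before the mock definitions, a short, hypothetical sketch of how a caller drives the client implemented above; it assumes this package's RemoteDataClient, RemoteObject, and RemoteDataDownloadRequest types are in scope, that path/filepath is imported, and it keeps error handling minimal:

func exampleListAndDownload(ctx context.Context, c RemoteDataClient) error {
	// List app packages under the client's configured prefix.
	list, err := c.GetAppsList(ctx)
	if err != nil {
		return err
	}
	// Download each package into the working directory (illustrative target path).
	for _, obj := range list.Objects {
		req := RemoteDataDownloadRequest{
			RemoteFile: *obj.Key,                // blob name as returned by the listing
			LocalFile:  filepath.Base(*obj.Key), // e.g. "app1.tgz"
		}
		if _, err := c.DownloadApp(ctx, req); err != nil {
			return err
		}
	}
	return nil
}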
+type MockContainerClient struct { + mock.Mock } -func TestInitAzureBlobClientWrapper(t *testing.T) { - ctx := context.TODO() - azureBlobClientSession := InitAzureBlobClientWrapper(ctx, "https://mystorageaccount.blob.core.windows.net", "abcd", "1234") - if azureBlobClientSession == nil { - t.Errorf("We should not have got a nil Azure Blob Client") - } +// NewListBlobsFlatPager mocks the NewListBlobsFlatPager method. +func (m *MockContainerClient) NewListBlobsFlatPager(options *azblob.ListBlobsFlatOptions) *runtime.Pager[azblob.ListBlobsFlatResponse] { + args := m.Called(options) + return args.Get(0).(*runtime.Pager[azblob.ListBlobsFlatResponse]) } -func TestNewAzureBlobClient(t *testing.T) { - ctx := context.TODO() - fn := InitAzureBlobClientWrapper - - azureBlobClient, err := NewAzureBlobClient(ctx, "sample_bucket", "abcd", "xyz", "admin/", "admin", "us-west-2", "https://mystorageaccount.blob.core.windows.net", fn) - if azureBlobClient == nil || err != nil { - t.Errorf("NewAzureBlobClient should have returned a valid Azure Blob client.") - } +// NewBlobClient mocks the NewBlobClient method. +func (m *MockContainerClient) NewBlobClient(blobName string) BlobClientInterface { + args := m.Called(blobName) + return args.Get(0).(BlobClientInterface) } -func TestBuildStringToSign(t *testing.T) { - hd := make(map[string][]string) - - hd["Content-Length"] = []string{"0"} - hreq := http.Request{ - Header: hd, - URL: &url.URL{ - Path: "", - RawQuery: ";", - }, - } - _, _ = buildStringToSign(hreq, "") - - // Test invalid scenario - hreq = http.Request{ - URL: &url.URL{ - Path: "", - RawQuery: ";", - }, - } - _, _ = buildStringToSign(hreq, "") -} - -func TestBuildCanonicalizedHeader(t *testing.T) { - hd := make(map[string][]string) - buildCanonicalizedHeader(hd) -} - -func TestUpdateAzureHTTPRequestHeaderWithSecrets(t *testing.T) { - ctx := context.TODO() - hd := make(map[string][]string) - - hd["Content-Length"] = []string{"0"} - hreq := http.Request{ - Header: hd, - URL: &url.URL{ - Path: "", - RawQuery: ";", - }, - } - - azClient := &AzureBlobClient{ - StorageAccountName: "saname", - SecretAccessKey: "skey", - } - updateAzureHTTPRequestHeaderWithSecrets(ctx, azClient, &hreq) - - hreq.URL.RawQuery = "validquery" - azClient.SecretAccessKey = "!;." - updateAzureHTTPRequestHeaderWithSecrets(ctx, azClient, &hreq) +// MockBlobClient is a mock implementation of BlobClientInterface. +type MockBlobClient struct { + mock.Mock } -func TestExtractResponse(t *testing.T) { - ctx := context.TODO() - testRequest := httptest.NewRequest(http.MethodPost, "/something", errReader(0)) - - httpRes := http.Response{ - Body: testRequest.Body, - } - - extractResponse(ctx, &httpRes) +// DownloadStream mocks the DownloadStream method. 
+func (m *MockBlobClient) DownloadStream(ctx context.Context, options *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) { + args := m.Called(ctx, options) + return args.Get(0).(blob.DownloadStreamResponse), args.Error(1) } -func TestAzureBlobGetAppsListShouldNotFail(t *testing.T) { - ctx := context.TODO() - appFrameworkRef := enterpriseApi.AppFrameworkSpec{ - Defaults: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - VolList: []enterpriseApi.VolumeSpec{ - { - Name: "azure_vol1", - Endpoint: "https://mystorageaccount.blob.core.windows.net", - Path: "appscontainer1", - SecretRef: "blob-secret", - Type: "blob", - Provider: "azure", - }, - }, - AppSources: []enterpriseApi.AppSourceSpec{ - { - Name: "adminApps", - Location: "adminAppsRepo", - AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - }, - }, - } - - // Initialize clients - azureBlobClient := &AzureBlobClient{} - mclient := spltest.MockHTTPClient{} - - // Add handler for mock client(handles secrets case initially) - wantRequest, _ := http.NewRequest("GET", "https://mystorageaccount.blob.core.windows.net/appscontainer1?prefix=adminAppsRepo&restype=container&comp=list&include=snapshots&include=metadata", nil) - respdata := &EnumerationResults{ - Blobs: Blobs{ - Blob: []Blob{ - { - Properties: ContainerProperties{ - CreationTime: time.Now().UTC().Format(http.TimeFormat), - LastModified: time.Now().UTC().Format(http.TimeFormat), - ETag: "abcd", - ContentLength: fmt.Sprint(64), +// TestAzureBlobClient_GetAppsList_SharedKey tests the GetAppsList method using Shared Key authentication. +func TestAzureBlobClient_GetAppsList_SharedKey(t *testing.T) { + // Initialize mocks. + mockContainerClient := new(MockContainerClient) + + // Create a runtime pager that returns the mockListResponse. 
+ // Create a runtime pager for simulating paginated blob listing + runtimePager := runtime.NewPager(runtime.PagingHandler[azblob.ListBlobsFlatResponse]{ + More: func(resp azblob.ListBlobsFlatResponse) bool { + // If resp is zero value (before first fetch), we have more pages + if resp.Segment == nil && resp.NextMarker == nil { + return true + } + // If NextMarker is not empty, we have more pages + if resp.NextMarker != nil && *resp.NextMarker != "" { + return true + } + // No more pages + return false + }, + Fetcher: func(ctx context.Context, cur *azblob.ListBlobsFlatResponse) (azblob.ListBlobsFlatResponse, error) { + if cur == nil { + // Simulate the first page of blobs + return azblob.ListBlobsFlatResponse{ + ListBlobsFlatSegmentResponse: container.ListBlobsFlatSegmentResponse{ + ContainerName: to.Ptr("test-container"), + ServiceEndpoint: to.Ptr("https://test.blob.core.windows.net/"), + MaxResults: to.Ptr(int32(1)), + Segment: &container.BlobFlatListSegment{ + BlobItems: []*container.BlobItem{ + { + Name: to.Ptr("blob1"), + Properties: &container.BlobProperties{ + ETag: to.Ptr(azcore.ETag("etag1")), + LastModified: to.Ptr(time.Now()), + ContentLength: to.Ptr(int64(100)), + }, + }, + { + Name: to.Ptr("blob2"), + Properties: &container.BlobProperties{ + ETag: to.Ptr(azcore.ETag("etag2")), + LastModified: to.Ptr(time.Now()), + ContentLength: to.Ptr(int64(200)), + }, + }, + }, + }, + NextMarker: nil, }, - }, - }, - }, - } - mrespdata, _ := xml.Marshal(respdata) - mclient.AddHandler(wantRequest, 200, string(mrespdata), nil) - - // Get App source and volume from spec - appSource := appFrameworkRef.AppSources[0] - vol, err := GetAppSrcVolume(ctx, appSource, &appFrameworkRef) - if err != nil { - t.Errorf("Unable to get volume for app source : %s", appSource.Name) - } - - // Update the GetRemoteDataClient function pointer - getClientWrapper := RemoteDataClientsMap[vol.Provider] - getClientWrapper.SetRemoteDataClientFuncPtr(ctx, vol.Provider, NewMockAzureBlobClient) - - // Update the GetRemoteDataClientInit function pointer - initFn := func(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} { - return &mclient - } - getClientWrapper.SetRemoteDataClientInitFuncPtr(ctx, vol.Provider, initFn) - - // Init azure blob client - getRemoteDataClientFn := getClientWrapper.GetRemoteDataClientInitFuncPtr(ctx) - azureBlobClient.HTTPClient = getRemoteDataClientFn(ctx, "us-west-2", "abcd", "1234").(*spltest.MockHTTPClient) - azureBlobClient.BucketName = vol.Path - azureBlobClient.Prefix = appSource.Location - azureBlobClient.Endpoint = vol.Endpoint - - // Test Listing apps with secrets - azureBlobClient.StorageAccountName = vol.Path - azureBlobClient.SecretAccessKey = "abcd" - - respList, err := azureBlobClient.GetAppsList(ctx) - if err != nil { - t.Errorf("GetAppsList should not return nil") - } - - if len(respList.Objects) != 1 { - t.Errorf("GetAppsList should have returned 1 blob object") - } - - // Out of two blobs one has Incorrect last modified time so the - // list should return only one blob - respdata = &EnumerationResults{ - Blobs: Blobs{ - Blob: []Blob{ - { - Properties: ContainerProperties{ - CreationTime: time.Now().UTC().Format(http.TimeFormat), - LastModified: fmt.Sprint(time.Now()), - ETag: "etag1", - ContentLength: fmt.Sprint(64), - }, - }, - { - Properties: ContainerProperties{ - CreationTime: time.Now().UTC().Format(http.TimeFormat), - LastModified: time.Now().UTC().Format(http.TimeFormat), - ETag: "etag2", - ContentLength: fmt.Sprint(64), - }, - }, - }, - }, - } - 
mrespdata, _ = xml.Marshal(respdata) - mclient.AddHandler(wantRequest, 200, string(mrespdata), nil) - // GetAppsList doesn't return error as we move onto the next blob - resp, err := azureBlobClient.GetAppsList(ctx) - if err != nil { - t.Errorf("Did not expect error but one blob should have been returned") - } - //check only one blob is returned as it has correct lastmodified date - if len(resp.Objects) != 1 { - t.Errorf("Expected only one blob to be returned") - } - - // GetAppsList covering code for incorrect content length - respdata.Blobs.Blob[0].Properties.ContentLength = "09999999999999999999" - respdata.Blobs.Blob[0].Properties.LastModified = time.Now().UTC().Format(http.TimeFormat) - mrespdata, _ = xml.Marshal(respdata) - mclient.AddHandler(wantRequest, 200, string(mrespdata), nil) - resp, err = azureBlobClient.GetAppsList(ctx) - if err != nil { - t.Errorf("Did not expect error but one blob should have been returned") - } - - // Test Listing Apps with IAM - azureBlobClient.StorageAccountName = "" - azureBlobClient.SecretAccessKey = "" - wantRequest, _ = http.NewRequest("GET", "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2021-10-01&resource=https%3A%2F%2Fstorage.azure.com%2F", nil) - respTokenData := &TokenResponse{ - AccessToken: "acctoken", - ClientID: "ClientId", - } - mrespdata, _ = json.Marshal(respTokenData) - mclient.AddHandler(wantRequest, 200, string(mrespdata), nil) - - _, err = azureBlobClient.GetAppsList(ctx) - if err != nil { - t.Errorf("GetAppsList should not return nil") - } - + }, nil + } + // Simulate no more pages + return azblob.ListBlobsFlatResponse{}, nil + }, + }) + + // Setup mock behavior to return the pager. + mockContainerClient.On("NewListBlobsFlatPager", mock.Anything).Return(runtimePager) + + // Initialize AzureBlobClient with the mock container client. + azureClient := &AzureBlobClient{ + BucketName: "test-container", + StorageAccountName: "test-account", + Prefix: "", + StartAfter: "", + Endpoint: "", + ContainerClient: mockContainerClient, + CredentialType: CredentialTypeSharedKey, + } + + // Execute GetAppsList. + ctx := context.Background() + resp, err := azureClient.GetAppsList(ctx) + + // Assertions. + require.NoError(t, err) + require.Len(t, resp.Objects, 2) + require.Equal(t, "blob1", *resp.Objects[0].Key) + require.Equal(t, "blob2", *resp.Objects[1].Key) + + // Verify that all expectations were met. 
+ mockContainerClient.AssertExpectations(t) } -func TestAzureBlobGetAppsListShouldFail(t *testing.T) { - ctx := context.TODO() - appFrameworkRef := enterpriseApi.AppFrameworkSpec{ - Defaults: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - VolList: []enterpriseApi.VolumeSpec{ - { - Name: "azure_vol1", - Endpoint: "https://mystorageaccount.blob.core.windows.net", - Path: "appscontainer1", - SecretRef: "blob-secret", - Type: "blob", - Provider: "azure", - }, - }, - AppSources: []enterpriseApi.AppSourceSpec{ - { - Name: "adminApps", - Location: "adminAppsRepo", - AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - }, - }, - } - - // Initialize clients - azureBlobClient := &AzureBlobClient{} - mclient := spltest.MockHTTPClient{} - - // Add handler for mock client(handles secrets case initially) - wantRequest, _ := http.NewRequest("GET", "https://mystorageaccount.blob.core.windows.net/appscontainer1?prefix=adminAppsRepo&restype=container&comp=list&include=snapshots&include=metadata", nil) - respdata := &EnumerationResults{ - Blobs: Blobs{ - Blob: []Blob{ - { - Properties: ContainerProperties{ - CreationTime: time.Now().UTC().Format(http.TimeFormat), - LastModified: time.Now().UTC().Format(http.TimeFormat), - ETag: "abcd", - ContentLength: fmt.Sprint(64), +// TestAzureBlobClient_GetAppsList_AzureAD tests the GetAppsList method using Azure AD authentication. +func TestAzureBlobClient_GetAppsList_AzureAD(t *testing.T) { + // Initialize mocks. + mockContainerClient := new(MockContainerClient) + + // Create a runtime pager for simulating paginated blob listing + runtimePager := runtime.NewPager(runtime.PagingHandler[azblob.ListBlobsFlatResponse]{ + More: func(resp azblob.ListBlobsFlatResponse) bool { + // If resp is zero value (before first fetch), we have more pages + if resp.Segment == nil && resp.NextMarker == nil { + return true + } + // If NextMarker is not empty, we have more pages + if resp.NextMarker != nil && *resp.NextMarker != "" { + return true + } + // No more pages + return false + }, + Fetcher: func(ctx context.Context, cur *azblob.ListBlobsFlatResponse) (azblob.ListBlobsFlatResponse, error) { + if cur == nil { + // Simulate the first page of blobs + return azblob.ListBlobsFlatResponse{ + ListBlobsFlatSegmentResponse: container.ListBlobsFlatSegmentResponse{ + ContainerName: to.Ptr("test-container"), + ServiceEndpoint: to.Ptr("https://test.blob.core.windows.net/"), + MaxResults: to.Ptr(int32(1)), + Segment: &container.BlobFlatListSegment{ + BlobItems: []*container.BlobItem{ + { + Name: to.Ptr("blob3"), + Properties: &container.BlobProperties{ + ETag: to.Ptr(azcore.ETag("etag3")), + LastModified: to.Ptr(time.Now()), + ContentLength: to.Ptr(int64(100)), + }, + }, + }, + }, + NextMarker: nil, }, - }, - }, - }, - } - mrespdata, _ := xml.Marshal(respdata) - mclient.AddHandler(wantRequest, 200, string(mrespdata), nil) - - // Get App source and volume from spec - appSource := appFrameworkRef.AppSources[0] - vol, err := GetAppSrcVolume(ctx, appSource, &appFrameworkRef) - if err != nil { - t.Errorf("Unable to get volume for app source : %s", appSource.Name) - } - - // Update the GetRemoteDataClient function pointer - getClientWrapper := RemoteDataClientsMap[vol.Provider] - getClientWrapper.SetRemoteDataClientFuncPtr(ctx, vol.Provider, NewMockAzureBlobClient) - - // Update the GetRemoteDataClientInit function pointer - initFn := func(ctx context.Context, region, 
accessKeyID, secretAccessKey string) interface{} { - return &mclient - } - getClientWrapper.SetRemoteDataClientInitFuncPtr(ctx, vol.Provider, initFn) - - // Init azure blob client - getRemoteDataClientFn := getClientWrapper.GetRemoteDataClientInitFuncPtr(ctx) - azureBlobClient.HTTPClient = getRemoteDataClientFn(ctx, "us-west-2", "abcd", "1234").(*spltest.MockHTTPClient) - azureBlobClient.BucketName = vol.Path - azureBlobClient.Prefix = appSource.Location - - // Test Listing apps with secrets but bad end point - azureBlobClient.StorageAccountName = vol.Path - azureBlobClient.SecretAccessKey = "abcd" - azureBlobClient.Endpoint = string(invalidUrlByteArray) - _, err = azureBlobClient.GetAppsList(ctx) - if err == nil { - t.Errorf("Expected error for invalid endpoint") - } - - // Test Listing apps with secrets but bad end point - azureBlobClient.StorageAccountName = vol.Path - azureBlobClient.SecretAccessKey = "abcd" - azureBlobClient.Endpoint = "not-a-valid-end-point" - _, err = azureBlobClient.GetAppsList(ctx) - if err == nil { - t.Errorf("Expected error for invalid endpoint") - } - azureBlobClient.Endpoint = vol.Endpoint - // Test error conditions - - // Test error for Ouath request - azureBlobClient.StorageAccountName = "" - azureBlobClient.SecretAccessKey = "" - - _, err = azureBlobClient.GetAppsList(ctx) - if err == nil { - t.Errorf("Expected error for incorrect oauth request") - } - - // Test error for get app list request - mclient.AddHandler(wantRequest, 200, string(mrespdata), nil) - _, err = azureBlobClient.GetAppsList(ctx) - if err == nil { - t.Errorf("Expected error for incorrect get apps list request") - } - - // Test error for extract response - wantRequest, _ = http.NewRequest("GET", "https://mystorageaccount.blob.core.windows.net/appscontainer1?prefix=adminAppsRepo&restype=container&comp=list&include=snapshots&include=metadata", nil) - mclient.AddHandler(wantRequest, 200, string("FailToUnmarshal"), nil) - _, err = azureBlobClient.GetAppsList(ctx) - if err == nil { - t.Errorf("Expected error for incorrect http response from get apps list, unable to unmarshal") - } + }, nil + } + // Simulate no more pages + return azblob.ListBlobsFlatResponse{}, nil + }, + }) + + // Setup mock behavior to return the pager. + mockContainerClient.On("NewListBlobsFlatPager", mock.Anything).Return(runtimePager) + + // Initialize AzureBlobClient with the mock container client. + azureClient := &AzureBlobClient{ + BucketName: "test-container", + StorageAccountName: "test-account", + Prefix: "", + StartAfter: "", + Endpoint: "", + ContainerClient: mockContainerClient, + CredentialType: CredentialTypeAzureAD, + } + + // Execute GetAppsList. + ctx := context.Background() + resp, err := azureClient.GetAppsList(ctx) + + // Assertions. + require.NoError(t, err) + require.Len(t, resp.Objects, 1) + require.Equal(t, "blob3", *resp.Objects[0].Key) + + // Verify that all expectations were met. 
+ mockContainerClient.AssertExpectations(t) } -func TestAzureBlobDownloadAppShouldNotFail(t *testing.T) { - ctx := context.TODO() - appFrameworkRef := enterpriseApi.AppFrameworkSpec{ - Defaults: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - VolList: []enterpriseApi.VolumeSpec{ - { - Name: "azure_vol1", - Endpoint: "https://mystorageaccount.blob.core.windows.net", - Path: "appscontainer1", - SecretRef: "blob-secret", - Type: "blob", - Provider: "azure", - }, - }, - AppSources: []enterpriseApi.AppSourceSpec{ - { - Name: "adminApps", - Location: "adminAppsRepo", - AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - }, - }, - } - - // Initialize clients - azureBlobClient := &AzureBlobClient{} - mclient := spltest.MockHTTPClient{} - - // Add handler for mock client(handles secrets case initially) - wantRequest, _ := http.NewRequest("GET", "https://mystorageaccount.blob.core.windows.net/appscontainer1/adminAppsRepo/app1.tgz", nil) - respdata := "This is a test body of an app1.tgz package. In real use it would be a binary file but for test it is just a string data" - - mclient.AddHandler(wantRequest, 200, respdata, nil) - - // Get App source and volume from spec - appSource := appFrameworkRef.AppSources[0] - vol, err := GetAppSrcVolume(ctx, appSource, &appFrameworkRef) - if err != nil { - t.Errorf("Unable to get volume for app source : %s", appSource.Name) - } - - // Update the GetRemoteDataClient function pointer - getClientWrapper := RemoteDataClientsMap[vol.Provider] - getClientWrapper.SetRemoteDataClientFuncPtr(ctx, vol.Provider, NewMockAzureBlobClient) - - // Update the GetRemoteDataClientInit function pointer - initFn := func(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} { - return &mclient - } - getClientWrapper.SetRemoteDataClientInitFuncPtr(ctx, vol.Provider, initFn) - - // Init azure blob client - getRemoteDataClientFn := getClientWrapper.GetRemoteDataClientInitFuncPtr(ctx) - azureBlobClient.HTTPClient = getRemoteDataClientFn(ctx, "us-west-2", "abcd", "1234").(*spltest.MockHTTPClient) - azureBlobClient.BucketName = vol.Path - azureBlobClient.Prefix = appSource.Location - azureBlobClient.Endpoint = vol.Endpoint - - // Test Download App package with secret - azureBlobClient.StorageAccountName = vol.Path - azureBlobClient.SecretAccessKey = "abcd" - - // Create RemoteDownload request - downloadRequest := RemoteDataDownloadRequest{ - LocalFile: "app1.tgz", - RemoteFile: "adminAppsRepo/app1.tgz", - } - _, err = azureBlobClient.DownloadApp(ctx, downloadRequest) - if err != nil { - t.Errorf("DownloadApps should not return nil") - } - - downloadedAppData, err := os.ReadFile(downloadRequest.LocalFile) - if err != nil { - t.Errorf("DownloadApps failed reading downloaded file. 
Error is: %s", err.Error()) - } - - if strings.Compare(respdata, string(downloadedAppData)) != 0 { - t.Errorf("DownloadApps failed as it did not download correct data") - } - - os.Remove(downloadRequest.LocalFile) - - // Test Download App package with IAM - azureBlobClient.StorageAccountName = "" - azureBlobClient.SecretAccessKey = "" - wantRequest, _ = http.NewRequest("GET", "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2021-10-01&resource=https%3A%2F%2Fstorage.azure.com%2F", nil) - respTokenData := &TokenResponse{ - AccessToken: "acctoken", - ClientID: "ClientId", - } - mrespdata, _ := json.Marshal(respTokenData) - mclient.AddHandler(wantRequest, 200, string(mrespdata), nil) - - _, err = azureBlobClient.DownloadApp(ctx, downloadRequest) - if err != nil { - t.Errorf("DownloadApps should not return nil") - } - - if strings.Compare(respdata, string(downloadedAppData)) != 0 { - t.Errorf("DownloadApps failed usign IAM as it did not download correct data") - } - - os.Remove(downloadRequest.LocalFile) +// TestAzureBlobClient_GetAppsList_Error tests the GetAppsList method handling an error scenario. +func TestAzureBlobClient_GetAppsList_Error(t *testing.T) { + // Initialize mocks. + mockContainerClient := new(MockContainerClient) + + // Create a runtime pager for simulating paginated blob listing + runtimePager := runtime.NewPager(runtime.PagingHandler[azblob.ListBlobsFlatResponse]{ + More: func(resp azblob.ListBlobsFlatResponse) bool { + // If resp is zero value (before first fetch), we have more pages + if resp.Segment == nil && resp.NextMarker == nil { + return true + } + // If NextMarker is not empty, we have more pages + if resp.NextMarker != nil && *resp.NextMarker != "" { + return true + } + // No more pages + return false + }, + Fetcher: func(ctx context.Context, cur *azblob.ListBlobsFlatResponse) (azblob.ListBlobsFlatResponse, error) { + return container.ListBlobsFlatResponse{}, fmt.Errorf("failed to list blobs") + }, + }) + + // Setup mock behavior to return the pager. + mockContainerClient.On("NewListBlobsFlatPager", mock.Anything).Return(runtimePager) + + // Initialize AzureBlobClient with the mock container client. + azureClient := &AzureBlobClient{ + BucketName: "test-container", + StorageAccountName: "test-account", + Prefix: "", + StartAfter: "", + Endpoint: "", + ContainerClient: mockContainerClient, + CredentialType: CredentialTypeAzureAD, + } + + // Execute GetAppsList. + ctx := context.Background() + resp, err := azureClient.GetAppsList(ctx) + + // Assertions. + require.Error(t, err) + require.Equal(t, RemoteDataListResponse{}, resp) + + // Verify that all expectations were met. 
+ mockContainerClient.AssertExpectations(t) } -func TestAzureBlobDownloadAppShouldFail(t *testing.T) { - ctx := context.TODO() - appFrameworkRef := enterpriseApi.AppFrameworkSpec{ - Defaults: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - VolList: []enterpriseApi.VolumeSpec{ - { - Name: "azure_vol1", - Endpoint: "https://mystorageaccount.blob.core.windows.net", - Path: "appscontainer1", - SecretRef: "blob-secret", - Type: "blob", - Provider: "azure", - }, - }, - AppSources: []enterpriseApi.AppSourceSpec{ - { - Name: "adminApps", - Location: "adminAppsRepo", - AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - }, - }, - } - - // Initialize clients - azureBlobClient := &AzureBlobClient{} - mclient := spltest.MockHTTPClient{} - - // Add handler for mock client(handles secrets case initially) - wantRequest, _ := http.NewRequest("GET", "https://mystorageaccount.blob.core.windows.net/appscontainer1/adminAppsRepo/app1.tgz", nil) - respdata := "This is a test body of an app1.tgz package. In real use it would be a binary file but for test it is just a string data" - - mclient.AddHandler(wantRequest, 200, respdata, nil) +// TestAzureBlobClient_DownloadApp_SharedKey tests the DownloadApp method using Shared Key authentication. +func TestAzureBlobClient_DownloadApp_SharedKey(t *testing.T) { + // Initialize mocks. + mockContainerClient := new(MockContainerClient) + mockBlobClient := new(MockBlobClient) - // Get App source and volume from spec - appSource := appFrameworkRef.AppSources[0] - vol, err := GetAppSrcVolume(ctx, appSource, &appFrameworkRef) - if err != nil { - t.Errorf("Unable to get volume for app source : %s", appSource.Name) - } - - // Update the GetRemoteDataClient function pointer - getClientWrapper := RemoteDataClientsMap[vol.Provider] - getClientWrapper.SetRemoteDataClientFuncPtr(ctx, vol.Provider, NewMockAzureBlobClient) - - // Update the GetRemoteDataClientInit function pointer - initFn := func(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} { - return &mclient - } - getClientWrapper.SetRemoteDataClientInitFuncPtr(ctx, vol.Provider, initFn) - - // Init azure blob client - getRemoteDataClientFn := getClientWrapper.GetRemoteDataClientInitFuncPtr(ctx) - azureBlobClient.HTTPClient = getRemoteDataClientFn(ctx, "us-west-2", "abcd", "1234").(*spltest.MockHTTPClient) - azureBlobClient.BucketName = vol.Path - azureBlobClient.Prefix = appSource.Location - azureBlobClient.Endpoint = vol.Endpoint - - // Test Download App package with secret - azureBlobClient.StorageAccountName = vol.Path - azureBlobClient.SecretAccessKey = "abcd" - - // Create RemoteDownload request - downloadRequest := RemoteDataDownloadRequest{ - LocalFile: "app1.tgz", - RemoteFile: "adminAppsRepo/app1.tgz", - } - - // Test error conditions - - // Test error for http request to download - azureBlobClient.Endpoint = "dummy" - _, err = azureBlobClient.DownloadApp(ctx, downloadRequest) - if err == nil { - t.Errorf("Expected error for incorrect oauth request") - } - - // Test error for http request to download - azureBlobClient.Endpoint = string(invalidUrlByteArray) - _, err = azureBlobClient.DownloadApp(ctx, downloadRequest) - if err == nil { - t.Errorf("Expected error for incorrect oauth request") - } - - // Test empty local file - downloadRequest.LocalFile = "" - _, err = azureBlobClient.DownloadApp(ctx, downloadRequest) - if err == nil { - t.Errorf("Expected error for 
incorrect oauth request") - } -} - -func TestAzureBlobGetAppsListShouldFailBadSecret(t *testing.T) { - ctx := context.TODO() - appFrameworkRef := enterpriseApi.AppFrameworkSpec{ - Defaults: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - VolList: []enterpriseApi.VolumeSpec{ - { - Name: "azure_vol1", - Endpoint: "https://mystorageaccount.blob.core.windows.net", - Path: "appscontainer1", - SecretRef: "blob-secret", - Type: "blob", - Provider: "azure", - }, - }, - AppSources: []enterpriseApi.AppSourceSpec{ - { - Name: "adminApps", - Location: "adminAppsRepo", - AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - }, - }, - } - - // Initialize clients - azureBlobClient := &AzureBlobClient{} - mclient := spltest.MockHTTPClient{} - - // Add handler for mock client(handles secrets case initially) - wantRequest, _ := http.NewRequest("GET", "https://mystorageaccount.blob.core.windows.net/appscontainer1?prefix=adminAppsRepo&restype=container&comp=list&include=snapshots&include=metadata", nil) - - mclient.AddHandler(wantRequest, 403, "unauthorized", nil) - - // Get App source and volume from spec - appSource := appFrameworkRef.AppSources[0] - vol, err := GetAppSrcVolume(ctx, appSource, &appFrameworkRef) - if err != nil { - t.Errorf("Unable to get volume for app source : %s", appSource.Name) - } - - // Update the GetRemoteDataClient function pointer - getClientWrapper := RemoteDataClientsMap[vol.Provider] - getClientWrapper.SetRemoteDataClientFuncPtr(ctx, vol.Provider, NewMockAzureBlobClient) - - // Update the GetRemoteDataClientInit function pointer - initFn := func(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} { - return &mclient - } - getClientWrapper.SetRemoteDataClientInitFuncPtr(ctx, vol.Provider, initFn) - - // Init azure blob client - getRemoteDataClientFn := getClientWrapper.GetRemoteDataClientInitFuncPtr(ctx) - azureBlobClient.HTTPClient = getRemoteDataClientFn(ctx, "us-west-2", "abcd", "1234").(*spltest.MockHTTPClient) - azureBlobClient.BucketName = vol.Path - azureBlobClient.Prefix = appSource.Location - azureBlobClient.Endpoint = vol.Endpoint - - // Test Listing apps with secrets - azureBlobClient.StorageAccountName = vol.Path - azureBlobClient.SecretAccessKey = "abcd" - - respList, err := azureBlobClient.GetAppsList(ctx) - if err == nil { - t.Errorf("GetAppsList should return err") - } - - if err.Error() != "error authorizing the rest call. check your IAM/secret configuration" { - t.Errorf("GetAppsList should return authorization error") - } - - // authorizing the rest call. 
check your IAM/secret configuration - - if len(respList.Objects) != 0 { - t.Errorf("GetAppsList should not return any response objects") - } -} - -// Test that although the rest call returned 200 response code -// but the response body was not as expected (unmarshelled failed) -func TestAzureBlobGetAppsListShouldFailBadXmlResponse(t *testing.T) { - ctx := context.TODO() - appFrameworkRef := enterpriseApi.AppFrameworkSpec{ - Defaults: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - VolList: []enterpriseApi.VolumeSpec{ - { - Name: "azure_vol1", - Endpoint: "https://mystorageaccount.blob.core.windows.net", - Path: "appscontainer1", - SecretRef: "blob-secret", - Type: "blob", - Provider: "azure", - }, - }, - AppSources: []enterpriseApi.AppSourceSpec{ - { - Name: "adminApps", - Location: "adminAppsRepo", - AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - }, - }, - } - - // Initialize clients - azureBlobClient := &AzureBlobClient{} - mclient := spltest.MockHTTPClient{} - - // Add handler for mock client(handles secrets case initially) - wantRequest, _ := http.NewRequest("GET", "https://mystorageaccount.blob.core.windows.net/appscontainer1?prefix=adminAppsRepo&restype=container&comp=list&include=snapshots&include=metadata", nil) - - mclient.AddHandler(wantRequest, 200, "I am not a valid app list response", nil) - - // Get App source and volume from spec - appSource := appFrameworkRef.AppSources[0] - vol, err := GetAppSrcVolume(ctx, appSource, &appFrameworkRef) - if err != nil { - t.Errorf("Unable to get volume for app source : %s", appSource.Name) - } - - // Update the GetRemoteDataClient function pointer - getClientWrapper := RemoteDataClientsMap[vol.Provider] - getClientWrapper.SetRemoteDataClientFuncPtr(ctx, vol.Provider, NewMockAzureBlobClient) - - // Update the GetRemoteDataClientInit function pointer - initFn := func(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} { - return &mclient - } - getClientWrapper.SetRemoteDataClientInitFuncPtr(ctx, vol.Provider, initFn) - - // Init azure blob client - getRemoteDataClientFn := getClientWrapper.GetRemoteDataClientInitFuncPtr(ctx) - azureBlobClient.HTTPClient = getRemoteDataClientFn(ctx, "us-west-2", "abcd", "1234").(*spltest.MockHTTPClient) - azureBlobClient.BucketName = vol.Path - azureBlobClient.Prefix = appSource.Location - azureBlobClient.Endpoint = vol.Endpoint - - // Test Listing apps with secrets - azureBlobClient.StorageAccountName = vol.Path - azureBlobClient.SecretAccessKey = "abcd" - - respList, err := azureBlobClient.GetAppsList(ctx) - if err == nil { - t.Errorf("GetAppsList should return err") - } - - // Expecting error : "expected element type but have ..." 
- if !strings.Contains(err.Error(), "expected element type but have") { - t.Errorf("GetAppsList should return that it could not extract the app packages list") - } - - if len(respList.Objects) != 0 { - t.Errorf("GetAppsList should not return any response objects") - } -} - -func TestAzureBlobGetAppsListShouldFailNoIdentity(t *testing.T) { - ctx := context.TODO() - appFrameworkRef := enterpriseApi.AppFrameworkSpec{ - Defaults: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - VolList: []enterpriseApi.VolumeSpec{ - { - Name: "azure_vol1", - Endpoint: "https://mystorageaccount.blob.core.windows.net", - Path: "appscontainer1", - Type: "blob", - Provider: "azure", - }, - }, - AppSources: []enterpriseApi.AppSourceSpec{ - { - Name: "adminApps", - Location: "adminAppsRepo", - AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - }, - }, - } - - // Initialize clients - azureBlobClient := &AzureBlobClient{} - mclient := spltest.MockHTTPClient{} - - // mock IAM token fetch call to a failed response - // no valid managed identity found - wantRequestIAMTokenFetch, _ := http.NewRequest("GET", "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2021-10-01&resource=https%3A%2F%2Fstorage.azure.com%2F", nil) - - mclient.AddHandler(wantRequestIAMTokenFetch, 400, "No managed identity", nil) - - // Add mock for the azure rest call for list apps - wantRequest, _ := http.NewRequest("GET", "https://mystorageaccount.blob.core.windows.net/appscontainer1?prefix=adminAppsRepo&restype=container&comp=list&include=snapshots&include=metadata", nil) - - mclient.AddHandler(wantRequest, 403, "unauthorized", nil) - - // Get App source and volume from spec - appSource := appFrameworkRef.AppSources[0] - vol, err := GetAppSrcVolume(ctx, appSource, &appFrameworkRef) - if err != nil { - t.Errorf("Unable to get volume for app source : %s", appSource.Name) - } - - // Update the GetRemoteDataClient function pointer - getClientWrapper := RemoteDataClientsMap[vol.Provider] - getClientWrapper.SetRemoteDataClientFuncPtr(ctx, vol.Provider, NewMockAzureBlobClient) - - // Update the GetRemoteDataClientInit function pointer - initFn := func(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} { - return &mclient - } - getClientWrapper.SetRemoteDataClientInitFuncPtr(ctx, vol.Provider, initFn) - - // Init azure blob client - getRemoteDataClientFn := getClientWrapper.GetRemoteDataClientInitFuncPtr(ctx) - azureBlobClient.HTTPClient = getRemoteDataClientFn(ctx, "us-west-2", "abcd", "1234").(*spltest.MockHTTPClient) - azureBlobClient.BucketName = vol.Path - azureBlobClient.Prefix = appSource.Location - azureBlobClient.Endpoint = vol.Endpoint - - // Test Listing apps with secrets - azureBlobClient.StorageAccountName = vol.Path - azureBlobClient.SecretAccessKey = "abcd" - - respList, err := azureBlobClient.GetAppsList(ctx) - if err == nil { - t.Errorf("GetAppsList should return err") - } - - if err.Error() != "error authorizing the rest call. check your IAM/secret configuration" { - t.Errorf("GetAppsList should return authorization error") - } - - // authorizing the rest call. 
check your IAM/secret configuration - - if len(respList.Objects) != 0 { - t.Errorf("GetAppsList should not return any response objects") - } - - mclient.RemoveHandlers() -} - -// check identity is assigned to AKS but it is not authorized -// to access the buckets -func TestAzureBlobGetAppsListShouldFailInvalidIdentity(t *testing.T) { - ctx := context.TODO() - appFrameworkRef := enterpriseApi.AppFrameworkSpec{ - Defaults: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - VolList: []enterpriseApi.VolumeSpec{ - { - Name: "azure_vol1", - Endpoint: "https://mystorageaccount.blob.core.windows.net", - Path: "appscontainer1", - Type: "blob", - Provider: "azure", - }, - }, - AppSources: []enterpriseApi.AppSourceSpec{ - { - Name: "adminApps", - Location: "adminAppsRepo", - AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - }, + // Define the blob download response. + mockDownloadResponse := blob.DownloadStreamResponse{ + DownloadResponse: blob.DownloadResponse{ + Body: io.NopCloser(strings.NewReader("mock blob content")), }, } - // Initialize clients - azureBlobClient := &AzureBlobClient{} - mclient := spltest.MockHTTPClient{} + // Setup mock behavior. + mockContainerClient.On("NewBlobClient", "test-file-sharedkey.txt").Return(mockBlobClient) + mockBlobClient.On("DownloadStream", mock.Anything, mock.Anything).Return(mockDownloadResponse, nil) - // Identity call return a token - that means AKS cluster has an identity configured. - wantRequest, _ := http.NewRequest("GET", "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2021-10-01&resource=https%3A%2F%2Fstorage.azure.com%2F", nil) - respTokenData := &TokenResponse{ - AccessToken: "acctoken", - ClientID: "ClientId", + // Initialize AzureBlobClient with the mock container client. + azureClient := &AzureBlobClient{ + BucketName: "test-container", + StorageAccountName: "test-account", + Prefix: "", + StartAfter: "", + Endpoint: "", + ContainerClient: mockContainerClient, + CredentialType: CredentialTypeSharedKey, } - mrespdata, _ := json.Marshal(respTokenData) - mclient.AddHandler(wantRequest, 200, string(mrespdata), nil) - - // Add mock for the azure rest call for list apps - wantRequest, _ = http.NewRequest("GET", "https://mystorageaccount.blob.core.windows.net/appscontainer1?prefix=adminAppsRepo&restype=container&comp=list&include=snapshots&include=metadata", nil) - // Expect the identity does not have authorization to access the buckets - mclient.AddHandler(wantRequest, 403, "identity not authorized", nil) + // Create a temporary file to simulate download. + tempFile, err := os.CreateTemp("", "test-download-sharedkey") + require.NoError(t, err) + defer os.Remove(tempFile.Name()) - // Get App source and volume from spec - appSource := appFrameworkRef.AppSources[0] - vol, err := GetAppSrcVolume(ctx, appSource, &appFrameworkRef) - if err != nil { - t.Errorf("Unable to get volume for app source : %s", appSource.Name) + // Execute DownloadApp. + ctx := context.Background() + req := RemoteDataDownloadRequest{ + LocalFile: tempFile.Name(), + RemoteFile: "test-file-sharedkey.txt", } + success, err := azureClient.DownloadApp(ctx, req) - // Update the GetRemoteDataClient function pointer - getClientWrapper := RemoteDataClientsMap[vol.Provider] - getClientWrapper.SetRemoteDataClientFuncPtr(ctx, vol.Provider, NewMockAzureBlobClient) + // Assertions. 
+ require.NoError(t, err) + require.True(t, success) - // Update the GetRemoteDataClientInit function pointer - initFn := func(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} { - return &mclient - } - getClientWrapper.SetRemoteDataClientInitFuncPtr(ctx, vol.Provider, initFn) - - // Init azure blob client - getRemoteDataClientFn := getClientWrapper.GetRemoteDataClientInitFuncPtr(ctx) - azureBlobClient.HTTPClient = getRemoteDataClientFn(ctx, "us-west-2", "abcd", "1234").(*spltest.MockHTTPClient) - azureBlobClient.BucketName = vol.Path - azureBlobClient.Prefix = appSource.Location - azureBlobClient.Endpoint = vol.Endpoint - - respList, err := azureBlobClient.GetAppsList(ctx) - if err == nil { - t.Errorf("GetAppsList should return err") - } + // Verify file content. + fileContent, err := os.ReadFile(tempFile.Name()) + require.NoError(t, err) + require.Equal(t, "mock blob content", string(fileContent)) - if err.Error() != "error authorizing the rest call. check your IAM/secret configuration" { - t.Errorf("GetAppsList should return authorization error") - } - - // authorizing the rest call. check your IAM/secret configuration - - if len(respList.Objects) != 0 { - t.Errorf("GetAppsList should not return any response objects") - } - mclient.RemoveHandlers() + // Verify that all expectations were met. + mockContainerClient.AssertExpectations(t) + mockBlobClient.AssertExpectations(t) } -func TestAzureBlobDownloadFailBadSecret(t *testing.T) { - ctx := context.TODO() - appFrameworkRef := enterpriseApi.AppFrameworkSpec{ - Defaults: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - VolList: []enterpriseApi.VolumeSpec{ - { - Name: "azure_vol1", - Endpoint: "https://mystorageaccount.blob.core.windows.net", - Path: "appscontainer1", - SecretRef: "blob-secret", - Type: "blob", - Provider: "azure", - }, - }, - AppSources: []enterpriseApi.AppSourceSpec{ - { - Name: "adminApps", - Location: "adminAppsRepo", - AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - }, - }, - } - - // Initialize clients - azureBlobClient := &AzureBlobClient{} - mclient := spltest.MockHTTPClient{} - - // Add handler for mock client(handles secrets case initially) - wantRequest, _ := http.NewRequest("GET", "https://mystorageaccount.blob.core.windows.net/appscontainer1/adminAppsRepo/app1.tgz", nil) - - mclient.AddHandler(wantRequest, 403, "auth failed dummy response", nil) - - // Get App source and volume from spec - appSource := appFrameworkRef.AppSources[0] - vol, err := GetAppSrcVolume(ctx, appSource, &appFrameworkRef) - if err != nil { - t.Errorf("Unable to get volume for app source : %s", appSource.Name) - } - - // Update the GetRemoteDataClient function pointer - getClientWrapper := RemoteDataClientsMap[vol.Provider] - getClientWrapper.SetRemoteDataClientFuncPtr(ctx, vol.Provider, NewMockAzureBlobClient) - - // Update the GetRemoteDataClientInit function pointer - initFn := func(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} { - return &mclient - } - getClientWrapper.SetRemoteDataClientInitFuncPtr(ctx, vol.Provider, initFn) - - // Init azure blob client - getRemoteDataClientFn := getClientWrapper.GetRemoteDataClientInitFuncPtr(ctx) - azureBlobClient.HTTPClient = getRemoteDataClientFn(ctx, "us-west-2", "abcd", "1234").(*spltest.MockHTTPClient) - azureBlobClient.BucketName = vol.Path - azureBlobClient.Prefix = appSource.Location - 
azureBlobClient.Endpoint = vol.Endpoint - - // Test Download App package with secret - azureBlobClient.StorageAccountName = "mystorageaccount" - azureBlobClient.SecretAccessKey = "abcd" - - // Create RemoteDownload request - downloadRequest := RemoteDataDownloadRequest{ - LocalFile: "app1.tgz", - RemoteFile: "adminAppsRepo/app1.tgz", - } - resp, err := azureBlobClient.DownloadApp(ctx, downloadRequest) - if err == nil { - t.Errorf("DownloadApps should return error") - } - if resp == true { - t.Errorf("DownloadApps should return false") - } - if err.Error() != "error authorizing the rest call. check your IAM/secret configuration" { - t.Errorf("DownloadApp should return authorization error") - } - mclient.RemoveHandlers() -} +// TestAzureBlobClient_DownloadApp_AzureAD tests the DownloadApp method using Azure AD authentication. +func TestAzureBlobClient_DownloadApp_AzureAD(t *testing.T) { + // Initialize mocks. + mockContainerClient := new(MockContainerClient) + mockBlobClient := new(MockBlobClient) -func TestAzureBlobDownloadAppShouldFailNoIdentity(t *testing.T) { - ctx := context.TODO() - appFrameworkRef := enterpriseApi.AppFrameworkSpec{ - Defaults: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - VolList: []enterpriseApi.VolumeSpec{ - { - Name: "azure_vol1", - Endpoint: "https://mystorageaccount.blob.core.windows.net", - Path: "appscontainer1", - Type: "blob", - Provider: "azure", - }, - }, - AppSources: []enterpriseApi.AppSourceSpec{ - { - Name: "adminApps", - Location: "adminAppsRepo", - AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - }, + // Define the blob download response. + mockDownloadResponse := blob.DownloadStreamResponse{ + DownloadResponse: blob.DownloadResponse{ + Body: io.NopCloser(strings.NewReader("mock blob content AD")), }, } - // Initialize clients - azureBlobClient := &AzureBlobClient{} - mclient := spltest.MockHTTPClient{} - - //mock IAM token fetch call to a failed response - //no valid managed identity found - wantRequestIAMTokenFetch, _ := http.NewRequest("GET", "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2021-10-01&resource=https%3A%2F%2Fstorage.azure.com%2F", nil) + // Setup mock behavior. + mockContainerClient.On("NewBlobClient", "test-file-azuread.txt").Return(mockBlobClient) + mockBlobClient.On("DownloadStream", mock.Anything, mock.Anything).Return(mockDownloadResponse, nil) - mclient.AddHandler(wantRequestIAMTokenFetch, 400, "No managed identity", nil) - - // Get App source and volume from spec - appSource := appFrameworkRef.AppSources[0] - vol, err := GetAppSrcVolume(ctx, appSource, &appFrameworkRef) - if err != nil { - t.Errorf("Unable to get volume for app source : %s", appSource.Name) + // Initialize AzureBlobClient with the mock container client. + azureClient := &AzureBlobClient{ + BucketName: "test-container", + StorageAccountName: "test-account", + Prefix: "", + StartAfter: "", + Endpoint: "", + ContainerClient: mockContainerClient, + CredentialType: CredentialTypeAzureAD, } - // Update the GetRemoteDataClient function pointer - getClientWrapper := RemoteDataClientsMap[vol.Provider] - getClientWrapper.SetRemoteDataClientFuncPtr(ctx, vol.Provider, NewMockAzureBlobClient) + // Create a temporary file to simulate download. 
+ tempFile, err := os.CreateTemp("", "test-download-azuread") + require.NoError(t, err) + defer os.Remove(tempFile.Name()) - // Update the GetRemoteDataClientInit function pointer - initFn := func(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} { - return &mclient + // Execute DownloadApp. + ctx := context.Background() + req := RemoteDataDownloadRequest{ + LocalFile: tempFile.Name(), + RemoteFile: "test-file-azuread.txt", } - getClientWrapper.SetRemoteDataClientInitFuncPtr(ctx, vol.Provider, initFn) + success, err := azureClient.DownloadApp(ctx, req) - // Init azure blob client - getRemoteDataClientFn := getClientWrapper.GetRemoteDataClientInitFuncPtr(ctx) - azureBlobClient.HTTPClient = getRemoteDataClientFn(ctx, "us-west-2", "abcd", "1234").(*spltest.MockHTTPClient) - azureBlobClient.BucketName = vol.Path - azureBlobClient.Prefix = appSource.Location - azureBlobClient.Endpoint = vol.Endpoint + // Assertions. + require.NoError(t, err) + require.True(t, success) - // Create RemoteDownload request - downloadRequest := RemoteDataDownloadRequest{ - LocalFile: "app1.tgz", - RemoteFile: "adminAppsRepo/app1.tgz", - } + // Verify file content. + fileContent, err := os.ReadFile(tempFile.Name()) + require.NoError(t, err) + require.Equal(t, "mock blob content AD", string(fileContent)) - resp, err := azureBlobClient.DownloadApp(ctx, downloadRequest) - if err == nil { - t.Errorf("DownloadApps should return error") - } - if resp == true { - t.Errorf("DownloadApps should return false") - } - if err.Error() != "please validate that your cluster is configured to use managed identity" { - t.Errorf("DownloadApp should return authorization error") - } - mclient.RemoveHandlers() + // Verify that all expectations were met. + mockContainerClient.AssertExpectations(t) + mockBlobClient.AssertExpectations(t) } -func TestAzureBlobDownloadAppShouldFailInvalidIdentity(t *testing.T) { - ctx := context.TODO() - appFrameworkRef := enterpriseApi.AppFrameworkSpec{ - Defaults: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - VolList: []enterpriseApi.VolumeSpec{ - { - Name: "azure_vol1", - Endpoint: "https://mystorageaccount.blob.core.windows.net", - Path: "appscontainer1", - Type: "blob", - Provider: "azure", - }, - }, - AppSources: []enterpriseApi.AppSourceSpec{ - { - Name: "adminApps", - Location: "adminAppsRepo", - AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ - VolName: "azure_vol1", - Scope: enterpriseApi.ScopeLocal, - }, - }, - }, - } - - // Initialize clients - azureBlobClient := &AzureBlobClient{} - mclient := spltest.MockHTTPClient{} - - // mock for IAM token fetch is successful - // but later we see that the token does not give - // permission to access the bucket for downloading app package - wantRequest, _ := http.NewRequest("GET", "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2021-10-01&resource=https%3A%2F%2Fstorage.azure.com%2F", nil) - respTokenData := &TokenResponse{ - AccessToken: "acctoken", - ClientID: "ClientId", - } - mrespdata, _ := json.Marshal(respTokenData) - mclient.AddHandler(wantRequest, 200, string(mrespdata), nil) - - // Mock the download rest call to return 403 unauthorized emulating that - // the token did not give permission to read the bucket/app_package - wantRequestDownload, _ := http.NewRequest("GET", "https://mystorageaccount.blob.core.windows.net/appscontainer1/adminAppsRepo/app1.tgz", nil) - - mclient.AddHandler(wantRequestDownload, 403, "auth failed dummy response", 
nil) - - // Get App source and volume from spec - appSource := appFrameworkRef.AppSources[0] - vol, err := GetAppSrcVolume(ctx, appSource, &appFrameworkRef) - if err != nil { - t.Errorf("Unable to get volume for app source : %s", appSource.Name) - } - - // Update the GetRemoteDataClient function pointer - getClientWrapper := RemoteDataClientsMap[vol.Provider] - getClientWrapper.SetRemoteDataClientFuncPtr(ctx, vol.Provider, NewMockAzureBlobClient) - - // Update the GetRemoteDataClientInit function pointer - initFn := func(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} { - return &mclient - } - getClientWrapper.SetRemoteDataClientInitFuncPtr(ctx, vol.Provider, initFn) - - // Init azure blob client - getRemoteDataClientFn := getClientWrapper.GetRemoteDataClientInitFuncPtr(ctx) - azureBlobClient.HTTPClient = getRemoteDataClientFn(ctx, "us-west-2", "abcd", "1234").(*spltest.MockHTTPClient) - azureBlobClient.BucketName = vol.Path - azureBlobClient.Prefix = appSource.Location - azureBlobClient.Endpoint = vol.Endpoint - - // Create RemoteDownload request - downloadRequest := RemoteDataDownloadRequest{ - LocalFile: "app1.tgz", - RemoteFile: "adminAppsRepo/app1.tgz", - } - - resp, err := azureBlobClient.DownloadApp(ctx, downloadRequest) - if err == nil { - t.Errorf("DownloadApps should return error") - } - if resp == true { - t.Errorf("DownloadApps should return false") - } - if err.Error() != "error authorizing the rest call. check your IAM/secret configuration" { - t.Errorf("DownloadApp should return authorization error") - } - mclient.RemoveHandlers() +// TestAzureBlobClient_DownloadApp_Error tests the DownloadApp method handling an error scenario. +func TestAzureBlobClient_DownloadApp_Error(t *testing.T) { + // Initialize mocks. + mockContainerClient := new(MockContainerClient) + mockBlobClient := new(MockBlobClient) + + // Setup mock behavior to return an error. + mockContainerClient.On("NewBlobClient", "nonexistent-file.txt").Return(mockBlobClient) + mockBlobClient.On("DownloadStream", mock.Anything, mock.Anything).Return(blob.DownloadStreamResponse{}, fmt.Errorf("blob not found")) + + // Initialize AzureBlobClient with the mock container client. + azureClient := &AzureBlobClient{ + BucketName: "test-container", + StorageAccountName: "test-account", + Prefix: "", + StartAfter: "", + Endpoint: "", + ContainerClient: mockContainerClient, + CredentialType: CredentialTypeAzureAD, + } + + // Create a temporary file to simulate download. + tempFile, err := os.CreateTemp("", "test-download-error") + require.NoError(t, err) + defer os.Remove(tempFile.Name()) + + // Execute DownloadApp. + ctx := context.Background() + req := RemoteDataDownloadRequest{ + LocalFile: tempFile.Name(), + RemoteFile: "nonexistent-file.txt", + } + success, err := azureClient.DownloadApp(ctx, req) + + // Assertions. + require.Error(t, err) + require.False(t, success) + + // Verify that all expectations were met. + mockContainerClient.AssertExpectations(t) + mockBlobClient.AssertExpectations(t) } diff --git a/pkg/splunk/client/gcpbucketclient.go b/pkg/splunk/client/gcpbucketclient.go new file mode 100644 index 000000000..1bda36d08 --- /dev/null +++ b/pkg/splunk/client/gcpbucketclient.go @@ -0,0 +1,265 @@ +// Copyright (c) 2018-2022 Splunk Inc. +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "io" + "os" + "strings" + + "cloud.google.com/go/storage" + //"golang.org/x/oauth2/google" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// blank assignment to verify that GCSClient implements RemoteDataClient +var _ RemoteDataClient = &GCSClient{} + +// GCSClientInterface defines the interface for GCS client operations +type GCSClientInterface interface { + Bucket(bucketName string) BucketHandleInterface +} + +// GCSClientWrapper wraps the actual GCS client to implement the interface +type GCSClientWrapper struct { + Client *storage.Client +} + +// Bucket is a wrapper around the actual GCS Bucket method +func (g *GCSClientWrapper) Bucket(bucketName string) BucketHandleInterface { + return &RealBucketHandleWrapper{BucketHandle: g.Client.Bucket(bucketName)} +} + +// BucketHandleInterface is an interface for wrapping both real and mock bucket handles +type BucketHandleInterface interface { + Objects(ctx context.Context, query *storage.Query) ObjectIteratorInterface + Object(name string) ObjectHandleInterface +} + +// RealBucketHandleWrapper wraps the real *storage.BucketHandle and implements BucketHandleInterface +type RealBucketHandleWrapper struct { + BucketHandle *storage.BucketHandle +} + +// Objects delegates to the real *storage.BucketHandle's Objects method +func (r *RealBucketHandleWrapper) Objects(ctx context.Context, query *storage.Query) ObjectIteratorInterface { + return &RealObjectIteratorWrapper{Iterator: r.BucketHandle.Objects(ctx, query)} +} + +// Object delegates to the real *storage.BucketHandle's Object method +func (r *RealBucketHandleWrapper) Object(name string) ObjectHandleInterface { + return &RealObjectHandleWrapper{ObjectHandle: r.BucketHandle.Object(name)} +} + +// ObjectIteratorInterface defines the interface for object iterators +type ObjectIteratorInterface interface { + Next() (*storage.ObjectAttrs, error) +} + +// RealObjectIteratorWrapper wraps the real *storage.ObjectIterator and implements ObjectIteratorInterface +type RealObjectIteratorWrapper struct { + Iterator *storage.ObjectIterator +} + +// Next delegates to the real *storage.ObjectIterator's Next method +func (r *RealObjectIteratorWrapper) Next() (*storage.ObjectAttrs, error) { + return r.Iterator.Next() +} + +// ObjectHandleInterface defines the interface for object handles +type ObjectHandleInterface interface { + NewReader(ctx context.Context) (io.ReadCloser, error) +} + +// RealObjectHandleWrapper wraps the real *storage.ObjectHandle and implements ObjectHandleInterface +type RealObjectHandleWrapper struct { + ObjectHandle *storage.ObjectHandle +} + +// NewReader delegates to the real *storage.ObjectHandle's NewReader method +func (r *RealObjectHandleWrapper) NewReader(ctx context.Context) (io.ReadCloser, error) { + return r.ObjectHandle.NewReader(ctx) +} + +// GCSClient is a client to implement GCS specific APIs +type GCSClient struct { + BucketName string + GCPCredentials string + Prefix string + StartAfter string + Client GCSClientInterface + BucketHandle 
BucketHandleInterface +} + +// InitGCSClient initializes and returns a GCS client implementing GCSClientInterface +func InitGCSClient(ctx context.Context, gcpCredentials string) (GCSClientInterface, error) { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("InitGCSClient") + + var client *storage.Client + var err error + + if len(gcpCredentials) == 0 { + // The storage.NewClient(ctx) internally uses Application Default Credentials (ADC) to authenticate, + // and ADC works with Workload Identity when the required environment variables and setup are correctly configured. + // If the environment variables are not set, the client will use the default service account credentials. + // To use Google Workload Identity with storage.NewClient(ctx), ensure the following environment variables are properly set in your pod: + // GOOGLE_APPLICATION_CREDENTIALS (Optional): + // If you're not using the default workload identity path (/var/run/secrets/google.cloud/com.google.cloudsecrets/metadata/token), + // you can set GOOGLE_APPLICATION_CREDENTIALS to point to the federated token file manually. + // Otherwise, this can be left unset when Workload Identity is configured correctly. + // GOOGLE_CLOUD_PROJECT (Optional): + // Set this to your Google Cloud project ID if the SDK is not detecting it automatically. + // Additional Kubernetes Setup for Workload Identity: + // The Workload Identity configuration on your cluster ensures that the necessary tokens are automatically mounted for the pod and available without needing GOOGLE_APPLICATION_CREDENTIALS. + client, err = storage.NewClient(ctx) + } else { + client, err = storage.NewClient(ctx, option.WithCredentialsJSON([]byte(gcpCredentials))) + } + + if err != nil { + scopedLog.Error(err, "Failed to initialize a GCS client.") + return nil, err + } + + scopedLog.Info("GCS Client initialization successful.") + return &GCSClientWrapper{Client: client}, nil +} + +// InitGcloudClientWrapper is a wrapper around InitGCSClient +func InitGcloudClientWrapper(ctx context.Context, region, accessKeyID, secretAccessKey string) interface{} { + client, _ := InitGCSClient(ctx, secretAccessKey) + return client +} + +// NewGCSClient returns a GCS client +func NewGCSClient(ctx context.Context, bucketName string, gcpCredentials string, secretAccessKey string, prefix string, startAfter string, region string, endpoint string, fn GetInitFunc) (RemoteDataClient, error) { + client, err := InitGCSClient(ctx, secretAccessKey) + if err != nil { + return nil, err + } + + bucketHandle := client.Bucket(bucketName) + + return &GCSClient{ + BucketName: bucketName, + GCPCredentials: secretAccessKey, + Prefix: prefix, + StartAfter: startAfter, + Client: client, + BucketHandle: bucketHandle, + }, nil +} + +// RegisterGCSClient will add the corresponding function pointer to the map +func RegisterGCSClient() { + wrapperObject := GetRemoteDataClientWrapper{GetRemoteDataClient: NewGCSClient, GetInitFunc: InitGcloudClientWrapper} + RemoteDataClientsMap["gcp"] = wrapperObject +} + +// GetAppsList gets the list of apps from remote storage +func (gcsClient *GCSClient) GetAppsList(ctx context.Context) (RemoteDataListResponse, error) { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("GetAppsList") + + scopedLog.Info("Getting Apps list", "GCS Bucket", gcsClient.BucketName) + remoteDataClientResponse := RemoteDataListResponse{} + + query := &storage.Query{ + Prefix: gcsClient.Prefix, + Delimiter: "/", + } + + startAfterFound := gcsClient.StartAfter == "" // If 
StartAfter is empty, skip this check + it := gcsClient.BucketHandle.Objects(ctx, query) + + var objects []*RemoteObject + maxKeys := 4000 // Limit the number of objects manually + + if strings.HasSuffix(gcsClient.StartAfter, "/") { + startAfterFound = true + } + + for count := 0; count < maxKeys; { + objAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + scopedLog.Error(err, "Error fetching object from GCS", "GCS Bucket", gcsClient.BucketName) + return remoteDataClientResponse, err + } + + // Implement "StartAfter" logic to skip objects until the desired one is found + if !startAfterFound { + if objAttrs.Name == gcsClient.StartAfter { + startAfterFound = true // Start adding objects after this point + } + continue + } + + // Map GCS object attributes to RemoteObject + remoteObj := &RemoteObject{ + Etag: &objAttrs.Etag, + Key: &objAttrs.Name, + LastModified: &objAttrs.Updated, + Size: &objAttrs.Size, + StorageClass: &objAttrs.StorageClass, + } + + objects = append(objects, remoteObj) + count++ + } + + remoteDataClientResponse.Objects = objects + + return remoteDataClientResponse, nil +} + +// DownloadApp downloads the app from remote storage to the local file system +func (gcsClient *GCSClient) DownloadApp(ctx context.Context, downloadRequest RemoteDataDownloadRequest) (bool, error) { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("DownloadApp").WithValues("remoteFile", downloadRequest.RemoteFile, "localFile", + downloadRequest.LocalFile, "etag", downloadRequest.Etag) + + file, err := os.Create(downloadRequest.LocalFile) + if err != nil { + scopedLog.Error(err, "Unable to open local file") + return false, err + } + defer file.Close() + + objHandle := gcsClient.BucketHandle.Object(downloadRequest.RemoteFile) + reader, err := objHandle.NewReader(ctx) + if err != nil { + scopedLog.Error(err, "Unable to download item", "RemoteFile", downloadRequest.RemoteFile) + os.Remove(downloadRequest.LocalFile) + return false, err + } + defer reader.Close() + + if _, err := io.Copy(file, reader); err != nil { + scopedLog.Error(err, "Unable to copy data to local file") + return false, err + } + + scopedLog.Info("File downloaded") + + return true, nil +} diff --git a/pkg/splunk/client/gcpbucketclient_test.go b/pkg/splunk/client/gcpbucketclient_test.go new file mode 100644 index 000000000..eccf3a067 --- /dev/null +++ b/pkg/splunk/client/gcpbucketclient_test.go @@ -0,0 +1,264 @@ +// Copyright (c) 2018-2022 Splunk Inc. +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
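For orientation, the sketch below shows how the new "gcp" provider is intended to be consumed through the RemoteDataClient plumbing added above. It is illustrative only and not part of the patch: it assumes it compiles inside the same client package, that empty credentials fall back to Application Default Credentials / Workload Identity as described in InitGCSClient, and the bucket name, prefix, and local path are placeholders.

func exampleGCSFetch(ctx context.Context) error {
	// Register the "gcp" entry so RegisterRemoteDataClient(ctx, "gcp") can resolve it.
	RegisterGCSClient()

	// Empty gcpCredentials/secretAccessKey -> Application Default Credentials.
	rdc, err := NewGCSClient(ctx, "my-apps-bucket", "", "", "adminAppsRepo/", "", "", "", InitGcloudClientWrapper)
	if err != nil {
		return err
	}
	gcs := rdc.(*GCSClient)

	// List app packages under the prefix, then download the first one.
	list, err := gcs.GetAppsList(ctx)
	if err != nil {
		return err
	}
	if len(list.Objects) == 0 {
		return nil // nothing to download
	}
	_, err = gcs.DownloadApp(ctx, RemoteDataDownloadRequest{
		LocalFile:  "/tmp/app1.tgz",        // placeholder local path
		RemoteFile: *list.Objects[0].Key,   // first object returned by the listing
	})
	return err
}
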
+ +package client + +import ( + "context" + "errors" + "io" + "os" + "testing" + "time" + + "cloud.google.com/go/storage" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/api/iterator" +) + +// MockGCSClientInterface is a mock implementation of GCSClientInterface +type MockGCSClientInterface struct { + mock.Mock +} + +// Bucket mocks the Bucket method of GCSClientInterface +func (m *MockGCSClientInterface) Bucket(bucketName string) BucketHandleInterface { + args := m.Called(bucketName) + if args.Get(0) == nil { + return nil + } + return args.Get(0).(BucketHandleInterface) +} + +// MockBucketHandle is a mock implementation of BucketHandleInterface +type MockBucketHandle struct { + mock.Mock +} + +// Objects mocks the Objects method of BucketHandleInterface +func (m *MockBucketHandle) Objects(ctx context.Context, query *storage.Query) ObjectIteratorInterface { + args := m.Called(ctx, query) + if args.Get(0) == nil { + return nil + } + return args.Get(0).(ObjectIteratorInterface) +} + +// Object mocks the Object method of BucketHandleInterface +func (m *MockBucketHandle) Object(name string) ObjectHandleInterface { + args := m.Called(name) + if args.Get(0) == nil { + return nil + } + return args.Get(0).(ObjectHandleInterface) +} + +// MockObjectIterator is a mock implementation of ObjectIteratorInterface +type MockObjectIterator struct { + mock.Mock + Objects []*storage.ObjectAttrs +} + +// Next mocks the Next method of ObjectIteratorInterface +func (m *MockObjectIterator) Next() (*storage.ObjectAttrs, error) { + if len(m.Objects) == 0 { + return nil, iterator.Done + } + obj := m.Objects[0] + m.Objects = m.Objects[1:] + return obj, nil +} + +// MockObjectHandle is a mock implementation of ObjectHandleInterface +type MockObjectHandle struct { + mock.Mock +} + +// NewReader mocks the NewReader method of ObjectHandleInterface +func (m *MockObjectHandle) NewReader(ctx context.Context) (io.ReadCloser, error) { + args := m.Called(ctx) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(io.ReadCloser), args.Error(1) +} + +// MockReader is a mock implementation of io.ReadCloser +type MockReader struct { + mock.Mock +} + +// Read mocks the Read method of io.Reader +func (m *MockReader) Read(p []byte) (n int, err error) { + args := m.Called(p) + return args.Int(0), args.Error(1) +} + +// Close mocks the Close method of io.Closer +func (m *MockReader) Close() error { + args := m.Called() + return args.Error(0) +} + +// TestGetAppsList tests the GetAppsList method of GCSClient +func TestGetAppsList(t *testing.T) { + // Create a mock GCS client + mockClient := new(MockGCSClientInterface) + mockBucket := new(MockBucketHandle) + mockIterator := new(MockObjectIterator) + + // Setup mock objects + mockObjects := []*storage.ObjectAttrs{ + { + Name: "test-prefix/app1", + Etag: "etag1", + Updated: time.Now(), + Size: 1024, + StorageClass: "STANDARD", + }, + { + Name: "test-prefix/app2", + Etag: "etag2", + Updated: time.Now(), + Size: 2048, + StorageClass: "STANDARD", + }, + } + mockIterator.Objects = mockObjects + + // No need to set expectation on Bucket since it's not called + // mockClient.On("Bucket", "test-bucket").Return(mockBucket) + + // Mock the Objects method to return the custom MockObjectIterator + mockBucket.On("Objects", mock.Anything, mock.Anything).Return(mockIterator) + + // Create the GCSClient with the mock client + gcsClient := &GCSClient{ + BucketName: "test-bucket", + Prefix: "test-prefix/", + StartAfter: 
"test-prefix/app1", + Client: mockClient, + BucketHandle: mockBucket, // Set the mocked bucket handle + } + + // Call the GetAppsList method + resp, err := gcsClient.GetAppsList(context.Background()) + + // Assertions + assert.NoError(t, err) + assert.Equal(t, 1, len(resp.Objects)) // Only app2 should be returned due to StartAfter logic + assert.Equal(t, "test-prefix/app2", *resp.Objects[0].Key) + assert.Equal(t, int64(2048), *resp.Objects[0].Size) + assert.Equal(t, "etag2", *resp.Objects[0].Etag) + + // Verify expectations + mockBucket.AssertExpectations(t) +} + +// TestDownloadApp tests the DownloadApp method of GCSClient +func TestDownloadApp(t *testing.T) { + // Create a mock GCS client + mockClient := new(MockGCSClientInterface) + mockBucket := new(MockBucketHandle) + mockObject := new(MockObjectHandle) + mockReader := new(MockReader) + + // No need to set expectation on Bucket since it's not called + // mockClient.On("Bucket", "test-bucket").Return(mockBucket) + + // Mock the Object method to return the mock ObjectHandle + mockBucket.On("Object", "remote-file").Return(mockObject) + + // Mock the NewReader method to return the mock Reader + mockObject.On("NewReader", mock.Anything).Return(mockReader, nil) + + // Simulate reading from the mock Reader + mockReader.On("Read", mock.AnythingOfType("[]uint8")).Return(0, io.EOF) + mockReader.On("Close").Return(nil) + + // Create a temporary file to simulate local file + tmpFile, err := os.CreateTemp("", "testfile") + assert.NoError(t, err) + defer os.Remove(tmpFile.Name()) + + // Create the GCSClient with the mock client + gcsClient := &GCSClient{ + BucketName: "test-bucket", + Client: mockClient, + BucketHandle: mockBucket, // Set the mocked bucket handle + } + + // Prepare download request + downloadRequest := RemoteDataDownloadRequest{ + RemoteFile: "remote-file", + LocalFile: tmpFile.Name(), + Etag: "etag", + } + + // Call the DownloadApp method + success, err := gcsClient.DownloadApp(context.Background(), downloadRequest) + + // Assertions + assert.NoError(t, err) + assert.True(t, success) + + // Verify expectations + mockBucket.AssertExpectations(t) + mockObject.AssertExpectations(t) + mockReader.AssertExpectations(t) +} + +// TestDownloadAppError tests the DownloadApp method of GCSClient for error case +func TestDownloadAppError(t *testing.T) { + // Create a mock GCS client + mockClient := new(MockGCSClientInterface) + mockBucket := new(MockBucketHandle) + mockObject := new(MockObjectHandle) + + // No need to set expectation on Bucket since it's not called + // mockClient.On("Bucket", "test-bucket").Return(mockBucket) + + // Mock the Object method to return the mock ObjectHandle + mockBucket.On("Object", "remote-file").Return(mockObject) + + // Mock the NewReader method to return an error + mockObject.On("NewReader", mock.Anything).Return(nil, errors.New("failed to create reader")) + + // Create the GCSClient with the mock client + gcsClient := &GCSClient{ + BucketName: "test-bucket", + Client: mockClient, + BucketHandle: mockBucket, // Set the mocked bucket handle + } + + // Prepare download request + downloadRequest := RemoteDataDownloadRequest{ + RemoteFile: "remote-file", + LocalFile: "testfile", + Etag: "etag", + } + + // Call the DownloadApp method + success, err := gcsClient.DownloadApp(context.Background(), downloadRequest) + + // Assertions + assert.Error(t, err) + assert.False(t, success) + + // Verify expectations + mockBucket.AssertExpectations(t) + mockObject.AssertExpectations(t) +} diff --git 
a/pkg/splunk/client/remotedataclient.go b/pkg/splunk/client/remotedataclient.go index 7e3cbecd7..3120622ab 100644 --- a/pkg/splunk/client/remotedataclient.go +++ b/pkg/splunk/client/remotedataclient.go @@ -122,6 +122,8 @@ func RegisterRemoteDataClient(ctx context.Context, provider string) { RegisterMinioClient() case "azure": RegisterAzureBlobClient() + case "gcp": + RegisterGCSClient() default: scopedLog.Error(nil, "Invalid provider specified", "provider", provider) } diff --git a/pkg/splunk/client/util.go b/pkg/splunk/client/util.go index d53cd1f91..c8cadb58c 100644 --- a/pkg/splunk/client/util.go +++ b/pkg/splunk/client/util.go @@ -92,10 +92,8 @@ func NewMockAzureBlobClient(ctx context.Context, bucketName string, storageAccou return &AzureBlobClient{ BucketName: bucketName, StorageAccountName: storageAccountName, - SecretAccessKey: secretAccessKey, Prefix: prefix, Endpoint: endpoint, - HTTPClient: cl.(*spltest.MockHTTPClient), }, nil } diff --git a/pkg/splunk/common/names.go b/pkg/splunk/common/names.go index bdfd92e6d..32e892b96 100644 --- a/pkg/splunk/common/names.go +++ b/pkg/splunk/common/names.go @@ -17,6 +17,8 @@ package common import "fmt" +type contextKey string + const ( // namespace scoped secret name namespaceScopedSecretNameTemplateStr = "splunk-%s-secret" @@ -114,6 +116,8 @@ const ( // sgontla: ToDo: being a constant will be a blocker for the UT to pass. relaxing a bit. Find a better alternative var AppDownloadVolume string = "/opt/splunk/appframework/" +var EventPublisherKey contextKey = "eventPublisher" + // GetVersionedSecretName returns a versioned secret name func GetVersionedSecretName(versionedSecretIdentifier string, version string) string { return fmt.Sprintf(versionedSecretNameTemplateStr, versionedSecretIdentifier, version) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 92300bf47..78a5539c0 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -50,7 +50,9 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("ApplyClusterManager") eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) cr.Kind = "ClusterManager" + if cr.Status.ResourceRevMap == nil { cr.Status.ResourceRevMap = make(map[string]string) } @@ -351,7 +353,6 @@ func PerformCmBundlePush(ctx context.Context, c splcommon.ControllerClient, cr * // Reconciler can be called for multiple reasons. If we are waiting on configMap update to happen, // do not increment the Retry Count unless the last check was 5 seconds ago. // This helps, to wait for the required time - //eventPublisher, _ := newK8EventPublisher(c, cr) currentEpoch := time.Now().Unix() if cr.Status.BundlePushTracker.LastCheckInterval+5 > currentEpoch { @@ -386,7 +387,6 @@ func PerformCmBundlePush(ctx context.Context, c splcommon.ControllerClient, cr * cr.Status.BundlePushTracker.NeedToPushManagerApps = false } - //eventPublisher.Warning(ctx, "BundlePush", fmt.Sprintf("Bundle push failed %s", err.Error())) return err } diff --git a/pkg/splunk/enterprise/clustermaster.go b/pkg/splunk/enterprise/clustermaster.go index 7bb699f83..fc9602c1b 100644 --- a/pkg/splunk/enterprise/clustermaster.go +++ b/pkg/splunk/enterprise/clustermaster.go @@ -334,7 +334,6 @@ func PerformCmasterBundlePush(ctx context.Context, c splcommon.ControllerClient, // Reconciler can be called for multiple reasons. 
If we are waiting on configMap update to happen, // do not increment the Retry Count unless the last check was 5 seconds ago. // This helps, to wait for the required time - //eventPublisher, _ := newK8EventPublisher(c, cr) currentEpoch := time.Now().Unix() if cr.Status.BundlePushTracker.LastCheckInterval+5 > currentEpoch { @@ -369,7 +368,6 @@ func PerformCmasterBundlePush(ctx context.Context, c splcommon.ControllerClient, cr.Status.BundlePushTracker.NeedToPushMasterApps = false } - //eventPublisher.Warning(ctx, "BundlePush", fmt.Sprintf("Bundle push failed %s", err.Error())) return err } diff --git a/pkg/splunk/enterprise/configuration.go b/pkg/splunk/enterprise/configuration.go index 42dcd34bb..bfeaa1d6b 100644 --- a/pkg/splunk/enterprise/configuration.go +++ b/pkg/splunk/enterprise/configuration.go @@ -1633,15 +1633,15 @@ func validateRemoteVolumeSpec(ctx context.Context, volList []enterpriseApi.Volum // For now, Smartstore supports only S3, which is by default. if isAppFramework { if !isValidStorageType(volume.Type) { - return fmt.Errorf("storageType '%s' is invalid. Valid values are 's3' and 'blob'", volume.Type) + return fmt.Errorf("storageType '%s' is invalid. Valid values are 's3', 'gcs' and 'blob'", volume.Type) } if !isValidProvider(volume.Provider) { - return fmt.Errorf("provider '%s' is invalid. Valid values are 'aws', 'minio' and 'azure'", volume.Provider) + return fmt.Errorf("provider '%s' is invalid. Valid values are 'aws', 'minio', 'gcp' and 'azure'", volume.Provider) } if !isValidProviderForStorageType(volume.Type, volume.Provider) { - return fmt.Errorf("storageType '%s' cannot be used with provider '%s'. Valid combinations are (s3,aws), (s3,minio) and (blob,azure)", volume.Type, volume.Provider) + return fmt.Errorf("storageType '%s' cannot be used with provider '%s'. Valid combinations are (s3,aws), (s3,minio), (gcs,gcp) and (blob,azure)", volume.Type, volume.Provider) } } } @@ -1650,19 +1650,20 @@ func validateRemoteVolumeSpec(ctx context.Context, volList []enterpriseApi.Volum // isValidStorageType checks if the storage type specified is valid and supported func isValidStorageType(storage string) bool { - return storage != "" && (storage == "s3" || storage == "blob") + return storage != "" && (storage == "s3" || storage == "blob" || storage == "gcs") } // isValidProvider checks if the provider specified is valid and supported func isValidProvider(provider string) bool { - return provider != "" && (provider == "aws" || provider == "minio" || provider == "azure") + return provider != "" && (provider == "aws" || provider == "minio" || provider == "azure" || provider == "gcp") } // Valid provider for s3 are aws and minio // Valid provider for blob is azure func isValidProviderForStorageType(storageType string, provider string) bool { return ((storageType == "s3" && (provider == "aws" || provider == "minio")) || - (storageType == "blob" && provider == "azure")) + (storageType == "blob" && provider == "azure") || + (storageType == "gcs" && provider == "gcp")) } // validateSplunkIndexesSpec validates the smartstore index spec diff --git a/pkg/splunk/enterprise/configuration_test.go b/pkg/splunk/enterprise/configuration_test.go index dc9d5959f..01dbcd260 100644 --- a/pkg/splunk/enterprise/configuration_test.go +++ b/pkg/splunk/enterprise/configuration_test.go @@ -969,14 +969,14 @@ func TestValidateAppFrameworkSpec(t *testing.T) { // Invalid remote volume type should return error. 
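As a quick illustration of the storageType/provider pairings the updated validation helpers accept, here is a small sketch; it is not part of the patch and assumes it sits in the same enterprise package alongside isValidProviderForStorageType.

func exampleStorageTypeProviderPairs() map[string]bool {
	// Sketch only: the pairings accepted by isValidProviderForStorageType after this change.
	return map[string]bool{
		"s3/aws":     isValidProviderForStorageType("s3", "aws"),     // true
		"s3/minio":   isValidProviderForStorageType("s3", "minio"),   // true
		"blob/azure": isValidProviderForStorageType("blob", "azure"), // true
		"gcs/gcp":    isValidProviderForStorageType("gcs", "gcp"),    // true, newly allowed
		"gcs/aws":    isValidProviderForStorageType("gcs", "aws"),    // false, rejected with the updated error message
	}
}
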
AppFramework.VolList[0].Type = "s4" err = ValidateAppFrameworkSpec(ctx, &AppFramework, &appFrameworkContext, false, "") - if err == nil || !strings.Contains(err.Error(), "storageType 's4' is invalid. Valid values are 's3' and 'blob'") { + if err == nil || !strings.Contains(err.Error(), "storageType 's4' is invalid. Valid values are 's3', 'gcs' and 'blob'") { t.Errorf("ValidateAppFrameworkSpec with invalid remote volume type should have returned error.") } AppFramework.VolList[0].Type = "s3" AppFramework.VolList[0].Provider = "invalid-provider" err = ValidateAppFrameworkSpec(ctx, &AppFramework, &appFrameworkContext, false, "") - if err == nil || !strings.Contains(err.Error(), "provider 'invalid-provider' is invalid. Valid values are 'aws', 'minio' and 'azure'") { + if err == nil || !strings.Contains(err.Error(), "provider 'invalid-provider' is invalid. Valid values are 'aws', 'minio', 'gcp' and 'azure'") { t.Errorf("ValidateAppFrameworkSpec with invalid provider should have returned error.") } @@ -984,7 +984,7 @@ func TestValidateAppFrameworkSpec(t *testing.T) { AppFramework.VolList[0].Type = "s3" AppFramework.VolList[0].Provider = "azure" err = ValidateAppFrameworkSpec(ctx, &AppFramework, &appFrameworkContext, false, "") - if err == nil || !strings.Contains(err.Error(), "storageType 's3' cannot be used with provider 'azure'. Valid combinations are (s3,aws), (s3,minio) and (blob,azure)") { + if err == nil || !strings.Contains(err.Error(), "storageType 's3' cannot be used with provider 'azure'. Valid combinations are (s3,aws), (s3,minio), (gcs,gcp) and (blob,azure)") { t.Errorf("ValidateAppFrameworkSpec with s3 and azure combination should have returned error.") } @@ -1012,11 +1012,18 @@ func TestValidateAppFrameworkSpec(t *testing.T) { t.Errorf("ValidateAppFrameworkSpec with s3 and minio combination should not have returned error.") } + // Validate gcs and gcp are right combination + AppFramework.VolList[0].Type = "gcs" + AppFramework.VolList[0].Provider = "gcp" + err = ValidateAppFrameworkSpec(ctx, &AppFramework, &appFrameworkContext, false, "") + if err != nil { + t.Errorf("ValidateAppFrameworkSpec with gcs and gcp combination should not have returned error.") + } // Validate blob and aws are not right combination AppFramework.VolList[0].Type = "blob" AppFramework.VolList[0].Provider = "aws" err = ValidateAppFrameworkSpec(ctx, &AppFramework, &appFrameworkContext, false, "") - if err == nil || !strings.Contains(err.Error(), "storageType 'blob' cannot be used with provider 'aws'. Valid combinations are (s3,aws), (s3,minio) and (blob,azure)") { + if err == nil || !strings.Contains(err.Error(), "storageType 'blob' cannot be used with provider 'aws'. Valid combinations are (s3,aws), (s3,minio), (gcs,gcp) and (blob,azure)") { t.Errorf("ValidateAppFrameworkSpec with blob and aws combination should have returned error.") } @@ -1024,7 +1031,7 @@ func TestValidateAppFrameworkSpec(t *testing.T) { AppFramework.VolList[0].Type = "blob" AppFramework.VolList[0].Provider = "minio" err = ValidateAppFrameworkSpec(ctx, &AppFramework, &appFrameworkContext, false, "") - if err == nil || !strings.Contains(err.Error(), "storageType 'blob' cannot be used with provider 'minio'. Valid combinations are (s3,aws), (s3,minio) and (blob,azure)") { + if err == nil || !strings.Contains(err.Error(), "storageType 'blob' cannot be used with provider 'minio'. 
Valid combinations are (s3,aws), (s3,minio), (gcs,gcp) and (blob,azure)") { t.Errorf("ValidateAppFrameworkSpec with blob and minio combination should have returned error.") } diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index b14944dcc..39ba7cca3 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -56,6 +56,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("ApplyIndexerClusterManager").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) cr.Kind = "IndexerCluster" var err error diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 1dfd4edd7..7d4c1c02e 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -46,6 +46,7 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("ApplyLicenseManager") eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) cr.Kind = "LicenseManager" var err error diff --git a/pkg/splunk/enterprise/licensemaster.go b/pkg/splunk/enterprise/licensemaster.go index f971a9902..baa835a0f 100644 --- a/pkg/splunk/enterprise/licensemaster.go +++ b/pkg/splunk/enterprise/licensemaster.go @@ -47,6 +47,7 @@ func ApplyLicenseMaster(ctx context.Context, client splcommon.ControllerClient, reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("ApplyLicenseMaster") eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) var err error // Initialize phase diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index a3d3c68ce..6eb8f779a 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -50,6 +50,7 @@ func ApplyMonitoringConsole(ctx context.Context, client splcommon.ControllerClie reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("ApplyMonitoringConsole") eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) cr.Kind = "MonitoringConsole" if cr.Status.ResourceRevMap == nil { diff --git a/pkg/splunk/enterprise/searchheadcluster.go b/pkg/splunk/enterprise/searchheadcluster.go index 26a0412a1..1c514e829 100644 --- a/pkg/splunk/enterprise/searchheadcluster.go +++ b/pkg/splunk/enterprise/searchheadcluster.go @@ -49,6 +49,8 @@ func ApplySearchHeadCluster(ctx context.Context, client splcommon.ControllerClie reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("ApplySearchHeadCluster") eventPublisher, _ := newK8EventPublisher(client, cr) + + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) cr.Kind = "SearchHeadCluster" var err error @@ -590,7 +592,6 @@ func (mgr *searchHeadClusterPodManager) updateStatus(ctx context.Context, statef } gotCaptainInfo := false for n := int32(0); n < statefulSet.Status.Replicas; n++ { - //c := mgr.getClient(ctx, n) memberName := GetSplunkStatefulsetPodName(SplunkSearchHead, mgr.cr.GetName(), n) memberStatus := enterpriseApi.SearchHeadClusterMemberStatus{Name: 
memberName} memberInfo, err := GetSearchHeadClusterMemberInfo(ctx, mgr, n) @@ -615,6 +616,7 @@ func (mgr *searchHeadClusterPodManager) updateStatus(ctx context.Context, statef mgr.cr.Status.MaintenanceMode = captainInfo.MaintenanceMode gotCaptainInfo = true } else { + mgr.cr.Status.CaptainReady = false mgr.log.Error(err, "Unable to retrieve captain info", "memberName", memberName) } } diff --git a/pkg/splunk/enterprise/standalone.go b/pkg/splunk/enterprise/standalone.go index ecd769d94..7c0d280e4 100644 --- a/pkg/splunk/enterprise/standalone.go +++ b/pkg/splunk/enterprise/standalone.go @@ -49,6 +49,7 @@ func ApplyStandalone(ctx context.Context, client splcommon.ControllerClient, cr cr.Status.ResourceRevMap = make(map[string]string) } eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) cr.Kind = "Standalone" var err error diff --git a/pkg/splunk/enterprise/upgrade.go b/pkg/splunk/enterprise/upgrade.go index 72348f6ef..ddea34bde 100644 --- a/pkg/splunk/enterprise/upgrade.go +++ b/pkg/splunk/enterprise/upgrade.go @@ -140,6 +140,56 @@ ClusterManager: if clusterManager.Status.Phase != enterpriseApi.PhaseReady || cmImage != spec.Image { return false, nil } + goto IndexerCluster + } + +IndexerCluster: + if cr.GroupVersionKind().Kind == "IndexerCluster" { + + // if manager client is not defined, then assign current client + if mgr.c == nil { + mgr.c = c + } + + // check cluster info call using splunk rest api + clusterInfo, err := GetClusterInfoCall(ctx, mgr, false) + if err != nil { + return false, fmt.Errorf("could not get cluster info from cluster manager") + } + // check if cluster is multisite + if clusterInfo.MultiSite == "true" { + opts := []rclient.ListOption{ + rclient.InNamespace(cr.GetNamespace()), + } + indexerList, err := getIndexerClusterList(ctx, c, cr, opts) + if err != nil { + return false, err + } + // get sorted current indexer site list + sortedList, _ := getIndexerClusterSortedSiteList(ctx, c, spec.ClusterManagerRef, indexerList) + + preIdx := enterpriseApi.IndexerCluster{} + + for i, v := range sortedList.Items { + if &v == cr { + if i > 0 { + preIdx = sortedList.Items[i-1] + } + break + + } + } + if len(preIdx.Name) != 0 { + // check if previous indexer have completed before starting next one + image, _ := getCurrentImage(ctx, c, &preIdx, SplunkIndexer) + if preIdx.Status.Phase != enterpriseApi.PhaseReady || image != spec.Image { + return false, nil + } + } + + } + return true, nil + } else { goto SearchHeadCluster } SearchHeadCluster: @@ -173,12 +223,12 @@ SearchHeadCluster: searchHeadList, err := getSearchHeadClusterList(ctx, c, cr, opts) if err != nil { if err.Error() == "NotFound" { - goto IndexerCluster + goto MonitoringConsole } return false, err } if len(searchHeadList.Items) == 0 { - goto IndexerCluster + goto MonitoringConsole } // check if instance has the ClusterManagerRef defined @@ -189,7 +239,7 @@ SearchHeadCluster: } } if len(searchHeadClusterInstance.GetName()) == 0 { - goto IndexerCluster + goto MonitoringConsole } shcImage, err := getCurrentImage(ctx, c, &searchHeadClusterInstance, SplunkSearchHead) @@ -204,53 +254,6 @@ SearchHeadCluster: if searchHeadClusterInstance.Status.Phase != enterpriseApi.PhaseReady || shcImage != spec.Image { return false, nil } - goto IndexerCluster - } -IndexerCluster: - if cr.GroupVersionKind().Kind == "IndexerCluster" { - - // if manager client is not defined, then assign current client - if mgr.c == nil { - mgr.c = c - } - - // check cluster info call 
using splunk rest api - clusterInfo, err := GetClusterInfoCall(ctx, mgr, false) - if err != nil { - return false, fmt.Errorf("could not get cluster info from cluster manager") - } - // check if cluster is multisite - if clusterInfo.MultiSite == "true" { - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), - } - indexerList, err := getIndexerClusterList(ctx, c, cr, opts) - if err != nil { - return false, err - } - // get sorted current indexer site list - sortedList, _ := getIndexerClusterSortedSiteList(ctx, c, spec.ClusterManagerRef, indexerList) - - preIdx := enterpriseApi.IndexerCluster{} - - for i, v := range sortedList.Items { - if &v == cr { - if i > 0 { - preIdx = sortedList.Items[i-1] - } - break - - } - } - if len(preIdx.Name) != 0 { - // check if previous indexer have completed before starting next one - image, _ := getCurrentImage(ctx, c, &preIdx, SplunkIndexer) - if preIdx.Status.Phase != enterpriseApi.PhaseReady || image != spec.Image { - return false, nil - } - } - - } goto MonitoringConsole } MonitoringConsole: @@ -339,5 +342,4 @@ MonitoringConsole: } EndLabel: return true, nil - } diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 4a70f0d6b..8a98882ff 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -159,6 +159,9 @@ func GetRemoteStorageClient(ctx context.Context, client splcommon.ControllerClie if vol.Provider == "azure" { accessKeyID = string(remoteDataClientSecret.Data["azure_sa_name"]) secretAccessKey = string(remoteDataClientSecret.Data["azure_sa_secret_key"]) + } else if vol.Provider == "gcp" { + accessKeyID = "key.json" + secretAccessKey = string(remoteDataClientSecret.Data[accessKeyID]) } else { accessKeyID = string(remoteDataClientSecret.Data["s3_access_key"]) secretAccessKey = string(remoteDataClientSecret.Data["s3_secret_key"]) diff --git a/test/README.md b/test/README.md index 23fbb7e40..d01a8a331 100644 --- a/test/README.md +++ b/test/README.md @@ -79,6 +79,11 @@ STORAGE_ACCOUNT STORAGE_ACCOUNT_KEY CLUSTER_PROVIDER=[azure] +For Azure: +GCP_SERVICE_ACCOUNT_KEY +CLUSTER_PROVIDER=[gcp] +ECR_REGISTRY + ## Writing tests diff --git a/test/appframework_aws/m4/appframework_aws_test.go b/test/appframework_aws/m4/appframework_aws_test.go index c7c6c6e8d..342c109aa 100644 --- a/test/appframework_aws/m4/appframework_aws_test.go +++ b/test/appframework_aws/m4/appframework_aws_test.go @@ -155,7 +155,7 @@ var _ = Describe("m4appfw test", func() { // Upload V1 apps to S3 for Indexer Cluster testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) 
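A note on the relocated IndexerCluster block in upgrade.go above: it now runs right after the cluster manager readiness check and, when the cluster reports multisite, it additionally holds an IndexerCluster back until the IndexerCluster ordered before it in the sorted site list is Ready on the target image. A minimal sketch of that predecessor check, using simplified stand-in types rather than the operator's API:

package upgradeorder // illustrative, not operator code

// siteStatus stands in for the two fields the real check reads from a peer
// IndexerCluster: its current image and whether its phase is Ready.
type siteStatus struct {
	Name  string
	Image string
	Ready bool
}

// previousSiteUpgraded reports whether the site ordered immediately before
// current has already reached Ready on targetImage. The first site has no
// predecessor to wait for, and an unknown name is treated as unblocked,
// matching the empty preIdx case above.
func previousSiteUpgraded(sorted []siteStatus, current, targetImage string) bool {
	for i, s := range sorted {
		if s.Name != current {
			continue
		}
		if i == 0 {
			return true
		}
		prev := sorted[i-1]
		return prev.Ready && prev.Image == targetImage
	}
	return true
}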
// Upload V1 apps to S3 for Search Head Cluster @@ -245,7 +245,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV2) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V2 apps to S3 for Search Head Cluster @@ -373,7 +373,7 @@ var _ = Describe("m4appfw test", func() { // Upload V2 apps to S3 for Indexer Cluster testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V2 apps to S3 for Search Head Cluster @@ -446,7 +446,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV1) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -550,7 +550,7 @@ var _ = Describe("m4appfw test", func() { appFileList := testenv.GetAppFileList(appListV1) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -816,7 +816,7 @@ var _ = Describe("m4appfw test", func() { appFileList := testenv.GetAppFileList(appListV1) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) 
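On the GetRemoteStorageClient change further up: for provider "gcp" the remote-data secret is read differently from the s3 and azure cases; the access key id is the fixed file name key.json and the secret access key is that entry's full contents, i.e. the service-account JSON. A small sketch of just that extraction, with an illustrative package name:

package remotestore // illustrative, not operator code

import corev1 "k8s.io/api/core/v1"

// gcpCredsFromSecret mirrors the "gcp" branch added to GetRemoteStorageClient:
// the credential handed to the remote storage client is the whole key.json
// document stored in the secret's data map.
func gcpCredsFromSecret(secret *corev1.Secret) (accessKeyID, secretAccessKey string) {
	accessKeyID = "key.json"
	secretAccessKey = string(secret.Data[accessKeyID])
	return accessKeyID, secretAccessKey
}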
// Upload V1 apps to S3 for Search Head Cluster @@ -877,7 +877,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV2) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V2 apps to S3 for Search Head Cluster @@ -986,7 +986,7 @@ var _ = Describe("m4appfw test", func() { // Upload V1 apps to S3 for Indexer Cluster testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -1052,7 +1052,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV2) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V2 apps to S3 for Search Head Cluster @@ -1207,7 +1207,7 @@ var _ = Describe("m4appfw test", func() { appFileList := testenv.GetAppFileList(appListV1) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -1267,7 +1267,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV2) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) 
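The Apply* handlers touched in this diff (IndexerCluster, LicenseManager, LicenseMaster, MonitoringConsole, SearchHeadCluster, Standalone) all add ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher), so code deeper in the reconcile path can emit Kubernetes events without the publisher being threaded through every signature. A hedged sketch of the store and retrieve pair, with local stand-ins for the key and the publisher interface:

package eventctx // illustrative, not operator code

import "context"

// ctxKey is a private, typed key so unrelated packages cannot collide with it;
// it plays the role splcommon.EventPublisherKey plays in the operator.
type ctxKey struct{}

var publisherKey ctxKey

// publisher is a local stand-in for the operator's K8 event publisher.
type publisher interface {
	Warning(ctx context.Context, reason, message string)
}

// withPublisher mirrors the ctx = context.WithValue(...) lines added to the handlers.
func withPublisher(ctx context.Context, p publisher) context.Context {
	return context.WithValue(ctx, publisherKey, p)
}

// publisherFrom is how a helper lower in the call chain gets the publisher back;
// callers must tolerate a nil result when nothing was attached.
func publisherFrom(ctx context.Context) publisher {
	p, _ := ctx.Value(publisherKey).(publisher)
	return p
}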
// Upload V2 apps to S3 for Search Head Cluster @@ -1882,7 +1882,7 @@ var _ = Describe("m4appfw test", func() { appVersion := "V1" testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -1974,7 +1974,7 @@ var _ = Describe("m4appfw test", func() { appVersion := "V1" testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -2064,7 +2064,7 @@ var _ = Describe("m4appfw test", func() { // Upload V1 apps to S3 for Indexer Cluster testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -2181,7 +2181,7 @@ var _ = Describe("m4appfw test", func() { appFileList := testenv.GetAppFileList(appListV1) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -2292,7 +2292,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV2) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) 
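The long run of message-only edits across these m4 specs follows a single pattern: the upload assertion now also interpolates the target bucket, so a failed upload names the bucket the test was writing to as well as the app version, for example:

Expect(err).To(Succeed(), fmt.Sprintf(
	"Unable to upload %s apps to S3 test directory for Indexer Cluster %s",
	appVersion, testS3Bucket))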
// Upload V2 apps to S3 for Search Head Cluster @@ -2367,7 +2367,7 @@ var _ = Describe("m4appfw test", func() { // Upload V1 apps to S3 for Indexer Cluster testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -2399,7 +2399,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV2) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V2 apps to S3 for Search Head Cluster @@ -2484,7 +2484,7 @@ var _ = Describe("m4appfw test", func() { testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) s3TestDirIdxc := "m4appfw-idxc-" + testenv.RandomDNSName(4) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirPVTestApps) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload apps to S3 for Search Head Cluster @@ -2653,7 +2653,7 @@ var _ = Describe("m4appfw test", func() { // Upload V1 apps to S3 for Indexer Cluster testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) 
// Upload V1 apps to S3 for Search Head Cluster diff --git a/test/appframework_aws/m4/manager_appframework_test.go b/test/appframework_aws/m4/manager_appframework_test.go index 5310000cb..9671e745d 100644 --- a/test/appframework_aws/m4/manager_appframework_test.go +++ b/test/appframework_aws/m4/manager_appframework_test.go @@ -154,7 +154,7 @@ var _ = Describe("m4appfw test", func() { // Upload V1 apps to S3 for Indexer Cluster testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -244,7 +244,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV2) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V2 apps to S3 for Search Head Cluster @@ -372,7 +372,7 @@ var _ = Describe("m4appfw test", func() { // Upload V2 apps to S3 for Indexer Cluster testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V2 apps to S3 for Search Head Cluster @@ -445,7 +445,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV1) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) 
// Upload V1 apps to S3 for Search Head Cluster @@ -549,7 +549,7 @@ var _ = Describe("m4appfw test", func() { appFileList := testenv.GetAppFileList(appListV1) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -815,7 +815,7 @@ var _ = Describe("m4appfw test", func() { appFileList := testenv.GetAppFileList(appListV1) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -876,7 +876,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV2) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V2 apps to S3 for Search Head Cluster @@ -985,7 +985,7 @@ var _ = Describe("m4appfw test", func() { // Upload V1 apps to S3 for Indexer Cluster testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -1051,7 +1051,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV2) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) 
// Upload V2 apps to S3 for Search Head Cluster @@ -1206,7 +1206,7 @@ var _ = Describe("m4appfw test", func() { appFileList := testenv.GetAppFileList(appListV1) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -1266,7 +1266,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV2) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V2 apps to S3 for Search Head Cluster @@ -1881,7 +1881,7 @@ var _ = Describe("m4appfw test", func() { appVersion := "V1" testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -1973,7 +1973,7 @@ var _ = Describe("m4appfw test", func() { appVersion := "V1" testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -2063,7 +2063,7 @@ var _ = Describe("m4appfw test", func() { // Upload V1 apps to S3 for Indexer Cluster testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) 
// Upload V1 apps to S3 for Search Head Cluster @@ -2180,7 +2180,7 @@ var _ = Describe("m4appfw test", func() { appFileList := testenv.GetAppFileList(appListV1) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -2291,7 +2291,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV2) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V2 apps to S3 for Search Head Cluster @@ -2366,7 +2366,7 @@ var _ = Describe("m4appfw test", func() { // Upload V1 apps to S3 for Indexer Cluster testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster @@ -2398,7 +2398,7 @@ var _ = Describe("m4appfw test", func() { appFileList = testenv.GetAppFileList(appListV2) testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV2) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V2 apps to S3 for Search Head Cluster @@ -2483,7 +2483,7 @@ var _ = Describe("m4appfw test", func() { testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) s3TestDirIdxc := "m4appfw-idxc-" + testenv.RandomDNSName(4) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirPVTestApps) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) 
// Upload apps to S3 for Search Head Cluster @@ -2652,7 +2652,7 @@ var _ = Describe("m4appfw test", func() { // Upload V1 apps to S3 for Indexer Cluster testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3 for Indexer Cluster", appVersion)) uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDirIdxc, appFileList, downloadDirV1) - Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster", appVersion)) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3 test directory for Indexer Cluster %s", appVersion, testS3Bucket)) uploadedApps = append(uploadedApps, uploadedFiles...) // Upload V1 apps to S3 for Search Head Cluster diff --git a/test/appframework_gcp/c3/appframework_gcs_suite_test.go b/test/appframework_gcp/c3/appframework_gcs_suite_test.go new file mode 100644 index 000000000..9aa061bad --- /dev/null +++ b/test/appframework_gcp/c3/appframework_gcs_suite_test.go @@ -0,0 +1,102 @@ +// Copyright (c) 2018-2022 Splunk Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package c3gcpappfw + +import ( + "os" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/splunk/splunk-operator/test/testenv" +) + +const ( + // PollInterval specifies the polling interval + PollInterval = 5 * time.Second + + // ConsistentPollInterval is the interval to use to consistently check a state is stable + ConsistentPollInterval = 200 * time.Millisecond + ConsistentDuration = 2000 * time.Millisecond +) + +var ( + testenvInstance *testenv.TestEnv + testSuiteName = "c3appfw-" + testenv.RandomDNSName(3) + appListV1 []string + appListV2 []string + testDataGcsBucket = os.Getenv("TEST_BUCKET") + testGcsBucket = os.Getenv("TEST_INDEXES_S3_BUCKET") + gcsAppDirV1 = testenv.AppLocationV1 + gcsAppDirV2 = testenv.AppLocationV2 + gcsPVTestApps = testenv.PVTestAppsLocation + currDir, _ = os.Getwd() + downloadDirV1 = filepath.Join(currDir, "c3appfwV1-"+testenv.RandomDNSName(4)) + downloadDirV2 = filepath.Join(currDir, "c3appfwV2-"+testenv.RandomDNSName(4)) + downloadDirPVTestApps = filepath.Join(currDir, "c3appfwPVTestApps-"+testenv.RandomDNSName(4)) +) + +// TestBasic is the main entry point +func TestBasic(t *testing.T) { + + RegisterFailHandler(Fail) + + RunSpecs(t, "Running "+testSuiteName) +} + +var _ = BeforeSuite(func() { + var err error + testenvInstance, err = testenv.NewDefaultTestEnv(testSuiteName) + Expect(err).ToNot(HaveOccurred()) + + if testenv.ClusterProvider == "gcp" { + // Create a list of apps to upload to Gcs + appListV1 = testenv.BasicApps + appFileList := testenv.GetAppFileList(appListV1) + + // Download V1 Apps from Gcs + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download V1 app files") + + // Create a list of apps to upload to Gcs after poll period + appListV2 = append(appListV1, testenv.NewAppsAddedBetweenPolls...) 
+ appFileList = testenv.GetAppFileList(appListV2) + + // Download V2 Apps from Gcs + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV2, downloadDirV2, appFileList) + Expect(err).To(Succeed(), "Unable to download V2 app files") + } else { + testenvInstance.Log.Info("Skipping Before Suite Setup", "Cluster Provider", testenv.ClusterProvider) + } + +}) + +var _ = AfterSuite(func() { + if testenvInstance != nil { + Expect(testenvInstance.Teardown()).ToNot(HaveOccurred()) + } + + if testenvInstance != nil { + Expect(testenvInstance.Teardown()).ToNot(HaveOccurred()) + } + + // Delete locally downloaded app files + err := os.RemoveAll(downloadDirV1) + Expect(err).To(Succeed(), "Unable to delete locally downloaded V1 app files.") + err = os.RemoveAll(downloadDirV2) + Expect(err).To(Succeed(), "Unable to delete locally downloaded V2 app files.") +}) diff --git a/test/appframework_gcp/c3/appframework_gcs_test.go b/test/appframework_gcp/c3/appframework_gcs_test.go new file mode 100644 index 000000000..a56a97247 --- /dev/null +++ b/test/appframework_gcp/c3/appframework_gcs_test.go @@ -0,0 +1,721 @@ +// Copyright (c) 2018-2022 Splunk Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.s +package c3gcpappfw + +import ( + "context" + //"encoding/json" + "fmt" + "path/filepath" + //"strings" + //"time" + + //enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + //splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + testenv "github.com/splunk/splunk-operator/test/testenv" + corev1 "k8s.io/api/core/v1" +) + +var _ = Describe("c3appfw test", func() { + + var testcaseEnvInst *testenv.TestCaseEnv + + var deployment *testenv.Deployment + var gcsTestDirShc string + var gcsTestDirIdxc string + //var gcsTestDirShcLocal string + //var gcsTestDirIdxcLocal string + //var gcsTestDirShcCluster string + //var gcsTestDirIdxcCluster string + var appSourceNameIdxc string + var appSourceNameShc string + var uploadedApps []string + var filePresentOnOperator bool + + ctx := context.TODO() + + BeforeEach(func() { + + var err error + name := fmt.Sprintf("%s-%s", "master"+testenvInstance.GetName(), testenv.RandomDNSName(3)) + testcaseEnvInst, err = testenv.NewDefaultTestCaseEnv(testenvInstance.GetKubeClient(), name) + Expect(err).To(Succeed(), "Unable to create testcaseenv") + testenv.SpecifiedTestTimeout = 5000 + deployment, err = testcaseEnvInst.NewDeployment(testenv.RandomDNSName(3)) + Expect(err).To(Succeed(), "Unable to create deployment") + }) + + AfterEach(func() { + // When a test spec failed, skip the teardown so we can troubleshoot. 
+ if CurrentGinkgoTestDescription().Failed { + testcaseEnvInst.SkipTeardown = true + } + if deployment != nil { + deployment.Teardown() + } + + if testcaseEnvInst != nil { + Expect(testcaseEnvInst.Teardown()).ToNot(HaveOccurred()) + } + + // Delete files uploaded to GCS + if !testcaseEnvInst.SkipTeardown { + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + } + + if filePresentOnOperator { + //Delete files from app-directory + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + podDownloadPath := filepath.Join(testenv.AppDownloadVolume, "test_file.img") + testenv.DeleteFilesOnOperatorPod(ctx, deployment, opPod, []string{podDownloadPath}) + } + }) + + Context("Single Site Indexer Cluster with Search Head Cluster (C3) and App Framework", func() { + It(" c3gcp, masterappframeworkc3gcp, appframeworkgcp, c3_gcp_sanity: can deploy a C3 SVA with App Framework enabled, install apps then upgrade them", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCS for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload V1 apps to GCS for Indexer Cluster and Search Head Cluster + * Create app sources for Cluster Master and Deployer + * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready + ######### INITIAL VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Monitoring Console and on Search Heads and Indexers pods + ############### UPGRADE APPS ################ + * Upload V2 apps on GCS + * Wait for Monitoring Console and C3 pods to be ready + ############ FINAL VERIFICATIONS ############ + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V2 apps are copied and upgraded on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Upload V1 apps to GCS for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Monitoring Console", appVersion)) + gcsTestDirMC := "c3appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
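These GCS specs repeat one bookkeeping pattern: everything uploaded with testenv.UploadFilesToGCP is appended to uploadedApps so the AfterEach block can hand the complete list to testenv.DeleteFilesOnGCP. A hedged sketch of that pattern as a helper; uploadAndTrack is hypothetical and the UploadFilesToGCP signature is assumed from its use in this suite:

package c3gcpappfw // sketch only, not part of the suite

import "github.com/splunk/splunk-operator/test/testenv"

// uploadAndTrack pushes files to a GCS test directory and records what it uploaded
// so teardown can delete exactly those objects later.
func uploadAndTrack(bucket, remoteDir string, files []string, localDir string, tracked *[]string) error {
	uploaded, err := testenv.UploadFilesToGCP(bucket, remoteDir, files, localDir)
	if err != nil {
		return err
	}
	*tracked = append(*tracked, uploaded...)
	return nil
}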
+ + // Prepare Monitoring Console spec with its own app source + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload V1 apps to GCS for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Bucket for Indexer Cluster", appVersion)) + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCS for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Search Head Cluster", appVersion)) + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
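GenerateAppFrameworkSpec is what turns these GCS test directories into app sources, and the volume it builds has to satisfy the (gcs, gcp) combination accepted by the validateRemoteVolumeSpec change earlier in this diff. A hedged example of such a volume, assuming the usual VolumeSpec fields alongside the Type and Provider fields the validation reads; every literal other than Type and Provider is a placeholder:

package example // illustrative, not operator code

import enterpriseApi "github.com/splunk/splunk-operator/api/v4"

// gcsVolume shows the storageType/provider pair this diff makes valid.
var gcsVolume = enterpriseApi.VolumeSpec{
	Name:      "appframework-test-volume", // placeholder
	Endpoint:  "https://storage.googleapis.com",
	Path:      "bucket-name/apps", // placeholder
	SecretRef: "gcs-secret",       // placeholder secret carrying key.json
	Type:      "gcs",
	Provider:  "gcp",
}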
+ + // Create App framework Spec for C3 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // get revision number of the resource + resourceVersion := testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterMasterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, mcName, "") + + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // wait for custom resource resource version to change + testenv.VerifyCustomResourceVersionChanged(ctx, deployment, testcaseEnvInst, mc, resourceVersion) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Verify no SH in disconnected status is present on CM + testenv.VerifyNoDisconnectedSHPresentOnCM(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + + //######### INITIAL VERIFICATIONS ############# + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := 
[]string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + ClusterMasterBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE APPS ################ + // Delete apps on GCS + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCS", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // get revision number of the resource + resourceVersion = testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Upload V2 apps to GCS for Indexer Cluster + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCS for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCS for Monitoring Console + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Monitoring Console", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
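The upgrade step above boils down to replacing the objects under the same GCS prefixes and letting the next app framework poll pick them up. A hedged summary of that driver as a helper; swapRemoteApps is hypothetical, and the testenv signatures are assumed from their use in these specs:

package c3gcpappfw // sketch only, not part of the suite

import "github.com/splunk/splunk-operator/test/testenv"

// swapRemoteApps removes the previous generation of apps from GCS, uploads the new
// packages under the same test directories, and resets the tracking slice so later
// cleanup only targets objects that still exist.
func swapRemoteApps(bucket string, dirs []string, newFiles []string, localDir string, tracked *[]string) error {
	testenv.DeleteFilesOnGCP(bucket, *tracked)
	*tracked = nil
	for _, dir := range dirs {
		uploaded, err := testenv.UploadFilesToGCP(bucket, dir, newFiles, localDir)
		if err != nil {
			return err
		}
		*tracked = append(*tracked, uploaded...)
	}
	return nil
}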
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + testenv.VerifyCustomResourceVersionChanged(ctx, deployment, testcaseEnvInst, mc, resourceVersion) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ FINAL VERIFICATIONS ############ + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV2 + mcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, ClusterMasterBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + }) + }) + + Context("Single Site Indexer Cluster with Search Head Cluster (C3) with App Framework", func() { + It(" c3gcp, masterappframeworkc3gcp, appframeworkgcp, c3_gcp_sanity: can deploy a C3 SVA with App Framework enabled, install apps then downgrade them", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V2 apps to GCS for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload V2 apps to GCS for Indexer Cluster and Search Head Cluster + * Create app source for Cluster Master and Deployer + * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready + ########### INITIAL VERIFICATIONS ########### + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V2 apps are copied, installed on Monitoring Console and also on Search Heads and Indexers pods + ############## DOWNGRADE APPS ############### + * Upload V1 apps on GCS + * Wait for Monitoring Console and C3 pods to be ready + ########### FINAL VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle 
push is successful + * Verify apps are copied and downgraded on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Upload V2 apps to GCS for Monitoring Console + appVersion := "V2" + appFileList := testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Monitoring Console", appVersion)) + gcsTestDirMC := "c3appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + + // Monitoring Console AppFramework Spec + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload V2 apps to GCS for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Indexer Cluster", appVersion)) + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCS for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Search Head Cluster", appVersion)) + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
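GenerateAppFrameworkSpec is called just below with a volume name, a scope, an app source name, the GCS prefix, and a 60-second poll interval; roughly speaking it has to assemble an enterpriseApi.AppFrameworkSpec that wires a remote volume to the test bucket and one app source to that volume. The sketch below shows that shape under assumptions: the Endpoint, Provider and Type strings, the SecretRef name, and the defaulting are illustrative guesses, not verified against testenv.

// Rough shape of the spec a GenerateAppFrameworkSpec-style helper returns
// for a GCS-backed app source; values marked "assumed" are guesses.
appFrameworkSpecIdxc := enterpriseApi.AppFrameworkSpec{
	AppsRepoPollInterval: 60, // seconds between polls of the remote bucket
	Defaults: enterpriseApi.AppSourceDefaultSpec{
		VolName: appSourceVolumeNameIdxc,
		Scope:   enterpriseApi.ScopeCluster,
	},
	VolList: []enterpriseApi.VolumeSpec{{
		Name:      appSourceVolumeNameIdxc,
		Endpoint:  "https://storage.googleapis.com", // assumed endpoint
		Path:      testGcsBucket,                    // bucket holding the uploaded packages
		SecretRef: "gcs-secret",                     // assumed secret with the service-account key
		Type:      "blob",                           // assumed storage type value
		Provider:  "gcp",                            // assumed provider value
	}},
	AppSources: []enterpriseApi.AppSourceSpec{{
		Name:     appSourceNameIdxc,
		Location: gcsTestDirIdxc + "/", // prefix inside the bucket
		AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{
			VolName: appSourceVolumeNameIdxc,
			Scope:   enterpriseApi.ScopeCluster,
		},
	}},
}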
+ + // Create App framework Spec for C3 + appSourceNameIdxc := "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc := "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // get revision number of the resource + resourceVersion := testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterMasterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, mcName, "") + + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // wait for custom resource resource version to change + testenv.VerifyCustomResourceVersionChanged(ctx, deployment, testcaseEnvInst, mc, resourceVersion) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########### INITIAL VERIFICATIONS ########### + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: 
appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + ClusterMasterBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############## DOWNGRADE APPS ############### + // Delete apps on GCS + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCS", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // get revision number of the resource + resourceVersion = testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Upload V1 apps to GCS for Indexer Cluster + appVersion = "V1" + appFileList = testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Indexers", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Indexers", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCS for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCS for Monitoring Console + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Monitoring Console", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
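GetResourceVersion and VerifyCustomResourceVersionChanged, used just above and again after the readiness waits that follow, lean on the fact that every operator write to the MonitoringConsole CR bumps metadata.resourceVersion. A sketch of that check using the file's own deployment.GetInstance helper and Gomega's Eventually; the timeout and polling interval are illustrative, not the actual testenv settings.

// Sketch of the resourceVersion pattern behind GetResourceVersion /
// VerifyCustomResourceVersionChanged: polling until the value differs from
// the one recorded before the change confirms a reconcile happened.
oldRV := mc.GetResourceVersion()

// ... trigger the app change ...

Eventually(func() string {
	current := &enterpriseApi.MonitoringConsole{}
	if err := deployment.GetInstance(ctx, mc.GetName(), current); err != nil {
		return oldRV // treat transient errors as "not changed yet"
	}
	return current.GetResourceVersion()
}, 5*time.Minute, 10*time.Second).ShouldNot(Equal(oldRV), "MonitoringConsole CR was never updated")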
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + testenv.VerifyCustomResourceVersionChanged(ctx, deployment, testcaseEnvInst, mc, resourceVersion) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########### FINAL VERIFICATIONS ############# + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV1 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV1 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV1 + mcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, ClusterMasterBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { + It(" c3gcp, masterappframeworkc3gcp, appframeworkgcp, c3_gcp_sanity: can deploy a C3 SVA and have apps installed locally on Cluster Manager and Deployer", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCS + * Create app source with local scope for C3 SVA (Cluster Master and Deployer) + * Prepare and deploy C3 CRD with app framework and wait for pods to be ready + ############# INITIAL VERIFICATIONS ########## + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are installed locally on Cluster Master and Deployer + ############### UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for pods to be ready + ########### UPGRADE VERIFICATIONS ########### + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are copied, installed and upgraded on Cluster Master and Deployer + */ + + //################## SETUP #################### + // Upload V1 apps to GCS for Indexer Cluster + appVersion := "V1" + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + appFileList := testenv.GetAppFileList(appListV1) + 
testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCS for Search Head Cluster + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy C3 CRD + indexerReplicas := 3 + shReplicas := 3 + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + cm, _, shc, err := deployment.DeploySingleSiteClusterMasterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############## INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, 
CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE APPS ################ + // Delete V1 apps on GCS + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCS", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to GCS + appVersion = "V2" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS", appVersion)) + appFileList = testenv.GetAppFileList(appListV2) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########### UPGRADE VERIFICATIONS ########### + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Single Site Indexer Cluster with Search Head Cluster (C3) and App Framework", func() { + It(" c3gcp, masterappframeworkc3gcp, appframeworkgcp, c3_gcp_sanity: can deploy a C3 SVA with App Framework enabled and check isDeploymentInProgressFlag for CM and SHC CR's", func() { + + /* + Test Steps + ################## SETUP ################## + * Upload V1 apps to GCS for Indexer Cluster and Search Head Cluster + * Prepare and deploy C3 CRD with app 
framework + * Verify IsDeploymentInProgress is set + * Wait for the pods to be ready + */ + + //################## SETUP #################### + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + + // Upload V1 apps to GCS for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Indexer Cluster", appVersion)) + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCS for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Search Head Cluster", appVersion)) + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for C3 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterMasterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Verify IsDeploymentInProgress Flag is set to true for Cluster Master CR + testcaseEnvInst.Log.Info("Checking isDeploymentInProgress Flag") + testenv.VerifyIsDeploymentInProgressFlagIsSet(ctx, deployment, testcaseEnvInst, cm.Name, cm.Kind) + + // Ensure Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Verify IsDeploymentInProgress Flag is set to true for SHC CR + testcaseEnvInst.Log.Info("Checking isDeploymentInProgress Flag") + testenv.VerifyIsDeploymentInProgressFlagIsSet(ctx, deployment, testcaseEnvInst, shc.Name, shc.Kind) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + }) + }) +}) diff --git a/test/appframework_gcp/c3/manager_appframework_test.go b/test/appframework_gcp/c3/manager_appframework_test.go new file mode 100644 index 000000000..b1b518198 --- /dev/null +++ 
b/test/appframework_gcp/c3/manager_appframework_test.go @@ -0,0 +1,3410 @@ +// Copyright (c) 2018-2022 Splunk Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.s +package c3gcpappfw + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + testenv "github.com/splunk/splunk-operator/test/testenv" + corev1 "k8s.io/api/core/v1" +) + +var _ = Describe("c3appfw test", func() { + + var testcaseEnvInst *testenv.TestCaseEnv + + var deployment *testenv.Deployment + var gcsTestDirShc string + var gcsTestDirIdxc string + var gcsTestDirShcLocal string + var gcsTestDirIdxcLocal string + var gcsTestDirShcCluster string + var gcsTestDirIdxcCluster string + var appSourceNameIdxc string + var appSourceNameShc string + var uploadedApps []string + var filePresentOnOperator bool + + ctx := context.TODO() + + BeforeEach(func() { + + var err error + name := fmt.Sprintf("%s-%s", testenvInstance.GetName(), testenv.RandomDNSName(3)) + testcaseEnvInst, err = testenv.NewDefaultTestCaseEnv(testenvInstance.GetKubeClient(), name) + Expect(err).To(Succeed(), "Unable to create testcaseenv") + deployment, err = testcaseEnvInst.NewDeployment(testenv.RandomDNSName(3)) + Expect(err).To(Succeed(), "Unable to create deployment") + testenv.SpecifiedTestTimeout = 100000 + }) + + AfterEach(func() { + // When a test spec failed, skip the teardown so we can troubleshoot. 
+ if CurrentGinkgoTestDescription().Failed { + testcaseEnvInst.SkipTeardown = true + } + if deployment != nil { + deployment.Teardown() + } + + if testcaseEnvInst != nil { + Expect(testcaseEnvInst.Teardown()).ToNot(HaveOccurred()) + } + + // Delete files uploaded to Gcs + if !testcaseEnvInst.SkipTeardown { + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + } + + if filePresentOnOperator { + //Delete files from app-directory + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + podDownloadPath := filepath.Join(testenv.AppDownloadVolume, "test_file.img") + testenv.DeleteFilesOnOperatorPod(ctx, deployment, opPod, []string{podDownloadPath}) + } + }) + + XContext("Single Site Indexer Cluster with Search Head Cluster (C3) and App Framework", func() { + It(" c3gcp, managerappframeworkc3gcp, appframeworkgcp, c3_mgr_gcp_sanity: can deploy a C3 SVA with App Framework enabled, install apps then upgrade them", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to Gcs for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload V1 apps to Gcs for Indexer Cluster and Search Head Cluster + * Create app sources for Cluster Manager and Deployer + * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready + ######### INITIAL VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Monitoring Console and on Search Heads and Indexers pods + ############### UPGRADE APPS ################ + * Upload V2 apps on Gcs + * Wait for Monitoring Console and C3 pods to be ready + ############ FINAL VERIFICATIONS ############ + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V2 apps are copied and upgraded on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Upload V1 apps to Gcs for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Monitoring Console", appVersion)) + gcsTestDirMC := "c3appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
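One detail in the AfterEach block at the top of this hunk: the file imports ginkgo/v2, where CurrentGinkgoTestDescription() appears to be the deprecated v1-compatibility shim. The supported v2 way to detect a failed spec is CurrentSpecReport(); a sketch of the equivalent check:

// Ginkgo v2 equivalent of the failed-spec check in AfterEach (sketch).
if CurrentSpecReport().Failed() {
	testcaseEnvInst.SkipTeardown = true
}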
+ + // Prepare Monitoring Console spec with its own app source + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload V1 apps to Gcs for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion)) + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
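The readiness helpers called throughout these specs (ClusterManagerReady, SearchHeadClusterReady, SingleSiteIndexersReady, VerifyMonitoringConsoleReady) all reduce to polling the CR status until the operator reports a Ready phase. A minimal sketch of that pattern with identifiers already in scope in this file; the timeout, interval, and PhaseError fallback are assumptions rather than the actual testenv defaults.

// Minimal readiness-wait sketch: poll the CR until its status phase is Ready.
Eventually(func() enterpriseApi.Phase {
	current := &enterpriseApi.MonitoringConsole{}
	if err := deployment.GetInstance(ctx, mcName, current); err != nil {
		return enterpriseApi.PhaseError
	}
	return current.Status.Phase
}, 20*time.Minute, 30*time.Second).Should(Equal(enterpriseApi.PhaseReady), "Monitoring Console never reached Ready phase")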
+ + // Create App framework Spec for C3 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // get revision number of the resource + resourceVersion := testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, mcName, "") + + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // wait for custom resource resource version to change + testenv.VerifyCustomResourceVersionChanged(ctx, deployment, testcaseEnvInst, mc, resourceVersion) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Verify no SH in disconnected status is present on CM + testenv.VerifyNoDisconnectedSHPresentOnCM(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + + //######### INITIAL VERIFICATIONS ############# + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := 
[]string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE APPS ################ + // Delete apps on Gcs + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on Gcs", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // get revision number of the resource + resourceVersion = testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Upload V2 apps to Gcs for Indexer Cluster + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to Gcs for Monitoring Console + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Monitoring Console", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
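The livenessProbe/readinessProbe verification earlier in this hunk fetches the operator-managed probe ConfigMap and then checks the scripts on every pod. The ConfigMap half amounts to roughly the sketch below; kubeClient (a client-go kubernetes.Interface) and the metav1 import are assumptions, since this hunk does not show how the suite builds its Kubernetes client.

// Sketch of the probe ConfigMap check: fetch the ConfigMap and make sure it
// carries the liveness, readiness and startup scripts mounted into the pods.
cmObj, err := kubeClient.CoreV1().ConfigMaps(testcaseEnvInst.GetName()).Get(ctx, ConfigMapName, metav1.GetOptions{})
Expect(err).To(Succeed(), "Unable to fetch probe ConfigMap")
for _, script := range []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} {
	Expect(cmObj.Data).To(HaveKey(script), "probe script missing from ConfigMap data")
}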
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + testenv.VerifyCustomResourceVersionChanged(ctx, deployment, testcaseEnvInst, mc, resourceVersion) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ FINAL VERIFICATIONS ############ + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV2 + mcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, clusterManagerBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + }) + }) + + XContext("Single Site Indexer Cluster with Search Head Cluster (C3) and App Framework and Image Upgrade", func() { + It(" c3gcp, managerappframeworkc3gcpt, appframeworkgcp, c3_mgr_gcp_sanity: can deploy a C3 SVA with App Framework enabled, install apps then upgrade the image and apps", func() { + + //################## SETUP #################### + + // Download License File + downloadDir := "licenseFolder" + switch testenv.ClusterProvider { + case "eks": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from Gcs") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "azure": + licenseFilePath, err := testenv.DownloadLicenseFromAzure(ctx, downloadDir) + Expect(err).To(Succeed(), "Unable to download license file from Azure") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + default: + fmt.Printf("Unable to download license file") + testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) + } + + // Upload V1 apps to Gcs for Monitoring Console + oldImage := "Refer to RELATED_SPLUNK_IMAGE_ENTERPRISE" + newImage := "splunk/splunk:latest" + + lm, err := deployment.DeployLicenseManager(ctx, deployment.GetName()) + cm, err := 
deployment.DeployClusterManager(ctx, deployment.GetName(), lm.GetName(), "", "") + + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: cm.GetName(), + }, + }, + } + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + shcName := fmt.Sprintf("%s-shc", deployment.GetName()) + idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) + shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName) + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "") + + // Wait for License Manager to be in READY phase + testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Monitoring Console goes to Ready phase + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // // Verify no SH in disconnected status is present on CM + testenv.VerifyNoDisconnectedSHPresentOnCM(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE IMAGE ################ + + // Update LM Image + + testcaseEnvInst.Log.Info("Upgrading the License Manager Image", "Current Image", oldImage, "New Image", newImage) + lm.Spec.Image = newImage + err = deployment.UpdateCR(ctx, lm) + Expect(err).To(Succeed(), "Failed upgrade License Manager image") + + // Update CM image + + testcaseEnvInst.Log.Info("Upgrading the Cluster Manager Image", "Current Image", oldImage, "New Image", newImage) + cm.Spec.Image = newImage + err = deployment.UpdateCR(ctx, cm) + Expect(err).To(Succeed(), "Failed upgrade Cluster Manager image") + + // Update MC image 
+ + testcaseEnvInst.Log.Info("Upgrading the Monitoring Console Image", "Current Image", oldImage, "New Image", newImage) + mc.Spec.Image = newImage + err = deployment.UpdateCR(ctx, mc) + Expect(err).To(Succeed(), "Failed upgrade Monitoring Console image") + + // Update SHC image + + testcaseEnvInst.Log.Info("Upgrading the Search Head Cluster Image", "Current Image", oldImage, "New Image", newImage) + shc.Spec.Image = newImage + err = deployment.UpdateCR(ctx, shc) + Expect(err).To(Succeed(), "Failed upgrade Search Head Cluster image") + + // // Update IDXC image + + testcaseEnvInst.Log.Info("Upgrading the Indexer Cluster Image", "Current Image", oldImage, "New Image", newImage) + idxc.Spec.Image = newImage + err = deployment.UpdateCR(ctx, idxc) + Expect(err).To(Succeed(), "Failed upgrade Indexer Cluster image") + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Wait for License Manager to be in READY phase + testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) + + // // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + }) + }) + + Context("Single Site Indexer Cluster with Search Head Cluster (C3) with App Framework", func() { + It(" c3gcp, managerappframeworkc3gcp, appframeworkgcp, c3_mgr_gcp_sanity: can deploy a C3 SVA with App Framework enabled, install apps then downgrade them", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V2 apps to Gcs for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload V2 apps to Gcs for Indexer Cluster and Search Head Cluster + * Create app source for Cluster Manager and Deployer + * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready + ########### INITIAL VERIFICATIONS ########### + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V2 apps are copied, installed on Monitoring Console and also on Search Heads and Indexers pods + ############## DOWNGRADE APPS ############### + * Upload V1 apps on Gcs + * Wait for Monitoring Console and C3 pods to be ready + ########### FINAL VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and downgraded on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Upload V2 apps to Gcs for Monitoring Console + appVersion := "V2" + appFileList := testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Monitoring Console", appVersion)) + gcsTestDirMC := "c3appfw-mc-" + testenv.RandomDNSName(4) + 
uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + + // Monitoring Console AppFramework Spec + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload V2 apps to Gcs for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion)) + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Create App framework Spec for C3 + appSourceNameIdxc := "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc := "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // get revision number of the resource + resourceVersion := testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, mcName, "") + + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // wait for custom resource resource version to change + testenv.VerifyCustomResourceVersionChanged(ctx, deployment, testcaseEnvInst, mc, resourceVersion) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########### INITIAL VERIFICATIONS ########### + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: 
appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############## DOWNGRADE APPS ############### + // Delete apps on Gcs + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on Gcs", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // get revision number of the resource + resourceVersion = testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Upload V1 apps to Gcs for Indexer Cluster + appVersion = "V1" + appFileList = testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexers", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexers", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to Gcs for Monitoring Console + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Monitoring Console", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
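GetPodsStartTime and VerifyNoPodReset, used before and after every app change in these specs, amount to snapshotting status.startTime for each Splunk pod and then confirming nothing was recreated. A rough client-go sketch of that idea; kubeClient, ns, and the comparison details are assumptions for illustration, not the testenv implementation.

// Sketch of the pod-reset check: record each pod's start time, apply the
// change, then assert no pod carries a newer start time (a reset would).
before := map[string]metav1.Time{}
pods, err := kubeClient.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
Expect(err).To(Succeed())
for _, p := range pods.Items {
	if p.Status.StartTime != nil {
		before[p.Name] = *p.Status.StartTime
	}
}

// ... apply the app upgrade or downgrade ...

pods, err = kubeClient.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
Expect(err).To(Succeed())
for _, p := range pods.Items {
	if old, ok := before[p.Name]; ok && p.Status.StartTime != nil {
		Expect(p.Status.StartTime.Time.After(old.Time)).To(BeFalse(), fmt.Sprintf("pod %s appears to have restarted", p.Name))
	}
}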
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + testenv.VerifyCustomResourceVersionChanged(ctx, deployment, testcaseEnvInst, mc, resourceVersion) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########### FINAL VERIFICATIONS ############# + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV1 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV1 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV1 + mcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, clusterManagerBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Single Site Indexer Cluster with Search Head Cluster (C3) with App Framework", func() { + It("integration, c3, managerappframeworkc3, appframework: can deploy a C3 SVA with App Framework enabled, install apps, scale up clusters, install apps on new pods, scale down", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps on Gcs for Indexer Cluster and Search Head Cluster + * Create app sources for Cluster Manager and Deployer + * Prepare and deploy C3 CRD with app config and wait for pods to be ready + ########## INITIAL VERIFICATIONS ############ + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied, installed on Search Heads and Indexers + ############# SCALING UP ################### + * Scale up indexers and Search Heads + * Wait for C3 to be ready + ########## SCALING UP VERIFICATIONS ######### + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is sucessful + * Verify apps are copied and installed on all Search Heads and Indexers pods + ############### SCALING DOWN ################ + * Scale down Indexers and Search Heads + * Wait for C3 to be ready + ######## 
SCALING DOWN VERIFICATIONS ######### + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are still copied and installed on all Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Upload V1 apps to Gcs for Indexer Cluster + appVersion := "V1" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion)) + appFileList := testenv.GetAppFileList(appListV1) + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for C3 + appSourceNameIdxc := "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc := "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, 
testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + + //########## INITIAL VERIFICATIONS ############ + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + time.Sleep(60 * time.Second) + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //Delete configMap Object + err = testenv.DeleteConfigMap(testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to delete ConfigMap", "ConfigMap name", ConfigMapName) + + //############# SCALING UP ################### + // Get instance of current Search Head Cluster CR with latest config + err = deployment.GetInstance(ctx, deployment.GetName()+"-shc", shc) + + Expect(err).To(Succeed(), "Failed to get instance of Search Head Cluster") + + // Scale up Search Head Cluster + defaultSHReplicas := shc.Spec.Replicas + scaledSHReplicas := defaultSHReplicas + 1 + testcaseEnvInst.Log.Info("Scale up Search Head Cluster", "Current Replicas", defaultSHReplicas, "New Replicas", scaledSHReplicas) + + // Update Replicas of Search Head Cluster + shc.Spec.Replicas = int32(scaledSHReplicas) + err = deployment.UpdateCR(ctx, shc) + Expect(err).To(Succeed(), "Failed to scale up Search Head Cluster") + + // Ensure Search Head Cluster scales up and go to ScalingUp phase + testenv.VerifySearchHeadClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseScalingUp) + + // Get instance of current Indexer CR with latest config + idxcName := deployment.GetName() + "-idxc" + idxc := &enterpriseApi.IndexerCluster{} + err = deployment.GetInstance(ctx, idxcName, idxc) + Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") + defaultIndexerReplicas := idxc.Spec.Replicas + scaledIndexerReplicas := defaultIndexerReplicas + 1 + testcaseEnvInst.Log.Info("Scale up Indexer Cluster", "Current Replicas", defaultIndexerReplicas, "New Replicas", scaledIndexerReplicas) + + // Update Replicas of Indexer Cluster + idxc.Spec.Replicas = int32(scaledIndexerReplicas) + err = deployment.UpdateCR(ctx, idxc) + Expect(err).To(Succeed(), "Failed to scale up Indexer Cluster") + + // Ensure 
Indexer Cluster scales up and go to ScalingUp phase + testenv.VerifyIndexerClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseScalingUp, idxcName) + + // Ensure Indexer Cluster go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify New Indexer On Cluster Manager + indexerName := fmt.Sprintf(testenv.IndexerPod, deployment.GetName(), scaledIndexerReplicas-1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Checking for New Indexer %s On Cluster Manager", indexerName)) + Expect(testenv.CheckIndexerOnCM(ctx, deployment, indexerName)).To(Equal(true)) + + // Ingest data on Indexers + for i := 0; i < int(scaledIndexerReplicas); i++ { + podName := fmt.Sprintf(testenv.IndexerPod, deployment.GetName(), i) + logFile := fmt.Sprintf("test-log-%s.log", testenv.RandomDNSName(3)) + testenv.CreateMockLogfile(logFile, 2000) + testenv.IngestFileViaMonitor(ctx, logFile, "main", podName, deployment) + } + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Search for data on newly added indexer + searchPod := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), scaledSHReplicas-1) + searchString := fmt.Sprintf("index=%s host=%s | stats count by host", "main", indexerName) + searchResultsResp, err := testenv.PerformSearchSync(ctx, searchPod, searchString, deployment) + Expect(err).To(Succeed(), "Failed to execute search '%s' on pod %s", searchPod, searchString) + + // Verify result + searchResponse := strings.Split(searchResultsResp, "\n")[0] + var searchResults map[string]interface{} + jsonErr := json.Unmarshal([]byte(searchResponse), &searchResults) + Expect(jsonErr).To(Succeed(), "Failed to unmarshal JSON Search Results from response '%s'", searchResultsResp) + + testcaseEnvInst.Log.Info("Search results :", "searchResults", searchResults["result"]) + Expect(searchResults["result"]).ShouldNot(BeNil(), "No results in search response '%s' on pod %s", searchResults, searchPod) + + resultLine := searchResults["result"].(map[string]interface{}) + testcaseEnvInst.Log.Info("Sync Search results host count:", "count", resultLine["count"].(string), "host", resultLine["host"].(string)) + testHostname := strings.Compare(resultLine["host"].(string), indexerName) + Expect(testHostname).To(Equal(0), "Incorrect search result hostname. 
Expect: %s Got: %s", indexerName, resultLine["host"].(string)) + + //########## SCALING UP VERIFICATIONS ######### + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + + // Verify no pods reset by checking the pod age + shcPodNames = []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + shcPodNames = append(shcPodNames, testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1)...) + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, shcPodNames) + + //############### SCALING DOWN ################ + // Get instance of current Search Head Cluster CR with latest config + shc = &enterpriseApi.SearchHeadCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-shc", shc) + + Expect(err).To(Succeed(), "Failed to get instance of Search Head Cluster") + + // Scale down Search Head Cluster + defaultSHReplicas = shc.Spec.Replicas + scaledSHReplicas = defaultSHReplicas - 1 + testcaseEnvInst.Log.Info("Scale down Search Head Cluster", "Current Replicas", defaultSHReplicas, "New Replicas", scaledSHReplicas) + + // Update Replicas of Search Head Cluster + shc.Spec.Replicas = int32(scaledSHReplicas) + err = deployment.UpdateCR(ctx, shc) + Expect(err).To(Succeed(), "Failed to scale down Search Head Cluster") + + // Ensure Search Head Cluster scales down and go to ScalingDown phase + testenv.VerifySearchHeadClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseScalingDown) + + // Get instance of current Indexer CR with latest config + err = deployment.GetInstance(ctx, idxcName, idxc) + Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") + defaultIndexerReplicas = idxc.Spec.Replicas + scaledIndexerReplicas = defaultIndexerReplicas - 1 + testcaseEnvInst.Log.Info("Scaling down Indexer Cluster", "Current Replicas", defaultIndexerReplicas, "New Replicas", scaledIndexerReplicas) + + // Update Replicas of Indexer Cluster + idxc.Spec.Replicas = int32(scaledIndexerReplicas) + err = deployment.UpdateCR(ctx, idxc) + Expect(err).To(Succeed(), "Failed to Scale down Indexer Cluster") + + // Ensure Indexer Cluster scales down and go to ScalingDown phase + testenv.VerifyIndexerClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseScalingDown, idxcName) + + // Ensure Indexer Cluster go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Search for data from removed indexer + searchPod = fmt.Sprintf(testenv.SearchHeadPod, 
deployment.GetName(), scaledSHReplicas-1) + searchString = fmt.Sprintf("index=%s host=%s | stats count by host", "main", indexerName) + searchResultsResp, err = testenv.PerformSearchSync(ctx, searchPod, searchString, deployment) + Expect(err).To(Succeed(), "Failed to execute search '%s' on pod %s", searchPod, searchString) + + // Verify result + searchResponse = strings.Split(searchResultsResp, "\n")[0] + jsonErr = json.Unmarshal([]byte(searchResponse), &searchResults) + Expect(jsonErr).To(Succeed(), "Failed to unmarshal JSON Search Results from response '%s'", searchResultsResp) + + testcaseEnvInst.Log.Info("Search results :", "searchResults", searchResults["result"]) + Expect(searchResults["result"]).ShouldNot(BeNil(), "No results in search response '%s' on pod %s", searchResults, searchPod) + + resultLine = searchResults["result"].(map[string]interface{}) + testcaseEnvInst.Log.Info("Sync Search results host count:", "count", resultLine["count"].(string), "host", resultLine["host"].(string)) + testHostname = strings.Compare(resultLine["host"].(string), indexerName) + Expect(testHostname).To(Equal(0), "Incorrect search result hostname. Expect: %s Got: %s", indexerName, resultLine["host"].(string)) + + //######## SCALING DOWN VERIFICATIONS ######### + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, shcPodNames) + + }) + }) + + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { + It(" c3gcp, managerappframeworkc3gcp, appframeworkgcp, c3_mgr_gcp_sanity: can deploy a C3 SVA and have apps installed locally on Cluster Manager and Deployer", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to Gcs + * Create app source with local scope for C3 SVA (Cluster Manager and Deployer) + * Prepare and deploy C3 CRD with app framework and wait for pods to be ready + ############# INITIAL VERIFICATIONS ########## + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are installed locally on Cluster Manager and Deployer + ############### UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for pods to be ready + ########### UPGRADE VERIFICATIONS ########### + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are copied, installed and upgraded on Cluster Manager and Deployer + */ + + //################## SETUP #################### + // Upload V1 apps to Gcs for Indexer Cluster + appVersion := "V1" + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, 
uploadedFiles...) + + // Upload V1 apps to Gcs for Search Head Cluster + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy C3 CRD + indexerReplicas := 3 + shReplicas := 3 + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############## INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + 
testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE APPS ################ + // Delete V1 apps on Gcs + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on Gcs", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to Gcs + appVersion = "V2" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs", appVersion)) + appFileList = testenv.GetAppFileList(appListV2) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########### UPGRADE VERIFICATIONS ########### + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { + It("c3, integration, managerappframeworkc3, appframework: can deploy a C3 SVA with apps installed locally on Cluster Manager and Deployer, cluster-wide on Peers and Search Heads, then upgrade them", func() { + + /* Test Steps + ################## SETUP #################### + * Split Applist into clusterlist and local list + * Upload V1 apps to Gcs for Indexer Cluster and Search Head Cluster for local and cluster scope + * Create app sources for Cluster Manager and Deployer with local and cluster scope + * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready + ######### INITIAL VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in 
App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Monitoring Console and on Search Heads and Indexers pods + ############### UPGRADE APPS ################ + * Upload V2 apps on Gcs + * Wait for all C3 pods to be ready + ############ FINAL VERIFICATIONS ############ + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V2 apps are copied and upgraded on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Split Applist into 2 lists for local and cluster install + appVersion := "V1" + appListLocal := appListV1[len(appListV1)/2:] + appListCluster := appListV1[:len(appListV1)/2] + + // Upload appListLocal list of apps to Gcs (to be used for local install) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for local install (local scope)", appVersion)) + + gcsTestDirIdxcLocal = "c3appfw-" + testenv.RandomDNSName(4) + localappFileList := testenv.GetAppFileList(appListLocal) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcLocal, localappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for local install for Indexers", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for local install (local scope)", appVersion)) + + gcsTestDirShcLocal = "c3appfw-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcLocal, localappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for local install for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of apps to Gcs (to be used for cluster-wide install) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for cluster-wide install (cluster scope)", appVersion)) + + gcsTestDirIdxcCluster = "c3appfw-cluster-" + testenv.RandomDNSName(4) + clusterappFileList := testenv.GetAppFileList(appListCluster) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcCluster, clusterappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (cluster scope) to Gcs test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of apps to Gcs (to be used for cluster-wide install) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for cluster-wide install (cluster scope)", appVersion)) + + gcsTestDirShcCluster = "c3appfw-cluster-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcCluster, clusterappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (cluster scope) to Gcs test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Create App framework Spec + appSourceNameLocalIdxc := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameLocalShc := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameClusterIdxc := "appframework-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameClusterShc := "appframework-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxcLocal := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShcLocal := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appSourceVolumeNameIdxcCluster := "appframework-test-volume-idxc-cluster-" + testenv.RandomDNSName(3) + appSourceVolumeNameShcCluster := "appframework-test-volume-shc-cluster-" + testenv.RandomDNSName(3) + + // Create App framework Spec for Cluster manager with scope local and append cluster scope + + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxcLocal, enterpriseApi.ScopeLocal, appSourceNameLocalIdxc, gcsTestDirIdxcLocal, 60) + volumeSpecCluster := []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(appSourceVolumeNameIdxcCluster, testenv.GetGCPEndpoint(), testcaseEnvInst.GetIndexSecretName(), "aws", "gcs", testenv.GetDefaultGCPRegion())} + + appFrameworkSpecIdxc.VolList = append(appFrameworkSpecIdxc.VolList, volumeSpecCluster...) + appSourceClusterDefaultSpec := enterpriseApi.AppSourceDefaultSpec{ + VolName: appSourceVolumeNameIdxcCluster, + Scope: enterpriseApi.ScopeCluster, + } + appSourceSpecCluster := []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceNameClusterIdxc, gcsTestDirIdxcCluster, appSourceClusterDefaultSpec)} + appFrameworkSpecIdxc.AppSources = append(appFrameworkSpecIdxc.AppSources, appSourceSpecCluster...) + + // Create App framework Spec for Search head cluster with scope local and append cluster scope + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShcLocal, enterpriseApi.ScopeLocal, appSourceNameLocalShc, gcsTestDirShcLocal, 60) + volumeSpecCluster = []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(appSourceVolumeNameShcCluster, testenv.GetGCPEndpoint(), testcaseEnvInst.GetIndexSecretName(), "aws", "gcs", testenv.GetDefaultGCPRegion())} + appFrameworkSpecShc.VolList = append(appFrameworkSpecShc.VolList, volumeSpecCluster...) + appSourceClusterDefaultSpec = enterpriseApi.AppSourceDefaultSpec{ + VolName: appSourceVolumeNameShcCluster, + Scope: enterpriseApi.ScopeCluster, + } + appSourceSpecCluster = []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceNameClusterShc, gcsTestDirShcCluster, appSourceClusterDefaultSpec)} + appFrameworkSpecShc.AppSources = append(appFrameworkSpecShc.AppSources, appSourceSpecCluster...) 
+ + // Create Single site Cluster and Search Head Cluster, with App Framework enabled on Cluster Manager and Deployer + testcaseEnvInst.Log.Info("Deploy Single site Indexer Cluster with both Local and Cluster scope for apps installation") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameLocalIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxcLocal, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + cmAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameClusterIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxcCluster, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameLocalShc, CrAppSourceVolumeName: appSourceVolumeNameShcLocal, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + shcAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameClusterShc, CrAppSourceVolumeName: appSourceVolumeNameShcCluster, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster} + clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + 
//############### UPGRADE APPS ################ + // Delete apps on Gcs + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on Gcs", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Redefine app lists as LDAP app isn't in V1 apps + appListLocal = appListV1[len(appListV1)/2:] + appListCluster = appListV1[:len(appListV1)/2] + + // Upload appListLocal list of V2 apps to Gcs (to be used for local install) + appVersion = "V2" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for local install (local scope)", appVersion)) + localappFileList = testenv.GetAppFileList(appListLocal) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcLocal, localappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for local install for Indexers", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcLocal, localappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for local install for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of V2 apps to Gcs (to be used for cluster-wide install) + clusterappFileList = testenv.GetAppFileList(appListCluster) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcCluster, clusterappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for cluster-wide install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcCluster, clusterappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for cluster-wide install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameClusterIdxc, clusterappFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## UPGRADE VERIFICATION ############# + cmAppSourceInfoLocal.CrAppVersion = appVersion + cmAppSourceInfoLocal.CrAppList = appListLocal + cmAppSourceInfoLocal.CrAppFileList = localappFileList + cmAppSourceInfoCluster.CrAppVersion = appVersion + cmAppSourceInfoCluster.CrAppList = appListCluster + cmAppSourceInfoCluster.CrAppFileList = clusterappFileList + shcAppSourceInfoLocal.CrAppVersion = appVersion + shcAppSourceInfoLocal.CrAppList = appListLocal + shcAppSourceInfoLocal.CrAppFileList = localappFileList + shcAppSourceInfoCluster.CrAppVersion = appVersion + shcAppSourceInfoCluster.CrAppList = appListCluster + shcAppSourceInfoCluster.CrAppFileList = clusterappFileList + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, clusterManagerBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + }) + }) + + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { + It("c3, integration, managerappframeworkc3, appframework: can deploy a C3 SVA with apps installed locally on Cluster Manager and Deployer, cluster-wide on Peers and Search Heads, then downgrade them", func() { + + /* Test Steps + ################## SETUP #################### + * Split Applist into clusterlist and local list + * Upload V2 apps to Gcs for Indexer Cluster and Search Head Cluster for local and cluster scope + * Create app sources for Cluster Manager and Deployer with local and cluster scope + * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready + ######### INITIAL VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V2 apps are copied, installed on Monitoring Console and on Search Heads and Indexers pods + ############### Downgrade APPS ################ + * Upload V1 apps on Gcs + * Wait for all C3 pods to be ready + ############ FINAL VERIFICATIONS ############ + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied and upgraded on Monitoring Console and on Search 
Heads and Indexers pods + */ + + //################## SETUP #################### + // Split Applist into 2 lists for local and cluster install + appVersion := "V2" + appListLocal := appListV2[len(appListV2)/2:] + appListCluster := appListV2[:len(appListV2)/2] + + // Upload appListLocal list of apps to Gcs (to be used for local install) for Idxc + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for local install (local scope)", appVersion)) + gcsTestDirIdxcLocal = "c3appfw-" + testenv.RandomDNSName(4) + localappFileList := testenv.GetAppFileList(appListLocal) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcLocal, localappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (local scope) to Gcs test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListLocal list of apps to Gcs (to be used for local install) for Shc + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for local install (local scope)", appVersion)) + gcsTestDirShcLocal = "c3appfw-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcLocal, localappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (local scope) to Gcs test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of apps to Gcs (to be used for cluster-wide install) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for cluster-wide install (cluster scope)", appVersion)) + gcsTestDirIdxcCluster = "c3appfw-cluster-" + testenv.RandomDNSName(4) + clusterappFileList := testenv.GetAppFileList(appListCluster) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcCluster, clusterappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (cluster scope) to Gcs test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of apps to Gcs (to be used for cluster-wide install) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for cluster-wide install (cluster scope)", appVersion)) + gcsTestDirShcCluster = "c3appfw-cluster-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcCluster, clusterappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (cluster scope) to Gcs test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Create App framework Spec + appSourceNameLocalIdxc := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameLocalShc := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameClusterIdxc := "appframework-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameClusterShc := "appframework-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxcLocal := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShcLocal := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appSourceVolumeNameIdxcCluster := "appframework-test-volume-idxc-cluster-" + testenv.RandomDNSName(3) + appSourceVolumeNameShcCluster := "appframework-test-volume-shc-cluster-" + testenv.RandomDNSName(3) + + // Create App framework Spec for Cluster manager with scope local and append cluster scope + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxcLocal, enterpriseApi.ScopeLocal, appSourceNameLocalIdxc, gcsTestDirIdxcLocal, 60) + volumeSpecCluster := []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(appSourceVolumeNameIdxcCluster, testenv.GetGCPEndpoint(), testcaseEnvInst.GetIndexSecretName(), "aws", "gcs", testenv.GetDefaultGCPRegion())} + appFrameworkSpecIdxc.VolList = append(appFrameworkSpecIdxc.VolList, volumeSpecCluster...) + appSourceClusterDefaultSpec := enterpriseApi.AppSourceDefaultSpec{ + VolName: appSourceVolumeNameIdxcCluster, + Scope: enterpriseApi.ScopeCluster, + } + appSourceSpecCluster := []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceNameClusterIdxc, gcsTestDirIdxcCluster, appSourceClusterDefaultSpec)} + appFrameworkSpecIdxc.AppSources = append(appFrameworkSpecIdxc.AppSources, appSourceSpecCluster...) + + // Create App framework Spec for Search head cluster with scope local and append cluster scope + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShcLocal, enterpriseApi.ScopeLocal, appSourceNameLocalShc, gcsTestDirShcLocal, 60) + volumeSpecCluster = []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(appSourceVolumeNameShcCluster, testenv.GetGCPEndpoint(), testcaseEnvInst.GetIndexSecretName(), "aws", "gcs", testenv.GetDefaultGCPRegion())} + appFrameworkSpecShc.VolList = append(appFrameworkSpecShc.VolList, volumeSpecCluster...) + appSourceClusterDefaultSpec = enterpriseApi.AppSourceDefaultSpec{ + VolName: appSourceVolumeNameShcCluster, + Scope: enterpriseApi.ScopeCluster, + } + appSourceSpecCluster = []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceNameClusterShc, gcsTestDirShcCluster, appSourceClusterDefaultSpec)} + appFrameworkSpecShc.AppSources = append(appFrameworkSpecShc.AppSources, appSourceSpecCluster...) 
+ + // Create Single site Cluster and Search Head Cluster, with App Framework enabled on Cluster Manager and Deployer + testcaseEnvInst.Log.Info("Deploy Single site Indexer Cluster with both Local and Cluster scope for apps installation") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameLocalIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxcLocal, CrPod: cmPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + cmAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameClusterIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxcCluster, CrPod: cmPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameLocalShc, CrAppSourceVolumeName: appSourceVolumeNameShcLocal, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + shcAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameClusterShc, CrAppSourceVolumeName: appSourceVolumeNameShcCluster, CrPod: deployerPod, CrAppVersion: "V2", CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster} + time.Sleep(2 * time.Minute) // FIXME adding sleep to see if verification succeedes + clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, 
testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############# DOWNGRADE APPS ################ + // Delete apps on Gcs + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on Gcs", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Redefine app lists as LDAP app isn't in V1 apps + appListLocal = appListV1[len(appListV1)/2:] + appListCluster = appListV1[:len(appListV1)/2] + + // Upload appListLocal list of V1 apps to Gcs (to be used for local install) + appVersion = "V1" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for local install (local scope)", appVersion)) + localappFileList = testenv.GetAppFileList(appListLocal) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcLocal, localappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for local install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcLocal, localappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for local install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of V2 apps to Gcs (to be used for cluster-wide install) + clusterappFileList = testenv.GetAppFileList(appListCluster) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcCluster, clusterappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for cluster-wide install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcCluster, clusterappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for cluster-wide install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameClusterIdxc, clusterappFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## DOWNGRADE VERIFICATION ############# + cmAppSourceInfoLocal.CrAppVersion = appVersion + cmAppSourceInfoLocal.CrAppList = appListLocal + cmAppSourceInfoLocal.CrAppFileList = localappFileList + cmAppSourceInfoCluster.CrAppVersion = appVersion + cmAppSourceInfoCluster.CrAppList = appListCluster + cmAppSourceInfoCluster.CrAppFileList = clusterappFileList + shcAppSourceInfoLocal.CrAppVersion = appVersion + shcAppSourceInfoLocal.CrAppList = appListLocal + shcAppSourceInfoLocal.CrAppFileList = localappFileList + shcAppSourceInfoCluster.CrAppVersion = appVersion + shcAppSourceInfoCluster.CrAppList = appListCluster + shcAppSourceInfoCluster.CrAppFileList = clusterappFileList + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, clusterManagerBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + }) + }) + + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { + It("integration, c3, managerappframeworkc3, appframework: can deploy a C3 SVA instance with App Framework enabled and install above 200MB of apps at once", func() { + + /* Test Steps + ################## SETUP #################### + * Create App Source for C3 SVA (Cluster Manager and Deployer) + * Add more apps than usual on Gcs for this test + * Prepare and deploy C3 CRD with app framework and wait for pods to be ready + ############### VERIFICATIONS ############### + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied, installed on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Creating a bigger list of apps to be installed than the default one + appList := []string{"splunk_app_db_connect", "splunk_app_aws", "Splunk_TA_microsoft-cloudservices", "Splunk_ML_Toolkit", "Splunk_Security_Essentials"} + appFileList := testenv.GetAppFileList(appList) + appVersion := "V1" + + // Download apps from Gcs + testcaseEnvInst.Log.Info("Download bigger amount of apps from Gcs for this test") + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps files") + + // Create consolidated list of app files + appList = append(appListV1, appList...) 
+ appFileList = testenv.GetAppFileList(appList) + + // Upload app to Gcs for Indexer Cluster + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload apps to Gcs test directory for Indexer Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload app to Gcs for Search Head Cluster + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload apps to Gcs test directory for Search Head Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Create Single site Cluster and Search Head Cluster, with App Framework enabled on Cluster Manager and Deployer + testcaseEnvInst.Log.Info("Create Single Site Indexer Cluster and Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############### VERIFICATIONS ############### + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: 
appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames}
+ allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo}
+ testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "")
+
+ // Verify no pods reset by checking the pod age
+ testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil)
+ })
+ })
+
+ Context("Single Site Indexer Cluster with Search Head Cluster (C3) with App Framework", func() {
+ It("integration, c3, managerappframeworkc3, appframework: can deploy a C3 SVA with App Framework enabled for manual update", func() {
+ /* Test Steps
+ ################## SETUP ####################
+ * Upload V1 apps to Gcs for Monitoring Console
+ * Create app source for Monitoring Console
+ * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready
+ * Upload V1 apps to Gcs
+ * Create app source with manual poll for C3 SVA (Cluster Manager and Deployer)
+ * Prepare and deploy C3 CRD with app framework and wait for pods to be ready
+ ########## INITIAL VERIFICATION #############
+ * Verify Apps Downloaded in App Deployment Info
+ * Verify Apps Copied in App Deployment Info
+ * Verify App Package is deleted from Operator Pod
+ * Verify Apps Installed in App Deployment Info
+ * Verify App Package is deleted from Splunk Pod
+ * Verify apps are installed locally on Cluster Manager and Deployer
+ ############### UPGRADE APPS ################
+ * Upgrade apps in app sources
+ * Wait for pods to be ready
+ ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############
+ * Verify Apps are not updated
+ ############ ENABLE MANUAL POLL ############
+ * Verify Manual Poll is disabled after the poll is triggered
+ ############## UPGRADE VERIFICATIONS ############
+ * Verify Apps Downloaded in App Deployment Info
+ * Verify Apps Copied in App Deployment Info
+ * Verify App Package is deleted from Operator Pod
+ * Verify Apps Installed in App Deployment Info
+ * Verify App Package is deleted from Splunk Pod
+ * Verify App Directory is under splunk path
+ * Verify apps are installed locally on Cluster Manager and Deployer
+ */
+
+ // ################## SETUP ####################
+ // Upload V1 apps to Gcs for Monitoring Console
+ appVersion := "V1"
+ appFileList := testenv.GetAppFileList(appListV1)
+ testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Monitoring Console", appVersion))
+ gcsTestDirMC := "c3appfw-mc-" + testenv.RandomDNSName(4)
+ uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV1)
+ Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Monitoring Console %s", appVersion, testGcsBucket))
+ uploadedApps = append(uploadedApps, uploadedFiles...)
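+
+ // Note: the trailing argument to testenv.GenerateAppFrameworkSpec is the app repo
+ // poll interval in seconds. The specs in this manual-update test pass 0, which is
+ // assumed to disable periodic polling so that new app packages are only picked up
+ // once the manual app update config map is toggled later in the test.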
+ + // Prepare Monitoring Console spec with its own app source + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 0) + + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload V1 apps to Gcs for Indexer Cluster + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to Gcs for Search Head Cluster + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 0) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 0) + + // Create Single site Cluster and Search Head Cluster, with App Framework enabled on Cluster Manager and Deployer + indexerReplicas := 3 + shReplicas := 3 + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, mcName, "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with App framework") + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //######### INITIAL VERIFICATIONS ############# + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := 
[]testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + // ############### UPGRADE APPS ################ + // Delete V1 apps on Gcs + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on Gcs", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to Gcs for C3 + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Uploading %s apps to Gcs", appVersion)) + + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to Gcs for Monitoring Console + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Monitoring Console", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + appVersion = "V1" + allPodNames := append(idxcPodNames, shcPodNames...) 
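+
+ // With periodic polling disabled (interval 0), the operator should not have picked
+ // up the V2 packages yet, so the pods are still expected to report the V1 apps here.
+ // The upgrade is only triggered further below by flipping the per-CR entries in the
+ // manual app update config map from "status: off" to "status: on"; the operator is
+ // expected to reset them to "off" once the poll has run.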
+ testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPodNames, appListV1, true, "enabled", false, true) + + // ############ ENABLE MANUAL POLL ############ + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err := testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["ClusterManager"] = strings.Replace(config.Data["ClusterManager"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["SearchHeadCluster"] = strings.Replace(config.Data["SearchHeadCluster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["MonitoringConsole"] = strings.Replace(config.Data["MonitoringConsole"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // Verify config map set back to off after poll trigger + testcaseEnvInst.Log.Info("Verify config map set back to off after poll trigger for app", "version", appVersion) + config, _ = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(strings.Contains(config.Data["ClusterManager"], "status: off") && strings.Contains(config.Data["SearchHeadCluster"], "status: off") && strings.Contains(config.Data["MonitoringConsole"], "status: off")).To(Equal(true), "Config map update not complete") + + // ############## UPGRADE VERIFICATIONS ############ + appVersion = "V2" + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV2 + mcAppSourceInfo.CrAppFileList = 
testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, clusterManagerBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { + It("integration, c3, managerappframeworkc3, appframework: can deploy a C3 SVA and have apps installed and updated locally on Cluster Manager and Deployer for manual polling", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to Gcs + * Create app source with local scope for C3 SVA (Cluster Manager and Deployer) + * Prepare and deploy C3 CRD with app framework and wait for pods to be ready + ############# INITIAL VERIFICATION ########## + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are installed locally on Cluster Manager and Deployer + ############### UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for pods to be ready + ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + * Verify Apps are not updated + ############ ENABLE MANUAL POLL ############ + * Verify Manual Poll disabled after the poll is triggered + ########### UPGRADE VERIFICATIONS ########### + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are copied, installed and upgraded on Cluster Manager and Deployer + */ + + //################## SETUP #################### + // Upload V1 apps to Gcs for Indexer Cluster + appVersion := "V1" + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to Gcs for Search Head Cluster + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
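+
+ // Unlike the cluster-scope variant above, this test generates both app sources with
+ // enterpriseApi.ScopeLocal, so the apps are expected to be installed only on the
+ // Cluster Manager and Deployer pods themselves and not pushed to the indexer peers
+ // or search head cluster members.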
+ + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, gcsTestDirIdxc, 0) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, gcsTestDirShc, 0) + + // Deploy C3 CRD + indexerReplicas := 3 + shReplicas := 3 + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############## INITIAL VERIFICATION ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE APPS ################ + // Delete V1 apps on Gcs + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on Gcs", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to Gcs + appVersion = "V2" + 
testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs", appVersion)) + appFileList = testenv.GetAppFileList(appListV2) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############ ENABLE MANUAL POLL ############ + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err := testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["ClusterManager"] = strings.Replace(config.Data["ClusterManager"], "off", "on", 1) + + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["SearchHeadCluster"] = strings.Replace(config.Data["SearchHeadCluster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ########## Verify Manual Poll config map disabled after the poll is triggered ################# + // Verify config map set back to off after poll trigger + testcaseEnvInst.Log.Info("Verify config map set back to off after poll trigger for app", "version", appVersion) + config, _ = 
testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(strings.Contains(config.Data["ClusterManager"], "status: off") && strings.Contains(config.Data["SearchHeadCluster"], "status: off")).To(Equal(true), "Config map update not complete") + + //########### UPGRADE VERIFICATIONS ########### + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { + It("c3, integration, managerappframeworkc3, appframework: can deploy a C3 SVA with apps installed locally on Cluster Manager and Deployer, cluster-wide on Peers and Search Heads, then upgrade them via a manual poll", func() { + + /* Test Steps + ################## SETUP #################### + * Split Applist into clusterlist and local list + * Upload V1 apps to Gcs for Indexer Cluster and Search Head Cluster for local and cluster scope + * Create app sources for Cluster Manager and Deployer with local and cluster scope + * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready + ######### INITIAL VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Monitoring Console and on Search Heads and Indexers pods + ############### UPGRADE APPS ################ + * Upload V2 apps on Gcs + * Wait for all C3 pods to be ready + ############ FINAL VERIFICATIONS ############ + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V2 apps are copied and upgraded on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Split Applist into 2 lists for local and cluster install + appVersion := "V1" + appListLocal := appListV1[len(appListV1)/2:] + appListCluster := appListV1[:len(appListV1)/2] + + // Upload appListLocal list of apps to Gcs (to be used for local install) for Idxc + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for local install (local scope)", appVersion)) + gcsTestDirIdxcLocal = "c3appfw-" + testenv.RandomDNSName(4) + localappFileList := testenv.GetAppFileList(appListLocal) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcLocal, localappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (local scope) to Gcs test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Upload appListLocal list of apps to Gcs (to be used for local install) for Shc + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for local install (local scope)", appVersion)) + gcsTestDirShcLocal = "c3appfw-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcLocal, localappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (local scope) to Gcs test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of apps to Gcs (to be used for cluster-wide install) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for cluster-wide install (cluster scope)", appVersion)) + gcsTestDirIdxcCluster = "c3appfw-cluster-" + testenv.RandomDNSName(4) + clusterappFileList := testenv.GetAppFileList(appListCluster) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcCluster, clusterappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (cluster scope) to Gcs test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of apps to Gcs (to be used for cluster-wide install) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for cluster-wide install (cluster scope)", appVersion)) + gcsTestDirShcCluster = "c3appfw-cluster-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcCluster, clusterappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (cluster scope) to Gcs test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec + appSourceNameLocalIdxc := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameLocalShc := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameClusterIdxc := "appframework-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameClusterShc := "appframework-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxcLocal := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShcLocal := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appSourceVolumeNameIdxcCluster := "appframework-test-volume-idxc-cluster-" + testenv.RandomDNSName(3) + appSourceVolumeNameShcCluster := "appframework-test-volume-shc-cluster-" + testenv.RandomDNSName(3) + + // Create App framework Spec for Cluster manager with scope local and append cluster scope + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxcLocal, enterpriseApi.ScopeLocal, appSourceNameLocalIdxc, gcsTestDirIdxcLocal, 0) + volumeSpecCluster := []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(appSourceVolumeNameIdxcCluster, testenv.GetGCPEndpoint(), testcaseEnvInst.GetIndexSecretName(), "aws", "gcs", testenv.GetDefaultGCPRegion())} + appFrameworkSpecIdxc.VolList = append(appFrameworkSpecIdxc.VolList, volumeSpecCluster...) 
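+
+ // A second, cluster-scope volume was appended to the local-scope spec above; the
+ // matching AppSourceDefaultSpec/AppSourceSpec entries below use
+ // enterpriseApi.ScopeCluster, so a single CR carries both locally installed apps and
+ // cluster-scoped apps that are pushed out to the peers and search heads.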
+ appSourceClusterDefaultSpec := enterpriseApi.AppSourceDefaultSpec{ + VolName: appSourceVolumeNameIdxcCluster, + Scope: enterpriseApi.ScopeCluster, + } + appSourceSpecCluster := []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceNameClusterIdxc, gcsTestDirIdxcCluster, appSourceClusterDefaultSpec)} + appFrameworkSpecIdxc.AppSources = append(appFrameworkSpecIdxc.AppSources, appSourceSpecCluster...) + + // Create App framework Spec for Search head cluster with scope local and append cluster scope + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShcLocal, enterpriseApi.ScopeLocal, appSourceNameLocalShc, gcsTestDirShcLocal, 0) + volumeSpecCluster = []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(appSourceVolumeNameShcCluster, testenv.GetGCPEndpoint(), testcaseEnvInst.GetIndexSecretName(), "aws", "gcs", testenv.GetDefaultGCPRegion())} + appFrameworkSpecShc.VolList = append(appFrameworkSpecShc.VolList, volumeSpecCluster...) + appSourceClusterDefaultSpec = enterpriseApi.AppSourceDefaultSpec{ + VolName: appSourceVolumeNameShcCluster, + Scope: enterpriseApi.ScopeCluster, + } + appSourceSpecCluster = []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceNameClusterShc, gcsTestDirShcCluster, appSourceClusterDefaultSpec)} + appFrameworkSpecShc.AppSources = append(appFrameworkSpecShc.AppSources, appSourceSpecCluster...) + + // Create Single site Cluster and Search Head Cluster, with App Framework enabled on Cluster Manager and Deployer + testcaseEnvInst.Log.Info("Deploy Single site Indexer Cluster with both Local and Cluster scope for apps installation") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameLocalIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxcLocal, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + cmAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameClusterIdxc, CrAppSourceVolumeName: 
appSourceVolumeNameIdxcCluster, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameLocalShc, CrAppSourceVolumeName: appSourceVolumeNameShcLocal, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + shcAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameClusterShc, CrAppSourceVolumeName: appSourceVolumeNameShcCluster, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster} + clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE APPS ################ + // Delete apps on Gcs + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on Gcs", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Redefine app lists as LDAP app isn't in V1 apps + appListLocal = appListV1[len(appListV1)/2:] + appListCluster = appListV1[:len(appListV1)/2] + + // Upload appListLocal list of V2 apps to Gcs (to be used for local install) + appVersion = "V2" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for local install (local scope)", appVersion)) + localappFileList = testenv.GetAppFileList(appListLocal) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcLocal, localappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for local install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcLocal, localappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for local install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of V2 apps to Gcs (to be used for cluster-wide install) + clusterappFileList = testenv.GetAppFileList(appListCluster) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcCluster, clusterappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for cluster-wide install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcCluster, clusterappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for cluster-wide install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // ############ ENABLE MANUAL POLL ############ + + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err := testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["ClusterManager"] = strings.Replace(config.Data["ClusterManager"], "off", "on", 1) + config.Data["SearchHeadCluster"] = strings.Replace(config.Data["SearchHeadCluster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameClusterIdxc, clusterappFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ########## Verify Manual Poll config map disabled after the poll is triggered ################# + + // Verify config map set back to off after poll trigger + testcaseEnvInst.Log.Info("Verify config map set back to off after poll trigger for app", "version", appVersion) + config, _ = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(strings.Contains(config.Data["ClusterManager"], "status: off") && strings.Contains(config.Data["SearchHeadCluster"], "status: off")).To(Equal(true), "Config map update not complete") + + //########## UPGRADE VERIFICATION ############# + cmAppSourceInfoLocal.CrAppVersion = appVersion + cmAppSourceInfoLocal.CrAppList = appListLocal + cmAppSourceInfoLocal.CrAppFileList = localappFileList + cmAppSourceInfoCluster.CrAppVersion = appVersion + cmAppSourceInfoCluster.CrAppList = appListCluster + cmAppSourceInfoCluster.CrAppFileList = clusterappFileList + shcAppSourceInfoLocal.CrAppVersion = appVersion + shcAppSourceInfoLocal.CrAppList = appListLocal + shcAppSourceInfoLocal.CrAppFileList = localappFileList + shcAppSourceInfoCluster.CrAppVersion = appVersion + shcAppSourceInfoCluster.CrAppList = appListCluster + shcAppSourceInfoCluster.CrAppFileList = clusterappFileList + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, clusterManagerBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Single Site Indexer Cluster with Search Head Cluster (C3) and App Framework", func() { + It("integration, c3, managerappframeworkc3, appframework: can deploy a C3, add new apps to app source while install is in progress and have all apps installed locally on Cluster Manager and Deployer", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps 
to Gcs for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload big-size app to Gcs for Indexer Cluster and Search Head Cluster + * Create app sources for Cluster Manager and Deployer + * Prepare and deploy C3 CRD with app framework + ############## VERIFICATIONS ################ + * Verify app installation is in progress on Cluster Manager and Deployer + * Upload more apps from Gcs during bigger app install + * Wait for polling interval to pass + * Verify all apps are installed on Cluster Manager and Deployer + */ + + //################## SETUP #################### + // Upload V1 apps to Gcs for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Monitoring Console", appVersion)) + gcsTestDirMC := "c3appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Prepare Monitoring Console spec with its own app source + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Download all apps from Gcs + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList = testenv.GetAppFileList(appList) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download big-size app") + + // Upload big-size app to Gcs for Cluster Manager + appList = testenv.BigSingleApp + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload big-size app to Gcs for Cluster Manager") + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to Gcs test directory for Cluster Manager") + uploadedApps = append(uploadedApps, uploadedFiles...) 
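+
+ // testenv.BigSingleApp is used here because its larger package is expected to keep
+ // the download/install phase open long enough for the test to add more apps to the
+ // same app source while the first install is still in progress, which is the
+ // scenario this test case exercises.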
+ + // Upload big-size app to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info("Upload big-size app to Gcs for Search Head Cluster") + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to Gcs test directory for Search Head Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for C3 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + cm, _, _, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, mcName, "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Verify App installation is in progress on Cluster Manager + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgPodCopyComplete) + + // Upload more apps to Gcs for Cluster Manager + appList = testenv.ExtraApps + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload more apps to Gcs for Cluster Manager") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to Gcs test directory for Cluster Manager") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload more apps to Gcs for Deployer + testcaseEnvInst.Log.Info("Upload more apps to Gcs for Deployer") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to Gcs test directory for Deployer") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Wait for polling interval to pass + testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Verify all apps are installed on Cluster Manager + appList = append(testenv.BigSingleApp, testenv.ExtraApps...) 
+ cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())}
+ testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on Cluster Manager", appList))
+ testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), cmPod, appList, true, "enabled", false, false)
+
+ // Ensure Search Head Cluster goes to Ready phase
+ testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst)
+
+ // Verify all apps are installed on Deployer
+ appList = append(testenv.BigSingleApp, testenv.ExtraApps...)
+ deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())}
+ testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on Deployer", appList))
+ testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), deployerPod, appList, true, "enabled", false, false)
+ })
+ })
+
+ Context("Single Site Indexer Cluster with Search Head Cluster (C3) and App Framework", func() {
+ It("integration, c3, managerappframeworkc3, appframework: can deploy a C3, add new apps to app source while install is in progress and have all apps installed cluster-wide", func() {
+
+ /* Test Steps
+ ################## SETUP ####################
+ * Upload V1 apps to Gcs for Monitoring Console
+ * Create app source for Monitoring Console
+ * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready
+ * Upload big-size app to Gcs for Indexer Cluster and Search Head Cluster
+ * Create app sources for Cluster Manager and Deployer
+ * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready
+ ############## VERIFICATIONS ################
+ * Verify App installation is in progress on Cluster Manager and Deployer
+ * Upload more apps to Gcs while the bigger app install is in progress
+ * Wait for polling interval to pass
+ * Verify all apps are installed cluster-wide on Indexers and Search Heads
+ */
+
+ //################## SETUP ####################
+ // Upload V1 apps to Gcs for Monitoring Console
+ appVersion := "V1"
+ appFileList := testenv.GetAppFileList(appListV1)
+ testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Monitoring Console", appVersion))
+ gcsTestDirMC := "c3appfw-mc-" + testenv.RandomDNSName(4)
+ uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV1)
+ Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Monitoring Console %s", appVersion, testGcsBucket))
+ uploadedApps = append(uploadedApps, uploadedFiles...)
+ + // Prepare Monitoring Console spec with its own app source + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Download all apps from Gcs + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList = testenv.GetAppFileList(appList) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download big-size app") + + // Upload big-size app to Gcs for Cluster Manager + appList = testenv.BigSingleApp + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload big-size app to Gcs for Cluster Manager") + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to Gcs test directory for Cluster Manager") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload big-size app to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info("Upload big-size app to Gcs for Search Head Cluster") + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to Gcs test directory for Search Head Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) 
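+
+ // The app sources below use enterpriseApi.ScopeCluster, so the packages are expected
+ // to reach the indexer peers and search head members via the Cluster Manager and
+ // Deployer bundle push; the final checks in this test therefore run against the peer
+ // and search head pods rather than only the Cluster Manager and Deployer.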
+ + // Create App framework Spec for C3 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, mcName, "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgPodCopyComplete) + + // Upload more apps to Gcs for Cluster Manager + appList = testenv.ExtraApps + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload more apps to Gcs for Cluster Manager") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to Gcs test directory for Cluster Manager") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload more apps to Gcs for Deployer + testcaseEnvInst.Log.Info("Upload more apps to Gcs for Deployer") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to Gcs test directory for Deployer") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Wait for polling interval to pass + testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify all apps are installed on indexers + appList = append(testenv.BigSingleApp, testenv.ExtraApps...) 
+ appFileList = testenv.GetAppFileList(appList)
+ idxcPodNames := testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1)
+ testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on indexers", appList))
+ testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), idxcPodNames, appList, true, "enabled", false, true)
+
+ // Ensure Search Head Cluster goes to Ready phase
+ testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst)
+
+ // Wait for polling interval to pass
+ testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName()+"-shc", shc.Kind, appSourceNameShc, appFileList)
+
+ // Verify all apps are installed on Search Heads
+ shcPodNames := testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1)
+ testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on Search Heads", appList))
+ testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), shcPodNames, appList, true, "enabled", false, true)
+
+ })
+ })
+
+ Context("Single Site Indexer Cluster with Search Head Cluster (C3) and App Framework", func() {
+ It("integration, c3, managerappframeworkc3, appframework: can deploy a C3 SVA with App Framework enabled and reset operator pod while app install is in progress", func() {
+
+ /* Test Steps
+ ################## SETUP ####################
+ * Upload V1 apps to Gcs for Indexer Cluster and Search Head Cluster
+ * Create app sources for Cluster Manager and Deployer
+ * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready
+ * While app install is in progress, restart the operator
+ ######### VERIFICATIONS #############
+ * Verify Apps are Downloaded in App Deployment Info
+ * Verify Apps Copied in App Deployment Info
+ * Verify App Package is deleted from Operator Pod
+ * Verify Apps Installed in App Deployment Info
+ * Verify App Package is deleted from Splunk Pod
+ * Verify bundle push is successful
+ * Verify V1 apps are copied and installed on Search Heads and Indexers pods
+ */
+
+ //################## SETUP ####################
+ // Download all apps from Gcs
+ appList := append(testenv.BigSingleApp, testenv.ExtraApps...)
+ appFileList := testenv.GetAppFileList(appList)
+ err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList)
+ Expect(err).To(Succeed(), "Unable to download big-size app")
+
+ // Upload V1 apps to Gcs for Indexer Cluster
+ appVersion := "V1"
+ testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion))
+ gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4)
+ uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1)
+ Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket))
+ uploadedApps = append(uploadedApps, uploadedFiles...)
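+
+ // Note on the restart scenario in this test: the app framework is assumed to persist
+ // its deployment state in the CR status, so after the operator pod is deleted
+ // mid-install the replacement pod should resume reconciliation and finish the app
+ // installs without restarting any Splunk pods (verified via VerifyNoPodReset below).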
+ + // Upload V1 apps to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for C3 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Verify App installation is in progress on Cluster Manager + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgInstallPending) + + // Delete Operator pod while Install in progress + testenv.DeleteOperatorPod(testcaseEnvInst) + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //######### VERIFICATIONS ############# + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, 
CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + }) + }) + + Context("Single Site Indexer Cluster with Search Head Cluster (C3) and App Framework", func() { + It("integration, c3, managerappframeworkc3, appframework: can deploy a C3 SVA with App Framework enabled and reset operator pod while app download is in progress", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to Gcs for Indexer Cluster and Search Head Cluster + * Create app sources for Cluster Manager and Deployer + * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready + * While app download is in progress, restart the operator + ######### VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Download all apps from Gcs + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList := testenv.GetAppFileList(appList) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download big-size app") + + // Upload V1 apps to Gcs for Indexer Cluster + appVersion := "V1" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion)) + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
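+ // The big-size app is used here so that the download phase is still underway when the operator pod is deleted later in this spec.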
+ + // Create App framework Spec for C3 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Verify App Download is in progress on Cluster Manager + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgDownloadComplete, enterpriseApi.AppPkgDownloadPending) + + // Delete Operator pod while Install in progress + testenv.DeleteOperatorPod(testcaseEnvInst) + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //######### VERIFICATIONS ############# + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, 
testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + }) + }) + + Context("Single Site Indexer Cluster with Search Head Cluster (C3) and App Framework", func() { + It("integration, c3, managerappframeworkc3, appframework: can deploy a C3 SVA with App Framework enabled, install an app, then disable it by using a disabled version of the app and then remove it from app source", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to Gcs for Indexer Cluster and Search Head Cluster + * Create app sources for Cluster Manager and Deployer + * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready + ######### VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Monitoring Console and on Search Heads and Indexers pods + * Disable the app + * Delete the app from Gcs + * Check for repo state in App Deployment Info + */ + + //################## SETUP #################### + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + // Upload V1 apps to Gcs for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion)) + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
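+ // Only the first app in appListV1 is disabled and then removed from the bucket later in this spec; the remaining uploaded apps stay installed.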
+ + // Create App framework Spec for C3 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // //######### INITIAL VERIFICATIONS ############# + idxcPodNames := testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames := testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify repo state on App to be disabled to be 1 (i.e app present on Gcs bucket) + appName := appListV1[0] + appFileName := testenv.GetAppFileList([]string{appName}) + testenv.VerifyAppRepoState(ctx, deployment, testcaseEnvInst, cm.Name, cm.Kind, appSourceNameIdxc, 1, appFileName[0]) + + // Disable the app + testenv.DisableAppsToGCP(downloadDirV1, appFileName, gcsTestDirIdxc) + + // Check for changes in App phase to determine if next poll has been triggered + 
testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileName) + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Wait for App state to update after config file change + testenv.WaitforAppInstallState(ctx, deployment, testcaseEnvInst, idxcPodNames, testcaseEnvInst.GetName(), appName, "disabled", true) + + // Delete the file from Gcs + gcsFilepath := filepath.Join(gcsTestDirIdxc, appFileName[0]) + err = testenv.DeleteFileOnGCP(testGcsBucket, gcsFilepath) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to delete %s app on Gcs test directory", appFileName[0])) + + // Verify repo state is set to 2 (i.e app deleted from Gcs bucket) + testenv.VerifyAppRepoState(ctx, deployment, testcaseEnvInst, cm.Name, cm.Kind, appSourceNameIdxc, 2, appFileName[0]) + + }) + }) + + Context("Single Site Indexer Cluster with Search Head Cluster (C3) and App Framework", func() { + It("integration, c3, managerappframeworkc3, appframework: can deploy a C3 SVA with App Framework enabled and update apps after app download is completed", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to Gcs for Indexer Cluster and Search Head Cluster + * Create app sources for Cluster Manager and Deployer + * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready + * While app download is completed, upload new versions of the apps + ######### VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Search Heads and Indexers pods + ######### UPGRADE VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Download all apps from Gcs + appVersion := "V1" + appListV1 := []string{appListV1[0]} + appFileList := testenv.GetAppFileList(appListV1) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps") + + // Upload V1 apps to Gcs for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion)) + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
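+ // Note: the local appListV1 declared above shadows the suite-level list, narrowing this upgrade case to a single app.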
+ + // Upload V1 apps to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for C3 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, gcsTestDirIdxc, 120) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, gcsTestDirShc, 120) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Verify App Download is in progress on Cluster Manager + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgPodCopyPending) + + // Upload V2 apps to Gcs for Indexer Cluster + appVersion = "V2" + appListV2 := []string{appListV2[0]} + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
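+ // V2 packages are uploaded to the same GCS directories so that the next app framework poll picks up the new versions; the upgrade is verified below.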
+ + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //######### VERIFICATIONS ############# + appVersion = "V1" + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())}, appListV1, false, "enabled", false, false) + + // Check for changes in App phase to determine if next poll has been triggered + appFileList = testenv.GetAppFileList(appListV2) + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + //############ UPGRADE VERIFICATIONS ############ + appVersion = "V2" + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV2, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV2, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + }) + }) + + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { + It("c3, integration, managerappframeworkc3, appframework: can deploy a C3 SVA and install a bigger volume of apps than the operator PV disk space", func() { + + /* Test Steps + ################## SETUP #################### + * Upload 15 apps of 100MB size each to Gcs for Indexer Cluster and Search Head Cluster for cluster scope + * Create app sources for Cluster Master and Deployer with cluster scope + * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready + ######### INITIAL VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied, installed on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Create a large 
file on Operator pod + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + err := testenv.CreateDummyFileOnOperator(ctx, deployment, opPod, testenv.AppDownloadVolume, "1G", "test_file.img") + Expect(err).To(Succeed(), "Unable to create file on operator") + filePresentOnOperator = true + + // Download apps for test + appVersion := "V1" + appList := testenv.PVTestApps + appFileList := testenv.GetAppFileList(appList) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsPVTestApps, downloadDirPVTestApps, appFileList) + Expect(err).To(Succeed(), "Unable to download app files") + + // Upload apps to Gcs for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion)) + gcsTestDirIdxc := "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirPVTestApps) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload apps to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search head Cluster", appVersion)) + gcsTestDirShc := "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirPVTestApps) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Maximum apps to be downloaded in parallel + maxConcurrentAppDownloads := 30 + + // Create App framework Spec for C3 + appSourceNameIdxc := "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc := "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecIdxc.MaxConcurrentAppDownloads = uint64(maxConcurrentAppDownloads) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + appFrameworkSpecShc.MaxConcurrentAppDownloads = uint64(maxConcurrentAppDownloads) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + 
splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Single Site Indexer Cluster with Search Head Cluster (C3) and App Framework", func() { + It("integration, c3, managerappframeworkc3, appframework: can deploy a C3 SVA with App Framework enabled and delete apps from app directory when download is complete", func() { + + /* Test Steps + ################## SETUP #################### + * Upload big-size app to Gcs for Indexer Cluster and Search Head Cluster + * Create app sources for Cluster Manager and Deployer + * Prepare and deploy C3 CRD with app framework and wait for the pods to be ready + * When app download is complete, delete apps from app directory + ######### VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Download big size apps from Gcs + appList := testenv.BigSingleApp + appFileList := testenv.GetAppFileList(appList) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download big-size app") + + // Upload big size app to Gcs for Indexer Cluster + appVersion := "V1" + testcaseEnvInst.Log.Info("Upload big size app to Gcs for Indexer Cluster") + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big size to Gcs test directory for Indexer Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) 
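+ // Later in this spec the downloaded package is deleted from the operator pod's app download directory to check that installation still completes.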
+ + // Upload big size app to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info("Upload big size app to Gcs for Search Head Cluster") + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big size to Gcs test directory for Search Head Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for C3 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + shReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Verify App Download is completed on Cluster Manager + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgPodCopyComplete, enterpriseApi.AppPkgPodCopyPending) + + //Delete apps from app directory when app download is complete + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + podDownloadPath := filepath.Join(splcommon.AppDownloadVolume, "downloadedApps", testenvInstance.GetName(), cm.Kind, deployment.GetName(), enterpriseApi.ScopeCluster, appSourceNameIdxc, testenv.AppInfo[appList[0]]["filename"]) + err = testenv.DeleteFilesOnOperatorPod(ctx, deployment, opPod, []string{podDownloadPath}) + Expect(err).To(Succeed(), "Unable to delete file on pod") + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //######### VERIFICATIONS ############# + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, 
CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + }) + }) + + Context("Single Site Indexer Cluster with Search Head Cluster (C3) and App Framework", func() { + It(" c3gcp, managerappframeworkc3gcp, appframeworkgcp, c3_mgr_gcp_sanity: can deploy a C3 SVA with App Framework enabled and check isDeploymentInProgressFlag for CM and SHC CR's", func() { + + /* + Test Steps + ################## SETUP ################## + * Upload V1 apps to Gcs for Indexer Cluster and Search Head Cluster + * Prepare and deploy C3 CRD with app framework + * Verify IsDeploymentInProgress is set + * Wait for the pods to be ready + */ + + //################## SETUP #################### + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + + // Upload V1 apps to Gcs for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Indexer Cluster", appVersion)) + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to Gcs for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to Gcs for Search Head Cluster", appVersion)) + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcs test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
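+ // The checks below expect the isDeploymentInProgress status flag to be set on the ClusterManager and SearchHeadCluster CRs before each reaches the Ready phase.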
+ + // Create App framework Spec for C3 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy C3 CRD + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster with Search Head Cluster") + indexerReplicas := 3 + cm, _, shc, err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx, deployment.GetName(), indexerReplicas, true, appFrameworkSpecIdxc, appFrameworkSpecShc, "", "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with Search Head Cluster") + + // Verify IsDeploymentInProgress Flag is set to true for Cluster Manager CR + testcaseEnvInst.Log.Info("Checking isDeploymentInProgress Flag") + testenv.VerifyIsDeploymentInProgressFlagIsSet(ctx, deployment, testcaseEnvInst, cm.Name, cm.Kind) + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Verify IsDeploymentInProgress Flag is set to true for SHC CR + testcaseEnvInst.Log.Info("Checking isDeploymentInProgress Flag") + testenv.VerifyIsDeploymentInProgressFlagIsSet(ctx, deployment, testcaseEnvInst, shc.Name, shc.Kind) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + }) + }) + + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { + It("integration, c3: can deploy a C3 SVA and a Standalone, then add that Standalone as a Search Head to the cluster", func() { + + /* Test Steps + ################## SETUP ################### + * Deploy C3 CRD + * Deploy Standalone with clusterMasterRef + ############# VERIFICATION ################# + * Verify clusterMasterRef is present in Standalone's server.conf file + */ + //################## SETUP #################### + // Deploy C3 CRD + indexerReplicas := 3 + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") + err := deployment.DeploySingleSiteCluster(ctx, deployment.GetName(), indexerReplicas, false, "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") + + // Create spec with clusterMasterRef for Standalone + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: deployment.GetName(), + }, + }, + } + + // Deploy Standalone with clusterMasterRef + testcaseEnvInst.Log.Info("Deploy Standalone with clusterManagerRef") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with clusterMasterRef") + + // Ensure that the 
Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Ensure that the Standalone goes to Ready phase + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############# VERIFICATION ################# + // Verify Standalone is configured as a Search Head for the Cluster Manager + standalonePodName := fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0) + Expect(testenv.CheckSearchHeadOnCM(ctx, deployment, standalonePodName)).To(Equal(true)) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { + It("integration, c3, managerappframeworkc3, appframework: can deploy a C3 SVA and have ES app installed on Search Head Cluster", func() { + + /* Test Steps + ################## SETUP #################### + * Upload ES app to Gcs + * Upload TA add-on app to location for Indexer cluster + * Create App Source with 'ScopeClusterWithPreConfig' scope for C3 SVA + * Prepare and deploy C3 CRD with app framework and wait for pods to be ready + ################## VERIFICATION ############# + * Verify ES app is installed on Deployer and on Search Heads + * Verify TA add-on app is installed on indexers + ################## UPGRADE VERIFICATION ############# + * Update ES app on Gcs location + * Verify updated ES app is installed on Deployer and on Search Heads + */ + + //################## SETUP #################### + // Download ES app from Gcs + appVersion := "V1" + testcaseEnvInst.Log.Info("Download ES app from Gcs") + esApp := []string{"SplunkEnterpriseSecuritySuite"} + appFileList := testenv.GetAppFileList(esApp) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download ES app file from Gcs") + + // Download Technology add-on app from Gcs + testcaseEnvInst.Log.Info("Download Technology add-on app from Gcs") + taApp := []string{"Splunk_TA_ForIndexers"} + appFileListIdxc := testenv.GetAppFileList(taApp) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileListIdxc) + Expect(err).To(Succeed(), "Unable to download ES app file from Gcs") + + // Create directory for file upload to Gcs + gcsTestDirShc = "c3appfw-shc-" + testenv.RandomDNSName(4) + gcsTestDirIdxc = "c3appfw-idxc-" + testenv.RandomDNSName(4) + + // Upload ES app to Gcs + testcaseEnvInst.Log.Info("Upload ES app to Gcs") + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload ES app to Gcs test directory") + uploadedApps = append(uploadedApps, uploadedFiles...) 
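+ // The ES app is deployed to the Search Head Cluster through the premium apps scope below, while the Splunk_TA_ForIndexers add-on is pushed to the indexers via the cluster scope.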
+ + // Upload Technology add-on apps to Gcs for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s Technology add-on app to Gcs for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileListIdxc, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s Technology add-on app to Gcs test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for SHC + appSourceNameShc = "appframework-shc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopePremiumApps, appSourceNameShc, gcsTestDirShc, 180) + appFrameworkSpecShc.AppSources[0].PremiumAppsProps = enterpriseApi.PremiumAppsProps{ + Type: enterpriseApi.PremiumAppsTypeEs, + EsDefaults: enterpriseApi.EsDefaults{ + SslEnablement: enterpriseApi.SslEnablementIgnore, + }, + } + + // Create App framework Spec for Indexer Cluster + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 180) + + // Deploy C3 SVA + // Deploy the Cluster Manager + testcaseEnvInst.Log.Info("Deploy Cluster Manager") + cmSpec := enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecIdxc, + } + cm, err := deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec) + Expect(err).To(Succeed(), "Unable to deploy Cluster Manager") + + // Deploy the Indexer Cluster + testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") + indexerReplicas := 3 + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "") + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") + + // Deploy the Search Head Cluster + testcaseEnvInst.Log.Info("Deploy Search Head Cluster") + shSpec := enterpriseApi.SearchHeadClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: deployment.GetName(), + }, + }, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpecShc, + } + shc, err := deployment.DeploySearchHeadClusterWithGivenSpec(ctx, deployment.GetName()+"-shc", shSpec) + Expect(err).To(Succeed(), "Unable to deploy Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //######### INITIAL VERIFICATIONS 
############# + shcPodNames := testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), int(shSpec.Replicas), false, 1) + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: esApp, CrAppFileList: appFileList, CrReplicas: int(shSpec.Replicas), CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + idxcPodNames := testenv.GeneratePodNameSlice(testenv.IndexerPod, deployment.GetName(), indexerReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: taApp, CrAppFileList: appFileListIdxc, CrReplicas: indexerReplicas, CrClusterPods: idxcPodNames} + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // //############### UPGRADE APPS ################ + // // Download ES App from Gcs + // appVersion = "V2" + // testcaseEnvInst.Log.Info("Download updated ES app from Gcs") + // err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV2, downloadDirV2, appFileList) + // Expect(err).To(Succeed(), "Unable to download ES app") + + // // Upload V2 ES app to Gcs for Search Head Cluster + // testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s ES app to Gcs for Search Head Cluster", appVersion)) + // uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + // Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s ES app to Gcs test directory for Search Head Cluster", appVersion)) + // uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // // Check for changes in App phase to determine if next poll has been triggered + // testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName()+"-shc", shc.Kind, appSourceNameShc, appFileList) + + // // Ensure that the Cluster Manager goes to Ready phase + // testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // // Ensure Indexers go to Ready phase + // testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) + + // // Ensure Search Head Cluster go to Ready phase + // testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // // Verify RF SF is met + // testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // // Get Pod age to check for pod resets later + // splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // //############ FINAL VERIFICATIONS ############ + + // shcAppSourceInfo.CrAppVersion = appVersion + // shcAppSourceInfo.CrAppList = esApp + // shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(esApp) + // allAppSourceInfo = []testenv.AppSourceInfo{shcAppSourceInfo} + // testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) +}) diff --git a/test/appframework_gcp/m4/appframework_gcs_suite_test.go b/test/appframework_gcp/m4/appframework_gcs_suite_test.go new file mode 100644 index 000000000..8f4a28249 --- /dev/null +++ b/test/appframework_gcp/m4/appframework_gcs_suite_test.go @@ -0,0 +1,103 @@ +// Copyright (c) 2018-2022 Splunk Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package m4gcpappfw + +import ( + "os" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/splunk/splunk-operator/test/testenv" +) + +const ( + // PollInterval specifies the polling interval + PollInterval = 5 * time.Second + + // ConsistentPollInterval is the interval to use to consistently check a state is stable + ConsistentPollInterval = 200 * time.Millisecond + ConsistentDuration = 2000 * time.Millisecond +) + +var ( + testenvInstance *testenv.TestEnv + testSuiteName = "m4appfw-" + testenv.RandomDNSName(3) + appListV1 []string + appListV2 []string + testDataGcsBucket = os.Getenv("TEST_BUCKET") + testGcsBucket = os.Getenv("TEST_INDEXES_S3_BUCKET") + gcsAppDirV1 = testenv.AppLocationV1 + gcsAppDirV2 = testenv.AppLocationV2 + gcsPVTestApps = testenv.PVTestAppsLocation + currDir, _ = os.Getwd() + downloadDirV1 = filepath.Join(currDir, "m4appfwV1-"+testenv.RandomDNSName(4)) + downloadDirV2 = filepath.Join(currDir, "m4appfwV2-"+testenv.RandomDNSName(4)) + downloadDirPVTestApps = filepath.Join(currDir, "m4appfwPVTestApps-"+testenv.RandomDNSName(4)) +) + +// TestBasic is the main entry point +func TestBasic(t *testing.T) { + + RegisterFailHandler(Fail) + + RunSpecs(t, "Running "+testSuiteName) +} + +var _ = BeforeSuite(func() { + var err error + testenvInstance, err = testenv.NewDefaultTestEnv(testSuiteName) + Expect(err).ToNot(HaveOccurred()) + + if testenv.ClusterProvider == "gcp" { + // Create a list of apps to upload to GCP + appListV1 = testenv.BasicApps + appFileList := testenv.GetAppFileList(appListV1) + + // Download V1 Apps from GCP + testenvInstance.Log.Info("logging download details", "bucket", testDataGcsBucket, "gcsAppDirV1", gcsAppDirV1, "downloadDirV1", downloadDirV1, "appFileList", appFileList) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download V1 app files") + + // Create a list of apps to upload to GCP after poll period + appListV2 = append(appListV1, testenv.NewAppsAddedBetweenPolls...) + appFileList = testenv.GetAppFileList(appListV2) + + // Download V2 Apps from GCP + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV2, downloadDirV2, appFileList) + Expect(err).To(Succeed(), "Unable to download V2 app files") + } else { + testenvInstance.Log.Info("Skipping Before Suite Setup", "Cluster Provider", testenv.ClusterProvider) + } + +}) + +var _ = AfterSuite(func() { + if testenvInstance != nil { + Expect(testenvInstance.Teardown()).ToNot(HaveOccurred()) + } + + if testenvInstance != nil { + Expect(testenvInstance.Teardown()).ToNot(HaveOccurred()) + } + + // Delete locally downloaded app files + err := os.RemoveAll(downloadDirV1) + Expect(err).To(Succeed(), "Unable to delete locally downloaded V1 app files") + err = os.RemoveAll(downloadDirV2) + Expect(err).To(Succeed(), "Unable to delete locally downloaded V2 app files") +}) diff --git a/test/appframework_gcp/m4/appframework_gcs_test.go b/test/appframework_gcp/m4/appframework_gcs_test.go new file mode 100644 index 000000000..d5e2f16df --- /dev/null +++ b/test/appframework_gcp/m4/appframework_gcs_test.go @@ -0,0 +1,2703 @@ +// Copyright (c) 2018-2022 Splunk Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.s +package m4gcpappfw + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + testenv "github.com/splunk/splunk-operator/test/testenv" + corev1 "k8s.io/api/core/v1" +) + +var _ = Describe("m4appfw test", func() { + + var testcaseEnvInst *testenv.TestCaseEnv + var deployment *testenv.Deployment + var uploadedApps []string + var appSourceNameIdxc string + var appSourceNameShc string + var gcsTestDirShc string + var gcsTestDirIdxc string + var appSourceVolumeNameIdxc string + var appSourceVolumeNameShc string + var gcsTestDirShcLocal string + var gcsTestDirIdxcLocal string + var gcsTestDirShcCluster string + var gcsTestDirIdxcCluster string + var filePresentOnOperator bool + + ctx := context.TODO() + + BeforeEach(func() { + var err error + name := fmt.Sprintf("%s-%s", "master"+testenvInstance.GetName(), testenv.RandomDNSName(3)) + testcaseEnvInst, err = testenv.NewDefaultTestCaseEnv(testenvInstance.GetKubeClient(), name) + Expect(err).To(Succeed(), "Unable to create testcaseenv") + deployment, err = testcaseEnvInst.NewDeployment(testenv.RandomDNSName(3)) + Expect(err).To(Succeed(), "Unable to create deployment") + gcsTestDirIdxc = "m4appfw-idxc-" + testenv.RandomDNSName(4) + gcsTestDirShc = "m4appfw-shc-" + testenv.RandomDNSName(4) + appSourceVolumeNameIdxc = "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc = "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + }) + + AfterEach(func() { + // When a test spec failed, skip the teardown so we can troubleshoot. 
+ if CurrentGinkgoTestDescription().Failed { + testcaseEnvInst.SkipTeardown = true + } + if deployment != nil { + deployment.Teardown() + } + // Delete files uploaded to GCP + if !testcaseEnvInst.SkipTeardown { + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + } + if testcaseEnvInst != nil { + Expect(testcaseEnvInst.Teardown()).ToNot(HaveOccurred()) + } + + if filePresentOnOperator { + //Delete files from app-directory + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + podDownloadPath := filepath.Join(testenv.AppDownloadVolume, "test_file.img") + testenv.DeleteFilesOnOperatorPod(ctx, deployment, opPod, []string{podDownloadPath}) + } + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It(" m4gcp, masterappframeworkm4gcp, appframeworkgcp, m4_gcp_sanity: can deploy a M4 SVA with App Framework enabled, install apps and upgrade them", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + ########## INITIAL VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and on Search Heads and Indexers pods + ############# UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for Monitoring Console and M4 pod to be ready + ########## UPGRADE VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and upgraded on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Upload V1 apps to GCP for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + gcsTestDirMC := "m4appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
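// [Illustrative sketch, not part of this patch] testenv.UploadFilesToGCP is a test-environment
// helper defined outside this diff. Assuming it wraps the official GCS Go client, a minimal
// equivalent could look like the sketch below; the function name, signature, and error handling
// are hypothetical and only show the bucket/object calls involved.
package sketch

import (
    "context"
    "fmt"
    "io"
    "os"
    "path/filepath"

    "cloud.google.com/go/storage"
)

// uploadFilesToGCS copies each file in fileList (read from localDir) to
// gs://<bucket>/<remoteDir>/<file> and returns the object keys it wrote,
// mirroring how uploadedApps collects keys for later cleanup.
func uploadFilesToGCS(ctx context.Context, bucket, remoteDir, localDir string, fileList []string) ([]string, error) {
    client, err := storage.NewClient(ctx) // uses Application Default Credentials
    if err != nil {
        return nil, err
    }
    defer client.Close()

    var uploaded []string
    for _, name := range fileList {
        f, err := os.Open(filepath.Join(localDir, name))
        if err != nil {
            return uploaded, err
        }
        key := fmt.Sprintf("%s/%s", remoteDir, name)
        w := client.Bucket(bucket).Object(key).NewWriter(ctx)
        if _, err := io.Copy(w, f); err != nil {
            f.Close()
            return uploaded, err
        }
        f.Close()
        // Close flushes the writer and finalizes the object; the upload is not durable until it returns.
        if err := w.Close(); err != nil {
            return uploaded, err
        }
        uploaded = append(uploaded, key)
    }
    return uploaded, nil
}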
+ + // Create App framework Spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + volumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, volumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload V1 apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
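// [Illustrative sketch, not part of this patch] Helpers such as testenv.VerifyMonitoringConsoleReady
// assert that a CR becomes Ready and then stays Ready. Assuming they follow the usual Gomega
// Eventually-then-Consistently pattern, the PollInterval, ConsistentDuration, and
// ConsistentPollInterval constants declared in appframework_gcs_suite_test.go would be used roughly
// as below; getPhase and the overall timeout are hypothetical.
package sketch

import (
    "time"

    "github.com/onsi/gomega"
)

const (
    pollInterval           = 5 * time.Second         // mirrors PollInterval in the suite file
    consistentPollInterval = 200 * time.Millisecond  // mirrors ConsistentPollInterval
    consistentDuration     = 2000 * time.Millisecond // mirrors ConsistentDuration
)

// waitReadyAndStable blocks until getPhase reports "Ready", then requires the phase to remain
// "Ready" for consistentDuration, sampling every consistentPollInterval.
func waitReadyAndStable(getPhase func() string) {
    gomega.Eventually(getPhase, 10*time.Minute, pollInterval).Should(gomega.Equal("Ready"))
    gomega.Consistently(getPhase, consistentDuration, consistentPollInterval).Should(gomega.Equal("Ready"))
}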
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // get revision number of the resource + resourceVersion := testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, mcName, "") + + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // wait for custom resource resource version to change + testenv.VerifyCustomResourceVersionChanged(ctx, deployment, testcaseEnvInst, mc, resourceVersion) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + + //########## INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, 
deployment.GetName())} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + ClusterMasterBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############# UPGRADE APPS ################ + // Delete apps on GCP + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCP", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // get revision number of the resource + _ = testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Upload V2 apps to GCP for Indexer Cluster + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps for Monitoring Console + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
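// [Illustrative sketch, not part of this patch] testenv.DeleteFilesOnGCP receives the object keys
// accumulated in uploadedApps, so the V1 packages can be removed before the V2 packages are
// uploaded to the same app source directories. Assuming it wraps the GCS Go client, an equivalent
// cleanup could look like this; the function name and signature are hypothetical.
package sketch

import (
    "context"

    "cloud.google.com/go/storage"
)

// deleteObjectsFromGCS deletes the given object keys from the bucket, tolerating objects that are
// already gone so repeated teardown calls stay idempotent.
func deleteObjectsFromGCS(ctx context.Context, bucket string, keys []string) error {
    client, err := storage.NewClient(ctx)
    if err != nil {
        return err
    }
    defer client.Close()

    for _, key := range keys {
        if err := client.Bucket(bucket).Object(key).Delete(ctx); err != nil && err != storage.ErrObjectNotExist {
            return err
        }
    }
    return nil
}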
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Verify MC is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## UPGRADE VERIFICATIONS ########## + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV2 + mcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, ClusterMasterBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, masterappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled, install apps and downgrade them", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V2 apps to GCP for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload V2 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + ########## INITIAL VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and on Search Heads and Indexers pods + ############ DOWNGRADE APPS ############### + * Downgrade apps in app sources + * Wait for Monitoring Console and M4 to be ready + ########## DOWNGRADE VERIFICATIONS ######## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify 
Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and downgraded on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Upload V2 version of apps to GCP for Monitoring Console + appVersion := "V2" + appFileList := testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + gcsTestDirMC := "m4appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + volumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, volumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console instance") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload V2 apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, mcName, "") + + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV2, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV2, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV2, CrAppFileList: appFileList} + allAppSourceInfo := 
[]testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + ClusterMasterBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############# DOWNGRADE APPS ################ + // Delete V2 apps on GCP + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCP", appVersion)) + + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Upload V1 apps to GCP for Indexer Cluster + appVersion = "V1" + appFileList = testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Monitoring Console + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
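// [Illustrative sketch, not part of this patch] After the V2 packages are deleted and the V1
// packages re-uploaded, each app source directory on GCS should contain only the V1 files, and the
// operator's next poll of those prefixes is what drives the downgrade. One way to spot-check the
// bucket state with the GCS Go client is to list objects under a test directory prefix, as in this
// hypothetical helper.
package sketch

import (
    "context"

    "cloud.google.com/go/storage"
    "google.golang.org/api/iterator"
)

// listObjectsWithPrefix returns the names of all objects under gs://<bucket>/<prefix>.
func listObjectsWithPrefix(ctx context.Context, bucket, prefix string) ([]string, error) {
    client, err := storage.NewClient(ctx)
    if err != nil {
        return nil, err
    }
    defer client.Close()

    var names []string
    it := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: prefix})
    for {
        attrs, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            return nil, err
        }
        names = append(names, attrs.Name)
    }
    return names, nil
}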
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## DOWNGRADE VERIFICATIONS ######## + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV1 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV1 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV1 + mcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, ClusterMasterBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, masterappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled, install apps, scale up clusters, install apps on new pods, scale down", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for M4 + * Create app source for M4 SVA (Cluster Master and Deployer) + * Prepare and deploy M4 CRD with app config and wait for pods to be ready + ########### INITIAL VERIFICATIONS ######### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is sucessful + * Verify apps are copied and installed on Monitoring Console and also on Search Heads and Indexers pods + ############### SCALING UP ################ + * Scale up Indexers and Search Head Cluster + * Wait for Monitoring Console and M4 to be ready + ######### SCALING UP VERIFICATIONS ######## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is sucessful + * Verify apps are copied and installed on new Search Heads and Indexers pods + ############### SCALING DOWN ############## + * Scale down 
Indexers and Search Head Cluster + * Wait for Monitoring Console and M4 to be ready + ######### SCALING DOWN VERIFICATIONS ###### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is sucessful + * Verify apps are still copied and installed on all Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Upload V1 apps to GCP for Indexer Cluster + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + indexersPerSite := 1 + shReplicas := 3 + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // Ingest data on Indexers + for i := 1; i <= siteCount; i++ { + podName := fmt.Sprintf(testenv.MultiSiteIndexerPod, deployment.GetName(), i, 0) + logFile := fmt.Sprintf("test-log-%s.log", testenv.RandomDNSName(3)) + testenv.CreateMockLogfile(logFile, 2000) + 
testenv.IngestFileViaMonitor(ctx, logFile, "main", podName, deployment) + } + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + + //########### INITIAL VERIFICATIONS ######### + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //Delete configMap Object + err = testenv.DeleteConfigMap(testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to delete ConfigMao", "ConfigMap name", ConfigMapName) + + //############### SCALING UP ################ + // Get instance of current Search Head Cluster CR with latest config + err = deployment.GetInstance(ctx, deployment.GetName()+"-shc", shc) + + Expect(err).To(Succeed(), "Failed to get instance of Search Head Cluster") + + // Scale up Search Head Cluster + defaultSHReplicas := shc.Spec.Replicas + scaledSHReplicas := defaultSHReplicas + 1 + testcaseEnvInst.Log.Info("Scale up Search Head Cluster", "Current Replicas", defaultSHReplicas, "New Replicas", scaledSHReplicas) + + // Update Replicas of Search Head Cluster + shc.Spec.Replicas = int32(scaledSHReplicas) + err = deployment.UpdateCR(ctx, shc) + Expect(err).To(Succeed(), "Failed to scale up Search Head Cluster") + + // Ensure Search Head Cluster scales up and go to ScalingUp phase + testenv.VerifySearchHeadClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseScalingUp) + + // Get instance of current Indexer CR with latest config + idxcName := deployment.GetName() + "-" + "site1" + idxc := &enterpriseApi.IndexerCluster{} + err = deployment.GetInstance(ctx, idxcName, idxc) + Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") + defaultIndexerReplicas := idxc.Spec.Replicas + scaledIndexerReplicas := 
defaultIndexerReplicas + 1 + testcaseEnvInst.Log.Info("Scale up Indexer Cluster", "Current Replicas", defaultIndexerReplicas, "New Replicas", scaledIndexerReplicas) + + // Update Replicas of Indexer Cluster + idxc.Spec.Replicas = int32(scaledIndexerReplicas) + err = deployment.UpdateCR(ctx, idxc) + Expect(err).To(Succeed(), "Failed to Scale Up Indexer Cluster") + + // Ensure Indexer cluster scales up and go to ScalingUp phase + testenv.VerifyIndexerClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseScalingUp, idxcName) + + // Ensure Indexer cluster go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ingest data on new Indexers + podName := fmt.Sprintf(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, 1) + logFile := fmt.Sprintf("test-log-%s.log", testenv.RandomDNSName(3)) + testenv.CreateMockLogfile(logFile, 2000) + testenv.IngestFileViaMonitor(ctx, logFile, "main", podName, deployment) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Search for data on newly added indexer + searchPod := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), 0) + indexerName := fmt.Sprintf(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, 1) + searchString := fmt.Sprintf("index=%s host=%s | stats count by host", "main", indexerName) + searchResultsResp, err := testenv.PerformSearchSync(ctx, searchPod, searchString, deployment) + Expect(err).To(Succeed(), "Failed to execute search '%s' on pod %s", searchPod, searchString) + + // Verify result. + searchResponse := strings.Split(searchResultsResp, "\n")[0] + var searchResults map[string]interface{} + jsonErr := json.Unmarshal([]byte(searchResponse), &searchResults) + Expect(jsonErr).To(Succeed(), "Failed to unmarshal JSON Search Results from response '%s'", searchResultsResp) + + testcaseEnvInst.Log.Info("Search results :", "searchResults", searchResults["result"]) + Expect(searchResults["result"]).ShouldNot(BeNil(), "No results in search response '%s' on pod %s", searchResults, searchPod) + + resultLine := searchResults["result"].(map[string]interface{}) + testcaseEnvInst.Log.Info("Sync Search results host count:", "count", resultLine["count"].(string), "host", resultLine["host"].(string)) + testHostname := strings.Compare(resultLine["host"].(string), indexerName) + Expect(testHostname).To(Equal(0), "Incorrect search result hostname. 
Expect: %s Got: %s", indexerName, resultLine["host"].(string)) + + //######### SCALING UP VERIFICATIONS ######## + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + + // Listing the Search Head cluster pods to exclude them from the 'no pod reset' test as they are expected to be reset after scaling + shcPodNames = []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + shcPodNames = append(shcPodNames, testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1)...) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, shcPodNames) + + //############### SCALING DOWN ############## + // Get instance of current Search Head Cluster CR with latest config + err = deployment.GetInstance(ctx, deployment.GetName()+"-shc", shc) + + Expect(err).To(Succeed(), "Failed to get instance of Search Head Cluster") + + // Scale down Search Head Cluster + defaultSHReplicas = shc.Spec.Replicas + scaledSHReplicas = defaultSHReplicas - 1 + testcaseEnvInst.Log.Info("Scaling down Search Head Cluster", "Current Replicas", defaultSHReplicas, "New Replicas", scaledSHReplicas) + + // Update Replicas of Search Head Cluster + shc.Spec.Replicas = int32(scaledSHReplicas) + err = deployment.UpdateCR(ctx, shc) + Expect(err).To(Succeed(), "Failed to scale down Search Head Cluster") + + // Ensure Search Head Cluster scales down and go to ScalingDown phase + testenv.VerifySearchHeadClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseScalingDown) + + // Get instance of current Indexer CR with latest config + err = deployment.GetInstance(ctx, idxcName, idxc) + Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") + defaultIndexerReplicas = idxc.Spec.Replicas + scaledIndexerReplicas = defaultIndexerReplicas - 1 + testcaseEnvInst.Log.Info("Scaling down Indexer Cluster", "Current Replicas", defaultIndexerReplicas, "New Replicas", scaledIndexerReplicas) + + // Update Replicas of Indexer Cluster + idxc.Spec.Replicas = int32(scaledIndexerReplicas) + err = deployment.UpdateCR(ctx, idxc) + Expect(err).To(Succeed(), "Failed to Scale down Indexer Cluster") + + // Ensure Indexer cluster scales down and go to ScalingDown phase + testenv.VerifyIndexerClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseScalingDown, idxcName) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexer cluster go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // 
Search for data from removed indexer + searchString = fmt.Sprintf("index=%s host=%s | stats count by host", "main", indexerName) + searchResultsResp, err = testenv.PerformSearchSync(ctx, searchPod, searchString, deployment) + Expect(err).To(Succeed(), "Failed to execute search '%s' on pod %s", searchPod, searchString) + + // Verify result. + searchResponse = strings.Split(searchResultsResp, "\n")[0] + jsonErr = json.Unmarshal([]byte(searchResponse), &searchResults) + Expect(jsonErr).To(Succeed(), "Failed to unmarshal JSON Search Results from response '%s'", searchResultsResp) + + testcaseEnvInst.Log.Info("Search results :", "searchResults", searchResults["result"]) + Expect(searchResults["result"]).ShouldNot(BeNil(), "No results in search response '%s' on pod %s", searchResults, searchPod) + + resultLine = searchResults["result"].(map[string]interface{}) + testcaseEnvInst.Log.Info("Sync Search results host count:", "count", resultLine["count"].(string), "host", resultLine["host"].(string)) + testHostname = strings.Compare(resultLine["host"].(string), indexerName) + Expect(testHostname).To(Equal(0), "Incorrect search result hostname. Expect: %s Got: %s", indexerName, resultLine["host"].(string)) + + //######### SCALING DOWN VERIFICATIONS ###### + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, shcPodNames) + }) + }) + + Context("Multi Site Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, masterappframeworkm4, appframework: can deploy a M4 SVA and have apps installed locally on Cluster Manager and Deployer", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCP + * Create app source with local scope for M4 SVA (Cluster Master and Deployer) + * Prepare and deploy M4 CRD with app framework and wait for pods to be ready + ########## INITIAL VERIFICATION ############# + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are installed locally on Cluster Master and Deployer + ############### UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for pods to be ready + ########## UPGRADE VERIFICATIONS ############ + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are copied, installed and upgraded on Cluster Master and Deployer + */ + + //################## SETUP #################### + // Upload V1 apps to GCP for Indexer Cluster + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
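// [Illustrative sketch, not part of this patch] In the scaling test above, the output of
// testenv.PerformSearchSync is parsed by taking the first line of the response, unmarshalling it
// as JSON, and reading "host" and "count" out of the nested "result" object as strings. The helper
// and the example payload below only illustrate that assumed shape; they are not captured from a
// real run.
package sketch

import (
    "encoding/json"
    "fmt"
    "strings"
)

// parseFirstSearchResult extracts host and count from the first JSON line of a sync search
// response, e.g. {"preview":false,"result":{"host":"splunk-example-site1-indexer-1","count":"2000"}}.
func parseFirstSearchResult(resp string) (string, string, error) {
    firstLine := strings.Split(resp, "\n")[0]

    var parsed map[string]interface{}
    if err := json.Unmarshal([]byte(firstLine), &parsed); err != nil {
        return "", "", err
    }
    result, ok := parsed["result"].(map[string]interface{})
    if !ok {
        return "", "", fmt.Errorf("no result object in %q", firstLine)
    }
    // Splunk returns stats values as strings, which is why the test asserts on them with .(string).
    return result["host"].(string), result["count"].(string), nil
}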
+ + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy Multisite Cluster and Search Head Cluster, with App Framework enabled on Cluster Master and Deployer + siteCount := 3 + indexersPerSite := 1 + shReplicas := 3 + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATION ############# + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, 
testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE APPS ################ + // Delete V1 apps on GCP + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCP", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to GCP for Indexer Cluster + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## UPGRADE VERIFICATIONS ############ + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multi Site Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, masterappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled for manual poll", func() { + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCP for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console with app framework and wait for the pod to be ready + * Upload V1 apps to GCP + * Create app source with manaul poll for M4 SVA (Cluster Master and Deployer) + * Prepare and deploy M4 CRD with app framework and wait for pods to be ready + ########## INITIAL VERIFICATION ############# + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App 
Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are installed locally on Cluster Master and Deployer + ############### UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for pods to be ready + ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + * Verify Apps are not updated + ############ ENABLE MANUAL POLL ############ + * Verify Manual Poll disabled after the check + ############## UPGRADE VERIFICATIONS ############ + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify apps are installed locally on Cluster Master and Deployer + */ + + // ################## SETUP #################### + // Upload V1 apps to GCP for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + gcsTestDirMC := "m4appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + volumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, volumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 0) + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload V1 apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 0) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 0) + + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster") + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, mcName, "") + Expect(err).To(Succeed(), "Unable to deploy Multi Site Indexer Cluster with App framework") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure cluster configured as multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + ClusterMasterBundleHash := 
testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + // ############### UPGRADE APPS ################ + + // Upload V2 apps to GCP for Indexer Cluster + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Monitoring Console + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure cluster configured as multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + appVersion = "V1" + allPodNames := append(idxcPodNames, shcPodNames...) 
+ testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPodNames, appListV1, true, "enabled", false, true) + + // ############ ENABLE MANUAL POLL ############ + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err := testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["ClusterMaster"] = strings.Replace(config.Data["ClusterMaster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer cluster configured as multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["SearchHeadCluster"] = strings.Replace(config.Data["SearchHeadCluster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["MonitoringConsole"] = strings.Replace(config.Data["MonitoringConsole"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + time.Sleep(2 * time.Minute) + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ########## Verify Manual Poll disabled after the check ################# + + // Verify config map set back to off after poll trigger + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify config map set back to off after poll trigger for %s app", appVersion)) + config, _ = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + + Expect(strings.Contains(config.Data["ClusterMaster"], "status: off") && strings.Contains(config.Data["SearchHeadCluster"], "status: off") && strings.Contains(config.Data["MonitoringConsole"], "status: off")).To(Equal(true), "Config map update not complete") + + // ############ VERIFY APPS UPDATED TO V2 ############# + appVersion = "V2" + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + 
shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV2 + mcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, ClusterMasterBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multi Site Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, masterappframeworkm4, appframework: can deploy a M4 SVA and have apps installed and updated locally on Cluster Manager and Deployer via manual poll", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCP + * Create app source with local scope for M4 SVA (Cluster Master and Deployer) + * Prepare and deploy M4 CRD with app framework and wait for pods to be ready + ########## INITIAL VERIFICATION ############# + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are installed locally on Cluster Master and Deployer + ############### UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for pods to be ready + ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + * Verify Apps are not updated + ############ ENABLE MANUAL POLL ############ + * Verify Manual Poll disabled after the poll is triggered + ########## UPGRADE VERIFICATIONS ############ + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are copied, installed and upgraded on Cluster Master and Deployer + */ + + //################## SETUP #################### + // Upload V1 apps to GCP for Indexer Cluster + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
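The manual-poll tests in this file all pivot on a config map the operator maintains per namespace: each CR kind owns an entry that the test flips from off to on, the operator runs one poll of the app source, installs whatever changed, and flips the entry back to off. A rough client-go sketch of that toggle is shown here; the config map name is an assumption for illustration, and the tests themselves go through testenv.GetAppframeworkManualUpdateConfigMap and deployment.UpdateCR rather than this code.

package k8stest

import (
	"context"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// triggerManualPoll flips one CR kind's manual-update entry from "off" to "on"
// so the operator performs a single app source poll for that CR.
func triggerManualPoll(ctx context.Context, clientset kubernetes.Interface, namespace, crKind string) error {
	const configMapName = "splunk-manual-app-update" // assumed placeholder name
	cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, configMapName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	if cm.Data == nil {
		cm.Data = map[string]string{}
	}
	// e.g. crKind = "ClusterMaster", "SearchHeadCluster" or "MonitoringConsole"
	cm.Data[crKind] = strings.Replace(cm.Data[crKind], "off", "on", 1)
	_, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{})
	return err
}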
+ + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, gcsTestDirIdxc, 0) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, gcsTestDirShc, 0) + + // Deploy Multisite Cluster and Search Head Cluster, with App Framework enabled on Cluster Master and Deployer + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATION ############# + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE APPS ################ + // Delete V1 apps on GCP + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCP", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to GCP for Indexer Cluster + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload 
%s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + appVersion = "V1" + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############ ENABLE MANUAL POLL ############ + appVersion = "V2" + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err := testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["ClusterMaster"] = strings.Replace(config.Data["ClusterMaster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer cluster configured as multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["SearchHeadCluster"] = strings.Replace(config.Data["SearchHeadCluster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ########## Verify Manual Poll config map disabled after the poll is triggered ################# + + // Verify config 
map set back to off after poll trigger + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify config map set back to off after poll trigger for %s app", appVersion)) + config, _ = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + + Expect(strings.Contains(config.Data["ClusterMaster"], "status: off") && strings.Contains(config.Data["SearchHeadCluster"], "status: off")).To(Equal(true), "Config map update not complete") + + //########## UPGRADE VERIFICATIONS ############ + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multi Site Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("m4, integration, masterappframeworkm4, appframework: can deploy a m4 SVA with apps installed locally on Cluster Manager and Deployer, cluster-wide on Peers and Search Heads, then upgrade them via a manual poll", func() { + + /* Test Steps + ################## SETUP #################### + * Split Applist into clusterlist and local list + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster for local and cluster scope + * Create app sources for Cluster Master and Deployer with local and cluster scope + * Prepare and deploy m4 CRD with app framework and wait for the pods to be ready + ######### INITIAL VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Monitoring Console and on Search Heads and Indexers pods + ############### UPGRADE APPS ################ + * Upload V2 apps on GCP + * Wait for all m4 pods to be ready + ############ FINAL VERIFICATIONS ############ + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V2 apps are copied and upgraded on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Split Applist into 2 lists for local and cluster install + appVersion := "V1" + appListLocal := appListV1[len(appListV1)/2:] + appListCluster := appListV1[:len(appListV1)/2] + + // Upload appListLocal list of apps to GCP (to be used for local install) for Idxc + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for local install (local scope)", appVersion)) + gcsTestDirIdxcLocal = "m4appfw-" + testenv.RandomDNSName(4) + localappFileList := testenv.GetAppFileList(appListLocal) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcLocal, localappFileList, downloadDirV1) + 
Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (local scope) to GCP test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListLocal list of apps to GCP (to be used for local install) for Shc + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for local install (local scope)", appVersion)) + gcsTestDirShcLocal = "m4appfw-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcLocal, localappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (local scope) to GCP test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of apps to GCP (to be used for cluster-wide install) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for cluster-wide install (cluster scope)", appVersion)) + gcsTestDirIdxcCluster = "m4appfw-cluster-" + testenv.RandomDNSName(4) + clusterappFileList := testenv.GetAppFileList(appListCluster) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcCluster, clusterappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (cluster scope) to GCP test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of apps to GCP (to be used for cluster-wide install) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for cluster-wide install (cluster scope)", appVersion)) + gcsTestDirShcCluster = "m4appfw-cluster-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcCluster, clusterappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (cluster scope) to GCP test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec + appSourceNameLocalIdxc := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameLocalShc := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameClusterIdxc := "appframework-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameClusterShc := "appframework-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxcLocal := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShcLocal := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appSourceVolumeNameIdxcCluster := "appframework-test-volume-idxc-cluster-" + testenv.RandomDNSName(3) + appSourceVolumeNameShcCluster := "appframework-test-volume-shc-cluster-" + testenv.RandomDNSName(3) + + // Create App framework Spec for Cluster master with scope local and append cluster scope + + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxcLocal, enterpriseApi.ScopeLocal, appSourceNameLocalIdxc, gcsTestDirIdxcLocal, 0) + volumeSpecCluster := []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(appSourceVolumeNameIdxcCluster, testenv.GetGCPEndpoint(), testcaseEnvInst.GetIndexSecretName(), "aws", "gcs", testenv.GetDefaultGCPRegion())} + appFrameworkSpecIdxc.VolList = append(appFrameworkSpecIdxc.VolList, volumeSpecCluster...) 
+ appSourceClusterDefaultSpec := enterpriseApi.AppSourceDefaultSpec{ + VolName: appSourceVolumeNameIdxcCluster, + Scope: enterpriseApi.ScopeCluster, + } + appSourceSpecCluster := []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceNameClusterIdxc, gcsTestDirIdxcCluster, appSourceClusterDefaultSpec)} + appFrameworkSpecIdxc.AppSources = append(appFrameworkSpecIdxc.AppSources, appSourceSpecCluster...) + + // Create App framework Spec for Search head cluster with scope local and append cluster scope + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShcLocal, enterpriseApi.ScopeLocal, appSourceNameLocalShc, gcsTestDirShcLocal, 0) + volumeSpecCluster = []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(appSourceVolumeNameShcCluster, testenv.GetGCPEndpoint(), testcaseEnvInst.GetIndexSecretName(), "aws", "gcs", testenv.GetDefaultGCPRegion())} + + appFrameworkSpecShc.VolList = append(appFrameworkSpecShc.VolList, volumeSpecCluster...) + appSourceClusterDefaultSpec = enterpriseApi.AppSourceDefaultSpec{ + VolName: appSourceVolumeNameShcCluster, + Scope: enterpriseApi.ScopeCluster, + } + appSourceSpecCluster = []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceNameClusterShc, gcsTestDirShcCluster, appSourceClusterDefaultSpec)} + appFrameworkSpecShc.AppSources = append(appFrameworkSpecShc.AppSources, appSourceSpecCluster...) + + // Create Multisite Cluster and Search Head Cluster, with App Framework enabled on Cluster Master and Deployer + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with both Local and Cluster scope for apps installation") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster goes to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameLocalIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxcLocal, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: 
indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + cmAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameClusterIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxcCluster, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + shcAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameLocalShc, CrAppSourceVolumeName: appSourceVolumeNameShcLocal, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + shcAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameClusterShc, CrAppSourceVolumeName: appSourceVolumeNameShcCluster, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster} + ClusterMasterBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE APPS ################ + // Delete apps on GCP + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCP", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Redefine app lists as LDAP app isn't in V1 apps + appListLocal = appListV1[len(appListV1)/2:] + appListCluster = appListV1[:len(appListV1)/2] + + // Upload appListLocal list of V2 apps to GCP (to be used for local install) + appVersion = "V2" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for local install (local scope)", appVersion)) + localappFileList = testenv.GetAppFileList(appListLocal) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcLocal, localappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for local install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcLocal, localappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for local install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of V2 apps to GCP (to be used for cluster-wide install) + clusterappFileList = testenv.GetAppFileList(appListCluster) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for cluster install (cluster scope)", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxcCluster, clusterappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for cluster-wide install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
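The GetPodsStartTime / VerifyNoPodReset pair used throughout these tests works by snapshotting pod start times before an app operation and asserting they are unchanged afterwards, i.e. app installs and upgrades must not bounce Splunk pods. A minimal client-go sketch of the snapshot side follows; the label selector is an assumption.

package k8stest

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// podStartTimes returns pod name -> start time for the Splunk pods in a
// namespace; comparing two snapshots reveals whether any pod restarted.
func podStartTimes(ctx context.Context, clientset kubernetes.Interface, namespace string) (map[string]time.Time, error) {
	pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: "app.kubernetes.io/managed-by=splunk-operator", // assumed selector
	})
	if err != nil {
		return nil, err
	}
	times := make(map[string]time.Time, len(pods.Items))
	for _, pod := range pods.Items {
		if pod.Status.StartTime != nil {
			times[pod.Name] = pod.Status.StartTime.Time
		}
	}
	return times, nil
}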
+ uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShcCluster, clusterappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for cluster-wide install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // ############ ENABLE MANUAL POLL ############ + + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err := testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["ClusterMaster"] = strings.Replace(config.Data["ClusterMaster"], "off", "on", 1) + config.Data["SearchHeadCluster"] = strings.Replace(config.Data["SearchHeadCluster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameClusterIdxc, clusterappFileList) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // ########## Verify Manual Poll config map disabled after the poll is triggered ################# + + // Verify config map set back to off after poll trigger + testcaseEnvInst.Log.Info("Verify config map set back to off after poll trigger for app", "version", appVersion) + config, _ = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(strings.Contains(config.Data["ClusterMaster"], "status: off") && strings.Contains(config.Data["SearchHeadCluster"], "status: off")).To(Equal(true), "Config map update not complete") + + //########## UPGRADE VERIFICATION ############# + cmAppSourceInfoLocal.CrAppVersion = appVersion + cmAppSourceInfoLocal.CrAppList = appListLocal + cmAppSourceInfoLocal.CrAppFileList = localappFileList + cmAppSourceInfoCluster.CrAppVersion = appVersion + cmAppSourceInfoCluster.CrAppList = appListCluster + cmAppSourceInfoCluster.CrAppFileList = clusterappFileList + shcAppSourceInfoLocal.CrAppVersion = appVersion + shcAppSourceInfoLocal.CrAppList = appListLocal + shcAppSourceInfoLocal.CrAppFileList = localappFileList + shcAppSourceInfoCluster.CrAppVersion = appVersion + shcAppSourceInfoCluster.CrAppList = appListCluster + shcAppSourceInfoCluster.CrAppFileList = clusterappFileList + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, ClusterMasterBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multisite Indexer Cluster with Search Head 
Cluster (M4) and App Framework", func() { + It("integration, m4, masterappframeworkm4, appframework: can deploy a M4, add new apps to app source while install is in progress and have all apps installed locally on Cluster Manager and Deployer", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCP for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload big-size app to GCP for Indexer Cluster and Search Head Cluster + * Create app sources for Cluster Master and Deployer + * Prepare and deploy M4 CRD with app framework + * Verify app installation is in progress on Cluster Master and Deployer + * Upload more apps from GCP during bigger app install + * Wait for polling interval to pass + * Verify all apps are installed on Cluster Master and Deployer + */ + + //################## SETUP #################### + // Upload V1 apps to GCP for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + gcsTestDirMC := "m4appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Prepare Monitoring Console spec with its own app source + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Download all test apps from GCP + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList = testenv.GetAppFileList(appList) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps") + + // Upload big-size app to GCP for Cluster Master + appList = testenv.BigSingleApp + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload big-size app to GCP for Cluster Manager") + gcsTestDirIdxc = "m4appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to GCP test directory for Cluster Manager") + uploadedApps = append(uploadedApps, uploadedFiles...) 
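testenv.DownloadFilesFromGCP, used above to pull the big-size and extra app packages onto the test runner before re-uploading them into the test directories, is essentially the inverse of the upload helper sketched earlier. A minimal version with the Go storage client could look like this; the name and signature are illustrative assumptions.

package gcstest

import (
	"context"
	"io"
	"os"
	"path/filepath"

	"cloud.google.com/go/storage"
)

// downloadFilesFromGCP copies each object <appDir>/<file> from the data
// bucket into downloadDir on local disk.
func downloadFilesFromGCP(ctx context.Context, bucket, appDir, downloadDir string, fileList []string) error {
	client, err := storage.NewClient(ctx)
	if err != nil {
		return err
	}
	defer client.Close()

	for _, name := range fileList {
		r, err := client.Bucket(bucket).Object(appDir + "/" + name).NewReader(ctx)
		if err != nil {
			return err
		}
		dst, err := os.Create(filepath.Join(downloadDir, name))
		if err != nil {
			r.Close()
			return err
		}
		_, copyErr := io.Copy(dst, r)
		r.Close()
		dst.Close()
		if copyErr != nil {
			return copyErr
		}
	}
	return nil
}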
+ + // Upload big-size app to GCP for Search Head Cluster + testcaseEnvInst.Log.Info("Upload big-size app to GCP for Search Head Cluster") + gcsTestDirShc = "m4appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to GCP test directory for Search Head Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, mcName, "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify App installation is in progress on Cluster Master + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgPodCopyComplete) + + // Upload more apps to GCP for Cluster Master + appList = testenv.ExtraApps + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload more apps to GCP for Cluster Manager") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to GCP test directory for Cluster Manager") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload more apps to GCP for Deployer + testcaseEnvInst.Log.Info("Upload more apps to GCP for Deployer") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to GCP test directory for Deployer") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Ensure Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Wait for polling interval to pass + testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Verify all apps are installed on Cluster Master + appList = append(testenv.BigSingleApp, testenv.ExtraApps...) 
+ appFileList = testenv.GetAppFileList(appList) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on Cluster Manager", appList)) + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), cmPod, appList, true, "enabled", false, false) + + // Ensure Search Head Cluster goes to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + time.Sleep(60 * time.Second) + // Wait for polling interval to pass + testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName()+"-shc", shc.Kind, appSourceNameShc, appFileList) + + // Verify all apps are installed on Deployer + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on Deployer", appList)) + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), deployerPod, appList, true, "enabled", false, false) + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (M4) and App Framework", func() { + It("m4gcp, masterappframeworkm4gcp, appframeworkgcp, m4_gcp_sanity: can deploy a M4, add new apps to app source while install is in progress and have all apps installed cluster-wide", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCP for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload big-size app to GCP for Indexer Cluster and Search Head Cluster + * Create app sources for Cluster Master and Deployer + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + ############## VERIFICATIONS ################ + * Verify App installation is in progress on Cluster Master and Deployer + * Upload more apps to GCP during bigger app install + * Wait for polling interval to pass + * Verify all apps are installed on Cluster Master and Deployer + */ + + //################## SETUP #################### + // Upload V1 apps to GCP for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + gcsTestDirMC := "m4appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Prepare Monitoring Console spec with its own app source + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Download all test apps from GCP + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList = testenv.GetAppFileList(appList) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps") + + // Upload big-size app to GCP for Cluster Master + appList = testenv.BigSingleApp + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload big-size app to GCP for Cluster Manager") + gcsTestDirIdxc = "m4appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to GCP test directory for Cluster Manager") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload big-size app to GCP for Search Head Cluster + testcaseEnvInst.Log.Info("Upload big-size app to GCP for Search Head Cluster") + gcsTestDirShc = "m4appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to GCP test directory for Search Head Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, mcName, "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify App installation is in progress + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgPodCopyComplete) + + // Upload more apps to GCP for Cluster Master + appList = testenv.ExtraApps + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload more apps to GCP for Cluster Manager") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to GCP test directory for Cluster Manager") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload more apps to GCP for Deployer + testcaseEnvInst.Log.Info("Upload more apps to GCP for Deployer") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to GCP test directory for Deployer") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Ensure Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Wait for polling interval to pass + testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Verify all apps are installed on indexers + appList = append(testenv.BigSingleApp, testenv.ExtraApps...) 
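The verification steps just below expand pod-name templates such as testenv.MultiSiteIndexerPod into the concrete pod list to inspect. A small sketch of that expansion is shown here; the "splunk-<cr>-site<N>-indexer-<i>" naming is an assumption modeled on common splunk-operator conventions, not a copy of the real GeneratePodNameSlice helper.

package k8stest

import "fmt"

// multisiteIndexerPods builds the pod names for a multisite indexer cluster,
// one entry per indexer per site (naming convention assumed).
func multisiteIndexerPods(crName string, indexersPerSite, siteCount int) []string {
	var pods []string
	for site := 1; site <= siteCount; site++ {
		for i := 0; i < indexersPerSite; i++ {
			pods = append(pods, fmt.Sprintf("splunk-%s-site%d-indexer-%d", crName, site, i))
		}
	}
	return pods
}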
+ appFileList = testenv.GetAppFileList(appList) + idxcPodNames := testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), indexersPerSite, true, siteCount) + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on indexers", appList)) + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), idxcPodNames, appList, true, "enabled", false, true) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Wait for polling interval to pass + testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName()+"-shc", shc.Kind, appSourceNameShc, appFileList) + + // Verify all apps are installed on Search Heads + shcPodNames := testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on Search Heads", appList)) + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), shcPodNames, appList, true, "enabled", false, true) + + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, masterappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled and reset operator pod while app install is in progress", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + * While app install is in progress, restart the operator + ########## VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Download all apps from GCP + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList := testenv.GetAppFileList(appList) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download big-size app") + + // Upload V1 apps to GCP for Indexer Cluster + appVersion := "V1" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify App installation is in progress on Cluster Master + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgInstallPending) + + // Delete Operator pod while Install in progress + testenv.DeleteOperatorPod(testcaseEnvInst) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod 
age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, masterappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled and reset operator pod while app download is in progress", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + * While app download is in progress, restart the operator + ########## VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Download all apps from GCP + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList := testenv.GetAppFileList(appList) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download big-size app") + + // Upload V1 apps to GCP for Indexer Cluster + appVersion := "V1" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
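Both operator-restart scenarios in this block call testenv.DeleteOperatorPod while app packages are still being installed or downloaded, then assert that reconciliation recovers and no Splunk pod resets. A minimal client-go sketch of such a helper follows; the namespace handling and the label selector are assumptions for illustration.

package k8stest

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteOperatorPod deletes the splunk-operator pod(s) so the owning
// Deployment recreates them, simulating an operator restart mid-reconcile.
func deleteOperatorPod(ctx context.Context, clientset kubernetes.Interface, namespace string) error {
	pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: "name=splunk-operator", // assumed operator pod label
	})
	if err != nil {
		return err
	}
	for _, pod := range pods.Items {
		if err := clientset.CoreV1().Pods(namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil {
			return err
		}
	}
	return nil
}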
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify App Download is in progress on Cluster Master + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgDownloadComplete, enterpriseApi.AppPkgDownloadPending) + + // Delete Operator pod while Download in progress + testenv.DeleteOperatorPod(testcaseEnvInst) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod 
age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, masterappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled, install an app, then disable it by using a disabled version of the app and then remove it from app source", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + ########## INITIAL VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and on Search Heads and Indexers pods + * Disable the app + * Delete the app from gcs + * Check for repo state in App Deployment Info + */ + + //################## SETUP ################## + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + + // Upload V1 apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
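+ // Repo state codes checked below via VerifyAppRepoState: 1 means the app package is still present in the GCS bucket, 2 means it has been deleted from the bucket.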
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATIONS ########## + idxcPodNames := testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames := testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify repo state on App to be disabled to be 1 (i.e app present on GCP bucket) + appName := appListV1[0] + appFileName := testenv.GetAppFileList([]string{appName}) + testenv.VerifyAppRepoState(ctx, deployment, testcaseEnvInst, cm.Name, cm.Kind, appSourceNameIdxc, 1, appFileName[0]) + + // Disable the app + testenv.DisableAppsToGCP(downloadDirV1, appFileName, gcsTestDirIdxc) + + // Check for changes in App phase to 
determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileName) + + // Ensure Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Wait for App state to update after config file change + testenv.WaitforAppInstallState(ctx, deployment, testcaseEnvInst, idxcPodNames, testcaseEnvInst.GetName(), appName, "disabled", true) + + // Delete the file from GCP + gcsFilepath := filepath.Join(gcsTestDirIdxc, appFileName[0]) + err = testenv.DeleteFileOnGCP(testGcsBucket, gcsFilepath) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to delete %s app on GCP test directory", appFileName)) + + // Verify repo state is set to 2 (i.e app deleted from GCP bucket) + testenv.VerifyAppRepoState(ctx, deployment, testcaseEnvInst, cm.Name, cm.Kind, appSourceNameIdxc, 2, appFileName[0]) + }) + }) + + Context("Multi Site Indexer Cluster with Search Head Cluster (M4) with App Framework", func() { + It("integration, m4, masterappframeworkm4, appframework: can deploy a M4 SVA, install apps via manual polling, switch to periodic polling, verify apps are not updated before the end of AppsRepoPollInterval, then updated after", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCP + * Create app source with local scope for M4 SVA, AppsRepoPollInterval=0 to set apps polling as manual + * Prepare and deploy M4 CRD with app framework and wait for pods to be ready + ########## INITIAL VERIFICATION ############# + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are installed locally on Cluster Master and Deployer + * Verify status is 'OFF' in config map for Cluster Master and Search Head Cluster + ######### SWITCH FROM MANUAL TO PERIODIC POLLING ############ + * Set AppsRepoPollInterval to 180 seconds for Cluster Master and Search Head Cluster + * Change status to 'ON' in config map for Cluster Master and Search Head Cluster + ############### UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for pods to be ready + ############ UPGRADE VERIFICATION ########## + * Verify apps are not updated before the end of AppsRepoPollInterval duration + * Verify apps are updated after the end of AppsRepoPollInterval duration + */ + + //################## SETUP #################### + // Upload V1 apps to GCP for Indexer Cluster + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, gcsTestDirIdxc, 0) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, gcsTestDirShc, 0) + + // Deploy Multisite Cluster and Search Head Cluster, with App Framework enabled on Cluster Master and Deployer + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATION ############# + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, 
testcaseEnvInst.GetName(), splunkPodAge, nil) + + // Verify status is 'OFF' in config map for Cluster Master and Search Head Cluster + testcaseEnvInst.Log.Info("Verify status is 'OFF' in config map for Cluster Master and Search Head Cluster") + config, _ := testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(strings.Contains(config.Data["ClusterMaster"], "status: off") && strings.Contains(config.Data["SearchHeadCluster"], "status: off")).To(Equal(true), "Config map update not complete") + + //######### SWITCH FROM MANUAL TO PERIODIC POLLING ############ + // Get instance of current Cluster Master CR with latest config + cm = &enterpriseApiV3.ClusterMaster{} + err = deployment.GetInstance(ctx, deployment.GetName(), cm) + Expect(err).To(Succeed(), "Failed to edit Cluster Master") + + // Set AppsRepoPollInterval for Cluster Master to 180 seconds + testcaseEnvInst.Log.Info("Set AppsRepoPollInterval for Cluster Master to 180 seconds") + cm.Spec.AppFrameworkConfig.AppsRepoPollInterval = int64(180) + err = deployment.UpdateCR(ctx, cm) + Expect(err).To(Succeed(), "Failed to change AppsRepoPollInterval value for Cluster Master") + + // Get instance of current Search Head Cluster CR with latest config + shc = &enterpriseApi.SearchHeadCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-shc", shc) + Expect(err).To(Succeed(), "Failed to edit Search Head Cluster") + + // Set AppsRepoPollInterval for Search Head Cluster to 180 seconds + testcaseEnvInst.Log.Info("Set AppsRepoPollInterval for Search Head Cluster to 180 seconds") + shc.Spec.AppFrameworkConfig.AppsRepoPollInterval = int64(180) + err = deployment.UpdateCR(ctx, shc) + Expect(err).To(Succeed(), "Failed to change AppsRepoPollInterval value for Search Head Cluster") + + // Change status to 'ON' in config map for Cluster Master and Search Head Cluster + testcaseEnvInst.Log.Info("Change status to 'ON' in config map for Cluster Master") + config, err = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map") + + config.Data["ClusterMaster"] = strings.Replace(config.Data["ClusterMaster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map for Cluster Master") + + testcaseEnvInst.Log.Info("Change status to 'ON' in config map for Search Head Cluster") + config.Data["SearchHeadCluster"] = strings.Replace(config.Data["SearchHeadCluster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map for Search Head Cluster") + + // Wait 5 seconds to be sure reconcile caused by CR update and config map update are done + testcaseEnvInst.Log.Info("Wait 5 seconds to be sure reconcile caused by CR update and config map update are done") + time.Sleep(5 * time.Second) + + // Verify status is 'ON' in config map for Cluster Master and Search Head Cluster + testcaseEnvInst.Log.Info("Verify status is 'ON' in config map for Cluster Master and Search Head Cluster") + config, _ = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(strings.Contains(config.Data["ClusterMaster"], "status: on") && strings.Contains(config.Data["SearchHeadCluster"], "status: on")).To(Equal(true), "Config map update not complete") + + //############### UPGRADE APPS ################ + // Delete V1 apps on GCP + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCP", appVersion)) + 
testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to GCP for Indexer Cluster + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## UPGRADE VERIFICATIONS ############ + testcaseEnvInst.Log.Info("Verify apps are not updated before the end of AppsRepoPollInterval duration") + appVersion = "V1" + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Wait for the end of AppsRepoPollInterval duration + testcaseEnvInst.Log.Info("Wait for the end of AppsRepoPollInterval duration") + time.Sleep(2 * time.Minute) + + testcaseEnvInst.Log.Info("Verify apps are updated after the end of AppsRepoPollInterval duration") + appVersion = "V2" + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, masterappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled and update apps after app download is completed", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + * While app download is in progress, restart the operator + * While app download is completed, upload new versions of the apps + ######### VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Search Heads and Indexers pods + ######### UPGRADE VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + 
* Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Download all apps from GCP + appVersion := "V1" + appListV1 := []string{appListV1[0]} + appFileList := testenv.GetAppFileList(appListV1) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps") + + // Upload V1 apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, gcsTestDirIdxc, 120) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, gcsTestDirShc, 120) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify App Download is in progress on Cluster Master + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgPodCopyPending) + + // Upload V2 apps to GCP for Indexer Cluster + appVersion = "V2" + appListV2 := []string{appListV2[0]} + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## VERIFICATIONS ########## + appVersion = "V1" + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())}, appListV1, false, "enabled", false, false) + + // Check for changes in App phase to determine if next poll has been triggered + appFileList = testenv.GetAppFileList(appListV2) + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + //############ UPGRADE VERIFICATIONS ############ + appVersion = "V2" + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV2, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV2, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("m4, integration, masterappframeworkm4, appframework: can deploy a M4 SVA and install a bigger volume of apps than the operator PV disk space", func() { + + /* Test Steps + ################## SETUP #################### + * Upload 15 apps of 100MB size each to GCP for Indexer Cluster and Search Head Cluster for cluster scope + * Create app sources for 
Cluster Master and Deployer with cluster scope + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + ######### INITIAL VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied, installed on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Create a large file on Operator pod + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + err := testenv.CreateDummyFileOnOperator(ctx, deployment, opPod, testenv.AppDownloadVolume, "1G", "test_file.img") + Expect(err).To(Succeed(), "Unable to create file on operator") + filePresentOnOperator = true + + // Download apps for test + appVersion := "V1" + appList := testenv.PVTestApps + appFileList := testenv.GetAppFileList(appList) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsPVTestApps, downloadDirPVTestApps, appFileList) + Expect(err).To(Succeed(), "Unable to download app files") + + // Upload apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + gcsTestDirIdxc := "m4appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirPVTestApps) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search head Cluster", appVersion)) + gcsTestDirShc := "m4appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirPVTestApps) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Maximum apps to be downloaded in parallel + maxConcurrentAppDownloads := 30 + + // Create App framework Spec for M4 + appSourceNameIdxc := "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc := "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecIdxc.MaxConcurrentAppDownloads = uint64(maxConcurrentAppDownloads) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + appFrameworkSpecShc.MaxConcurrentAppDownloads = uint64(maxConcurrentAppDownloads) + + // Deploy Multisite Cluster and Search Head Cluster, with App Framework enabled on Cluster Master and Deployer + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + 
It("integration, m4, masterappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled and delete apps from app directory when download is complete", func() { + + /* Test Steps + ################## SETUP ################## + * Upload big-size app to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + * When app download is complete, delete apps from app directory + ########## VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Download big size apps from GCP + appList := testenv.BigSingleApp + appFileList := testenv.GetAppFileList(appList) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download big-size app") + + // Upload big size app to GCP for Indexer Cluster + appVersion := "V1" + testcaseEnvInst.Log.Info("Upload big size app to GCP for Indexer Cluster") + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big size to GCP test directory for Indexer Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload big size app to GCP for Search Head Cluster + testcaseEnvInst.Log.Info("Upload big size app to GCP for Search Head Cluster") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big size to GCP test directory for Search Head Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify App Download is completed on Cluster Master + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgPodCopyComplete, enterpriseApi.AppPkgPodCopyPending) + + //Delete apps from app directory when app download is complete + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + podDownloadPath := filepath.Join(splcommon.AppDownloadVolume, "downloadedApps", testenvInstance.GetName(), cm.Kind, deployment.GetName(), enterpriseApi.ScopeCluster, appSourceNameIdxc, testenv.AppInfo[appList[0]]["filename"]) + err = testenv.DeleteFilesOnOperatorPod(ctx, deployment, opPod, []string{podDownloadPath}) + Expect(err).To(Succeed(), "Unable to delete file on pod") + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, 
CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It(" m4gcp, masterappframeworkm4gcp, appframeworkgcp, m4_gcp_sanity: can deploy a M4 SVA with App Framework enabled, install apps and check IsDeploymentInProgress for CM and SHC CR's", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework + * Verify IsDeploymentInProgress is set + * Wait for the pods to be ready + */ + + //################## SETUP ################## + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + + // Upload V1 apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, gcsTestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, gcsTestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, gcsTestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify IsDeploymentInProgress Flag is set to true for Cluster Master CR + testcaseEnvInst.Log.Info("Checking isDeploymentInProgress Flag for Cluster Manager") + testenv.VerifyIsDeploymentInProgressFlagIsSet(ctx, deployment, testcaseEnvInst, cm.Name, cm.Kind) + + // Ensure that the Cluster Master goes to Ready phase + testenv.ClusterMasterReady(ctx, deployment, testcaseEnvInst) + + // Verify IsDeploymentInProgress Flag is set to true for SHC CR + testcaseEnvInst.Log.Info("Checking isDeploymentInProgress Flag for SHC") + testenv.VerifyIsDeploymentInProgressFlagIsSet(ctx, deployment, testcaseEnvInst, shc.Name, shc.Kind) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + }) + }) +}) diff --git a/test/appframework_gcp/m4/manager_appframework_test.go b/test/appframework_gcp/m4/manager_appframework_test.go new file mode 100644 index 000000000..c7166e907 --- /dev/null +++ b/test/appframework_gcp/m4/manager_appframework_test.go @@ -0,0 +1,2702 @@ +// Copyright (c) 2018-2022 Splunk Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.s +package m4gcpappfw + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + testenv "github.com/splunk/splunk-operator/test/testenv" + corev1 "k8s.io/api/core/v1" +) + +var _ = Describe("m4appfw test", func() { + + var testcaseEnvInst *testenv.TestCaseEnv + var deployment *testenv.Deployment + var uploadedApps []string + var appSourceNameIdxc string + var appSourceNameShc string + var s3TestDirShc string + var s3TestDirIdxc string + var appSourceVolumeNameIdxc string + var appSourceVolumeNameShc string + var s3TestDirShcLocal string + var s3TestDirIdxcLocal string + var s3TestDirShcCluster string + var s3TestDirIdxcCluster string + var filePresentOnOperator bool + + ctx := context.TODO() + + BeforeEach(func() { + var err error + name := fmt.Sprintf("%s-%s", testenvInstance.GetName(), testenv.RandomDNSName(3)) + testcaseEnvInst, err = testenv.NewDefaultTestCaseEnv(testenvInstance.GetKubeClient(), name) + Expect(err).To(Succeed(), "Unable to create testcaseenv") + deployment, err = testcaseEnvInst.NewDeployment(testenv.RandomDNSName(3)) + Expect(err).To(Succeed(), "Unable to create deployment") + s3TestDirIdxc = "m4appfw-idxc-" + testenv.RandomDNSName(4) + s3TestDirShc = "m4appfw-shc-" + testenv.RandomDNSName(4) + appSourceVolumeNameIdxc = "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc = "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + }) + + AfterEach(func() { + // When a test spec failed, skip the teardown so we can troubleshoot. + if CurrentGinkgoTestDescription().Failed { + testcaseEnvInst.SkipTeardown = true + } + if deployment != nil { + deployment.Teardown() + } + // Delete files uploaded to GCP + if !testcaseEnvInst.SkipTeardown { + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + } + if testcaseEnvInst != nil { + Expect(testcaseEnvInst.Teardown()).ToNot(HaveOccurred()) + } + + if filePresentOnOperator { + //Delete files from app-directory + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + podDownloadPath := filepath.Join(testenv.AppDownloadVolume, "test_file.img") + testenv.DeleteFilesOnOperatorPod(ctx, deployment, opPod, []string{podDownloadPath}) + } + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It(" m4gcp, managerappframeworkm4gcp, appframeworkgcp, m4_mgr_gcp_sanity: can deploy a M4 SVA with App Framework enabled, install apps and upgrade them", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + ########## INITIAL VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and on Search Heads and Indexers pods + ############# UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for Monitoring Console and M4 pod to be ready + ########## 
UPGRADE VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and upgraded on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Upload V1 apps to GCP for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + s3TestDirMC := "m4appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + volumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, volumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, s3TestDirMC, 60) + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload V1 apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
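+ // uploadedApps accumulates every object written to the GCS test bucket so the AfterEach block above can remove them with testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps).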
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, s3TestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, s3TestDirShc, 60) + + // get revision number of the resource + resourceVersion := testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, mcName, "") + + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // wait for custom resource resource version to change + testenv.VerifyCustomResourceVersionChanged(ctx, deployment, testcaseEnvInst, mc, resourceVersion) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + + //########## INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} 
+ mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############# UPGRADE APPS ################ + // Delete apps on GCP + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCP", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // get revision number of the resource + _ = testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Upload V2 apps to GCP for Indexer Cluster + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps for Monitoring Console + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
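testenv.UploadFilesToGCP is the GCS counterpart of the S3 upload helpers used by the AWS tests; a plausible reduction of what such a helper does with cloud.google.com/go/storage is sketched below. The bucket, prefix, and file names are placeholders, and the real helper's signature may differ.

// gcs_upload_sketch.go - illustrative only; not the testenv implementation.
package main

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"

	"cloud.google.com/go/storage"
)

// uploadFilesToGCS copies local app packages into gs://<bucket>/<prefix>/ and returns the object keys.
func uploadFilesToGCS(ctx context.Context, bucket, prefix string, files []string) ([]string, error) {
	client, err := storage.NewClient(ctx) // picks up GOOGLE_APPLICATION_CREDENTIALS
	if err != nil {
		return nil, err
	}
	defer client.Close()

	var uploaded []string
	for _, f := range files {
		src, err := os.Open(f)
		if err != nil {
			return uploaded, err
		}
		key := prefix + "/" + filepath.Base(f)
		w := client.Bucket(bucket).Object(key).NewWriter(ctx)
		if _, err := io.Copy(w, src); err != nil {
			src.Close()
			return uploaded, err
		}
		src.Close()
		// Close flushes the writer and finalizes the object.
		if err := w.Close(); err != nil {
			return uploaded, err
		}
		uploaded = append(uploaded, key)
	}
	return uploaded, nil
}

func main() {
	keys, err := uploadFilesToGCS(context.Background(), "my-test-bucket", "m4appfw-idxc-abcd", []string{"/tmp/splunk_app_example_v2.tgz"})
	fmt.Println(keys, err)
}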
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## UPGRADE VERIFICATIONS ########## + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV2 + mcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, clusterManagerBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, managerappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled, install apps and downgrade them", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V2 apps to GCP for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload V2 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + ########## INITIAL VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and on Search Heads and Indexers pods + ############ DOWNGRADE APPS ############### + * Downgrade apps in app sources + * Wait for Monitoring Console and M4 to be ready + ########## DOWNGRADE VERIFICATIONS ######## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + *
Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and downgraded on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Upload V2 version of apps to GCP for Monitoring Console + appVersion := "V2" + appFileList := testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + s3TestDirMC := "m4appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + volumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, volumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, s3TestDirMC, 60) + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console instance") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload V2 apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
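testenv.GenerateAppFrameworkSpec builds the appRepo configuration that ends up in the MonitoringConsole, ClusterManager, and SearchHeadCluster CRs. The toy types below only mirror the CR's YAML keys (volumes, appSources, defaults, appsRepoPollIntervalSeconds) to show the rough shape of what the helper returns; they are deliberately not the operator's real enterpriseApi structs, and all values are placeholders.

// appframework_shape_sketch.go - toy types mirroring the CR's appRepo block; illustrative only.
package main

import "fmt"

type volume struct {
	Name, StorageType, Provider, Endpoint, Path, SecretRef string
}

type appSource struct {
	Name, Location, VolumeName, Scope string
}

type appRepo struct {
	AppsRepoPollIntervalSeconds int
	Volumes                     []volume
	AppSources                  []appSource
}

func main() {
	// Roughly what a generated spec for the Monitoring Console app source could look like
	// (bucket, directory, endpoint, and secret names are placeholders).
	spec := appRepo{
		AppsRepoPollIntervalSeconds: 60, // the automatic-poll tests above pass 60; the manual-poll tests pass 0
		Volumes: []volume{{
			Name:        "appframework-test-volume-mc-abc",
			StorageType: "gcs",
			Provider:    "gcp",
			Endpoint:    "https://storage.googleapis.com",
			Path:        "my-test-bucket",
			SecretRef:   "gcs-secret",
		}},
		AppSources: []appSource{{
			Name:       "appframework-localmc-abc",
			Location:   "m4appfw-mc-abcd/",
			VolumeName: "appframework-test-volume-mc-abc",
			Scope:      "local",
		}},
	}
	fmt.Printf("%+v\n", spec)
}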
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, s3TestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, s3TestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, mcName, "") + + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV2, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV2, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV2, CrAppFileList: appFileList} + allAppSourceInfo := 
[]testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############# DOWNGRADE APPS ################ + // Delete V2 apps on GCP + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCP", appVersion)) + + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Upload V1 apps to GCP for Indexer Cluster + appVersion = "V1" + appFileList = testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Monitoring Console + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
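During the upgrade and downgrade steps the suite first removes the previously uploaded packages with testenv.DeleteFilesOnGCP before pushing the other version. A minimal sketch of that kind of cleanup against a GCS bucket (the object keys are whatever the upload step returned; the names here are placeholders):

// gcs_delete_sketch.go - illustrative cleanup helper; not the testenv implementation.
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
)

// deleteObjects removes the given object keys from the bucket; the first error aborts the loop
// so any leftover objects remain visible to the caller.
func deleteObjects(ctx context.Context, bucket string, keys []string) error {
	client, err := storage.NewClient(ctx)
	if err != nil {
		return err
	}
	defer client.Close()

	for _, key := range keys {
		if err := client.Bucket(bucket).Object(key).Delete(ctx); err != nil {
			return fmt.Errorf("deleting %s: %w", key, err)
		}
	}
	return nil
}

func main() {
	_ = deleteObjects(context.Background(), "my-test-bucket", []string{"m4appfw-idxc-abcd/splunk_app_example_v2.tgz"})
}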
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## DOWNGRADE VERIFICATIONS ######## + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV1 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV1 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV1 + mcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, clusterManagerBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, managerappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled, install apps, scale up clusters, install apps on new pods, scale down", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for M4 + * Create app source for M4 SVA (Cluster Manager and Deployer) + * Prepare and deploy M4 CRD with app config and wait for pods to be ready + ########### INITIAL VERIFICATIONS ######### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and also on Search Heads and Indexers pods + ############### SCALING UP ################ + * Scale up Indexers and Search Head Cluster + * Wait for Monitoring Console and M4 to be ready + ######### SCALING UP VERIFICATIONS ######## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on new Search Heads and Indexers pods + ############### SCALING DOWN ############## + * Scale down
Indexers and Search Head Cluster + * Wait for Monitoring Console and M4 to be ready + ######### SCALING DOWN VERIFICATIONS ###### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are still copied and installed on all Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Upload V1 apps to GCP for Indexer Cluster + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, s3TestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, s3TestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + indexersPerSite := 1 + shReplicas := 3 + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // Ingest data on Indexers + for i := 1; i <= siteCount; i++ { + podName := fmt.Sprintf(testenv.MultiSiteIndexerPod, deployment.GetName(), i, 0) + logFile := fmt.Sprintf("test-log-%s.log", testenv.RandomDNSName(3)) + testenv.CreateMockLogfile(logFile, 2000) +
testenv.IngestFileViaMonitor(ctx, logFile, "main", podName, deployment) + } + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + + //########### INITIAL VERIFICATIONS ######### + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //Delete configMap Object + err = testenv.DeleteConfigMap(testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to delete ConfigMap", "ConfigMap name", ConfigMapName) + + //############### SCALING UP ################ + // Get instance of current Search Head Cluster CR with latest config + err = deployment.GetInstance(ctx, deployment.GetName()+"-shc", shc) + + Expect(err).To(Succeed(), "Failed to get instance of Search Head Cluster") + + // Scale up Search Head Cluster + defaultSHReplicas := shc.Spec.Replicas + scaledSHReplicas := defaultSHReplicas + 1 + testcaseEnvInst.Log.Info("Scale up Search Head Cluster", "Current Replicas", defaultSHReplicas, "New Replicas", scaledSHReplicas) + + // Update Replicas of Search Head Cluster + shc.Spec.Replicas = int32(scaledSHReplicas) + err = deployment.UpdateCR(ctx, shc) + Expect(err).To(Succeed(), "Failed to scale up Search Head Cluster") + + // Ensure Search Head Cluster scales up and go to ScalingUp phase + testenv.VerifySearchHeadClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseScalingUp) + + // Get instance of current Indexer CR with latest config + idxcName := deployment.GetName() + "-" + "site1" + idxc := &enterpriseApi.IndexerCluster{} + err = deployment.GetInstance(ctx, idxcName, idxc) + Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") + defaultIndexerReplicas := idxc.Spec.Replicas + scaledIndexerReplicas :=
defaultIndexerReplicas + 1 + testcaseEnvInst.Log.Info("Scale up Indexer Cluster", "Current Replicas", defaultIndexerReplicas, "New Replicas", scaledIndexerReplicas) + + // Update Replicas of Indexer Cluster + idxc.Spec.Replicas = int32(scaledIndexerReplicas) + err = deployment.UpdateCR(ctx, idxc) + Expect(err).To(Succeed(), "Failed to Scale Up Indexer Cluster") + + // Ensure Indexer cluster scales up and go to ScalingUp phase + testenv.VerifyIndexerClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseScalingUp, idxcName) + + // Ensure Indexer cluster go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ingest data on new Indexers + podName := fmt.Sprintf(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, 1) + logFile := fmt.Sprintf("test-log-%s.log", testenv.RandomDNSName(3)) + testenv.CreateMockLogfile(logFile, 2000) + testenv.IngestFileViaMonitor(ctx, logFile, "main", podName, deployment) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Search for data on newly added indexer + searchPod := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), 0) + indexerName := fmt.Sprintf(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, 1) + searchString := fmt.Sprintf("index=%s host=%s | stats count by host", "main", indexerName) + searchResultsResp, err := testenv.PerformSearchSync(ctx, searchPod, searchString, deployment) + Expect(err).To(Succeed(), "Failed to execute search '%s' on pod %s", searchPod, searchString) + + // Verify result. + searchResponse := strings.Split(searchResultsResp, "\n")[0] + var searchResults map[string]interface{} + jsonErr := json.Unmarshal([]byte(searchResponse), &searchResults) + Expect(jsonErr).To(Succeed(), "Failed to unmarshal JSON Search Results from response '%s'", searchResultsResp) + + testcaseEnvInst.Log.Info("Search results :", "searchResults", searchResults["result"]) + Expect(searchResults["result"]).ShouldNot(BeNil(), "No results in search response '%s' on pod %s", searchResults, searchPod) + + resultLine := searchResults["result"].(map[string]interface{}) + testcaseEnvInst.Log.Info("Sync Search results host count:", "count", resultLine["count"].(string), "host", resultLine["host"].(string)) + testHostname := strings.Compare(resultLine["host"].(string), indexerName) + Expect(testHostname).To(Equal(0), "Incorrect search result hostname. 
Expect: %s Got: %s", indexerName, resultLine["host"].(string)) + + //######### SCALING UP VERIFICATIONS ######## + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + + // Listing the Search Head cluster pods to exclude them from the 'no pod reset' test as they are expected to be reset after scaling + shcPodNames = []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + shcPodNames = append(shcPodNames, testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1)...) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, shcPodNames) + + //############### SCALING DOWN ############## + // Get instance of current Search Head Cluster CR with latest config + err = deployment.GetInstance(ctx, deployment.GetName()+"-shc", shc) + + Expect(err).To(Succeed(), "Failed to get instance of Search Head Cluster") + + // Scale down Search Head Cluster + defaultSHReplicas = shc.Spec.Replicas + scaledSHReplicas = defaultSHReplicas - 1 + testcaseEnvInst.Log.Info("Scaling down Search Head Cluster", "Current Replicas", defaultSHReplicas, "New Replicas", scaledSHReplicas) + + // Update Replicas of Search Head Cluster + shc.Spec.Replicas = int32(scaledSHReplicas) + err = deployment.UpdateCR(ctx, shc) + Expect(err).To(Succeed(), "Failed to scale down Search Head Cluster") + + // Ensure Search Head Cluster scales down and go to ScalingDown phase + testenv.VerifySearchHeadClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseScalingDown) + + // Get instance of current Indexer CR with latest config + err = deployment.GetInstance(ctx, idxcName, idxc) + Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") + defaultIndexerReplicas = idxc.Spec.Replicas + scaledIndexerReplicas = defaultIndexerReplicas - 1 + testcaseEnvInst.Log.Info("Scaling down Indexer Cluster", "Current Replicas", defaultIndexerReplicas, "New Replicas", scaledIndexerReplicas) + + // Update Replicas of Indexer Cluster + idxc.Spec.Replicas = int32(scaledIndexerReplicas) + err = deployment.UpdateCR(ctx, idxc) + Expect(err).To(Succeed(), "Failed to Scale down Indexer Cluster") + + // Ensure Indexer cluster scales down and go to ScalingDown phase + testenv.VerifyIndexerClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseScalingDown, idxcName) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure Indexer cluster go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // 
Search for data from removed indexer + searchString = fmt.Sprintf("index=%s host=%s | stats count by host", "main", indexerName) + searchResultsResp, err = testenv.PerformSearchSync(ctx, searchPod, searchString, deployment) + Expect(err).To(Succeed(), "Failed to execute search '%s' on pod %s", searchPod, searchString) + + // Verify result. + searchResponse = strings.Split(searchResultsResp, "\n")[0] + jsonErr = json.Unmarshal([]byte(searchResponse), &searchResults) + Expect(jsonErr).To(Succeed(), "Failed to unmarshal JSON Search Results from response '%s'", searchResultsResp) + + testcaseEnvInst.Log.Info("Search results :", "searchResults", searchResults["result"]) + Expect(searchResults["result"]).ShouldNot(BeNil(), "No results in search response '%s' on pod %s", searchResults, searchPod) + + resultLine = searchResults["result"].(map[string]interface{}) + testcaseEnvInst.Log.Info("Sync Search results host count:", "count", resultLine["count"].(string), "host", resultLine["host"].(string)) + testHostname = strings.Compare(resultLine["host"].(string), indexerName) + Expect(testHostname).To(Equal(0), "Incorrect search result hostname. Expect: %s Got: %s", indexerName, resultLine["host"].(string)) + + //######### SCALING DOWN VERIFICATIONS ###### + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, shcPodNames) + }) + }) + + Context("Multi Site Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, managerappframeworkm4, appframework: can deploy a M4 SVA and have apps installed locally on Cluster Manager and Deployer", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCP + * Create app source with local scope for M4 SVA (Cluster Manager and Deployer) + * Prepare and deploy M4 CRD with app framework and wait for pods to be ready + ########## INITIAL VERIFICATION ############# + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are installed locally on Cluster Manager and Deployer + ############### UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for pods to be ready + ########## UPGRADE VERIFICATIONS ############ + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are copied, installed and upgraded on Cluster Manager and Deployer + */ + + //################## SETUP #################### + // Upload V1 apps to GCP for Indexer Cluster + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
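This test switches the app sources to enterpriseApi.ScopeLocal, whereas the earlier ones used ScopeCluster. Assuming the usual constant values of "local" and "cluster", the generated app source names and the expected install behaviour differ as sketched here; the behavioural comments summarize the test expectations above, not operator internals.

// scope_sketch.go - illustrative only.
package main

import "fmt"

const (
	scopeLocal   = "local"   // assumed value of enterpriseApi.ScopeLocal
	scopeCluster = "cluster" // assumed value of enterpriseApi.ScopeCluster
)

func main() {
	suffix := "abc" // stands in for testenv.RandomDNSName(3)

	// Local scope: apps are installed only on the CR's own pod
	// (Cluster Manager / Deployer in this test), with no distribution to peers or members.
	fmt.Println("appframework-idxc-" + scopeLocal + suffix)

	// Cluster scope: apps are distributed to indexer peers / search head members via bundle push,
	// which is why those tests track the cluster manager bundle hash returned by
	// testenv.AppFrameWorkVerifications.
	fmt.Println("appframework-idxc-" + scopeCluster + suffix)
}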
+ + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, s3TestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, s3TestDirShc, 60) + + // Deploy Multisite Cluster and Search Head Cluster, with App Framework enabled on Cluster Manager and Deployer + siteCount := 3 + indexersPerSite := 1 + shReplicas := 3 + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATION ############# + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, 
testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE APPS ################ + // Delete V1 apps on GCP + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCP", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to GCP for Indexer Cluster + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## UPGRADE VERIFICATIONS ############ + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multi Site Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, managerappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled for manual poll", func() { + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCP for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console with app framework and wait for the pod to be ready + * Upload V1 apps to GCP + * Create app source with manual poll for M4 SVA (Cluster Manager and Deployer) + * Prepare and deploy M4 CRD with app framework and wait for pods to be ready + ########## INITIAL VERIFICATION ############# + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App
Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are installed locally on Cluster Manager and Deployer + ############### UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for pods to be ready + ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + * Verify Apps are not updated + ############ ENABLE MANUAL POLL ############ + * Verify Manual Poll disabled after the check + ############## UPGRADE VERIFICATIONS ############ + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify apps are installed locally on Cluster Manager and Deployer + */ + + // ################## SETUP #################### + // Upload V1 apps to GCP for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + s3TestDirMC := "m4appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + volumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, volumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, s3TestDirMC, 0) + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload V1 apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
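Several steps in these tests capture splunkPodAge with testenv.GetPodsStartTime and later pass it to testenv.VerifyNoPodReset, i.e. the check is simply that pod start timestamps did not move across the operation. A rough client-go sketch of collecting those timestamps; the namespace, kubeconfig path, and label selector are assumptions, not the suite's real values:

// pod_start_time_sketch.go - illustrative only.
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// podStartTimes returns a map of pod name to start time for Splunk pods in the namespace.
func podStartTimes(ctx context.Context, clientset kubernetes.Interface, namespace string) (map[string]time.Time, error) {
	pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: "app.kubernetes.io/managed-by=splunk-operator", // assumed label
	})
	if err != nil {
		return nil, err
	}
	ages := make(map[string]time.Time)
	for _, p := range pods.Items {
		if p.Status.StartTime != nil {
			ages[p.Name] = p.Status.StartTime.Time
		}
	}
	return ages, nil
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig") // placeholder kubeconfig
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	before, _ := podStartTimes(context.TODO(), clientset, "default")
	// ... run the operation under test, then collect again and compare:
	after, _ := podStartTimes(context.TODO(), clientset, "default")
	for name, t0 := range before {
		if t1, ok := after[name]; ok && !t1.Equal(t0) {
			fmt.Println("pod was reset:", name)
		}
	}
}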
+ + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, s3TestDirIdxc, 0) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, s3TestDirShc, 0) + + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster") + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, mcName, "") + Expect(err).To(Succeed(), "Unable to deploy Multi Site Indexer Cluster with App framework") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure cluster configured as multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + clusterManagerBundleHash := 
testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + // ############### UPGRADE APPS ################ + + // Upload V2 apps to GCP for Indexer Cluster + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Monitoring Console + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure cluster configured as multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + appVersion = "V1" + allPodNames := append(idxcPodNames, shcPodNames...) 
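The manual-poll flow below flips an entry in the app framework manual-update ConfigMap from "status: off" to "status: on", and the operator is expected to set it back to off once the poll has run. A hedged client-go sketch of that toggle; the ConfigMap name and the exact layout of the per-kind entries are assumptions based only on the strings the test manipulates:

// manual_poll_toggle_sketch.go - illustrative only.
package main

import (
	"context"
	"strings"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// triggerManualPoll sets the per-CR-kind entry (e.g. "ClusterManager") to "status: on" so the
// operator performs one app-repo poll; the operator is expected to write it back to "status: off".
func triggerManualPoll(ctx context.Context, clientset kubernetes.Interface, namespace, configMapName, crKind string) error {
	cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, configMapName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	cm.Data[crKind] = strings.Replace(cm.Data[crKind], "off", "on", 1)
	_, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{})
	return err
}

func main() {
	// Exercise the helper against a fake clientset with a hypothetical ConfigMap name and content.
	cs := fake.NewSimpleClientset(&corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "splunk-manual-app-update", Namespace: "default"},
		Data:       map[string]string{"ClusterManager": "status: off"},
	})
	_ = triggerManualPoll(context.TODO(), cs, "default", "splunk-manual-app-update", "ClusterManager")
}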
+ testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPodNames, appListV1, true, "enabled", false, true) + + // ############ ENABLE MANUAL POLL ############ + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err := testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["ClusterManager"] = strings.Replace(config.Data["ClusterManager"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer cluster configured as multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["SearchHeadCluster"] = strings.Replace(config.Data["SearchHeadCluster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["MonitoringConsole"] = strings.Replace(config.Data["MonitoringConsole"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + time.Sleep(2 * time.Minute) + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ########## Verify Manual Poll disabled after the check ################# + + // Verify config map set back to off after poll trigger + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify config map set back to off after poll trigger for %s app", appVersion)) + config, _ = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + + Expect(strings.Contains(config.Data["ClusterManager"], "status: off") && strings.Contains(config.Data["SearchHeadCluster"], "status: off") && strings.Contains(config.Data["MonitoringConsole"], "status: off")).To(Equal(true), "Config map update not complete") + + // ############ VERIFY APPS UPDATED TO V2 ############# + appVersion = "V2" + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + 
shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV2 + mcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, clusterManagerBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multi Site Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, managerappframeworkm4, appframework: can deploy a M4 SVA and have apps installed and updated locally on Cluster Manager and Deployer via manual poll", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCP + * Create app source with local scope for M4 SVA (Cluster Manager and Deployer) + * Prepare and deploy M4 CRD with app framework and wait for pods to be ready + ########## INITIAL VERIFICATION ############# + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are installed locally on Cluster Manager and Deployer + ############### UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for pods to be ready + ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + * Verify Apps are not updated + ############ ENABLE MANUAL POLL ############ + * Verify Manual Poll disabled after the poll is triggered + ########## UPGRADE VERIFICATIONS ############ + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are copied, installed and upgraded on Cluster Manager and Deployer + */ + + //################## SETUP #################### + // Upload V1 apps to GCP for Indexer Cluster + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
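Readiness helpers such as testenv.ClusterManagerReady and testenv.SearchHeadClusterReady presumably poll the CR's status phase until it reports Ready, with a long timeout, in the style of Gomega's Eventually. A stripped-down sketch of that pattern; the phase getter is stubbed out, whereas in the suite it would query the custom resource through the deployment's Kubernetes client:

// readiness_wait_sketch.go - illustrative only.
package sketch

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// currentPhase stands in for reading cr.Status.Phase; the real helpers fetch the
// ClusterManager / SearchHeadCluster custom resource from the cluster on each poll.
func currentPhase() string {
	return "Ready"
}

func TestWaitForReady(t *testing.T) {
	g := gomega.NewWithT(t)
	// Poll every 5 seconds for up to 10 minutes, roughly the kind of budget an
	// integration suite needs while a multisite cluster comes up.
	g.Eventually(currentPhase, 10*time.Minute, 5*time.Second).Should(gomega.Equal("Ready"))
}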
+ + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, s3TestDirIdxc, 0) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, s3TestDirShc, 0) + + // Deploy Multisite Cluster and Search Head Cluster, with App Framework enabled on Cluster Manager and Deployer + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATION ############# + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE APPS ################ + // Delete V1 apps on GCP + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCP", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to GCP for Indexer Cluster + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s 
apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + appVersion = "V1" + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############ ENABLE MANUAL POLL ############ + appVersion = "V2" + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err := testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["ClusterManager"] = strings.Replace(config.Data["ClusterManager"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer cluster configured as multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["SearchHeadCluster"] = strings.Replace(config.Data["SearchHeadCluster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ########## Verify Manual Poll config map disabled after the poll is triggered ################# + + // Verify config 
map set back to off after poll trigger + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify config map set back to off after poll trigger for %s app", appVersion)) + config, _ = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + + Expect(strings.Contains(config.Data["ClusterManager"], "status: off") && strings.Contains(config.Data["SearchHeadCluster"], "status: off")).To(Equal(true), "Config map update not complete") + + //########## UPGRADE VERIFICATIONS ############ + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multi Site Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("m4, integration, managerappframeworkm4, appframework: can deploy a m4 SVA with apps installed locally on Cluster Manager and Deployer, cluster-wide on Peers and Search Heads, then upgrade them via a manual poll", func() { + + /* Test Steps + ################## SETUP #################### + * Split Applist into clusterlist and local list + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster for local and cluster scope + * Create app sources for Cluster Manager and Deployer with local and cluster scope + * Prepare and deploy m4 CRD with app framework and wait for the pods to be ready + ######### INITIAL VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Monitoring Console and on Search Heads and Indexers pods + ############### UPGRADE APPS ################ + * Upload V2 apps on GCP + * Wait for all m4 pods to be ready + ############ FINAL VERIFICATIONS ############ + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V2 apps are copied and upgraded on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Split Applist into 2 lists for local and cluster install + appVersion := "V1" + appListLocal := appListV1[len(appListV1)/2:] + appListCluster := appListV1[:len(appListV1)/2] + + // Upload appListLocal list of apps to GCP (to be used for local install) for Idxc + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for local install (local scope)", appVersion)) + s3TestDirIdxcLocal = "m4appfw-" + testenv.RandomDNSName(4) + localappFileList := testenv.GetAppFileList(appListLocal) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxcLocal, localappFileList, downloadDirV1) + 
Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (local scope) to GCP test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListLocal list of apps to GCP (to be used for local install) for Shc + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for local install (local scope)", appVersion)) + s3TestDirShcLocal = "m4appfw-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShcLocal, localappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (local scope) to GCP test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of apps to GCP (to be used for cluster-wide install) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for cluster-wide install (cluster scope)", appVersion)) + s3TestDirIdxcCluster = "m4appfw-cluster-" + testenv.RandomDNSName(4) + clusterappFileList := testenv.GetAppFileList(appListCluster) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxcCluster, clusterappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (cluster scope) to GCP test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of apps to GCP (to be used for cluster-wide install) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for cluster-wide install (cluster scope)", appVersion)) + s3TestDirShcCluster = "m4appfw-cluster-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShcCluster, clusterappFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps (cluster scope) to GCP test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec + appSourceNameLocalIdxc := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameLocalShc := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameClusterIdxc := "appframework-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameClusterShc := "appframework-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxcLocal := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShcLocal := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appSourceVolumeNameIdxcCluster := "appframework-test-volume-idxc-cluster-" + testenv.RandomDNSName(3) + appSourceVolumeNameShcCluster := "appframework-test-volume-shc-cluster-" + testenv.RandomDNSName(3) + + // Create App framework Spec for Cluster manager with scope local and append cluster scope + + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxcLocal, enterpriseApi.ScopeLocal, appSourceNameLocalIdxc, s3TestDirIdxcLocal, 0) + volumeSpecCluster := []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(appSourceVolumeNameIdxcCluster, testenv.GetGCPEndpoint(), testcaseEnvInst.GetIndexSecretName(), "aws", "s3", testenv.GetDefaultGCPRegion())} + appFrameworkSpecIdxc.VolList = append(appFrameworkSpecIdxc.VolList, volumeSpecCluster...) 
+ appSourceClusterDefaultSpec := enterpriseApi.AppSourceDefaultSpec{ + VolName: appSourceVolumeNameIdxcCluster, + Scope: enterpriseApi.ScopeCluster, + } + appSourceSpecCluster := []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceNameClusterIdxc, s3TestDirIdxcCluster, appSourceClusterDefaultSpec)} + appFrameworkSpecIdxc.AppSources = append(appFrameworkSpecIdxc.AppSources, appSourceSpecCluster...) + + // Create App framework Spec for Search Head Cluster with scope local and append cluster scope + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShcLocal, enterpriseApi.ScopeLocal, appSourceNameLocalShc, s3TestDirShcLocal, 0) + volumeSpecCluster = []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(appSourceVolumeNameShcCluster, testenv.GetGCPEndpoint(), testcaseEnvInst.GetIndexSecretName(), "aws", "s3", testenv.GetDefaultGCPRegion())} + + appFrameworkSpecShc.VolList = append(appFrameworkSpecShc.VolList, volumeSpecCluster...) + appSourceClusterDefaultSpec = enterpriseApi.AppSourceDefaultSpec{ + VolName: appSourceVolumeNameShcCluster, + Scope: enterpriseApi.ScopeCluster, + } + appSourceSpecCluster = []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceNameClusterShc, s3TestDirShcCluster, appSourceClusterDefaultSpec)} + appFrameworkSpecShc.AppSources = append(appFrameworkSpecShc.AppSources, appSourceSpecCluster...) + + // Create Multisite Cluster and Search Head Cluster, with App Framework enabled on Cluster Manager and Deployer + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with both Local and Cluster scope for apps installation") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster goes to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameLocalIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxcLocal, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: indexersPerSite,
CrMultisite: true, CrClusterPods: idxcPodNames} + cmAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameClusterIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxcCluster, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + shcAppSourceInfoLocal := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameLocalShc, CrAppSourceVolumeName: appSourceVolumeNameShcLocal, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListLocal, CrAppFileList: localappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + shcAppSourceInfoCluster := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameClusterShc, CrAppSourceVolumeName: appSourceVolumeNameShcCluster, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListCluster, CrAppFileList: clusterappFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster} + clusterManagerBundleHash := testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + + //############### UPGRADE APPS ################ + // Delete apps on GCP + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCP", appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Redefine app lists as LDAP app isn't in V1 apps + appListLocal = appListV1[len(appListV1)/2:] + appListCluster = appListV1[:len(appListV1)/2] + + // Upload appListLocal list of V2 apps to GCP (to be used for local install) + appVersion = "V2" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for local install (local scope)", appVersion)) + localappFileList = testenv.GetAppFileList(appListLocal) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxcLocal, localappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for local install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShcLocal, localappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for local install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload appListCluster list of V2 apps to GCP (to be used for cluster-wide install) + clusterappFileList = testenv.GetAppFileList(appListCluster) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for cluster install (cluster scope)", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxcCluster, clusterappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for cluster-wide install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShcCluster, clusterappFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for cluster-wide install", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // ############ ENABLE MANUAL POLL ############ + + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err := testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["ClusterManager"] = strings.Replace(config.Data["ClusterManager"], "off", "on", 1) + config.Data["SearchHeadCluster"] = strings.Replace(config.Data["SearchHeadCluster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameClusterIdxc, clusterappFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // ########## Verify Manual Poll config map disabled after the poll is triggered ################# + + // Verify config map set back to off after poll trigger + testcaseEnvInst.Log.Info("Verify config map set back to off after poll trigger for app", "version", appVersion) + config, _ = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(strings.Contains(config.Data["ClusterManager"], "status: off") && strings.Contains(config.Data["SearchHeadCluster"], "status: off")).To(Equal(true), "Config map update not complete") + + //########## UPGRADE VERIFICATION ############# + cmAppSourceInfoLocal.CrAppVersion = appVersion + cmAppSourceInfoLocal.CrAppList = appListLocal + cmAppSourceInfoLocal.CrAppFileList = localappFileList + cmAppSourceInfoCluster.CrAppVersion = appVersion + cmAppSourceInfoCluster.CrAppList = appListCluster + cmAppSourceInfoCluster.CrAppFileList = clusterappFileList + shcAppSourceInfoLocal.CrAppVersion = appVersion + shcAppSourceInfoLocal.CrAppList = appListLocal + shcAppSourceInfoLocal.CrAppFileList = localappFileList + shcAppSourceInfoCluster.CrAppVersion = appVersion + shcAppSourceInfoCluster.CrAppList = appListCluster + shcAppSourceInfoCluster.CrAppFileList = clusterappFileList + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfoLocal, cmAppSourceInfoCluster, shcAppSourceInfoLocal, shcAppSourceInfoCluster} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, clusterManagerBundleHash) + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multisite Indexer Cluster with Search Head 
Cluster (M4) and App Framework", func() { + It("integration, m4, managerappframeworkm4, appframework: can deploy a M4, add new apps to app source while install is in progress and have all apps installed locally on Cluster Manager and Deployer", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCP for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload big-size app to GCP for Indexer Cluster and Search Head Cluster + * Create app sources for Cluster Manager and Deployer + * Prepare and deploy M4 CRD with app framework + * Verify app installation is in progress on Cluster Manager and Deployer + * Upload more apps from GCP during bigger app install + * Wait for polling interval to pass + * Verify all apps are installed on Cluster Manager and Deployer + */ + + //################## SETUP #################### + // Upload V1 apps to GCP for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + s3TestDirMC := "m4appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Prepare Monitoring Console spec with its own app source + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, s3TestDirMC, 60) + + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Download all test apps from GCP + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList = testenv.GetAppFileList(appList) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps") + + // Upload big-size app to GCP for Cluster Manager + appList = testenv.BigSingleApp + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload big-size app to GCP for Cluster Manager") + s3TestDirIdxc = "m4appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to GCP test directory for Cluster Manager") + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Upload big-size app to GCP for Search Head Cluster + testcaseEnvInst.Log.Info("Upload big-size app to GCP for Search Head Cluster") + s3TestDirShc = "m4appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to GCP test directory for Search Head Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, s3TestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, s3TestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, mcName, "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify App installation is in progress on Cluster Manager + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgPodCopyComplete) + + // Upload more apps to GCP for Cluster Manager + appList = testenv.ExtraApps + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload more apps to GCP for Cluster Manager") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to GCP test directory for Cluster Manager") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload more apps to GCP for Deployer + testcaseEnvInst.Log.Info("Upload more apps to GCP for Deployer") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to GCP test directory for Deployer") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Wait for polling interval to pass + testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Verify all apps are installed on Cluster Manager + appList = append(testenv.BigSingleApp, testenv.ExtraApps...) 
+ appFileList = testenv.GetAppFileList(appList) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on Cluster Manager", appList)) + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), cmPod, appList, true, "enabled", false, false) + + // Ensure Search Head Cluster goes to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + time.Sleep(60 * time.Second) + // Wait for polling interval to pass + testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName()+"-shc", shc.Kind, appSourceNameShc, appFileList) + + // Verify all apps are installed on Deployer + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on Deployer", appList)) + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), deployerPod, appList, true, "enabled", false, false) + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (M4) and App Framework", func() { + It("m4gcp, managerappframeworkm4gcp, appframeworkgcp, m4_mgr_gcp_sanity: can deploy a M4, add new apps to app source while install is in progress and have all apps installed cluster-wide", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCP for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Upload big-size app to GCP for Indexer Cluster and Search Head Cluster + * Create app sources for Cluster Manager and Deployer + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + ############## VERIFICATIONS ################ + * Verify App installation is in progress on Cluster Manager and Deployer + * Upload more apps to GCP during big-size app install + * Wait for polling interval to pass + * Verify all apps are installed on Cluster Manager and Deployer + */ + + //################## SETUP #################### + // Upload V1 apps to GCP for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Monitoring Console", appVersion)) + s3TestDirMC := "m4appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Monitoring Console %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...)
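Every scenario in this suite stages its app packages with testenv.UploadFilesToGCP before the CRs are created. A minimal sketch of what such an upload helper could look like, assuming it is a thin wrapper over cloud.google.com/go/storage (the real testenv implementation may differ):

```go
package gcsupload

import (
	"context"
	"io"
	"os"
	"path"
	"path/filepath"

	"cloud.google.com/go/storage"
)

// uploadFilesToGCS copies local app packages into <bucket>/<testDir>/ and
// returns the object keys it created, so they can be cleaned up later.
func uploadFilesToGCS(ctx context.Context, bucket, testDir string, files []string, localDir string) ([]string, error) {
	client, err := storage.NewClient(ctx) // credentials come from GOOGLE_APPLICATION_CREDENTIALS
	if err != nil {
		return nil, err
	}
	defer client.Close()

	var uploaded []string
	for _, f := range files {
		src, err := os.Open(filepath.Join(localDir, f))
		if err != nil {
			return uploaded, err
		}
		key := path.Join(testDir, f)
		w := client.Bucket(bucket).Object(key).NewWriter(ctx)
		if _, err := io.Copy(w, src); err != nil {
			src.Close()
			return uploaded, err
		}
		src.Close()
		// Close flushes the writer and finalizes the object in the bucket.
		if err := w.Close(); err != nil {
			return uploaded, err
		}
		uploaded = append(uploaded, key)
	}
	return uploaded, nil
}
```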
+ + // Prepare Monitoring Console spec with its own app source + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, s3TestDirMC, 60) + + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Download all test apps from GCP + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList = testenv.GetAppFileList(appList) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps") + + // Upload big-size app to GCP for Cluster Manager + appList = testenv.BigSingleApp + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload big-size app to GCP for Cluster Manager") + s3TestDirIdxc = "m4appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to GCP test directory for Cluster Manager") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload big-size app to GCP for Search Head Cluster + testcaseEnvInst.Log.Info("Upload big-size app to GCP for Search Head Cluster") + s3TestDirShc = "m4appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to GCP test directory for Search Head Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) 
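Helpers such as testenv.VerifyMonitoringConsoleReady and testenv.ClusterManagerReady used throughout these tests are, at their core, poll-and-assert loops. A minimal Gomega sketch of that pattern, assuming a Ginkgo/Gomega test context; getPhase is a hypothetical stand-in for whatever reads the CR's status phase from the cluster, and the timeouts are illustrative:

```go
package readysketch

import (
	"time"

	"github.com/onsi/gomega"
)

// waitForReady polls a CR phase until it reports "Ready", then keeps checking
// for a short window to make sure it stays there.
func waitForReady(getPhase func() string) {
	gomega.Eventually(getPhase, 10*time.Minute, 5*time.Second).Should(gomega.Equal("Ready"))
	gomega.Consistently(getPhase, 1*time.Minute, 5*time.Second).Should(gomega.Equal("Ready"))
}
```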
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, s3TestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, s3TestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, mcName, "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify App installation is in progress + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgPodCopyComplete) + + // Upload more apps to GCP for Cluster Manager + appList = testenv.ExtraApps + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload more apps to GCP for Cluster Manager") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to GCP test directory for Cluster Manager") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload more apps to GCP for Deployer + testcaseEnvInst.Log.Info("Upload more apps to GCP for Deployer") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to GCP test directory for Deployer") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Wait for polling interval to pass + testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Verify all apps are installed on indexers + appList = append(testenv.BigSingleApp, testenv.ExtraApps...) 
+ appFileList = testenv.GetAppFileList(appList) + idxcPodNames := testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), indexersPerSite, true, siteCount) + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on indexers", appList)) + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), idxcPodNames, appList, true, "enabled", false, true) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Wait for polling interval to pass + testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName()+"-shc", shc.Kind, appSourceNameShc, appFileList) + + // Verify all apps are installed on Search Heads + shcPodNames := testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on Search Heads", appList)) + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), shcPodNames, appList, true, "enabled", false, true) + + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, managerappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled and reset operator pod while app install is in progress", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + * While app install is in progress, restart the operator + ########## VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Download all apps from GCP + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList := testenv.GetAppFileList(appList) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download big-size app") + + // Upload V1 apps to GCP for Indexer Cluster + appVersion := "V1" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
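The step below restarts the operator with testenv.DeleteOperatorPod while app installation is still in flight, to prove the app framework recovers its state. A hedged client-go sketch of such a reset, assuming the operator pods carry a label like name=splunk-operator (the real selector may differ):

```go
package resetsketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteOperatorPods deletes the operator pod(s) by label selector; the
// owning Deployment recreates them, which simulates an operator restart
// in the middle of an app install or download.
func deleteOperatorPods(ctx context.Context, client kubernetes.Interface, namespace string) error {
	return client.CoreV1().Pods(namespace).DeleteCollection(ctx,
		metav1.DeleteOptions{},
		metav1.ListOptions{LabelSelector: "name=splunk-operator"}, // assumed label
	)
}
```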
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, s3TestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, s3TestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify App installation is in progress on Cluster Manager + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgInstallPending) + + // Delete Operator pod while Install in progress + testenv.DeleteOperatorPod(testcaseEnvInst) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age 
+ testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, managerappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled and reset operator pod while app download is in progress", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + * While app download is in progress, restart the operator + ########## VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Download all apps from GCP + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList := testenv.GetAppFileList(appList) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download big-size app") + + // Upload V1 apps to GCP for Indexer Cluster + appVersion := "V1" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
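Most of these cases bracket their verifications with testenv.GetPodsStartTime and testenv.VerifyNoPodReset to prove that app operations did not bounce any Splunk pods. A simplified sketch of that idea; the label selector is an assumption and the comparison logic is illustrative:

```go
package agesketch

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getPodStartTimes records when each Splunk pod started. Taking a snapshot
// before an upgrade and comparing it afterwards shows whether any pod was
// restarted (reset) by the operation.
func getPodStartTimes(ctx context.Context, client kubernetes.Interface, namespace string) (map[string]time.Time, error) {
	pods, err := client.CoreV1().Pods(namespace).List(ctx,
		metav1.ListOptions{LabelSelector: "app.kubernetes.io/managed-by=splunk-operator"}) // assumed label
	if err != nil {
		return nil, err
	}
	startTimes := map[string]time.Time{}
	for _, p := range pods.Items {
		if p.Status.StartTime != nil {
			startTimes[p.Name] = p.Status.StartTime.Time
		}
	}
	return startTimes, nil
}

// noPodReset returns true if every pod seen before is still present with the
// same start time afterwards.
func noPodReset(before, after map[string]time.Time) bool {
	for name, t := range before {
		if !after[name].Equal(t) {
			return false
		}
	}
	return true
}
```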
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, s3TestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, s3TestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify App Download is in progress on Cluster Manager + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgDownloadComplete, enterpriseApi.AppPkgDownloadPending) + + // Delete Operator pod while Install in progress + testenv.DeleteOperatorPod(testcaseEnvInst) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + 
testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, managerappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled, install an app, then disable it by using a disabled version of the app and then remove it from app source", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + ########## INITIAL VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and on Search Heads and Indexers pods + * Disable the app + * Delete the app from s3 + * Check for repo state in App Deployment Info + */ + + //################## SETUP ################## + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + + // Upload V1 apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
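Later in this test the disabled app is removed from the bucket with testenv.DeleteFileOnGCP, after which the repo state is expected to move to 2 (deleted from remote storage). A minimal sketch of that delete step, assuming a thin wrapper over cloud.google.com/go/storage:

```go
package gcsdelete

import (
	"context"
	"path"

	"cloud.google.com/go/storage"
)

// deleteAppFromGCS removes a single app package object from the test bucket,
// e.g. deleteAppFromGCS(ctx, bucket, testDir, "some_app.tgz").
func deleteAppFromGCS(ctx context.Context, bucket, testDir, appFile string) error {
	client, err := storage.NewClient(ctx)
	if err != nil {
		return err
	}
	defer client.Close()
	// Delete the object; a subsequent poll should report it as removed.
	return client.Bucket(bucket).Object(path.Join(testDir, appFile)).Delete(ctx)
}
```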
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, s3TestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, s3TestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATIONS ########## + idxcPodNames := testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames := testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify repo state on App to be disabled to be 1 (i.e app present on GCP bucket) + appName := appListV1[0] + appFileName := testenv.GetAppFileList([]string{appName}) + testenv.VerifyAppRepoState(ctx, deployment, testcaseEnvInst, cm.Name, cm.Kind, appSourceNameIdxc, 1, appFileName[0]) + + // Disable the app + testenv.DisableAppsToGCP(downloadDirV1, appFileName, s3TestDirIdxc) + + // Check for changes in App phase to determine 
if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileName) + + // Ensure Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Wait for App state to update after config file change + testenv.WaitforAppInstallState(ctx, deployment, testcaseEnvInst, idxcPodNames, testcaseEnvInst.GetName(), appName, "disabled", true) + + // Delete the file from GCP + s3Filepath := filepath.Join(s3TestDirIdxc, appFileName[0]) + err = testenv.DeleteFileOnGCP(testGcsBucket, s3Filepath) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to delete %s app on GCP test directory", appFileName)) + + // Verify repo state is set to 2 (i.e app deleted from GCP bucket) + testenv.VerifyAppRepoState(ctx, deployment, testcaseEnvInst, cm.Name, cm.Kind, appSourceNameIdxc, 2, appFileName[0]) + }) + }) + + Context("Multi Site Indexer Cluster with Search Head Cluster (M4) with App Framework", func() { + It("integration, m4, managerappframeworkm4, appframework: can deploy a M4 SVA, install apps via manual polling, switch to periodic polling, verify apps are not updated before the end of AppsRepoPollInterval, then updated after", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCP + * Create app source with local scope for M4 SVA, AppsRepoPollInterval=0 to set apps polling as manual + * Prepare and deploy M4 CRD with app framework and wait for pods to be ready + ########## INITIAL VERIFICATION ############# + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify apps are installed locally on Cluster Manager and Deployer + * Verify status is 'OFF' in config map for Cluster Manager and Search Head Cluster + ######### SWITCH FROM MANUAL TO PERIODIC POLLING ############ + * Set AppsRepoPollInterval to 180 seconds for Cluster Manager and Search Head Cluster + * Change status to 'ON' in config map for Cluster Manager and Search Head Cluster + ############### UPGRADE APPS ################ + * Upgrade apps in app sources + * Wait for pods to be ready + ############ UPGRADE VERIFICATION ########## + * Verify apps are not updated before the end of AppsRepoPollInterval duration + * Verify apps are updated after the end of AppsRepoPollInterval duration + */ + + //################## SETUP #################### + // Upload V1 apps to GCP for Indexer Cluster + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
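// --- Editorial sketch (not part of the committed test code) -----------------------
// The previous test disables an installed app by uploading a "disabled" copy of the
// same package (testenv.DisableAppsToGCP). In Splunk Enterprise an app's enabled or
// disabled state is driven by the [install] stanza of its app.conf, so a disabled
// package presumably ships `state = disabled`. The helper below is a hypothetical
// sketch of flagging an unpacked app as disabled before repackaging it; the function
// name and file layout are assumptions.

package appdisable

import (
	"os"
	"path/filepath"
)

// disabledAppConf marks a Splunk app as disabled via the [install] stanza.
const disabledAppConf = `[install]
state = disabled
`

// markAppDisabled writes <appDir>/local/app.conf so that repackaging appDir yields a
// "disabled" version of the app, which is what the test uploads over the original.
func markAppDisabled(appDir string) error {
	localDir := filepath.Join(appDir, "local")
	if err := os.MkdirAll(localDir, 0o755); err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(localDir, "app.conf"), []byte(disabledAppConf), 0o644)
}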
+ + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, s3TestDirIdxc, 0) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, s3TestDirShc, 0) + + // Deploy Multisite Cluster and Search Head Cluster, with App Framework enabled on Cluster Manager and Deployer + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATION ############# + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, 
testcaseEnvInst.GetName(), splunkPodAge, nil) + + // Verify status is 'OFF' in config map for Cluster Manager and Search Head Cluster + testcaseEnvInst.Log.Info("Verify status is 'OFF' in config map for Cluster Manager and Search Head Cluster") + config, _ := testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(strings.Contains(config.Data["ClusterManager"], "status: off") && strings.Contains(config.Data["SearchHeadCluster"], "status: off")).To(Equal(true), "Config map update not complete") + + //######### SWITCH FROM MANUAL TO PERIODIC POLLING ############ + // Get instance of current Cluster Manager CR with latest config + cm = &enterpriseApi.ClusterManager{} + err = deployment.GetInstance(ctx, deployment.GetName(), cm) + Expect(err).To(Succeed(), "Failed to edit Cluster Manager") + + // Set AppsRepoPollInterval for Cluster Manager to 180 seconds + testcaseEnvInst.Log.Info("Set AppsRepoPollInterval for Cluster Manager to 180 seconds") + cm.Spec.AppFrameworkConfig.AppsRepoPollInterval = int64(180) + err = deployment.UpdateCR(ctx, cm) + Expect(err).To(Succeed(), "Failed to change AppsRepoPollInterval value for Cluster Manager") + + // Get instance of current Search Head Cluster CR with latest config + shc = &enterpriseApi.SearchHeadCluster{} + err = deployment.GetInstance(ctx, deployment.GetName()+"-shc", shc) + Expect(err).To(Succeed(), "Failed to edit Search Head Cluster") + + // Set AppsRepoPollInterval for Search Head Cluster to 180 seconds + testcaseEnvInst.Log.Info("Set AppsRepoPollInterval for Search Head Cluster to 180 seconds") + shc.Spec.AppFrameworkConfig.AppsRepoPollInterval = int64(180) + err = deployment.UpdateCR(ctx, shc) + Expect(err).To(Succeed(), "Failed to change AppsRepoPollInterval value for Search Head Cluster") + + // Change status to 'ON' in config map for Cluster Manager and Search Head Cluster + testcaseEnvInst.Log.Info("Change status to 'ON' in config map for Cluster Manager") + config, err = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map") + + config.Data["ClusterManager"] = strings.Replace(config.Data["ClusterManager"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map for Cluster Manager") + + testcaseEnvInst.Log.Info("Change status to 'ON' in config map for Search Head Cluster") + config.Data["SearchHeadCluster"] = strings.Replace(config.Data["SearchHeadCluster"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map for Search Head Cluster") + + // Wait 5 seconds to be sure reconcile caused by CR update and config map update are done + testcaseEnvInst.Log.Info("Wait 5 seconds to be sure reconcile caused by CR update and config map update are done") + time.Sleep(5 * time.Second) + + // Verify status is 'ON' in config map for Cluster Manager and Search Head Cluster + testcaseEnvInst.Log.Info("Verify status is 'ON' in config map for Cluster Manager and Search Head Cluster") + config, _ = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(strings.Contains(config.Data["ClusterManager"], "status: on") && strings.Contains(config.Data["SearchHeadCluster"], "status: on")).To(Equal(true), "Config map update not complete") + + //############### UPGRADE APPS ################ + // Delete V1 apps on GCP + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCP", 
appVersion)) + testenv.DeleteFilesOnGCP(testGcsBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to GCP for Indexer Cluster + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## UPGRADE VERIFICATIONS ############ + testcaseEnvInst.Log.Info("Verify apps are not updated before the end of AppsRepoPollInterval duration") + appVersion = "V1" + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Wait for the end of AppsRepoPollInterval duration + testcaseEnvInst.Log.Info("Wait for the end of AppsRepoPollInterval duration") + time.Sleep(2 * time.Minute) + + testcaseEnvInst.Log.Info("Verify apps are updated after the end of AppsRepoPollInterval duration") + appVersion = "V2" + cmAppSourceInfo.CrAppVersion = appVersion + cmAppSourceInfo.CrAppList = appListV2 + cmAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + shcAppSourceInfo.CrAppVersion = appVersion + shcAppSourceInfo.CrAppList = appListV2 + shcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("integration, m4, managerappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled and update apps after app download is completed", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + * While app download is in progress, restart the operator + * While app download is completed, upload new versions of the apps + ######### VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Search Heads and Indexers pods + ######### UPGRADE VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App 
Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify V1 apps are copied, installed on Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Download all apps from GCP + appVersion := "V1" + appListV1 := []string{appListV1[0]} + appFileList := testenv.GetAppFileList(appListV1) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps") + + // Upload V1 apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeLocal, appSourceNameIdxc, s3TestDirIdxc, 120) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeLocal, appSourceNameShc, s3TestDirShc, 120) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify App Download is in progress on Cluster Manager + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgPodCopyPending) + + // Upload V2 apps to GCP for Indexer Cluster + appVersion = "V2" + appListV2 := []string{appListV2[0]} + appFileList = testenv.GetAppFileList(appListV2) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
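// --- Editorial sketch (not part of the committed test code) -----------------------
// testenv.DownloadFilesFromGCP (used at the top of this SETUP) and the
// DeleteFileOnGCP/DeleteFilesOnGCP helpers used by the surrounding tests are not part
// of this diff. Below is a hypothetical sketch of the equivalent object download and
// delete with cloud.google.com/go/storage; the function names are assumptions.

package gcsobjects

import (
	"context"
	"io"
	"os"
	"path/filepath"

	"cloud.google.com/go/storage"
)

// downloadObject copies gs://<bucket>/<objectPath> to <destDir>/<base name>.
func downloadObject(ctx context.Context, client *storage.Client, bucket, objectPath, destDir string) error {
	r, err := client.Bucket(bucket).Object(objectPath).NewReader(ctx)
	if err != nil {
		return err
	}
	defer r.Close()

	dst, err := os.Create(filepath.Join(destDir, filepath.Base(objectPath)))
	if err != nil {
		return err
	}
	defer dst.Close()

	_, err = io.Copy(dst, r)
	return err
}

// deleteObject removes gs://<bucket>/<objectPath>, mirroring what a helper such as
// testenv.DeleteFileOnGCP presumably does for each uploaded app package.
func deleteObject(ctx context.Context, client *storage.Client, bucket, objectPath string) error {
	return client.Bucket(bucket).Object(objectPath).Delete(ctx)
}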
+ + // Upload V2 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## VERIFICATIONS ########## + appVersion = "V1" + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())}, appListV1, false, "enabled", false, false) + + // Check for changes in App phase to determine if next poll has been triggered + appFileList = testenv.GetAppFileList(appListV2) + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + //############ UPGRADE VERIFICATIONS ############ + appVersion = "V2" + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV2, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV2, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It("m4, integration, managerappframeworkm4, appframework: can deploy a M4 SVA and install a bigger volume of apps than the operator PV disk space", func() { + + /* Test Steps + ################## SETUP #################### + * Upload 15 apps of 100MB size each to GCP for Indexer Cluster and Search Head Cluster for cluster scope + * Create app sources 
for Cluster Master and Deployer with cluster scope + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + ######### INITIAL VERIFICATIONS ############# + * Verify Apps are Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied, installed on Search Heads and Indexers pods + */ + + //################## SETUP #################### + // Create a large file on Operator pod + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + err := testenv.CreateDummyFileOnOperator(ctx, deployment, opPod, testenv.AppDownloadVolume, "1G", "test_file.img") + Expect(err).To(Succeed(), "Unable to create file on operator") + filePresentOnOperator = true + + // Download apps for test + appVersion := "V1" + appList := testenv.PVTestApps + appFileList := testenv.GetAppFileList(appList) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsPVTestApps, downloadDirPVTestApps, appFileList) + Expect(err).To(Succeed(), "Unable to download app files") + + // Upload apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + s3TestDirIdxc := "m4appfw-idxc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirPVTestApps) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search head Cluster", appVersion)) + s3TestDirShc := "m4appfw-shc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirPVTestApps) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
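// --- Editorial sketch (not part of the committed test code) -----------------------
// testenv.CreateDummyFileOnOperator (called at the top of this SETUP) pre-fills the
// operator's app-download volume so the test apps no longer fit in the remaining free
// space; its implementation is not in this diff. A hypothetical way to do that is to
// exec into the operator pod with client-go and write a large file; the dd command,
// file name, and size below are assumptions.

package podexec

import (
	"bytes"
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// createDummyFile writes a ~1GiB zero-filled file under volPath inside the given pod,
// shrinking the free space left on the operator's app-download PV.
func createDummyFile(ctx context.Context, cfg *rest.Config, cs *kubernetes.Clientset, ns, pod, volPath string) error {
	cmd := []string{"dd", "if=/dev/zero", "of=" + volPath + "/test_file.img", "bs=1M", "count=1024"}
	req := cs.CoreV1().RESTClient().Post().
		Resource("pods").Namespace(ns).Name(pod).SubResource("exec").
		VersionedParams(&corev1.PodExecOptions{Command: cmd, Stdout: true, Stderr: true}, scheme.ParameterCodec)
	exec, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL())
	if err != nil {
		return err
	}
	var stdout, stderr bytes.Buffer
	return exec.StreamWithContext(ctx, remotecommand.StreamOptions{Stdout: &stdout, Stderr: &stderr})
}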
+ + // Maximum apps to be downloaded in parallel + maxConcurrentAppDownloads := 30 + + // Create App framework Spec for C3 + appSourceNameIdxc := "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc := "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceVolumeNameIdxc := "appframework-test-volume-idxc-" + testenv.RandomDNSName(3) + appSourceVolumeNameShc := "appframework-test-volume-shc-" + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, s3TestDirIdxc, 60) + appFrameworkSpecIdxc.MaxConcurrentAppDownloads = uint64(maxConcurrentAppDownloads) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, s3TestDirShc, 60) + appFrameworkSpecShc.MaxConcurrentAppDownloads = uint64(maxConcurrentAppDownloads) + + // Deploy Multisite Cluster and Search Head Cluster, with App Framework enabled on Cluster Manager and Deployer + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster with Search Head Cluster") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ INITIAL VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + 
It("integration, m4, managerappframeworkm4, appframework: can deploy a M4 SVA with App Framework enabled and delete apps from app directory when download is complete", func() { + + /* Test Steps + ################## SETUP ################## + * Upload big-size app to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework and wait for the pods to be ready + * When app download is complete, delete apps from app directory + ########## VERIFICATIONS ########## + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify bundle push is successful + * Verify apps are copied and installed on Monitoring Console and on Search Heads and Indexers pods + */ + + //################## SETUP ################## + // Download big size apps from GCP + appList := testenv.BigSingleApp + appFileList := testenv.GetAppFileList(appList) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download big-size app") + + // Upload big size app to GCP for Indexer Cluster + appVersion := "V1" + testcaseEnvInst.Log.Info("Upload big size app to GCP for Indexer Cluster") + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big size to GCP test directory for Indexer Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload big size app to GCP for Search Head Cluster + testcaseEnvInst.Log.Info("Upload big size app to GCP for Search Head Cluster") + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big size to GCP test directory for Search Head Cluster") + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, s3TestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, s3TestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + shReplicas := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify App Download is completed on Cluster Manager + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), cm.Kind, appSourceNameIdxc, appFileList, enterpriseApi.AppPkgPodCopyComplete, enterpriseApi.AppPkgPodCopyPending) + + //Delete apps from app directory when app download is complete + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + podDownloadPath := filepath.Join(splcommon.AppDownloadVolume, "downloadedApps", testenvInstance.GetName(), cm.Kind, deployment.GetName(), enterpriseApi.ScopeCluster, appSourceNameIdxc, testenv.AppInfo[appList[0]]["filename"]) + err = testenv.DeleteFilesOnOperatorPod(ctx, deployment, opPod, []string{podDownloadPath}) + Expect(err).To(Succeed(), "Unable to delete file on pod") + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## VERIFICATIONS ########## + var idxcPodNames, shcPodNames []string + idxcPodNames = testenv.GeneratePodNameSlice(testenv.MultiSiteIndexerPod, deployment.GetName(), 1, true, siteCount) + shcPodNames = testenv.GeneratePodNameSlice(testenv.SearchHeadPod, deployment.GetName(), shReplicas, false, 1) + cmPod := []string{fmt.Sprintf(testenv.ClusterManagerPod, deployment.GetName())} + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + cmAppSourceInfo := testenv.AppSourceInfo{CrKind: cm.Kind, CrName: cm.Name, CrAppSourceName: appSourceNameIdxc, CrAppSourceVolumeName: appSourceVolumeNameIdxc, CrPod: cmPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: indexersPerSite, CrMultisite: true, CrClusterPods: idxcPodNames} + shcAppSourceInfo := testenv.AppSourceInfo{CrKind: shc.Kind, CrName: shc.Name, CrAppSourceName: appSourceNameShc, CrAppSourceVolumeName: appSourceVolumeNameShc, CrPod: deployerPod, 
CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeCluster, CrAppList: appList, CrAppFileList: appFileList, CrReplicas: shReplicas, CrClusterPods: shcPodNames} + allAppSourceInfo := []testenv.AppSourceInfo{cmAppSourceInfo, shcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify no pods reset by checking the pod age + testenv.VerifyNoPodReset(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), splunkPodAge, nil) + }) + }) + + Context("Multisite Indexer Cluster with Search Head Cluster (m4) with App Framework", func() { + It(" m4gcp, managerappframeworkm4gcp, appframeworkgcp, m4_mgr_gcp_sanity: can deploy a M4 SVA with App Framework enabled, install apps and check IsDeploymentInProgress for CM and SHC CR's", func() { + + /* Test Steps + ################## SETUP ################## + * Upload V1 apps to GCP for Indexer Cluster and Search Head Cluster + * Prepare and deploy M4 CRD with app framework + * Verify IsDeploymentInProgress is set + * Wait for the pods to be ready + */ + + //################## SETUP ################## + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + + // Upload V1 apps to GCP for Indexer Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Indexer Cluster", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGcsBucket, s3TestDirIdxc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Indexer Cluster %s", appVersion, testGcsBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload V1 apps to GCP for Search Head Cluster + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP for Search Head Cluster", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGcsBucket, s3TestDirShc, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP test directory for Search Head Cluster", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
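// --- Editorial sketch (not part of the committed test code) -----------------------
// These tests repeatedly record pod start times (testenv.GetPodsStartTime) and later
// call testenv.VerifyNoPodReset: an app install must not restart Splunk pods, so every
// pod's start time must be unchanged. The helpers below are a hypothetical sketch of
// that check with client-go; the names and the "splunk-" name filter are assumptions.

package podreset

import (
	"context"
	"strings"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// splunkPodStartTimes returns the start time of every pod in the namespace whose name
// begins with "splunk-", keyed by pod name.
func splunkPodStartTimes(ctx context.Context, cs *kubernetes.Clientset, ns string) (map[string]time.Time, error) {
	pods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	out := map[string]time.Time{}
	for _, p := range pods.Items {
		if !strings.HasPrefix(p.Name, "splunk-") || p.Status.StartTime == nil {
			continue
		}
		out[p.Name] = p.Status.StartTime.Time
	}
	return out, nil
}

// noPodReset reports whether every pod seen before is still present with the same
// start time, i.e. nothing was restarted by the app deployment.
func noPodReset(before, after map[string]time.Time) bool {
	for name, t := range before {
		if cur, ok := after[name]; !ok || !cur.Equal(t) {
			return false
		}
	}
	return true
}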
+ + // Create App framework Spec for M4 + appSourceNameIdxc = "appframework-idxc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appSourceNameShc = "appframework-shc-" + enterpriseApi.ScopeCluster + testenv.RandomDNSName(3) + appFrameworkSpecIdxc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameIdxc, enterpriseApi.ScopeCluster, appSourceNameIdxc, s3TestDirIdxc, 60) + appFrameworkSpecShc := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameShc, enterpriseApi.ScopeCluster, appSourceNameShc, s3TestDirShc, 60) + + // Deploy M4 CRD + testcaseEnvInst.Log.Info("Deploy Multisite Indexer Cluster with Search Head Cluster") + siteCount := 3 + indexersPerSite := 1 + cm, _, shc, err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx, deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpecIdxc, appFrameworkSpecShc, true, "", "") + + Expect(err).To(Succeed(), "Unable to deploy Multisite Indexer Cluster and Search Head Cluster with App framework") + + // Verify IsDeploymentInProgress Flag is set to true for Cluster Master CR + testcaseEnvInst.Log.Info("Checking isDeploymentInProgress Flag for Cluster Manager") + testenv.VerifyIsDeploymentInProgressFlagIsSet(ctx, deployment, testcaseEnvInst, cm.Name, cm.Kind) + + // Ensure that the Cluster Manager goes to Ready phase + testenv.ClusterManagerReady(ctx, deployment, testcaseEnvInst) + + // Verify IsDeploymentInProgress Flag is set to true for SHC CR + testcaseEnvInst.Log.Info("Checking isDeploymentInProgress Flag for SHC") + testenv.VerifyIsDeploymentInProgressFlagIsSet(ctx, deployment, testcaseEnvInst, shc.Name, shc.Kind) + + // Ensure the Indexers of all sites go to Ready phase + testenv.IndexersReady(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Indexer Cluster configured as Multisite + testenv.IndexerClusterMultisiteStatus(ctx, deployment, testcaseEnvInst, siteCount) + + // Ensure Search Head Cluster go to Ready phase + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify RF SF is met + testenv.VerifyRFSFMet(ctx, deployment, testcaseEnvInst) + }) + }) +}) diff --git a/test/appframework_gcp/s1/appframework_gcs_suite_test.go b/test/appframework_gcp/s1/appframework_gcs_suite_test.go new file mode 100644 index 000000000..af2fab4c2 --- /dev/null +++ b/test/appframework_gcp/s1/appframework_gcs_suite_test.go @@ -0,0 +1,98 @@ +// Copyright (c) 2018-2022 Splunk Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package gcps1appfw + +import ( + "os" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/splunk/splunk-operator/test/testenv" +) + +const ( + // PollInterval specifies the polling interval + PollInterval = 5 * time.Second + + // ConsistentPollInterval is the interval to use to consistently check a state is stable + ConsistentPollInterval = 200 * time.Millisecond + ConsistentDuration = 2000 * time.Millisecond +) + +var ( + testenvInstance *testenv.TestEnv + testSuiteName = "s1appfw-" + testenv.RandomDNSName(3) + appListV1 []string + appListV2 []string + testDataGcsBucket = os.Getenv("TEST_BUCKET") + testGCSBucket = os.Getenv("TEST_INDEXES_S3_BUCKET") + gcsAppDirV1 = testenv.AppLocationV1 + gcsAppDirV2 = testenv.AppLocationV2 + gcsPVTestApps = testenv.PVTestAppsLocation + currDir, _ = os.Getwd() + downloadDirV1 = filepath.Join(currDir, "s1appfwV1-"+testenv.RandomDNSName(4)) + downloadDirV2 = filepath.Join(currDir, "s1appfwV2-"+testenv.RandomDNSName(4)) + downloadDirPVTestApps = filepath.Join(currDir, "s1appfwPVTestApps-"+testenv.RandomDNSName(4)) +) + +// TestBasic is the main entry point +func TestBasic(t *testing.T) { + + RegisterFailHandler(Fail) + + RunSpecs(t, "Running "+testSuiteName) +} + +var _ = BeforeSuite(func() { + var err error + testenvInstance, err = testenv.NewDefaultTestEnv(testSuiteName) + Expect(err).ToNot(HaveOccurred()) + + if testenv.ClusterProvider == "gcp" { + // Create a list of apps to upload to GCP + appListV1 = testenv.BasicApps + appFileList := testenv.GetAppFileList(appListV1) + + // Download V1 Apps from GCP + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download V1 app files") + + // Create a list of apps to upload to GCP after poll period + appListV2 = append(appListV1, testenv.NewAppsAddedBetweenPolls...) + appFileList = testenv.GetAppFileList(appListV2) + + // Download V2 Apps from GCP + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV2, downloadDirV2, appFileList) + Expect(err).To(Succeed(), "Unable to download V2 app files") + } else { + testenvInstance.Log.Info("Skipping Before Suite Setup", "Cluster Provider", testenv.ClusterProvider) + } + +}) + +var _ = AfterSuite(func() { + if testenvInstance != nil { + Expect(testenvInstance.Teardown()).ToNot(HaveOccurred()) + } + + // Delete locally downloaded app files + err := os.RemoveAll(downloadDirV1) + Expect(err).To(Succeed(), "Unable to delete locally downloaded V1 app files") + err = os.RemoveAll(downloadDirV2) + Expect(err).To(Succeed(), "Unable to delete locally downloaded V2 app files") +}) diff --git a/test/appframework_gcp/s1/appframework_gcs_test.go b/test/appframework_gcp/s1/appframework_gcs_test.go new file mode 100644 index 000000000..6bd3ad11c --- /dev/null +++ b/test/appframework_gcp/s1/appframework_gcs_test.go @@ -0,0 +1,2030 @@ +// Copyright (c) 2018-2022 Splunk Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License.s +package gcps1appfw + +import ( + "context" + "fmt" + "path/filepath" + "strings" + "time" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + testenv "github.com/splunk/splunk-operator/test/testenv" + corev1 "k8s.io/api/core/v1" +) + +var _ = Describe("s1appfw test", func() { + + var testcaseEnvInst *testenv.TestCaseEnv + var deployment *testenv.Deployment + var gcsTestDir string + var uploadedApps []string + var appSourceName string + var appSourceVolumeName string + var filePresentOnOperator bool + + ctx := context.TODO() + + BeforeEach(func() { + var err error + name := fmt.Sprintf("%s-%s", testenvInstance.GetName(), testenv.RandomDNSName(3)) + testcaseEnvInst, err = testenv.NewDefaultTestCaseEnv(testenvInstance.GetKubeClient(), name) + Expect(err).To(Succeed(), "Unable to create testcaseenv") + deployment, err = testcaseEnvInst.NewDeployment(testenv.RandomDNSName(3)) + Expect(err).To(Succeed(), "Unable to create deployment") + gcsTestDir = "s1appfw-" + testenv.RandomDNSName(4) + appSourceVolumeName = "appframework-test-volume-" + testenv.RandomDNSName(3) + }) + + AfterEach(func() { + // When a test spec failed, skip the teardown so we can troubleshoot. + if CurrentGinkgoTestDescription().Failed { + testcaseEnvInst.SkipTeardown = true + } + if deployment != nil { + deployment.Teardown() + } + // Delete files uploaded to GCS + if !testcaseEnvInst.SkipTeardown { + testenv.DeleteFilesOnGCP(testGCSBucket, uploadedApps) + } + if testcaseEnvInst != nil { + Expect(testcaseEnvInst.Teardown()).ToNot(HaveOccurred()) + } + + if filePresentOnOperator { + //Delete files from app-directory + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + podDownloadPath := filepath.Join(testenv.AppDownloadVolume, "test_file.img") + testenv.DeleteFilesOnOperatorPod(ctx, deployment, opPod, []string{podDownloadPath}) + } + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("smokegcp, s1gcp, appframeworks1gcp, appframeworkgcp, s1_gcp_sanity: can deploy a Standalone instance with App Framework enabled, install apps then upgrade them", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to gcs for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console with app framework and wait for the pod to be ready + * Upload V1 apps to gcs for Standalone + * Create app source for Standalone + * Prepare and deploy Standalone with app framework and wait for the pod to be ready + ############ V1 APP VERIFICATION FOR STANDALONE AND MONITORING CONSOLE ########### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + ############ UPGRADE V2 APPS ########### + * Upload V2 apps to gcs App Source + ############ V2 APP VERIFICATION FOR STANDALONE AND MONITORING CONSOLE ########### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App 
Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + */ + + // ################## SETUP FOR MONITORING CONSOLE #################### + + // Upload V1 apps to gcs for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to gcs for Monitoring Console", appVersion)) + + gcsTestDirMC := "s1appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcs test directory for Monitoring Console %s", appVersion, testGCSBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // ################## SETUP FOR STANDALONE #################### + // Upload V1 apps to gcs for Standalone + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to gcs for Standalone", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcs test directory for Standalone", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
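// --- Editorial sketch (not part of the committed test code) -----------------------
// The step list above includes "Verify App enabled and version by running splunk cmd".
// One way to do that is to exec the Splunk CLI inside the pod; `splunk display app`
// reports an app's enabled/disabled state. The exact command the harness runs is not
// shown in this diff, so the invocation and output parsing below are assumptions.

package appcheck

import (
	"os/exec"
	"strings"
)

// appEnabledOnPod shells out to kubectl, runs the Splunk CLI inside the pod, and
// reports whether the named app is listed as ENABLED.
func appEnabledOnPod(namespace, pod, appName, adminPassword string) (bool, error) {
	out, err := exec.Command(
		"kubectl", "exec", "-n", namespace, pod, "--",
		"/opt/splunk/bin/splunk", "display", "app", appName,
		"-auth", "admin:"+adminPassword,
	).CombinedOutput()
	if err != nil {
		return false, err
	}
	return strings.Contains(string(out), "ENABLED"), nil
}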
+ + // Maximum apps to be downloaded in parallel + maxConcurrentAppDownloads := 5 + + // Create App framework spec for Standalone + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + appFrameworkSpec.MaxConcurrentAppDownloads = uint64(maxConcurrentAppDownloads) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + MonitoringConsoleRef: corev1.ObjectReference{ + Name: mcName, + }, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with App framework") + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + + // ############ INITIAL VERIFICATION ########### + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############## UPGRADE APPS ################# + + // Delete apps on gcs + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on gcs", appVersion)) + testenv.DeleteFilesOnGCP(testGCSBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to gcs for Standalone and Monitoring Console + 
appVersion = "V2" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to gcs for Standalone and Monitoring Console", appVersion)) + appFileList = testenv.GetAppFileList(appListV2) + + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcs test directory for Standalone", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcs test directory for Monitoring Console %s", appVersion, testGCSBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ UPGRADE VERIFICATION ########### + standaloneAppSourceInfo.CrAppVersion = appVersion + standaloneAppSourceInfo.CrAppList = appListV2 + standaloneAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV2 + mcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{standaloneAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("smokegcp, s1gcp, appframeworks1gcp, appframeworkgcp, s1_gcp_sanity: can deploy a Standalone instance with App Framework enabled, install apps then downgrade them", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V2 apps to gcs for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console with app framework and wait for the pod to be ready + * Upload V2 apps to gcs for Standalone + * Create app source for Standalone + * Prepare and deploy Standalone with app framework and wait for the pod to be ready + ############ INITIAL VERIFICATION FOR STANDALONE AND MONITORING CONSOLE ########### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + ############# DOWNGRADE APPS ################ + * Upload V1 apps on gcs + * Wait for Monitoring Console and Standalone pods to be ready + ########## DOWNGRADE VERIFICATION FOR STANDALONE AND MONITORING CONSOLE ########### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in 
App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + */ + + //################## SETUP #################### + // Upload V2 apps to gcs + appVersion := "V2" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to gcs for Standalone and Monitoring Console", appVersion)) + appFileList := testenv.GetAppFileList(appListV2) + gcsTestDir = "s1appfw-" + testenv.RandomDNSName(4) + + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to gcs for Standalone", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcs test directory for Standalone", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to gcs for Monitoring Console", appVersion)) + gcsTestDirMC := "s1appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcs test directory for Monitoring Console %s", appVersion, testGCSBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Create App framework Spec for Standalone + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + MonitoringConsoleRef: corev1.ObjectReference{ + Name: mcName, + }, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with App framework") + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in 
ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ INITIAL VERIFICATION ########### + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV2, CrAppFileList: appFileList} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV2, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############# DOWNGRADE APPS ################ + // Delete apps on gcs + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on gcs", appVersion)) + testenv.DeleteFilesOnGCP(testGCSBucket, uploadedApps) + uploadedApps = nil + + // get revision number of the resource + resourceVersion := testenv.GetResourceVersion(ctx, deployment, testcaseEnvInst, mc) + + // Upload V1 apps to gcs for Standalone and Monitoring Console + appVersion = "V1" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to gcs for Standalone and Monitoring Console", appVersion)) + appFileList = testenv.GetAppFileList(appListV1) + + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcs test directory for Standalone", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcs test directory for Monitoring Console %s", appVersion, testGCSBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
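+ // The downgrade is driven by polling alone: the app framework specs above are generated with a 60-second repo poll interval (the last argument to GenerateAppFrameworkSpec), so the next poll notices that the packages at the same GCS paths now contain V1 and re-installs them.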
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // wait for custom resource resource version to change + testenv.VerifyCustomResourceVersionChanged(ctx, deployment, testcaseEnvInst, mc, resourceVersion) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## DOWNGRADE VERIFICATION ########### + standaloneAppSourceInfo.CrAppVersion = appVersion + standaloneAppSourceInfo.CrAppList = appListV1 + standaloneAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + mcAppSourceInfo.CrAppVersion = appVersion + mcAppSourceInfo.CrAppList = appListV1 + mcAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV1) + allAppSourceInfo = []testenv.AppSourceInfo{standaloneAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("s1gcp, smokegcp, appframeworks1gcp, appframeworkgcp: can deploy a Standalone instance with App Framework enabled, install apps, scale up, install apps on new pod, scale down", func() { + + /* Test Steps + ################## SETUP #################### + * Upload apps on gcs + * Create 2 app sources for Monitoring Console and Standalone + * Prepare and deploy Monitoring Console CRD with app framework and wait for the pod to be ready + * Prepare and deploy Standalone CRD with app framework and wait for the pod to be ready + ########## INITIAL VERIFICATION ############# + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + ############### SCALING UP ################## + * Scale up Standalone + * Wait for Monitoring Console and Standalone to be ready + ########### SCALING UP VERIFICATION ######### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + ############## SCALING DOWN ################# + * Scale down Standalone + * Wait for Monitoring Console and Standalone to be ready + ########### SCALING DOWN VERIFICATION ####### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * 
Verify App enabled and version by running splunk cmd + */ + + //################## SETUP #################### + // Upload V1 apps to gcs for Standalone and Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to gcs for Standalone and Monitoring Console", appVersion)) + + gcsTestDirMC := "s1appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcs test directory for Monitoring Console %s", appVersion, testGCSBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcs test directory for Standalone", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is Ready and stays in ready state + // testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload apps to gcs for Standalone + gcsTestDir := "s1appfw-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload apps to gcs test directory") + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Create App framework Spec for Standalone + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + MonitoringConsoleRef: corev1.ObjectReference{ + Name: mcName, + }, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with App framework") + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + + //########## INITIAL VERIFICATION ############# + scaledReplicaCount := 2 + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + mcPod := []string{fmt.Sprintf(testenv.MonitoringConsolePod, deployment.GetName())} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: scaledReplicaCount} + mcAppSourceInfo := testenv.AppSourceInfo{CrKind: mc.Kind, CrName: mc.Name, CrAppSourceName: appSourceNameMC, CrAppSourceVolumeName: appSourceNameMC, CrPod: mcPod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo, mcAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + //Delete configMap Object + err = testenv.DeleteConfigMap(testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to delete ConfigMao", "ConfigMap name", ConfigMapName) + + //############### SCALING UP ################## + // Scale up Standalone instance + testcaseEnvInst.Log.Info("Scale up Standalone") + + standalone = &enterpriseApi.Standalone{} + err = deployment.GetInstance(ctx, deployment.GetName(), standalone) + Expect(err).To(Succeed(), "Failed to get instance of Standalone") + + standalone.Spec.Replicas = int32(scaledReplicaCount) + + err = deployment.UpdateCR(ctx, standalone) + Expect(err).To(Succeed(), "Failed to scale up Standalone") + + // Ensure Standalone is scaling up + testenv.VerifyStandalonePhase(ctx, deployment, 
testcaseEnvInst, deployment.GetName(), enterpriseApi.PhaseScalingUp) + + // Wait for Standalone to be in READY status + testenv.VerifyStandalonePhase(ctx, deployment, testcaseEnvInst, deployment.GetName(), enterpriseApi.PhaseReady) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + //########### SCALING UP VERIFICATION ######### + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + + //############## SCALING DOWN ################# + // Scale down Standalone instance + testcaseEnvInst.Log.Info("Scale down Standalone") + scaledReplicaCount = 1 + standalone = &enterpriseApi.Standalone{} + err = deployment.GetInstance(ctx, deployment.GetName(), standalone) + Expect(err).To(Succeed(), "Failed to get instance of Standalone after scaling down") + + standalone.Spec.Replicas = int32(scaledReplicaCount) + err = deployment.UpdateCR(ctx, standalone) + Expect(err).To(Succeed(), "Failed to scale down Standalone") + + // Ensure Standalone is scaling down + testenv.VerifyStandalonePhase(ctx, deployment, testcaseEnvInst, deployment.GetName(), enterpriseApi.PhaseScalingDown) + + // Wait for Standalone to be in READY status + testenv.VerifyStandalonePhase(ctx, deployment, testcaseEnvInst, deployment.GetName(), enterpriseApi.PhaseReady) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + //########### SCALING DOWN VERIFICATION ####### + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("s1gcp, integrationgcp, appframeworks1gcp, appframeworkgcp: can deploy a Standalone instance with App Framework enabled, install apps, scale up, upgrade apps", func() { + + /* Test Steps + ################## SETUP #################### + * Upload apps on gcs + * Create app source for Standalone + * Prepare and deploy Standalone CRD with app framework and wait for the pod to be ready + ########## INITIAL VERIFICATION ############# + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + ############### SCALING UP ################## + * Scale up Standalone + * Wait for Standalone to be ready + ############### UPGRADE 
APPS ################ + * Upload V2 apps to gcs App Source + ###### SCALING UP/UPGRADE VERIFICATIONS ##### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + */ + + //################## SETUP #################### + // Upload V1 apps to gcs for Standalone + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to gcs for Standalone", appVersion)) + + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcs test directory for Standalone", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload apps to gcs for Standalone + gcsTestDir := "s1appfw-" + testenv.RandomDNSName(4) + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload apps to gcs test directory") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework Spec for Standalone + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with App framework") + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //########## INITIAL VERIFICATION ############# + scaledReplicaCount := 2 + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList, CrReplicas: scaledReplicaCount} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + //############### SCALING UP ################## + // Scale up Standalone instance + testcaseEnvInst.Log.Info("Scale up Standalone") + + standalone = &enterpriseApi.Standalone{} + err = deployment.GetInstance(ctx, deployment.GetName(), standalone) + Expect(err).To(Succeed(), "Failed to get instance of Standalone") + + standalone.Spec.Replicas = int32(scaledReplicaCount) + + err = deployment.UpdateCR(ctx, standalone) + Expect(err).To(Succeed(), "Failed to scale up Standalone") + 
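+ // Scaling up adds a second Standalone pod; the operator installs the same app source on the new replica, which the scaling/upgrade verification below covers by listing both pods in CrPod.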
+ // Ensure Standalone is scaling up + testenv.VerifyStandalonePhase(ctx, deployment, testcaseEnvInst, deployment.GetName(), enterpriseApi.PhaseScalingUp) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // ############## UPGRADE APPS ################# + // Delete apps on gcs + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on gcs", appVersion)) + testenv.DeleteFilesOnGCP(testGCSBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to gcs for Standalone and Monitoring Console + appVersion = "V2" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to gcs for Standalone and Monitoring Console", appVersion)) + appFileList = testenv.GetAppFileList(appListV2) + + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcs test directory for Standalone", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ SCALING UP/UPGRADE VERIFICATIONS ########### + standaloneAppSourceInfo.CrAppVersion = appVersion + standaloneAppSourceInfo.CrAppList = appListV2 + standaloneAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + standaloneAppSourceInfo.CrPod = []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0), fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 1)} + allAppSourceInfo = []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + // ES App Installation not supported at the time. Will be added back at a later time. + Context("Standalone deployment (S1) with App Framework", func() { + It("s1gcp, integrationgcp, appframeworks1gcp, appframeworkgcp: can deploy a Standalone and have ES app installed", func() { + + /* Test Steps + ################## SETUP #################### + * Upload ES app to gcs + * Create App Source for Standalone + * Prepare and deploy Standalone and wait for the pod to be ready + ################## VERIFICATION ############# + * Verify ES app is installed on Standalone + */ + + //################## SETUP #################### + + // Download ES App from gcs + testcaseEnvInst.Log.Info("Download ES app from gcs") + esApp := []string{"SplunkEnterpriseSecuritySuite"} + appFileList := testenv.GetAppFileList(esApp) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download ES app") + + // Upload ES app to gcs + testcaseEnvInst.Log.Info("Upload ES app on gcs") + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload ES app to gcs test directory") + uploadedApps = append(uploadedApps, uploadedFiles...) 
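+ // ES is installed through the premiumApps scope configured just below; per the EsDefaults set there, sslEnablement "ignore" skips the SSL-enablement step of the ES install so the test does not depend on certificate setup.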
+ + // Create App framework Spec + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopePremiumApps, appSourceName, gcsTestDir, 60) + appFrameworkSpec.AppSources[0].PremiumAppsProps = enterpriseApi.PremiumAppsProps{ + Type: enterpriseApi.PremiumAppsTypeEs, + EsDefaults: enterpriseApi.EsDefaults{ + SslEnablement: enterpriseApi.SslEnablementIgnore, + }, + } + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone with App framework") + + // Ensure Standalone goes to Ready phase + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ INITIAL VERIFICATION ########### + appVersion := "V1" + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: esApp, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############## UPGRADE APPS ################# + + // Delete apps on gcs + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on gcs", appVersion)) + testenv.DeleteFilesOnGCP(testGCSBucket, uploadedApps) + uploadedApps = nil + + // Download ES App from gcs + testcaseEnvInst.Log.Info("Download updated ES app from gcs") + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV2, downloadDirV2, appFileList) + Expect(err).To(Succeed(), "Unable to download ES app") + + // Upload V2 apps to gcs for Standalone + appVersion = "V2" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s Es app to gcs for Standalone and Monitoring Console", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s Es app to gcs test directory for Standalone", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ UPGRADE VERIFICATION ########### + standaloneAppSourceInfo.CrAppVersion = appVersion + standaloneAppSourceInfo.CrAppList = esApp + standaloneAppSourceInfo.CrAppFileList = testenv.GetAppFileList(esApp) + allAppSourceInfo = []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("integrationgcp, s1gcp, appframeworks1gcp, appframeworkgcp: can deploy a Standalone instance with App Framework enabled and install around 350MB of apps at once", func() { + + /* Test Steps + ################## SETUP #################### + * Create app source for Standalone + * Add more apps than usual on gcs for this test + * Prepare and deploy Standalone with app framework and wait for the pod to be ready + ############### VERIFICATION ################ + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify App enabled and version by running splunk cmd + */ + + //################## SETUP #################### + + // Creating a bigger list of apps to be installed than the default one + appList := append(appListV1, testenv.RestartNeededApps...) + appFileList := testenv.GetAppFileList(appList) + appVersion := "V1" + + // Download apps from gcs + testcaseEnvInst.Log.Info("Download bigger amount of apps from gcs for this test") + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + + Expect(err).To(Succeed(), "Unable to download apps files") + + // Upload apps to GCS + testcaseEnvInst.Log.Info("Upload bigger amount of apps to GCS for this test") + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload apps to GCS test directory") + uploadedApps = append(uploadedApps, uploadedFiles...) 
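+ // appListV1 plus testenv.RestartNeededApps adds up to roughly 350MB of packages (per the test title), so a single poll cycle has to download, copy and install the whole batch.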
+ + // Create App framework Spec + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance") + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############### VERIFICATION ################ + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("s1gcp, smokegcp, appframeworks1gcp, appframeworkgcp: can deploy a standalone instance with App Framework enabled for manual poll", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCS for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console with app framework and wait for the pod to be ready + * Create app source for Standalone + * Prepare and deploy Standalone with app framework(MANUAL POLL) and wait for the pod to be ready + ############### VERIFICATION ################ + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + ############ UPGRADE V2 APPS ########### + * Upload V2 apps to GCS App Source + ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + * Verify Apps are not updated + ############ ENABLE MANUAL POLL ############ + * Verify Manual Poll disabled after the check + ############ V2 APP VERIFICATION FOR STANDALONE AND MONITORING CONSOLE ########### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + */ + + //################## SETUP #################### + + // Upload V1 apps to GCS for Monitoring 
Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Monitoring Console", appVersion)) + gcsTestDirMC := "s1appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Monitoring Console %s", appVersion, testGCSBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 0) + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Upload V1 apps to GCS + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
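+ // Both app framework specs in this test are generated with a poll interval of 0, which disables periodic polling; app updates are only applied once the manual-update config map is toggled later in the test.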
+ + // Create App framework Spec + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 0) + + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + MonitoringConsoleRef: corev1.ObjectReference{ + Name: mcName, + }, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Create Standalone Deployment with App Framework + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy standalone instance with App framework") + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############### VERIFICATION ################ + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + //############### UPGRADE APPS ################ + + //Delete apps on GCS for new Apps + testenv.DeleteFilesOnGCP(testGCSBucket, uploadedApps) + uploadedApps = nil + + //Upload new Versioned Apps to GCS + appVersion = "V2" + appFileList = testenv.GetAppFileList(appListV2) + + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDirMC, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Monitoring Console %s", appVersion, testGCSBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
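+ // Until the manual poll is triggered, the V2 packages uploaded above must not be installed; the V1 verification below confirms that.
+ // The manual-update config map holds one small YAML fragment per CR kind; the test only relies on its status field, e.g. (illustrative):
+ //   Standalone: |
+ //     status: off
+ // Flipping status to "on" requests one poll cycle, after which the operator resets it to "off".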
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // ############ VERIFICATION APPS ARE NOT UPDATED BEFORE ENABLING MANUAL POLL ############ + appVersion = "V1" + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############ ENABLE MANUAL POLL ############ + appVersion = "V2" + testcaseEnvInst.Log.Info("Get config map for triggering manual update") + config, err := testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(err).To(Succeed(), "Unable to get config map for manual poll") + testcaseEnvInst.Log.Info("Config map data for", "Standalone", config.Data["Standalone"]) + + testcaseEnvInst.Log.Info("Modify config map to trigger manual update") + config.Data["Standalone"] = strings.Replace(config.Data["Standalone"], "off", "on", 1) + config.Data["MonitoringConsole"] = strings.Replace(config.Data["MonitoringConsole"], "off", "on", 1) + err = deployment.UpdateCR(ctx, config) + Expect(err).To(Succeed(), "Unable to update config map") + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //Verify config map set back to off after poll trigger + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify config map set back to off after poll trigger for %s app", appVersion)) + config, _ = testenv.GetAppframeworkManualUpdateConfigMap(ctx, deployment, testcaseEnvInst.GetName()) + Expect(strings.Contains(config.Data["Standalone"], "status: off") && strings.Contains(config.Data["MonitoringConsole"], "status: off")).To(Equal(true), "Config map update not complete") + + //############### VERIFICATION FOR UPGRADE ################ + standaloneAppSourceInfo.CrAppVersion = appVersion + standaloneAppSourceInfo.CrAppList = appListV2 + standaloneAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("integrationgcp, s1gcp, appframeworks1gcp, appframeworkgcp: can deploy Several standalone CRs in the same namespace with App Framework enabled", func() { + + /* Test Steps + ################## SETUP #################### + * Add more apps than usual on GCS for this test + * Split the App list into 2 segments with common apps and uncommon apps for each Standalone + * Create app source for 2 Standalones + * Prepare and deploy Standalones with app framework and wait for the pod to be ready + ############### VERIFICATION ################ + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info +
* Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify App enabled and version by running splunk cmd + */ + + //################## SETUP #################### + + // Creating a list of apps to be installed on both standalone + appList1 := append(appListV1, testenv.RestartNeededApps[len(testenv.RestartNeededApps)/2:]...) + appList2 := append(appListV1, testenv.RestartNeededApps[:len(testenv.RestartNeededApps)/2]...) + appVersion := "V1" + + // Download apps from GCS + testcaseEnvInst.Log.Info("Download the extra apps from GCS for this test") + appFileList := testenv.GetAppFileList(testenv.RestartNeededApps) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps files") + + // Upload apps to GCS for first Standalone + testcaseEnvInst.Log.Info("Upload apps to GCS for 1st Standalone") + appFileListStandalone1 := testenv.GetAppFileList(appList1) + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileListStandalone1, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload apps to GCS test directory") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Upload apps to GCS for second Standalone + testcaseEnvInst.Log.Info("Upload apps to GCS for 2nd Standalone") + gcsTestDirStandalone2 := "s1appfw-2-" + testenv.RandomDNSName(4) + appFileListStandalone2 := testenv.GetAppFileList(appList2) + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDirStandalone2, appFileListStandalone2, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload apps to GCS test directory") + uploadedApps = append(uploadedApps, uploadedFiles...) 
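+ // Each Standalone points at its own GCS prefix (gcsTestDir vs gcsTestDirStandalone2), so the two CRs in the same namespace keep independent app sources and their app deployment info can be verified separately below.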
+ + // Create App framework Spec + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Create App framework Spec + appSourceNameStandalone2 := "appframework-2-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appSourceVolumeNameStandalone2 := "appframework-test-volume-2-" + testenv.RandomDNSName(3) + appFrameworkSpecStandalone2 := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameStandalone2, enterpriseApi.ScopeLocal, appSourceNameStandalone2, gcsTestDirStandalone2, 60) + specStandalone2 := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecStandalone2, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy 1st Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy 1st Standalone instance") + testcaseEnvInst.Log.Info("Deploy 2nd Standalone") + standalone2Name := deployment.GetName() + testenv.RandomDNSName(3) + standalone2, err := deployment.DeployStandaloneWithGivenSpec(ctx, standalone2Name, specStandalone2) + Expect(err).To(Succeed(), "Unable to deploy 2nd Standalone instance") + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone2, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############### VERIFICATION ################ + + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appList1, CrAppFileList: appFileListStandalone1} + standalone2Pod := []string{fmt.Sprintf(testenv.StandalonePod, standalone2Name, 0)} + standalone2AppSourceInfo := testenv.AppSourceInfo{CrKind: standalone2.Kind, CrName: standalone2Name, CrAppSourceName: appSourceNameStandalone2, CrPod: standalone2Pod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appList2, CrAppFileList: appFileListStandalone2} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo, standalone2AppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("integrationgcp, s1gcp, appframeworks1gcp, appframeworkgcp: can add new apps to app source while install is in progress and have all apps installed", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCS for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console with app framework 
and wait for the pod to be ready + * Upload big-size app to GCS for Standalone + * Create app source for Standalone + * Prepare and deploy Standalone + ############## VERIFICATIONS ################ + * Verify App installation is in progress on Standalone + * Upload more apps from GCS during bigger app install + * Wait for polling interval to pass + * Verify all apps are installed on Standalone + */ + + // ################## SETUP FOR MONITORING CONSOLE #################### + // Upload V1 apps to GCS for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Monitoring Console", appVersion)) + gcsTestDirMC := "s1appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Monitoring Console %s", appVersion, testGCSBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // ################## SETUP FOR STANDALONE #################### + // Download all test apps from GCS + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList = testenv.GetAppFileList(appList) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps") + + // Upload big-size app to GCS for Standalone + appList = testenv.BigSingleApp + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload big-size app to GCS for Standalone") + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to GCS test directory for Standalone") + uploadedApps = append(uploadedApps, uploadedFiles...) 
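+ // The big single app keeps the Standalone busy in the download/pod-copy/install phases long enough for the extra apps uploaded further below to be added to the same app source mid-install.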
+ + // Create App framework spec for Standalone + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + MonitoringConsoleRef: corev1.ObjectReference{ + Name: mcName, + }, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with App framework") + + // Verify App installation is in progress on Standalone + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgPodCopyComplete) + + // Upload more apps to GCS for Standalone + appList = testenv.ExtraApps + appFileList = testenv.GetAppFileList(appList) + testcaseEnvInst.Log.Info("Upload more apps to GCS for Standalone") + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload more apps to GCS test directory for Standalone") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Wait for polling interval to pass + testenv.WaitForAppInstall(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList) + + // Verify all apps are installed on Standalone + appList = append(testenv.BigSingleApp, testenv.ExtraApps...) + standalonePodName := fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0) + testcaseEnvInst.Log.Info(fmt.Sprintf("Verify all apps %v are installed on Standalone", appList)) + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), []string{standalonePodName}, appList, true, "enabled", false, false) + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("integrationgcp, s1gcp, appframeworks1gcp, appframeworkgcp: Deploy a Standalone instance with App Framework enabled and reset operator pod while app install is in progress", func() { + + /* Test Steps + ################## SETUP #################### + * Upload big-size app to GCS for Standalone + * Create app source for Standalone + * Prepare and deploy Standalone + * While app install is in progress, restart the operator + ############## VERIFICATIONS ################ + * Verify App installation is in progress on Standalone + * Upload more apps from GCS during bigger app install + * Wait for polling interval to pass + * Verify all apps are installed on Standalone + */ + + // ################## SETUP FOR STANDALONE #################### + // Download all test apps from GCS + appVersion := "V1" + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) 
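+ // Combining the big app with the extra apps stretches the install window so the operator pod can be deleted below while installation is still in progress.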
+ appFileList := testenv.GetAppFileList(appList) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps") + + // Upload big-size app to GCS for Standalone + testcaseEnvInst.Log.Info("Upload big-size app to GCS for Standalone") + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to GCS test directory for Standalone") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework spec for Standalone + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with App framework") + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ + testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + ConfigMapName := enterprise.GetProbeConfigMapName(testcaseEnvInst.GetName()) + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + if err != nil { + // Retry for up to ~10 seconds until the probe config map is created + for i := 1; i < 10; i++ { + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + if err == nil { + break + } + time.Sleep(1 * time.Second) + } + } + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + + // Verify App installation is in progress on Standalone + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgInstallPending) + + //Delete configMap Object + err = testenv.DeleteConfigMap(testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to delete ConfigMap", "ConfigMap name", ConfigMapName) + + // Delete Operator pod while Install in progress + testenv.DeleteOperatorPod(testcaseEnvInst) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ VERIFICATION ########### + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appList, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############ Verify livenessProbe and readinessProbe config object and scripts############ +
testcaseEnvInst.Log.Info("Get config map for livenessProbe and readinessProbe") + _, err = testenv.GetConfigMap(ctx, deployment, testcaseEnvInst.GetName(), ConfigMapName) + Expect(err).To(Succeed(), "Unable to get config map for livenessProbe and readinessProbe", "ConfigMap name", ConfigMapName) + scriptsNames := []string{enterprise.GetLivenessScriptName(), enterprise.GetReadinessScriptName(), enterprise.GetStartupScriptName()} + allPods := testenv.DumpGetPods(testcaseEnvInst.GetName()) + testenv.VerifyFilesInDirectoryOnPod(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), allPods, scriptsNames, enterprise.GetProbeMountDirectory(), false, true) + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("integrationgcp, s1gcp, appframeworks1gcp, appframeworkgcp: Deploy a Standalone instance with App Framework enabled and reset operator pod while app download is in progress", func() { + + /* Test Steps + ################## SETUP #################### + * Upload big-size app to GCS for Standalone + * Create app source for Standalone + * Prepare and deploy Standalone + * While app download is in progress, restart the operator + ############## VERIFICATIONS ################ + * Verify App download is in progress on Standalone + * Upload more apps from GCS during bigger app install + * Wait for polling interval to pass + * Verify all apps are installed on Standalone + */ + + // ################## SETUP FOR STANDALONE #################### + // Download all test apps from GCS + appVersion := "V1" + appList := append(testenv.BigSingleApp, testenv.ExtraApps...) + appFileList := testenv.GetAppFileList(appList) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps") + + // Upload big-size app to GCS for Standalone + testcaseEnvInst.Log.Info("Upload big-size app to GCS for Standalone") + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to GCS test directory for Standalone") + uploadedApps = append(uploadedApps, uploadedFiles...) 
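+ // Deleting the operator pod while downloads are pending (below) exercises recovery on restart: the expectation is that the next reconcile resumes the pending packages from the app deployment info tracked in the CR status rather than starting over.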
+ + // Create App framework spec for Standalone + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with App framework") + + // Verify App download is in progress on Standalone + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList, enterpriseApi.AppPkgDownloadComplete, enterpriseApi.AppPkgDownloadPending) + + // Delete Operator pod while Install in progress + testenv.DeleteOperatorPod(testcaseEnvInst) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ VERIFICATION ########### + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appList, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("integrationgcp, s1gcp, appframeworks1gcp, appframeworkgcp: can deploy a Standalone instance with App Framework enabled, install an app then disable it and remove it from app source", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCS for Standalone + * Create app source for Standalone + * Prepare and deploy Standalone with app framework and wait for the pod to be ready + ############ VERIFICATION########### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + * Disable the app + * Delete the app from GCS + * Check for repo state in App Deployment Info + */ + + // ################## SETUP FOR STANDALONE #################### + // Upload V1 apps to GCS for Standalone + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Standalone", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Standalone", appVersion)) + uploadedApps = 
append(uploadedApps, uploadedFiles...) + + // Maximum apps to be downloaded in parallel + maxConcurrentAppDownloads := 5 + + // Create App framework spec for Standalone + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + appFrameworkSpec.MaxConcurrentAppDownloads = uint64(maxConcurrentAppDownloads) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with App framework") + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ VERIFICATION ########### + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // Verify repo state on App to be disabled to be 1 (i.e app present on GCS bucket) + appName := appListV1[0] + appFileName := testenv.GetAppFileList([]string{appName}) + testenv.VerifyAppRepoState(ctx, deployment, testcaseEnvInst, standalone.Name, standalone.Kind, appSourceName, 1, appFileName[0]) + + // Disable the app + testenv.DisableAppsToGCP(downloadDirV1, appFileName, gcsTestDir) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileName) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Wait for App state to update after config file change + standalonePodName := fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0) + testenv.WaitforAppInstallState(ctx, deployment, testcaseEnvInst, []string{standalonePodName}, testcaseEnvInst.GetName(), appName, "disabled", false) + + // Delete the file from GCS + gcsFilepath := filepath.Join(gcsTestDir, appFileName[0]) + err = testenv.DeleteFileOnGCP(testGCSBucket, gcsFilepath) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to delete %s app on GCS test directory", appFileName[0])) + + // Verify repo state is set to 2 (i.e app deleted from GCS bucket) + testenv.VerifyAppRepoState(ctx, deployment, testcaseEnvInst, standalone.Name, standalone.Kind, appSourceName, 2, appFileName[0]) + + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("integrationgcp, s1gcp, appframeworks1gcp, appframeworkgcp: can deploy a Standalone instance with App Framework enabled, attempt to update 
using incorrect GCS credentials", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCS for Standalone + * Create app source for Standalone + * Prepare and deploy Standalone with app framework and wait for the pod to be ready + ############ V1 APP VERIFICATION FOR STANDALONE########### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + // ############ Modify secret key ########### + * Create App framework volume with random credentials and apply to Spec + * Check for changes in App phase to determine if next poll has been triggered + ############ UPGRADE V2 APPS ########### + * Upload V2 apps to GCS App Source + * Check no apps are updated as auth key is incorrect + ############ Modify secret key to correct one########### + * Apply spec with correct credentails + * Wait for the pod to be ready + ############ V2 APP VERIFICATION########### + * Verify Apps Downloaded in App Deployment Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify no pod resets triggered due to app install + * Verify App enabled and version by running splunk cmd + */ + + // ################## SETUP FOR STANDALONE #################### + // Upload V1 apps to GCS for Standalone + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Standalone", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Standalone", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
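This test flips the App Framework volume's secret to bogus values and expects app updates to stall. For orientation, a GCS client built from an explicit service-account key (rather than ambient credentials) follows the pattern below; a malformed key fails at client construction, while a syntactically valid but wrong key fails on the first bucket operation, which is the failure mode the test provokes. The function name is a hypothetical example, not an existing helper.

```go
package testenv

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
	"google.golang.org/api/option"
)

// newGCSClientFromKey builds a GCS client from a service-account key JSON,
// for example one read out of a Kubernetes Secret referenced by the App
// Framework volume spec. Purely illustrative.
func newGCSClientFromKey(ctx context.Context, keyJSON []byte) (*storage.Client, error) {
	client, err := storage.NewClient(ctx, option.WithCredentialsJSON(keyJSON))
	if err != nil {
		// Hit when the key is not valid JSON or not a recognizable credential.
		return nil, fmt.Errorf("unable to create GCS client from key: %w", err)
	}
	return client, nil
}
```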
+ + // Maximum apps to be downloaded in parallel + maxConcurrentAppDownloads := 5 + + // Create App framework spec for Standalone + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + appFrameworkSpec.MaxConcurrentAppDownloads = uint64(maxConcurrentAppDownloads) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + secretref := standalone.Spec.AppFrameworkConfig.VolList[0].SecretRef + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with App framework") + + secretStruct, err := testenv.GetSecretStruct(ctx, deployment, testcaseEnvInst.GetName(), secretref) + Expect(err).To(Succeed(), "Unable to obtain secret object") + secretData := secretStruct.Data + modifiedSecretData := map[string][]byte{"gcs_access_key": []byte(testenv.RandomDNSName(5)), "gcs_secret_key": []byte(testenv.RandomDNSName(5))} + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ INITIAL VERIFICATION ########### + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appListV1, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############ Modify secret key ########### + // Create App framework volume with invalid credentials and apply to Spec + testcaseEnvInst.Log.Info("Update Standalone spec with invalid credentials") + err = testenv.ModifySecretObject(ctx, deployment, testcaseEnvInst.GetName(), secretref, modifiedSecretData) + Expect(err).To(Succeed(), "Unable to update secret Object") + + // ############## UPGRADE APPS ################# + // Delete apps on + testcaseEnvInst.Log.Info(fmt.Sprintf("Delete %s apps on GCS", appVersion)) + testenv.DeleteFilesOnGCP(testGCSBucket, uploadedApps) + uploadedApps = nil + + // Upload V2 apps to GCS for Standalone + appVersion = "V2" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Standalone", appVersion)) + appFileList = testenv.GetAppFileList(appListV2) + + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Standalone", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
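testenv.ModifySecretObject, used above to inject the random credentials and again further down to restore the originals, amounts to rewriting the Secret's Data map. A minimal sketch with a controller-runtime client (helper and variable names are assumptions for illustration):

```go
package testenv

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// patchSecretData overwrites selected keys in an existing Secret, which is
// conceptually what swapping the GCS credentials in and out requires.
func patchSecretData(ctx context.Context, c client.Client, namespace, name string, data map[string][]byte) error {
	secret := &corev1.Secret{}
	if err := c.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, secret); err != nil {
		return err
	}
	if secret.Data == nil {
		secret.Data = map[string][]byte{}
	}
	for k, v := range data {
		secret.Data[k] = v // only the provided keys are replaced
	}
	return c.Update(ctx, secret)
}
```

After the update, the operator's next poll picks up the new credentials, which is why the test waits for a phase change before asserting.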
+ + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList) + + // Check no apps are updated as auth key is incorrect + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + // ############ Modify secret key to correct one########### + // Apply spec with correct credentials + err = testenv.ModifySecretObject(ctx, deployment, testcaseEnvInst.GetName(), secretref, secretData) + Expect(err).To(Succeed(), "Unable to update secret Object") + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge = testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ UPGRADE VERIFICATION ########### + standaloneAppSourceInfo.CrAppVersion = appVersion + standaloneAppSourceInfo.CrAppList = appListV2 + standaloneAppSourceInfo.CrAppFileList = testenv.GetAppFileList(appListV2) + allAppSourceInfo = []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("integrationgcp, s1gcp, appframeworks1gcp, appframeworkgcp: Deploy a Standalone instance with App Framework enabled and update apps after app download is completed", func() { + + /* Test Steps + ################## SETUP #################### + * Upload app to GCS for Standalone + * Create app source for Standalone + * Prepare and deploy Standalone + * Once app download is complete, upload new versions of the apps + ############## VERIFICATIONS ################ + * Verify App download is completed on Standalone + * Upload updated app to GCS as previous app download is complete + * Verify app is installed on Standalone + ############## UPGRADE VERIFICATIONS ################ + * Wait for next poll to trigger on Standalone + * Verify all apps are installed on Standalone + */ + + // ################## SETUP FOR STANDALONE #################### + // Download test app from GCS + appVersion := "V1" + appListV1 := []string{appListV1[0]} + appFileList := testenv.GetAppFileList(appListV1) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download apps") + + // Upload apps to GCS for Standalone + testcaseEnvInst.Log.Info("Upload apps to GCS for Standalone") + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload app to GCS test directory for Standalone") + uploadedApps = append(uploadedApps, uploadedFiles...)
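The GenerateAppFrameworkSpec call just below passes 120 as the apps-repo poll interval in seconds, which is what the later "wait for next poll" steps depend on. Written out by hand, the generated spec has roughly the shape sketched here; the field names follow the operator's App Framework API as commonly documented and every concrete value is a placeholder, so treat this as an assumption-laden sketch rather than the exact output of the helper.

```go
// Rough shape of an App Framework spec for a GCS-backed app source
// (illustrative values; the tests build this via testenv.GenerateAppFrameworkSpec).
appFrameworkSpec := enterpriseApi.AppFrameworkSpec{
	// Assumed to map to the CRD's appsRepoPollIntervalSeconds.
	AppsRepoPollInterval: 120,
	VolList: []enterpriseApi.VolumeSpec{{
		Name:      "appframework-test-volume",       // assumed volume name
		Endpoint:  "https://storage.googleapis.com", // GCS endpoint
		Path:      "<test-bucket>",                  // bucket holding the uploaded apps
		SecretRef: "<gcs-credentials-secret>",       // secret carrying the service-account key
		Type:      "gcs",
		Provider:  "gcp",
	}},
	AppSources: []enterpriseApi.AppSourceSpec{{
		Name:     "appframework-local-apps", // assumed app source name
		Location: "<gcsTestDir>/",           // prefix inside the bucket
		AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{
			VolName: "appframework-test-volume",
			Scope:   enterpriseApi.ScopeLocal,
		},
	}},
}
```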
+ + // Create App framework spec for Standalone + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 120) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with App framework") + + // Verify App download is in progress on Standalone + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList, enterpriseApi.AppPkgInstallComplete, enterpriseApi.AppPkgPodCopyPending) + + // Upload V2 apps to GCS for Standalone + appVersion = "V2" + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s app to GCS for Standalone", appVersion)) + appFileList = testenv.GetAppFileList([]string{appListV2[0]}) + + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s app to GCS test directory for Standalone", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + //######### VERIFICATIONS ############# + appVersion = "V1" + testenv.VerifyAppInstalled(ctx, deployment, testcaseEnvInst, testcaseEnvInst.GetName(), []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)}, appListV1, false, "enabled", false, false) + + // Check for changes in App phase to determine if next poll has been triggered + testenv.WaitforPhaseChange(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############ UPGRADE VERIFICATION ########### + appVersion = "V2" + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: []string{appListV2[0]}, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("integrationgcp, s1gcp, appframeworks1gcp, appframeworkgcp: can deploy a Standalone instance and install a bigger volume of apps than the operator PV disk space", func() { + + /* Test Steps + ################## SETUP #################### + * Create a file on operator to utilize over 1G of space + * Upload file to gcs for standalone + * Create app source for Standalone with parallelDownload=15 + * Prepare and deploy Standalone with app framework and wait for the pod to be ready + ############### VERIFICATION ################ + * Verify Apps Downloaded in App Deployment 
Info + * Verify Apps Copied in App Deployment Info + * Verify App Package is deleted from Operator Pod + * Verify Apps Installed in App Deployment Info + * Verify App Package is deleted from Splunk Pod + * Verify App Directory in under splunk path + * Verify App enabled and version by running splunk cmd + */ + + //################## SETUP #################### + // Create a large file on Operator pod + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + err := testenv.CreateDummyFileOnOperator(ctx, deployment, opPod, testenv.AppDownloadVolume, "1G", "test_file.img") + Expect(err).To(Succeed(), "Unable to create file on operator") + filePresentOnOperator = true + + // Download apps for test + appVersion := "V1" + appList := testenv.PVTestApps + appFileList := testenv.GetAppFileList(appList) + err = testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsPVTestApps, downloadDirPVTestApps, appFileList) + Expect(err).To(Succeed(), "Unable to download app files") + + // Upload apps to GCS + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Standalone", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirPVTestApps) + Expect(err).To(Succeed(), "Unable to upload apps to GCS test directory") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Maximum apps to be downloaded in parallel + maxConcurrentAppDownloads := 15 + + // Create App framework Spec + appSourceName := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + appFrameworkSpec.MaxConcurrentAppDownloads = uint64(maxConcurrentAppDownloads) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance") + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + //############### VERIFICATION ################ + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appList, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("integrationgcp, s1gcp, appframeworks1gcp, appframeworkgcp: Deploy a Standalone instance with App Framework enabled and delete apps from app directory when app download is complete", func() { + + /* Test Steps + ################## SETUP #################### + * Upload big-size app to GCS for Standalone + * Create app source for Standalone + * Prepare and deploy Standalone + * 
When app download is complete, delete apps from app directory + ############## VERIFICATIONS ################ + * Verify App installation is in progress on Standalone + * Upload more apps from GCS during bigger app install + * Wait for polling interval to pass + * Verify all apps are installed on Standalone + */ + + // ################## SETUP FOR STANDALONE #################### + // Download big size apps from GCS + appVersion := "V1" + appList := testenv.BigSingleApp + appFileList := testenv.GetAppFileList(appList) + err := testenv.DownloadFilesFromGCP(testDataGcsBucket, gcsAppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download big app") + + // Upload big-size app to GCS for Standalone + testcaseEnvInst.Log.Info("Upload big-size app to GCS for Standalone") + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload big-size app to GCS test directory for Standalone") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework spec for Standalone + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with App framework") + + // Verify App Download is completed on Standalone + testenv.VerifyAppState(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind, appSourceName, appFileList, enterpriseApi.AppPkgPodCopyComplete, enterpriseApi.AppPkgPodCopyPending) + + //Delete apps from app-directory when app download is complete + opPod := testenv.GetOperatorPodName(testcaseEnvInst) + podDownloadPath := filepath.Join(splcommon.AppDownloadVolume, "downloadedApps", testenvInstance.GetName(), standalone.Kind, deployment.GetName(), enterpriseApi.ScopeLocal, appSourceName, testenv.AppInfo[appList[0]]["filename"]) + err = testenv.DeleteFilesOnOperatorPod(ctx, deployment, opPod, []string{podDownloadPath}) + Expect(err).To(Succeed(), "Unable to delete file on pod") + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Get Pod age to check for pod resets later + splunkPodAge := testenv.GetPodsStartTime(testcaseEnvInst.GetName()) + + // ############ VERIFICATION ########### + standalonePod := []string{fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)} + standaloneAppSourceInfo := testenv.AppSourceInfo{CrKind: standalone.Kind, CrName: standalone.Name, CrAppSourceName: appSourceName, CrPod: standalonePod, CrAppVersion: appVersion, CrAppScope: enterpriseApi.ScopeLocal, CrAppList: appList, CrAppFileList: appFileList} + allAppSourceInfo := []testenv.AppSourceInfo{standaloneAppSourceInfo} + testenv.AppFrameWorkVerifications(ctx, deployment, testcaseEnvInst, allAppSourceInfo, splunkPodAge, "") + }) + }) + + Context("Standalone deployment (S1) with App Framework", func() { + It("smokegcp, s1gcp, 
appframeworks1gcp, appframeworkgcp, s1_gcp_sanity: can deploy a Standalone instance with App Framework enabled, install apps and check isDeploymentInProgress is set for Standalone and MC CR's", func() { + + /* Test Steps + ################## SETUP #################### + * Upload V1 apps to GCS for Monitoring Console + * Create app source for Monitoring Console + * Prepare and deploy Monitoring Console with app framework + * Check isDeploymentInProgress is set for Monitoring Console CR + * Wait for the pod to be ready + * Upload V1 apps to GCS for Standalone + * Create app source for Standalone + * Prepare and deploy Standalone with app framework + * Check isDeploymentInProgress is set for Standalone CR + * Wait for the pod to be ready + */ + + // ################## SETUP FOR MONITORING CONSOLE #################### + + // Upload V1 apps to GCS for Monitoring Console + appVersion := "V1" + appFileList := testenv.GetAppFileList(appListV1) + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Monitoring Console", appVersion)) + + gcsTestDirMC := "s1appfw-mc-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testGCSBucket, gcsTestDirMC, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Monitoring Console %s", appVersion, testGCSBucket)) + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Create App framework spec for Monitoring Console + appSourceNameMC := "appframework-" + enterpriseApi.ScopeLocal + "mc-" + testenv.RandomDNSName(3) + appSourceVolumeNameMC := "appframework-test-volume-mc-" + testenv.RandomDNSName(3) + appFrameworkSpecMC := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeNameMC, enterpriseApi.ScopeLocal, appSourceNameMC, gcsTestDirMC, 60) + mcSpec := enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + }, + AppFrameworkConfig: appFrameworkSpecMC, + } + + // Deploy Monitoring Console + testcaseEnvInst.Log.Info("Deploy Monitoring Console") + mcName := deployment.GetName() + mc, err := deployment.DeployMonitoringConsoleWithGivenSpec(ctx, testcaseEnvInst.GetName(), mcName, mcSpec) + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console") + + // Verify IsDeploymentInProgress Flag is set to true for Monitoring Console CR + testcaseEnvInst.Log.Info("Checking isDeploymentInProgressFlag") + testenv.VerifyIsDeploymentInProgressFlagIsSet(ctx, deployment, testcaseEnvInst, mcName, mc.Kind) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + // ################## SETUP FOR STANDALONE #################### + // Upload V1 apps to GCS for Standalone + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCS for Standalone", appVersion)) + uploadedFiles, err = testenv.UploadFilesToGCP(testGCSBucket, gcsTestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCS test directory for Standalone", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...)
+ + // Maximum apps to be downloaded in parallel + maxConcurrentAppDownloads := 5 + + // Create App framework spec for Standalone + appSourceName = "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) + appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, gcsTestDir, 60) + appFrameworkSpec.MaxConcurrentAppDownloads = uint64(maxConcurrentAppDownloads) + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + MonitoringConsoleRef: corev1.ObjectReference{ + Name: mcName, + }, + }, + AppFrameworkConfig: appFrameworkSpec, + } + + // Deploy Standalone + testcaseEnvInst.Log.Info("Deploy Standalone") + standalone, err := deployment.DeployStandaloneWithGivenSpec(ctx, deployment.GetName(), spec) + Expect(err).To(Succeed(), "Unable to deploy Standalone instance with App framework") + + // Verify IsDeploymentInProgress Flag is set to true for Standalone CR + testenv.VerifyIsDeploymentInProgressFlagIsSet(ctx, deployment, testcaseEnvInst, deployment.GetName(), standalone.Kind) + + // Wait for Standalone to be in READY status + testenv.StandaloneReady(ctx, deployment, deployment.GetName(), standalone, testcaseEnvInst) + + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(ctx, deployment, deployment.GetName(), mc, testcaseEnvInst) + + }) + }) + +}) diff --git a/test/deploy-gcp-cluster.sh b/test/deploy-gcp-cluster.sh new file mode 100755 index 000000000..ceb7ba515 --- /dev/null +++ b/test/deploy-gcp-cluster.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +if [[ -z "${GCP_VPC_PUBLIC_SUBNET_STRING}" ]]; then + echo "GCP PUBLIC SUBNET STRING not set. Changing to env.sh value" + export GCP_VPC_PUBLIC_SUBNET_STRING="${VPC_PUBLIC_SUBNET_STRING}" +fi + +if [[ -z "${GCP_VPC_PRIVATE_SUBNET_STRING}" ]]; then + echo "GCP PRIVATE SUBNET STRING not set. Changing to env.sh value" + export GCP_VPC_PRIVATE_SUBNET_STRING="${VPC_PRIVATE_SUBNET_STRING}" +fi + +if [[ -z "${GCR_REPOSITORY}" ]]; then + echo "GCR_REPOSITORY not set. Changing to env.sh value" + export GCR_REPOSITORY="${PRIVATE_REGISTRY}" +fi + +if [[ -z "${GKE_CLUSTER_K8_VERSION}" ]]; then + echo "GKE_CLUSTER_K8_VERSION not set. Changing to 1.29" + export GKE_CLUSTER_K8_VERSION="1.29" +fi + +function deleteCluster() { + echo "Cleanup remaining PVC on the GKE Cluster ${TEST_CLUSTER_NAME}" + tools/cleanup.sh + gcloud container clusters delete ${TEST_CLUSTER_NAME} --zone ${GCP_ZONE} --project ${GCP_PROJECT_ID} --quiet + if [ $? -ne 0 ]; then + echo "Unable to delete cluster - ${TEST_CLUSTER_NAME}" + return 1 + fi + return 0 +} + +function createCluster() { + # Deploy gcloud cluster if not deployed + rc=$(which gcloud) + if [ -z "$rc" ]; then + echo "gcloud is not installed or in the PATH. Please install gcloud from https://cloud.google.com/sdk/docs/install." + return 1 + fi + + found=$(gcloud container clusters list --filter="name=${TEST_CLUSTER_NAME}" --format="value(name)") + if [ -z "${found}" ]; then + gcloud container clusters create ${TEST_CLUSTER_NAME} \ + --num-nodes=${CLUSTER_WORKERS} \ + --zone=${GCP_ZONE} \ + --disk-size 50 \ + --network ${GCP_NETWORK} \ + --subnetwork ${GCP_SUBNETWORK} \ + --machine-type n2-standard-8 \ + --scopes "https://www.googleapis.com/auth/cloud-platform" \ + --enable-ip-alias + if [ $? 
-ne 0 ]; then + echo "Unable to create cluster - ${TEST_CLUSTER_NAME}" + return 1 + fi + else + echo "Retrieving kubeconfig for ${TEST_CLUSTER_NAME}" + # Cluster exists but kubeconfig may not + gcloud container clusters get-credentials ${TEST_CLUSTER_NAME} --zone ${GCP_ZONE} + fi + + echo "Logging in to GCR" + gcloud auth configure-docker + if [ $? -ne 0 ]; then + echo "Unable to configure Docker for GCR" + return 1 + fi + + # Output + echo "GKE cluster nodes:" + kubectl get nodes +} diff --git a/test/env.sh b/test/env.sh index d374cdd13..08aed7af0 100644 --- a/test/env.sh +++ b/test/env.sh @@ -2,7 +2,7 @@ : "${SPLUNK_OPERATOR_IMAGE:=splunk/splunk-operator:latest}" : "${SPLUNK_ENTERPRISE_IMAGE:=splunk/splunk:latest}" -: "${CLUSTER_PROVIDER:=kind}" +: "${CLUSTER_PROVIDER:=eks}" : "${CLUSTER_NAME:=integration-test-cluster-eks}" : "${NUM_WORKERS:=3}" : "${NUM_NODES:=2}" @@ -13,11 +13,11 @@ : "${VPC_PRIVATE_SUBNET_STRING:=}" : "${EKS_CLUSTER_K8_VERSION:=1.31}" # Below env variables required to run license master test cases -: "${ENTERPRISE_LICENSE_S3_PATH:=}" -: "${TEST_S3_BUCKET:=}" +: "${ENTERPRISE_LICENSE_S3_PATH:=/test_licenses}" +: "${TEST_S3_BUCKET:=splk-test-data-bucket}" # Below env variables required to run remote indexes test cases -: "${INDEXES_S3_BUCKET:=}" -: "${AWS_S3_REGION:=}" +: "${INDEXES_S3_BUCKET:=splk-integration-test-bucket}" +: "${AWS_S3_REGION:=us-west-2}" # Azure specific variables : "${AZURE_REGION:=}" : "${AZURE_TEST_CONTAINER:=}" @@ -31,6 +31,19 @@ : "${AZURE_STORAGE_ACCOUNT:=}" : "${AZURE_STORAGE_ACCOUNT_KEY:=}" : "${AZURE_MANAGED_ID_ENABLED:=}" +# GCP specific variables +: "${GCP_REGION:=us-west2}" +: "${GCP_TEST_CONTAINER:=}" +: "${GCP_INDEXES_CONTAINER:=}" +: "${GCP_ENTERPRISE_LICENSE_PATH:=}" +: "${GCP_RESOURCE_GROUP:=}" +: "${GCP_CONTAINER_REGISTRY:=}" +: "${GCP_CONTAINER_REGISTRY_LOGIN_SERVER:=}" +: "${GCP_CLUSTER_AGENTPOOL:=}" +: "${GCP_CLUSTER_AGENTPOOL_RG:=}" +: "${GCP_STORAGE_ACCOUNT:=}" +: "${GCP_STORAGE_ACCOUNT_KEY:=}" +: "${GCP_MANAGED_ID_ENABLED:=}" # set when operator need to be installed clusterwide : "${CLUSTER_WIDE:=false}" # Below env variable can be used to set the test cases to be run. Defaults to smoke test @@ -42,6 +55,7 @@ : "${DEBUG_RUN:=False}" # Type of deplyoment, manifest files or helm chart, possible values "manifest" or "helm" : "${DEPLOYMENT_TYPE:=manifest}" +: "${TEST_CLUSTER_PLATFORM:=eks}" # Docker registry to use to push the test images to and pull from in the cluster if [ -z "${PRIVATE_REGISTRY}" ]; then @@ -64,5 +78,13 @@ if [ -z "${PRIVATE_REGISTRY}" ]; then PRIVATE_REGISTRY="${AZURE_CONTAINER_REGISTRY_LOGIN_SERVER}" echo "${PRIVATE_REGISTRY}" ;; + gcp) + if [ -z "${GCP_CONTAINER_REGISTRY_LOGIN_SERVER}" ]; then + echo "Please define GCP_CONTAINER_REGISTRY_LOGIN_SERVER that specified where images are pushed and pulled from." 
+ exit 1 + fi + PRIVATE_REGISTRY="${GCP_CONTAINER_REGISTRY_LOGIN_SERVER}" + echo "${PRIVATE_REGISTRY}" + ;; esac fi diff --git a/test/gcp-storageclass.yaml b/test/gcp-storageclass.yaml new file mode 100644 index 000000000..15e243c22 --- /dev/null +++ b/test/gcp-storageclass.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: default + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: kubernetes.io/gce-pd +reclaimPolicy: Delete +volumeBindingMode: Immediate \ No newline at end of file diff --git a/test/licensemanager/lm_s1_test.go b/test/licensemanager/lm_s1_test.go index 6e2f3d6ee..32227aa47 100644 --- a/test/licensemanager/lm_s1_test.go +++ b/test/licensemanager/lm_s1_test.go @@ -68,6 +68,11 @@ var _ = Describe("Licensemanager test", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) diff --git a/test/licensemanager/manager_lm_c3_test.go b/test/licensemanager/manager_lm_c3_test.go index af80f1fd2..d6cf4de92 100644 --- a/test/licensemanager/manager_lm_c3_test.go +++ b/test/licensemanager/manager_lm_c3_test.go @@ -73,6 +73,11 @@ var _ = Describe("Licensemanager test", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) @@ -155,6 +160,9 @@ var _ = Describe("Licensemanager test", func() { containerName := "/" + AzureDataContainer + "/" + appDirV1 err := testenv.DownloadFilesFromAzure(ctx, testenv.GetAzureEndpoint(ctx), testenv.StorageAccountKey, testenv.StorageAccount, downloadDirV1, containerName, appFileList) Expect(err).To(Succeed(), "Unable to download V1 app files") + case "gcp": + err := testenv.DownloadFilesFromGCP(testDataS3Bucket, appDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download V1 app files") } // Upload V1 apps @@ -171,6 +179,12 @@ var _ = Describe("Licensemanager test", func() { uploadedFiles, err := testenv.UploadFilesToAzure(ctx, testenv.StorageAccount, testenv.StorageAccountKey, downloadDirV1, testDir, appFileList) Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Azure", appVersion)) uploadedApps = append(uploadedApps, uploadedFiles...) 
+ case "gcp": + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3", appVersion)) + testDir = "lm-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testS3Bucket, testDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to gcp", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) } // Download License File @@ -186,6 +200,11 @@ var _ = Describe("Licensemanager test", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) @@ -199,6 +218,8 @@ var _ = Describe("Licensemanager test", func() { volumeSpec = []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(volumeName, testenv.GetS3Endpoint(), testcaseEnvInst.GetIndexSecretName(), "aws", "s3", testenv.GetDefaultS3Region())} case "azure": volumeSpec = []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpecAzure(volumeName, testenv.GetAzureEndpoint(ctx), testcaseEnvInst.GetIndexSecretName(), "azure", "blob")} + case "gcp": + volumeSpec = []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(volumeName, testenv.GetS3Endpoint(), testcaseEnvInst.GetIndexSecretName(), "gcp", "blob", testenv.GetDefaultS3Region())} } // AppSourceDefaultSpec: Remote Storage volume name and Scope of App deployment @@ -262,6 +283,8 @@ var _ = Describe("Licensemanager test", func() { case "azure": azureBlobClient := &testenv.AzureBlobClient{} azureBlobClient.DeleteFilesOnAzure(ctx, testenv.GetAzureEndpoint(ctx), testenv.StorageAccountKey, testenv.StorageAccount, uploadedApps) + case "gcp": + testenv.DeleteFilesOnGCP(testS3Bucket, uploadedApps) } uploadedApps = nil @@ -278,6 +301,9 @@ var _ = Describe("Licensemanager test", func() { containerName := "/" + AzureDataContainer + "/" + appDirV2 err := testenv.DownloadFilesFromAzure(ctx, testenv.GetAzureEndpoint(ctx), testenv.StorageAccountKey, testenv.StorageAccount, downloadDirV2, containerName, appFileList) Expect(err).To(Succeed(), "Unable to download V2 app files") + case "gcp": + err := testenv.DownloadFilesFromGCP(testDataS3Bucket, appDirV2, downloadDirV2, appFileList) + Expect(err).To(Succeed(), "Unable to download V2 app files") } // Upload V2 apps @@ -292,6 +318,11 @@ var _ = Describe("Licensemanager test", func() { uploadedFiles, err := testenv.UploadFilesToAzure(ctx, testenv.StorageAccount, testenv.StorageAccountKey, downloadDirV2, testDir, appFileList) Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Azure", appVersion)) uploadedApps = append(uploadedApps, uploadedFiles...) + case "gcp": + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testS3Bucket, testDir, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Gcp", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
} // Wait for the poll period for the apps to be downloaded @@ -313,6 +344,8 @@ var _ = Describe("Licensemanager test", func() { case "azure": azureBlobClient := &testenv.AzureBlobClient{} azureBlobClient.DeleteFilesOnAzure(ctx, testenv.GetAzureEndpoint(ctx), testenv.StorageAccountKey, testenv.StorageAccount, uploadedApps) + case "gcp": + testenv.DeleteFilesOnGCP(testS3Bucket, uploadedApps) } // Delete locally downloaded app files diff --git a/test/licensemanager/manager_lm_m4_test.go b/test/licensemanager/manager_lm_m4_test.go index db282dca6..7531147aa 100644 --- a/test/licensemanager/manager_lm_m4_test.go +++ b/test/licensemanager/manager_lm_m4_test.go @@ -67,6 +67,11 @@ var _ = Describe("Licensemanager test", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) diff --git a/test/licensemaster/lm_c3_test.go b/test/licensemaster/lm_c3_test.go index 986bb2d1f..54dcb5a54 100644 --- a/test/licensemaster/lm_c3_test.go +++ b/test/licensemaster/lm_c3_test.go @@ -74,6 +74,11 @@ var _ = Describe("licensemaster test", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) @@ -172,6 +177,12 @@ var _ = Describe("licensemaster test", func() { uploadedFiles, err := testenv.UploadFilesToAzure(ctx, testenv.StorageAccount, testenv.StorageAccountKey, downloadDirV1, testDir, appFileList) Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Azure", appVersion)) uploadedApps = append(uploadedApps, uploadedFiles...) + case "gcp": + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to GCP", appVersion)) + testDir = "lm-" + testenv.RandomDNSName(4) + uploadedFiles, err := testenv.UploadFilesToGCP(testS3Bucket, testDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to S3", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
} // Download License File @@ -187,6 +198,11 @@ var _ = Describe("licensemaster test", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) @@ -200,6 +216,8 @@ var _ = Describe("licensemaster test", func() { volumeSpec = []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(volumeName, testenv.GetS3Endpoint(), testcaseEnvInst.GetIndexSecretName(), "aws", "s3", testenv.GetDefaultS3Region())} case "azure": volumeSpec = []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpecAzure(volumeName, testenv.GetAzureEndpoint(ctx), testcaseEnvInst.GetIndexSecretName(), "azure", "blob")} + case "gcp": + volumeSpec = []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(volumeName, testenv.GetS3Endpoint(), testcaseEnvInst.GetIndexSecretName(), "gcp", "blob", testenv.GetDefaultS3Region())} } // AppSourceDefaultSpec: Remote Storage volume name and Scope of App deployment @@ -280,6 +298,9 @@ var _ = Describe("licensemaster test", func() { containerName := "/" + AzureDataContainer + "/" + appDirV2 err := testenv.DownloadFilesFromAzure(ctx, testenv.GetAzureEndpoint(ctx), testenv.StorageAccountKey, testenv.StorageAccount, downloadDirV2, containerName, appFileList) Expect(err).To(Succeed(), "Unable to download V2 app files") + case "gcp": + err := testenv.DownloadFilesFromGCP(testDataS3Bucket, appDirV2, downloadDirV2, appFileList) + Expect(err).To(Succeed(), "Unable to download V2 app files") } // Upload V2 apps @@ -294,6 +315,11 @@ var _ = Describe("licensemaster test", func() { uploadedFiles, err := testenv.UploadFilesToAzure(ctx, testenv.StorageAccount, testenv.StorageAccountKey, downloadDirV2, testDir, appFileList) Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to Azure", appVersion)) uploadedApps = append(uploadedApps, uploadedFiles...) + case "gcp": + testcaseEnvInst.Log.Info(fmt.Sprintf("Upload %s apps to S3", appVersion)) + uploadedFiles, err := testenv.UploadFilesToGCP(testS3Bucket, testDir, appFileList, downloadDirV2) + Expect(err).To(Succeed(), fmt.Sprintf("Unable to upload %s apps to GCP", appVersion)) + uploadedApps = append(uploadedApps, uploadedFiles...) 
} // Wait for the poll period for the apps to be downloaded @@ -315,6 +341,8 @@ var _ = Describe("licensemaster test", func() { case "azure": azureBlobClient := &testenv.AzureBlobClient{} azureBlobClient.DeleteFilesOnAzure(ctx, testenv.GetAzureEndpoint(ctx), testenv.StorageAccountKey, testenv.StorageAccount, uploadedApps) + case "gcp": + testenv.DeleteFilesOnGCP(testS3Bucket, uploadedApps) } // Delete locally downloaded app files diff --git a/test/licensemaster/lm_m4_test.go b/test/licensemaster/lm_m4_test.go index 46d72126f..bba8dc3f7 100644 --- a/test/licensemaster/lm_m4_test.go +++ b/test/licensemaster/lm_m4_test.go @@ -67,6 +67,11 @@ var _ = Describe("Licensemaster test", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) diff --git a/test/licensemaster/lm_s1_test.go b/test/licensemaster/lm_s1_test.go index ff8531342..dbb7dd226 100644 --- a/test/licensemaster/lm_s1_test.go +++ b/test/licensemaster/lm_s1_test.go @@ -68,6 +68,11 @@ var _ = Describe("Licensemanager test", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) diff --git a/test/run-tests.sh b/test/run-tests.sh index a5069cf83..e72757603 100755 --- a/test/run-tests.sh +++ b/test/run-tests.sh @@ -180,6 +180,31 @@ case ${CLUSTER_PROVIDER} in export STORAGE_ACCOUNT_KEY="${AZURE_STORAGE_ACCOUNT_KEY}" fi ;; + "gcp") + if [[ -z "${GCP_ENTERPRISE_LICENSE_LOCATION}" ]]; then + echo "License path not set. Changing to default" + export ENTERPRISE_LICENSE_LOCATION="${GCP_ENTERPRISE_LICENSE_LOCATION}" + fi + if [[ -z "${ENTERPRISE_LICENSE_LOCATION}" ]]; then + echo "License path not set. Changing to default" + export ENTERPRISE_LICENSE_LOCATION="${ENTERPRISE_LICENSE_S3_PATH}" + fi + + if [[ -z "${TEST_BUCKET}" ]]; then + echo "Data bucket not set. Changing to default" + export TEST_BUCKET="${TEST_S3_BUCKET}" + fi + + if [[ -z "${TEST_INDEXES_S3_BUCKET}" ]]; then + echo "Test bucket not set. Changing to default" + export TEST_INDEXES_S3_BUCKET="${INDEXES_S3_BUCKET}" + fi + + if [[ -z "${S3_REGION}" ]]; then + echo "S3 Region not set. 
Changing to default" + export S3_REGION="${AWS_S3_REGION}" + fi + ;; esac diff --git a/test/secret/manager_secret_c3_test.go b/test/secret/manager_secret_c3_test.go index 1c4740472..ef349f179 100644 --- a/test/secret/manager_secret_c3_test.go +++ b/test/secret/manager_secret_c3_test.go @@ -76,6 +76,11 @@ var _ = Describe("Secret Test for SVA C3", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) diff --git a/test/secret/manager_secret_m4_test.go b/test/secret/manager_secret_m4_test.go index 7e19b60d4..3e4aa8f9c 100644 --- a/test/secret/manager_secret_m4_test.go +++ b/test/secret/manager_secret_m4_test.go @@ -78,6 +78,11 @@ var _ = Describe("Secret Test for M4 SVA", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) diff --git a/test/secret/manager_secret_s1_test.go b/test/secret/manager_secret_s1_test.go index b26afbdb4..bbe7d5e9d 100644 --- a/test/secret/manager_secret_s1_test.go +++ b/test/secret/manager_secret_s1_test.go @@ -78,6 +78,11 @@ var _ = Describe("Secret Test for SVA S1", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) @@ -180,6 +185,11 @@ var _ = Describe("Secret Test for SVA S1", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) diff --git a/test/secret/secret_c3_test.go b/test/secret/secret_c3_test.go index 2375deb5a..162312fa5 100644 --- a/test/secret/secret_c3_test.go +++ b/test/secret/secret_c3_test.go @@ -76,6 +76,11 @@ var _ = Describe("Secret 
Test for SVA C3", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) diff --git a/test/secret/secret_m4_test.go b/test/secret/secret_m4_test.go index a56bc7ced..73ecbf922 100644 --- a/test/secret/secret_m4_test.go +++ b/test/secret/secret_m4_test.go @@ -79,6 +79,11 @@ var _ = Describe("Secret Test for M4 SVA", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) diff --git a/test/secret/secret_s1_test.go b/test/secret/secret_s1_test.go index 4e2dd919f..88277f787 100644 --- a/test/secret/secret_s1_test.go +++ b/test/secret/secret_s1_test.go @@ -78,6 +78,11 @@ var _ = Describe("Secret Test for SVA S1", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) @@ -180,6 +185,11 @@ var _ = Describe("Secret Test for SVA S1", func() { Expect(err).To(Succeed(), "Unable to download license file from Azure") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) + case "gcp": + licenseFilePath, err := testenv.DownloadLicenseFromGCPBucket() + Expect(err).To(Succeed(), "Unable to download license file from GCP") + // Create License Config Map + testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) default: fmt.Printf("Unable to download license file") testcaseEnvInst.Log.Info(fmt.Sprintf("Unable to download license file with Cluster Provider set as %v", testenv.ClusterProvider)) diff --git a/test/testenv/appframework_utils.go b/test/testenv/appframework_utils.go index 44a3f2845..9413f5302 100644 --- a/test/testenv/appframework_utils.go +++ b/test/testenv/appframework_utils.go @@ -376,8 +376,11 @@ func GenerateAppFrameworkSpec(ctx context.Context, testenvInstance *TestCaseEnv, } else { volumeSpec = []enterpriseApi.VolumeSpec{GenerateIndexVolumeSpecAzureManagedID(volumeName, GetAzureEndpoint(ctx), "azure", "blob")} } + case "gcp": + volumeSpec = []enterpriseApi.VolumeSpec{GenerateIndexVolumeSpec(volumeName, GetGCPEndpoint(), testenvInstance.GetIndexSecretName(), "gcp", "gcs", 
GetDefaultS3Region())}
+
 	default:
-		testenvInstance.Log.Info("Failed to identify cluster provider name: Should be 'eks' or 'azure' ")
+		testenvInstance.Log.Info("Failed to identify cluster provider name: Should be 'eks' or 'azure' or 'gcp' ")
 	}
 	// AppSourceDefaultSpec: Remote Storage volume name and Scope of App deployment
diff --git a/test/testenv/gcputils.go b/test/testenv/gcputils.go
new file mode 100644
index 000000000..e999f4116
--- /dev/null
+++ b/test/testenv/gcputils.go
@@ -0,0 +1,628 @@
+package testenv
+
+import (
+	"archive/tar"
+	"compress/gzip"
+	"context"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/storage"
+	"github.com/google/uuid"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// Set GCP Variables
+var (
+	gcpProjectID                 = os.Getenv("GCP_PROJECT_ID")
+	gcpRegion                    = os.Getenv("GCP_REGION")
+	testGCPBucket                = os.Getenv("TEST_BUCKET")
+	testIndexesGCPBucket         = os.Getenv("TEST_INDEXES_GCP_BUCKET")
+	enterpriseLicenseLocationGCP = os.Getenv("ENTERPRISE_LICENSE_LOCATION")
+)
+
+// GetSmartStoreIndexesBucket returns the SmartStore test bucket name
+func GetSmartStoreIndexesBucket() string {
+	return testIndexesGCPBucket
+}
+
+// GetDefaultGCPRegion returns the default GCP Region
+func GetDefaultGCPRegion() string {
+	return gcpRegion
+}
+
+// GetGCPEndpoint returns the GCP Storage endpoint
+func GetGCPEndpoint() string {
+	return "https://storage.googleapis.com"
+}
+
+// GCPClient wraps the GCP Storage client
+type GCPClient struct {
+	Client *storage.Client
+	Ctx    context.Context
+}
+
+// NewGCPClient initializes and returns a GCPClient
+func NewGCPClient() (*GCPClient, error) {
+	ctx := context.Background()
+	var client *storage.Client
+	encodedString := os.Getenv("GCP_SERVICE_ACCOUNT_KEY")
+	gcpCredentials, err := base64.StdEncoding.DecodeString(encodedString)
+	if err != nil {
+		logf.Log.Error(err, "Error decoding GCP service account key")
+		return nil, err
+	}
+
+	if len(gcpCredentials) == 0 {
+		// No key provided: fall back to Application Default Credentials
+		client, err = storage.NewClient(ctx)
+		if err != nil {
+			logf.Log.Error(err, "Failed to create GCP Storage client")
+			return nil, err
+		}
+	} else {
+		client, err = storage.NewClient(ctx, option.WithCredentialsJSON([]byte(gcpCredentials)))
+		if err != nil {
+			logf.Log.Error(err, "Failed to create GCP Storage client")
+			return nil, err
+		}
+	}
+
+	return &GCPClient{
+		Client: client,
+		Ctx:    ctx,
+	}, nil
+}
+
+// CheckPrefixExistsOnGCP checks if a prefix exists in a GCP bucket
+func CheckPrefixExistsOnGCP(prefix string) bool {
+	dataBucket := testIndexesGCPBucket
+	client, err := NewGCPClient()
+	if err != nil {
+		return false
+	}
+	defer client.Client.Close()
+
+	it := client.Client.Bucket(dataBucket).Objects(client.Ctx, &storage.Query{
+		Prefix: prefix,
+		// You can set other query parameters if needed
+	})
+
+	for {
+		objAttrs, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			logf.Log.Error(err, "Error listing objects in GCP bucket")
+			return false
+		}
+
logf.Log.Info("CHECKING OBJECT", "OBJECT", objAttrs.Name) + if strings.Contains(objAttrs.Name, prefix) { + logf.Log.Info("Prefix found in bucket", "Prefix", prefix, "Object", objAttrs.Name) + return true + } + } + return false +} + +// CreateBucketAndPathIfNotExist creates a bucket and path if they do not exist +func CreateBucketAndPathIfNotExist(bucketName, path string) error { + client, err := NewGCPClient() + if err != nil { + return err + } + defer client.Client.Close() + + ctx, cancel := context.WithTimeout(client.Ctx, time.Minute*5) + defer cancel() + + // Check if the bucket exists + _, err = client.Client.Bucket(bucketName).Attrs(ctx) + if err == storage.ErrBucketNotExist { + // Create the bucket + err = client.Client.Bucket(bucketName).Create(ctx, gcpProjectID, nil) + if err != nil { + logf.Log.Error(err, "Failed to create bucket", "Bucket Name", bucketName) + return err + } + logf.Log.Info("Bucket created", "Bucket Name", bucketName) + } else if err != nil { + logf.Log.Error(err, "Error checking bucket attributes", "Bucket Name", bucketName) + return err + } + + // Check if the path exists by trying to get its attributes + _, err = client.Client.Bucket(bucketName).Object(path).Attrs(ctx) + if err == storage.ErrObjectNotExist { + // Create a zero-length object to represent the path + wc := client.Client.Bucket(bucketName).Object(path).NewWriter(ctx) + if _, err := wc.Write([]byte{}); err != nil { + logf.Log.Error(err, "Failed to create path", "Path", path) + return err + } + if err := wc.Close(); err != nil { + logf.Log.Error(err, "Failed to finalize path creation", "Path", path) + return err + } + logf.Log.Info("Path created", "Path", path) + } else if err != nil { + logf.Log.Error(err, "Error checking path attributes", "Path", path) + return err + } + + return nil +} + +// DownloadLicenseFromGCPBucket downloads the license file from GCP +func DownloadLicenseFromGCPBucket() (string, error) { + location := enterpriseLicenseLocationGCP + item := "enterprise.lic" + dataBucket := testGCPBucket + filename, err := DownloadFileFromGCP(dataBucket, item, location, ".") + return filename, err +} + +// DownloadFileFromGCP downloads a file from a GCP bucket to a local directory +func DownloadFileFromGCP(bucketName, objectName, gcpFilePath, downloadDir string) (string, error) { + // Ensure the download directory exists + if _, err := os.Stat(downloadDir); errors.Is(err, os.ErrNotExist) { + err := os.MkdirAll(downloadDir, os.ModePerm) + if err != nil { + logf.Log.Error(err, "Unable to create download directory") + return "", err + } + } + + client, err := NewGCPClient() + if err != nil { + return "", err + } + defer client.Client.Close() + + ctx, cancel := context.WithTimeout(client.Ctx, time.Minute*10) + defer cancel() + + objectPath := filepath.Join(gcpFilePath, objectName) + rc, err := client.Client.Bucket(bucketName).Object(objectPath).NewReader(ctx) + if err != nil { + logf.Log.Error(err, "Failed to create reader for object", "Object", objectName) + return "", err + } + defer rc.Close() + + localPath := filepath.Join(downloadDir, objectName) + file, err := os.Create(localPath) + if err != nil { + logf.Log.Error(err, "Failed to create local file", "Filename", localPath) + return "", err + } + defer file.Close() + + written, err := io.Copy(file, rc) + if err != nil { + logf.Log.Error(err, "Failed to download object", "Object", objectName) + return "", err + } + + logf.Log.Info("Downloaded", "filename", localPath, "bytes", written) + return localPath, nil +} + +// UploadFileToGCP uploads a 
file to a GCP bucket +func UploadFileToGCP(bucketName, objectName, path string, file *os.File) (string, error) { + client, err := NewGCPClient() + if err != nil { + return "", err + } + defer client.Client.Close() + + ctx, cancel := context.WithTimeout(client.Ctx, time.Minute*10) + defer cancel() + + objectPath := filepath.Join(path, objectName) + wc := client.Client.Bucket(bucketName).Object(objectPath).NewWriter(ctx) + defer wc.Close() + + written, err := io.Copy(wc, file) + if err != nil { + logf.Log.Error(err, "Failed to upload file to GCP", "Filename", objectName) + return "", err + } + + if err := wc.Close(); err != nil { + logf.Log.Error(err, "Failed to finalize upload to GCP", "Filename", objectName) + return "", err + } + + logf.Log.Info("Uploaded", "filename", file.Name(), "bytes", written) + return objectPath, nil +} + +// GetFileListOnGCP lists objects in a GCP bucket with the given prefix +func GetFileListOnGCP(bucketName, prefix string) []*storage.ObjectAttrs { + client, err := NewGCPClient() + if err != nil { + return nil + } + defer client.Client.Close() + + it := client.Client.Bucket(bucketName).Objects(client.Ctx, &storage.Query{ + Prefix: prefix, + }) + + var objects []*storage.ObjectAttrs + for { + objAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + logf.Log.Error(err, "Error listing objects in GCP bucket") + return nil + } + objects = append(objects, objAttrs) + } + return objects +} + +// DeleteFilesOnGCP deletes a list of files from a GCP bucket +func DeleteFilesOnGCP(bucketName string, filenames []string) error { + client, err := NewGCPClient() + if err != nil { + return err + } + defer client.Client.Close() + + for _, file := range filenames { + err := DeleteFileOnGCP(bucketName, file) + if err != nil { + return err + } + } + return nil +} + +// DeleteFileOnGCP deletes a single file from a GCP bucket +func DeleteFileOnGCP(bucketName, objectName string) error { + client, err := NewGCPClient() + if err != nil { + return err + } + defer client.Client.Close() + + ctx, cancel := context.WithTimeout(client.Ctx, time.Minute*5) + defer cancel() + + err = client.Client.Bucket(bucketName).Object(objectName).Delete(ctx) + if err != nil && err != storage.ErrObjectNotExist { + logf.Log.Error(err, "Unable to delete object from bucket", "Object Name", objectName, "Bucket Name", bucketName) + return err + } + + // Optionally, verify deletion + _, err = client.Client.Bucket(bucketName).Object(objectName).Attrs(ctx) + if err == storage.ErrObjectNotExist { + logf.Log.Info("Deleted file on GCP", "File Name", objectName, "Bucket", bucketName) + return nil + } + if err != nil { + logf.Log.Error(err, "Error verifying deletion of object", "Object Name", objectName, "Bucket Name", bucketName) + return err + } + + return errors.New("object still exists after deletion") +} + +// GetFilesInPathOnGCP retrieves a list of file names under a given path in a GCP bucket +func GetFilesInPathOnGCP(bucketName, path string) []string { + resp := GetFileListOnGCP(bucketName, path) + var files []string + for _, obj := range resp { + logf.Log.Info("CHECKING OBJECT", "OBJECT", obj.Name) + if strings.HasPrefix(obj.Name, path) { + filename := strings.TrimPrefix(obj.Name, path) + // This condition filters out directories as GCP returns objects with their full paths + if len(filename) > 1 && !strings.HasSuffix(filename, "/") { + logf.Log.Info("File found in bucket", "Path", path, "Object", obj.Name) + files = append(files, filename) + } + } + } + return files +} + +// 
DownloadFilesFromGCP downloads a list of files from a GCP bucket to a local directory +func DownloadFilesFromGCP(bucketName, gcpAppDir, downloadDir string, appList []string) error { + for _, key := range appList { + logf.Log.Info("Downloading file from GCP", "File name", key) + _, err := DownloadFileFromGCP(bucketName, key, gcpAppDir, downloadDir) + if err != nil { + logf.Log.Error(err, "Unable to download file", "File Name", key) + return err + } + } + return nil +} + +// UploadFilesToGCP uploads a list of files to a specified path in a GCP bucket +func UploadFilesToGCP(bucketName, gcpTestDir string, appList []string, uploadDir string) ([]string, error) { + var uploadedFiles []string + for _, key := range appList { + logf.Log.Info("Uploading file to GCP", "File name", key) + logf.Log.Info("Using bucket", "Bucket", bucketName, "Path", gcpTestDir, "Upload Dir", uploadDir) + fileLocation := filepath.Join(uploadDir, key) + fileBody, err := os.Open(fileLocation) + if err != nil { + logf.Log.Error(err, "Unable to open file", "File name", key) + return nil, err + } + defer fileBody.Close() + + objectPath, err := UploadFileToGCP(bucketName, key, gcpTestDir, fileBody) + if err != nil { + logf.Log.Error(err, "Unable to upload file", "File name", key) + return nil, err + } + logf.Log.Info("File uploaded to GCP", "File name", objectPath) + uploadedFiles = append(uploadedFiles, objectPath) + } + return uploadedFiles, nil +} + +// DisableAppsToGCP untars apps, modifies their config files to disable them, re-tars, and uploads the disabled versions to GCP +func DisableAppsToGCP(downloadDir string, appFileList []string, gcpTestDir string) ([]string, error) { + // Create directories for untarred and disabled apps + untarredAppsMainFolder := filepath.Join(downloadDir, "untarred_apps") + disabledAppsFolder := filepath.Join(downloadDir, "disabled_apps") + + err := os.MkdirAll(untarredAppsMainFolder, os.ModePerm) + if err != nil { + logf.Log.Error(err, "Unable to create directory for untarred apps") + return nil, err + } + + err = os.MkdirAll(disabledAppsFolder, os.ModePerm) + if err != nil { + logf.Log.Error(err, "Unable to create directory for disabled apps") + return nil, err + } + + for _, key := range appFileList { + // Create a unique folder for each app to avoid conflicts + appUniqueID := uuid.New().String() + untarredCurrentAppFolder := filepath.Join(untarredAppsMainFolder, key+"_"+appUniqueID) + err := os.MkdirAll(untarredCurrentAppFolder, os.ModePerm) + if err != nil { + logf.Log.Error(err, "Unable to create folder for current app", "App", key) + return nil, err + } + + // Untar the app + tarfile := filepath.Join(downloadDir, key) + err = untarFile(tarfile, untarredCurrentAppFolder) + if err != nil { + logf.Log.Error(err, "Failed to untar app", "App", key) + return nil, err + } + + // Disable the app by modifying its config file + appConfFile := filepath.Join(untarredCurrentAppFolder, "default", "app.conf") + err = disableAppConfig(appConfFile) + if err != nil { + logf.Log.Error(err, "Failed to disable app config", "File", appConfFile) + return nil, err + } + + // Tar the disabled app + tarDestination := filepath.Join(disabledAppsFolder, key) + err = tarGzFolder(untarredCurrentAppFolder, tarDestination) + if err != nil { + logf.Log.Error(err, "Failed to tar disabled app", "App", key) + return nil, err + } + } + + // Upload disabled apps to GCP + uploadedFiles, err := UploadFilesToGCP(testIndexesGCPBucket, gcpTestDir, appFileList, disabledAppsFolder) + if err != nil { + logf.Log.Error(err, "Failed to 
upload disabled apps to GCP")
+		return nil, err
+	}
+
+	return uploadedFiles, nil
+}
+
+// untarFile extracts a tar.gz file to the specified destination
+func untarFile(src, dest string) error {
+	file, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	gzr, err := gzip.NewReader(file)
+	if err != nil {
+		return err
+	}
+	defer gzr.Close()
+
+	tarReader := tar.NewReader(gzr)
+
+	for {
+		header, err := tarReader.Next()
+		if err == io.EOF {
+			break // End of archive
+		}
+		if err != nil {
+			return err
+		}
+
+		// Sanitize the file path to prevent Zip Slip
+		targetPath := filepath.Join(dest, header.Name)
+		if !strings.HasPrefix(targetPath, filepath.Clean(dest)+string(os.PathSeparator)) {
+			return fmt.Errorf("invalid file path: %s", targetPath)
+		}
+
+		switch header.Typeflag {
+		case tar.TypeDir:
+			// Create Directory
+			if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil {
+				return err
+			}
+		case tar.TypeReg:
+			// Create File
+			err := os.MkdirAll(filepath.Dir(targetPath), os.ModePerm)
+			if err != nil {
+				return err
+			}
+			outFile, err := os.Create(targetPath)
+			if err != nil {
+				return err
+			}
+			if _, err := io.Copy(outFile, tarReader); err != nil {
+				outFile.Close()
+				return err
+			}
+			outFile.Close()
+			// Set file permissions
+			if err := os.Chmod(targetPath, os.FileMode(header.Mode)); err != nil {
+				return err
+			}
+		default:
+			logf.Log.Info("Unknown type in tar archive", "Type", header.Typeflag, "Name", header.Name)
+		}
+	}
+	return nil
+}
+
+// tarGzFolder creates a tar.gz archive from the specified folder
+func tarGzFolder(sourceDir, tarGzPath string) error {
+	file, err := os.Create(tarGzPath)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	gzw := gzip.NewWriter(file)
+	defer gzw.Close()
+
+	tw := tar.NewWriter(gzw)
+	defer tw.Close()
+
+	err = filepath.Walk(sourceDir, func(filePath string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Create header
+		header, err := tar.FileInfoHeader(info, info.Name())
+		if err != nil {
+			return err
+		}
+
+		// Update the name to maintain the folder structure
+		relPath, err := filepath.Rel(sourceDir, filePath)
+		if err != nil {
+			return err
+		}
+		header.Name = relPath
+
+		// Write header
+		if err := tw.WriteHeader(header); err != nil {
+			return err
+		}
+
+		// If not a regular file, skip
+		if !info.Mode().IsRegular() {
+			return nil
+		}
+
+		// Open file for reading
+		f, err := os.Open(filePath)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+
+		// Copy file data to tar writer
+		if _, err := io.Copy(tw, f); err != nil {
+			return err
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// disableAppConfig modifies the app.conf file to disable the app
+func disableAppConfig(appConfFile string) error {
+	input, err := os.ReadFile(appConfFile)
+	if err != nil {
+		return err
+	}
+	lines := strings.Split(string(input), "\n")
+	var outputLines []string
+	inInstallSection := false
+
+	for _, line := range lines {
+		trimmedLine := strings.TrimSpace(line)
+		if strings.HasPrefix(trimmedLine, "[install]") {
+			inInstallSection = true
+			outputLines = append(outputLines, "[install]")
+			outputLines = append(outputLines, "state = disabled")
+			continue
+		}
+		if inInstallSection {
+			if strings.HasPrefix(trimmedLine, "state = enabled") {
+				// Skip this line
+				continue
+			}
+			// Exit install section on encountering another section
+			if strings.HasPrefix(trimmedLine, "[")
&& strings.HasSuffix(trimmedLine, "]") { + inInstallSection = false + } + } + outputLines = append(outputLines, line) + } + + output := strings.Join(outputLines, "\n") + err = os.WriteFile(appConfFile, []byte(output), 0644) + if err != nil { + return err + } + + return nil +} diff --git a/test/testenv/testcaseenv.go b/test/testenv/testcaseenv.go index 29a81e39f..b09e1c731 100644 --- a/test/testenv/testcaseenv.go +++ b/test/testenv/testcaseenv.go @@ -17,6 +17,7 @@ package testenv import ( "context" + "encoding/base64" "fmt" "os" "time" @@ -148,6 +149,8 @@ func (testenv *TestCaseEnv) setup() error { testenv.createIndexSecret() case "azure": testenv.createIndexSecretAzure() + case "gcp": + testenv.createIndexSecretGCP() default: testenv.Log.Info("Failed to create secret object") } @@ -525,6 +528,34 @@ func (testenv *TestCaseEnv) createIndexSecret() error { return nil } +// CreateIndexSecret create secret object +func (testenv *TestCaseEnv) createIndexSecretGCP() error { + secretName := testenv.s3IndexSecret + ns := testenv.namespace + encodedString := os.Getenv("GCP_SERVICE_ACCOUNT_KEY") + gcpCredentials, err := base64.StdEncoding.DecodeString(encodedString) + if err != nil { + testenv.Log.Error(err, "Unable to decode GCP service account key") + return err + } + data := map[string][]byte{"key.json": []byte(gcpCredentials)} + secret := newSecretSpec(ns, secretName, data) + if err := testenv.GetKubeClient().Create(context.TODO(), secret); err != nil { + testenv.Log.Error(err, "Unable to create GCP index secret object") + return err + } + + testenv.pushCleanupFunc(func() error { + err := testenv.GetKubeClient().Delete(context.TODO(), secret) + if err != nil { + testenv.Log.Error(err, "Unable to delete GCP index secret object") + return err + } + return nil + }) + return nil +} + // createIndexSecretAzure create secret object for Azure func (testenv *TestCaseEnv) createIndexSecretAzure() error { secretName := testenv.s3IndexSecret diff --git a/test/testenv/testenv.go b/test/testenv/testenv.go index 1099314e1..a7556c1b7 100644 --- a/test/testenv/testenv.go +++ b/test/testenv/testenv.go @@ -58,7 +58,7 @@ const ( ConsistentDuration = 2000 * time.Millisecond // DefaultTimeout is the max timeout before we failed. 
- DefaultTimeout = 200 * time.Minute + DefaultTimeout = 2000 * time.Minute // SearchHeadPod Template String for search head pod SearchHeadPod = "splunk-%s-shc-search-head-%d" diff --git a/test/testenv/util.go b/test/testenv/util.go index a12dbd174..565d122dd 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -801,6 +801,7 @@ func GetOperatorPodName(testcaseEnvInst *TestCaseEnv) string { return splunkPods } } + logf.Log.Info("Operator pod is set to ", "operatorPod", splunkPods) return splunkPods } diff --git a/test/testenv/verificationutils.go b/test/testenv/verificationutils.go index e62d6e5b9..d2fe8e802 100644 --- a/test/testenv/verificationutils.go +++ b/test/testenv/verificationutils.go @@ -72,8 +72,7 @@ func VerifyMonitoringConsoleReady(ctx context.Context, deployment *Deployment, m } testenvInstance.Log.Info("Waiting for Monitoring Console phase to be ready", "instance", monitoringConsole.ObjectMeta.Name, "Phase", monitoringConsole.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return monitoringConsole.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -94,8 +93,7 @@ func StandaloneReady(ctx context.Context, deployment *Deployment, deploymentName } testenvInstance.Log.Info("Waiting for Standalone phase to be ready", "instance", standalone.ObjectMeta.Name, "Phase", standalone.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return standalone.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -118,8 +116,7 @@ func SearchHeadClusterReady(ctx context.Context, deployment *Deployment, testenv } testenvInstance.Log.Info("Waiting for Search head cluster phase to be ready", "instance", shc.ObjectMeta.Name, "Phase", shc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return shc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -130,8 +127,7 @@ func SearchHeadClusterReady(ctx context.Context, deployment *Deployment, testenv } testenvInstance.Log.Info("Waiting for Deployer phase to be ready", "instance", shc.ObjectMeta.Name, "Phase", shc.Status.DeployerPhase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return shc.Status.DeployerPhase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -142,8 +138,7 @@ func SearchHeadClusterReady(ctx context.Context, deployment *Deployment, testenv } testenvInstance.Log.Info("Waiting for Search Head Cluster phase to be ready", "instance", shc.ObjectMeta.Name, "Phase", shc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + DumpGetSplunkVersion(ctx, testenvInstance.GetName(), deployment, "-shc-") return shc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -167,8 +162,7 @@ func SingleSiteIndexersReady(ctx context.Context, deployment *Deployment, testen } testenvInstance.Log.Info("Waiting for indexer instance's phase to be ready", "instance", instanceName, "Phase", idc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return idc.Status.Phase }, deployment.GetTimeout(), 
PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -192,8 +186,7 @@ func ClusterManagerReady(ctx context.Context, deployment *Deployment, testenvIns } testenvInstance.Log.Info("Waiting for cluster-manager phase to be ready", "instance", cm.ObjectMeta.Name, "Phase", cm.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // Test ClusterManager Phase to see if its ready return cm.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -219,8 +212,7 @@ func ClusterMasterReady(ctx context.Context, deployment *Deployment, testenvInst } testenvInstance.Log.Info("Waiting for cluster-master phase to be ready", "instance", cm.ObjectMeta.Name, "Phase", cm.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // Test ClusterMaster Phase to see if its ready return cm.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -249,8 +241,7 @@ func IndexersReady(ctx context.Context, deployment *Deployment, testenvInstance } testenvInstance.Log.Info("Waiting for indexer site instance phase to be ready", "instance", instanceName, "Phase", idc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return idc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -340,8 +331,6 @@ func LicenseManagerReady(ctx context.Context, deployment *Deployment, testenvIns testenvInstance.Log.Info("Waiting for License Manager instance status to be ready", "instance", LicenseManager.ObjectMeta.Name, "Phase", LicenseManager.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() return LicenseManager.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -366,8 +355,6 @@ func LicenseMasterReady(ctx context.Context, deployment *Deployment, testenvInst testenvInstance.Log.Info("Waiting for License Master instance status to be ready", "instance", LicenseMaster.ObjectMeta.Name, "Phase", LicenseMaster.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() return LicenseMaster.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -482,8 +469,7 @@ func VerifySearchHeadClusterPhase(ctx context.Context, deployment *Deployment, t } testenvInstance.Log.Info("Waiting for Search Head Cluster Phase", "instance", shc.ObjectMeta.Name, "Expected", phase, "Phase", shc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return shc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseScalingUp)) } @@ -498,8 +484,7 @@ func VerifyIndexerClusterPhase(ctx context.Context, deployment *Deployment, test } testenvInstance.Log.Info("Waiting for Indexer Cluster Phase", "instance", idxc.ObjectMeta.Name, "Expected", phase, "Phase", idxc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return idxc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase)) } @@ -514,8 +499,7 @@ func VerifyStandalonePhase(ctx context.Context, deployment *Deployment, testenvI } testenvInstance.Log.Info("Waiting 
for Standalone status", "instance", standalone.ObjectMeta.Name, "Expected", phase, " Actual Phase", standalone.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return standalone.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase)) } @@ -530,8 +514,7 @@ func VerifyMonitoringConsolePhase(ctx context.Context, deployment *Deployment, t } testenvInstance.Log.Info("Waiting for Monitoring Console CR status", "instance", mc.ObjectMeta.Name, "Expected", phase, " Actual Phase", mc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return mc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase)) } @@ -626,8 +609,7 @@ func VerifyCustomResourceVersionChanged(ctx context.Context, deployment *Deploym } testenvInstance.Log.Info("Waiting for ", kind, " CR status", "instance", name, "Not Expected", resourceVersion, " Actual Resource Version", newResourceVersion) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return newResourceVersion }, deployment.GetTimeout(), PollInterval).ShouldNot(gomega.Equal(resourceVersion)) } @@ -669,8 +651,7 @@ func VerifyClusterManagerPhase(ctx context.Context, deployment *Deployment, test } testenvInstance.Log.Info("Waiting for cluster-manager Phase", "instance", cm.ObjectMeta.Name, "Phase", cm.Status.Phase, "Expected", phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // Test ClusterManager Phase to see if its ready return cm.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase)) @@ -686,8 +667,7 @@ func VerifyClusterMasterPhase(ctx context.Context, deployment *Deployment, teste } testenvInstance.Log.Info("Waiting for cluster-manager Phase", "instance", cm.ObjectMeta.Name, "Phase", cm.Status.Phase, "Expected", phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // Test ClusterManager Phase to see if its ready return cm.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase)) @@ -1051,8 +1031,7 @@ func VerifyClusterManagerBundlePush(ctx context.Context, deployment *Deployment, return false } clusterPodNames := DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + for _, podName := range clusterPodNames { if strings.Contains(podName, "-indexer-") { if _, present := clusterManagerBundleStatus[podName]; present { @@ -1077,16 +1056,14 @@ func VerifyDeployerBundlePush(ctx context.Context, deployment *Deployment, teste if len(deployerAppPushStatus) == 0 { testenvInstance.Log.Info("Bundle push not complete on all pods") DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return false } for appName, val := range deployerAppPushStatus { if val < replicas { testenvInstance.Log.Info("Bundle push not complete on all pods for", "AppName", appName) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + return false } } @@ -1102,6 +1079,9 @@ func VerifyNoPodReset(ctx context.Context, deployment *Deployment, testenvInstan // Get current Age on all splunk pods and compare with previous currentSplunkPodAge := GetPodsStartTime(ns) for podName, currentpodAge := range currentSplunkPodAge { + if strings.Contains(podName, 
"monitoring-console") { + continue + } // Only compare if the pod was present in previous pod iteration testenvInstance.Log.Info("Checking Pod reset for Pod Name", "PodName", podName, "Current Pod Age", currentpodAge) if _, ok := podStartTimeMap[podName]; ok {