From bed64018335241b4ca59fe03829295eb170a14af Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 14 Jun 2023 14:28:44 -0700 Subject: [PATCH 01/85] Added changeAnnotation method --- pkg/splunk/enterprise/licensemanager.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 83a1048dd..79f3bad35 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -26,6 +26,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -222,3 +223,7 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } + +func changeAnnotations(ctx context.Context, c client.Client, meta metav1.ObjectMeta) error { + return nil +} From 7de6f36459a187208acc142d6a5e07452761da99 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 15 Jun 2023 10:28:17 -0700 Subject: [PATCH 02/85] Refined changeClusterManagerAnnotations --- pkg/splunk/enterprise/licensemanager.go | 60 ++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 79f3bad35..b28bbdeac 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -26,7 +26,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -179,6 +179,11 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, if !result.Requeue { result.RequeueAfter = 0 } + + err = changeClusterManagerAnnotations(ctx, client, cr) + if err != nil { + return result, err + } return result, nil } @@ -224,6 +229,57 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } -func changeAnnotations(ctx context.Context, c client.Client, meta metav1.ObjectMeta) error { +// func checkClusterManagerUpdate(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (bool, error) { + +// namespacedName := types.NamespacedName{ +// Namespace: cr.GetNamespace(), +// Name: cr.Spec.ClusterManagerRef.Name, +// } +// clusterManagerInstance := &enterpriseApi.ClusterManager{} +// err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) +// if err != nil && k8serrors.IsNotFound(err) { +// return false, nil +// } +// if clusterManagerInstance.Spec.Image != clusterManagerInstance.Spec.Image { +// return true, nil +// } + +// return true, err + +// } + +// changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong. 
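+// A hedged illustration of the intended effect (assumed from this diff alone, not verified against
+// the rest of the operator): after a successful LicenseManager reconcile, the referenced
+// ClusterManager carries an annotation such as
+//
+//	metadata:
+//	  annotations:
+//	    checkUpdateImage: splunk/splunk:9.0.5
+//
+// and that metadata change is what re-queues the ClusterManager for reconciliation.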
+func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + clusterManagerInstance := &enterpriseApi.ClusterManager{} + err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := clusterManagerInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image + + clusterManagerInstance.SetAnnotations(annotations) + err = client.Update(ctx, clusterManagerInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + return nil + } From f4453a178703e3196095b9eb6c64836cc47d2c4b Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Thu, 15 Jun 2023 15:05:56 -0700 Subject: [PATCH 03/85] test case for upgrade scenario --- kuttl/kuttl-test-helm-upgrade.yaml | 10 + .../upgrade/c3-with-operator/00-assert.yaml | 9 + .../c3-with-operator/00-install-c3.yaml | 6 + .../upgrade/c3-with-operator/01-assert.yaml | 100 +++++++++ .../upgrade/c3-with-operator/02-assert.yaml | 17 ++ .../upgrade/c3-with-operator/03-assert.yaml | 17 ++ .../03-upgrade-splunk-image.yaml | 6 + .../upgrade/c3-with-operator/04-assert.yaml | 196 ++++++++++++++++++ .../upgrade/c3-with-operator/c3_config.yaml | 50 +++++ 9 files changed, 411 insertions(+) create mode 100644 kuttl/kuttl-test-helm-upgrade.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/00-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/01-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/02-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/03-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/04-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/c3_config.yaml diff --git a/kuttl/kuttl-test-helm-upgrade.yaml b/kuttl/kuttl-test-helm-upgrade.yaml new file mode 100644 index 000000000..d8ecc7336 --- /dev/null +++ b/kuttl/kuttl-test-helm-upgrade.yaml @@ -0,0 +1,10 @@ +# Entrypoint for helm automation +apiVersion: kuttl.dev/v1beta1 +kind: TestSuite +testDirs: +- ./kuttl/tests/upgrade +parallel: 3 +timeout: 5000 +startKIND: false +artifactsDir: kuttl-artifacts +kindNodeCache: false diff --git a/kuttl/tests/upgrade/c3-with-operator/00-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/00-assert.yaml new file mode 100644 index 000000000..142b71272 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/00-assert.yaml @@ -0,0 +1,9 @@ +--- +# assert for splunk operator deployment to be ready +apiVersion: apps/v1 +kind: Deployment +metadata: + name: splunk-operator-controller-manager +status: + readyReplicas: 1 + availableReplicas: 1 diff --git a/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml b/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml new file mode 100644 index 000000000..d5a3330d1 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: 
kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm install splunk-c3 $HELM_REPO_PATH/splunk-enterprise -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_IMAGE} + namespaced: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml new file mode 100644 index 000000000..dce36af8b --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml @@ -0,0 +1,100 @@ +--- +# assert for splunk operator pod to be ready +apiVersion: apps/v1 +kind: Deployment +metadata: + name: splunk-operator-controller-manager +status: + readyReplicas: 1 + availableReplicas: 1 + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: idxc +status: + phase: Ready + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-idxc-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-idxc-indexer-secret-v1 + +--- +# assert for SearchHeadCluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc +status: + phase: Ready + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-deployer-secret-v1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-search-head-secret-v1 + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-search-head +status: + replicas: 3 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-deployer +status: + replicas: 1 diff --git a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml new file mode 100644 index 000000000..84b4ee495 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml @@ -0,0 +1,17 @@ +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: idxc +status: + phase: Ready + +--- +# check for stateful sets and replicas updated +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-idxc-indexer +status: + replicas: 4 diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml new file mode 100644 index 000000000..84b4ee495 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml @@ -0,0 +1,17 @@ +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: idxc +status: + phase: Ready + +--- +# check for stateful sets and replicas updated +apiVersion: apps/v1 +kind: StatefulSet +metadata: + 
name: splunk-idxc-indexer +status: + replicas: 4 diff --git a/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml b/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml new file mode 100644 index 000000000..a11eefac7 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm upgrade splunk-c3 $HELM_REPO_PATH/splunk-enterprise --reuse-values -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE} + namespaced: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml new file mode 100644 index 000000000..9938285c4 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml @@ -0,0 +1,196 @@ +--- +# assert for splunk operator pod to be ready +apiVersion: apps/v1 +kind: Deployment +metadata: + name: splunk-operator-controller-manager +status: + readyReplicas: 1 + availableReplicas: 1 + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: idxc +status: + phase: Ready + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-idxc-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-idxc-indexer-secret-v1 + +--- +# assert for SearchHeadCluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc +status: + phase: Ready + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-deployer-secret-v1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-search-head-secret-v1 + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-search-head +status: + replicas: 3 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-deployer +status: + replicas: 1 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-idxc-indexer-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-idxc-indexer-1 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-idxc-indexer-2 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-cm-cluster-manager-0 +status: + containerStatuses: + - 
image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-search-head-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-search-head-1 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-search-head-2 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-deployer-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml b/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml new file mode 100644 index 000000000..fd00ad06d --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml @@ -0,0 +1,50 @@ +splunk-operator: + enabled: true + splunkOperator: + clusterWideAccess: false + +sva: + c3: + enabled: true + + clusterManager: + name: cm + + indexerClusters: + - name: idxc + + searchHeadClusters: + - name: shc + + +indexerCluster: + enabled: true + + additionalLabels: + label: "true" + + additionalAnnotations: + annotation: "true" + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + + serviceTemplate: + spec: + type: LoadBalancer + +clusterManager: + enabled: true + + additionalLabels: + label: "true" + + additionalAnnotations: + annotation: "true" + +searchHeadCluster: + enabled: true + + additionalLabels: + label: "true" + + additionalAnnotations: + annotation: "true" From 32c385daec524b058f55ca16e4cea1dde8d6ea66 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 21 Jun 2023 10:31:37 -0700 Subject: [PATCH 04/85] Modified kuttl cases --- Makefile | 2 +- kuttl/kuttl-test-helm-upgrade.yaml | 2 +- .../c3-with-operator/00-install-c3.yaml | 2 +- .../upgrade/c3-with-operator/01-assert.yaml | 93 +-------- ...mage.yaml => 01-upgrade-splunk-image.yaml} | 0 .../upgrade/c3-with-operator/02-assert.yaml | 17 -- .../upgrade/c3-with-operator/03-assert.yaml | 17 -- .../upgrade/c3-with-operator/04-assert.yaml | 196 ------------------ pkg/splunk/enterprise/licensemanager.go | 19 -- 9 files changed, 4 insertions(+), 344 deletions(-) rename kuttl/tests/upgrade/c3-with-operator/{03-upgrade-splunk-image.yaml => 01-upgrade-splunk-image.yaml} (100%) delete mode 100644 kuttl/tests/upgrade/c3-with-operator/02-assert.yaml delete mode 100644 kuttl/tests/upgrade/c3-with-operator/03-assert.yaml delete mode 100644 kuttl/tests/upgrade/c3-with-operator/04-assert.yaml diff --git a/Makefile b/Makefile index dd59513ae..aef47f310 100644 --- a/Makefile +++ b/Makefile @@ -137,7 +137,7 @@ build: setup/ginkgo manifests generate fmt vet ## Build manager binary. run: manifests generate fmt vet ## Run a controller from your host. go run ./main.go -docker-build: test ## Build docker image with the manager. +docker-build: #test ## Build docker image with the manager. docker build -t ${IMG} . docker-push: ## Push docker image with the manager. 
diff --git a/kuttl/kuttl-test-helm-upgrade.yaml b/kuttl/kuttl-test-helm-upgrade.yaml index d8ecc7336..a152a8423 100644 --- a/kuttl/kuttl-test-helm-upgrade.yaml +++ b/kuttl/kuttl-test-helm-upgrade.yaml @@ -4,7 +4,7 @@ kind: TestSuite testDirs: - ./kuttl/tests/upgrade parallel: 3 -timeout: 5000 +timeout: 500 startKIND: false artifactsDir: kuttl-artifacts kindNodeCache: false diff --git a/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml b/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml index d5a3330d1..a10c31557 100644 --- a/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/00-install-c3.yaml @@ -2,5 +2,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: helm install splunk-c3 $HELM_REPO_PATH/splunk-enterprise -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_IMAGE} + - command: helm install splunk-c3 $HELM_REPO_PATH/splunk-enterprise -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_IMAGE} --namespace ${NAMESPACE} namespaced: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml index dce36af8b..4b09ebf54 100644 --- a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml @@ -6,95 +6,4 @@ metadata: name: splunk-operator-controller-manager status: readyReplicas: 1 - availableReplicas: 1 - ---- -# assert for cluster manager custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: ClusterManager -metadata: - name: cm -status: - phase: Ready - ---- -# check if stateful sets are created -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-cm-cluster-manager -status: - replicas: 1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-cm-cluster-manager-secret-v1 - ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: idxc -status: - phase: Ready - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-idxc-indexer -status: - replicas: 3 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-idxc-indexer-secret-v1 - ---- -# assert for SearchHeadCluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: SearchHeadCluster -metadata: - name: shc -status: - phase: Ready - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-deployer-secret-v1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-search-head-secret-v1 - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-search-head -status: - replicas: 3 - ---- -# check for statefull set -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-deployer -status: - replicas: 1 + availableReplicas: 1 \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml b/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml similarity index 100% rename from 
kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml rename to kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml diff --git a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml deleted file mode 100644 index 84b4ee495..000000000 --- a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: idxc -status: - phase: Ready - ---- -# check for stateful sets and replicas updated -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-idxc-indexer -status: - replicas: 4 diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml deleted file mode 100644 index 84b4ee495..000000000 --- a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: idxc -status: - phase: Ready - ---- -# check for stateful sets and replicas updated -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-idxc-indexer -status: - replicas: 4 diff --git a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml deleted file mode 100644 index 9938285c4..000000000 --- a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml +++ /dev/null @@ -1,196 +0,0 @@ ---- -# assert for splunk operator pod to be ready -apiVersion: apps/v1 -kind: Deployment -metadata: - name: splunk-operator-controller-manager -status: - readyReplicas: 1 - availableReplicas: 1 - ---- -# assert for cluster manager custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: ClusterManager -metadata: - name: cm -status: - phase: Ready - ---- -# check if stateful sets are created -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-cm-cluster-manager -status: - replicas: 1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-cm-cluster-manager-secret-v1 - ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: idxc -status: - phase: Ready - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-idxc-indexer -status: - replicas: 3 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-idxc-indexer-secret-v1 - ---- -# assert for SearchHeadCluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: SearchHeadCluster -metadata: - name: shc -status: - phase: Ready - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-deployer-secret-v1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-search-head-secret-v1 - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-search-head -status: - replicas: 3 - ---- -# check for statefull set -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-deployer -status: - replicas: 1 - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-idxc-indexer-0 -status: - containerStatuses: - 
- image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-idxc-indexer-1 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-idxc-indexer-2 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-cm-cluster-manager-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-search-head-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-search-head-1 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-search-head-2 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-deployer-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true \ No newline at end of file diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index b28bbdeac..06ca95316 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -229,25 +229,6 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } -// func checkClusterManagerUpdate(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (bool, error) { - -// namespacedName := types.NamespacedName{ -// Namespace: cr.GetNamespace(), -// Name: cr.Spec.ClusterManagerRef.Name, -// } -// clusterManagerInstance := &enterpriseApi.ClusterManager{} -// err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) -// if err != nil && k8serrors.IsNotFound(err) { -// return false, nil -// } -// if clusterManagerInstance.Spec.Image != clusterManagerInstance.Spec.Image { -// return true, nil -// } - -// return true, err - -// } - // changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop // on update, and returns error if something is wrong. 
func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { From 8efb4565cabd2723d1b7400c919bb5c1cf8cdd14 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 22 Jun 2023 10:59:44 -0700 Subject: [PATCH 05/85] Added kuttl tests; Updated LicenseMaster --- env.sh | 8 ++++ kuttl/kuttl-test-helm-upgrade.yaml | 2 +- .../01-upgrade-splunk-image.yaml | 2 +- .../upgrade/c3-with-operator/02-assert.yaml | 24 +++++++++++ .../upgrade/c3-with-operator/03-assert.yaml | 40 +++++++++++++++++++ .../upgrade/c3-with-operator/04-assert.yaml | 24 +++++++++++ .../c3-with-operator/05-uninstall-c3.yaml | 5 +++ pkg/splunk/enterprise/licensemaster.go | 37 +++++++++++++++++ 8 files changed, 140 insertions(+), 2 deletions(-) create mode 100755 env.sh create mode 100644 kuttl/tests/upgrade/c3-with-operator/02-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/03-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/04-assert.yaml create mode 100644 kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml diff --git a/env.sh b/env.sh new file mode 100755 index 000000000..5a20de2e2 --- /dev/null +++ b/env.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +export NAMESPACE=test +export HELM_REPO_PATH=../../../../helm-chart +export KUTTL_SPLUNK_OPERATOR_IMAGE=docker.io/tgarg1701/splunk-operator:2.4.0 +export KUTTL_SPLUNK_ENTERPRISE_IMAGE=docker.io/splunk/splunk:9.0.3-a2 +export KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE=docker.io/splunk/splunk:9.0.5 +export AWS_DEFAULT_REGION=us-west-2 diff --git a/kuttl/kuttl-test-helm-upgrade.yaml b/kuttl/kuttl-test-helm-upgrade.yaml index a152a8423..d8ecc7336 100644 --- a/kuttl/kuttl-test-helm-upgrade.yaml +++ b/kuttl/kuttl-test-helm-upgrade.yaml @@ -4,7 +4,7 @@ kind: TestSuite testDirs: - ./kuttl/tests/upgrade parallel: 3 -timeout: 500 +timeout: 5000 startKIND: false artifactsDir: kuttl-artifacts kindNodeCache: false diff --git a/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml b/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml index a11eefac7..f5689caa5 100644 --- a/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/01-upgrade-splunk-image.yaml @@ -2,5 +2,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: helm upgrade splunk-c3 $HELM_REPO_PATH/splunk-enterprise --reuse-values -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE} + - command: helm upgrade splunk-c3 $HELM_REPO_PATH/splunk-enterprise --reuse-values -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE} --namespace ${NAMESPACE} namespaced: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml new file mode 100644 index 000000000..731366343 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml @@ -0,0 +1,24 @@ +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are 
created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml new file mode 100644 index 000000000..c3c560798 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml @@ -0,0 +1,40 @@ +--- +# assert for SearchHeadCluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc +status: + phase: Ready + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-deployer-secret-v1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-search-head-secret-v1 + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-search-head +status: + replicas: 3 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-deployer +status: + replicas: 1 \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml new file mode 100644 index 000000000..4d5aadaf4 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml @@ -0,0 +1,24 @@ +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: idxc +status: + phase: Ready + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-idxc-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-idxc-indexer-secret-v1 \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml new file mode 100644 index 000000000..0a24c9a2d --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm uninstall splunk-c3 -n --namespace ${NAMESPACE} + namespaced: true diff --git a/pkg/splunk/enterprise/licensemaster.go b/pkg/splunk/enterprise/licensemaster.go index 8ff920be8..3c3506886 100644 --- a/pkg/splunk/enterprise/licensemaster.go +++ b/pkg/splunk/enterprise/licensemaster.go @@ -25,6 +25,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -225,3 +226,39 @@ func getLicenseMasterList(ctx context.Context, c splcommon.ControllerClient, cr return numOfObjects, nil } + +// changeClusterMasterAnnotations updates the checkUpdateImage field of the CLuster Master Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong. 
+func changeClusterMasterAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApiV3.LicenseMaster) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + clusterMasterInstance := &enterpriseApiV3.ClusterMaster{} + err := client.Get(context.TODO(), namespacedName, clusterMasterInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := clusterMasterInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == clusterMasterInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = clusterMasterInstance.Spec.Image + + clusterMasterInstance.SetAnnotations(annotations) + err = client.Update(ctx, clusterMasterInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + + return nil + +} From 3c75c998b8aa555156e6df3323d625b61b3b9bef Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 22 Jun 2023 14:53:38 -0700 Subject: [PATCH 06/85] Fixed uninstall kuttl test --- kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml index 0a24c9a2d..abb75c68d 100644 --- a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml @@ -1,5 +1,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: helm uninstall splunk-c3 -n --namespace ${NAMESPACE} + - command: helm uninstall splunk-c3--namespace ${NAMESPACE} namespaced: true From ee474fc417ecb984454b6e6b32223b5ebf5466ba Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 22 Jun 2023 16:40:45 -0700 Subject: [PATCH 07/85] Fixed unit test --- pkg/splunk/enterprise/clustermanager.go | 35 ++++++++++++++++++ pkg/splunk/enterprise/licensemanager.go | 37 -------------------- pkg/splunk/enterprise/licensemanager_test.go | 14 +++++++- 3 files changed, 48 insertions(+), 38 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index d45206475..0967620bb 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -31,6 +31,7 @@ import ( splutil "github.com/splunk/splunk-operator/pkg/splunk/util" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -434,3 +435,37 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, } return extraEnv, err } + +func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + clusterManagerInstance := &enterpriseApi.ClusterManager{} + err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := clusterManagerInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == 
clusterManagerInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image + + clusterManagerInstance.SetAnnotations(annotations) + err = client.Update(ctx, clusterManagerInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + + return nil + +} diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 06ca95316..828a169d5 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -26,7 +26,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -228,39 +227,3 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } - -// changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop -// on update, and returns error if something is wrong. -func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.ClusterManagerRef.Name, - } - clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil - } - annotations := clusterManagerInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { - return nil - } - } - - annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image - - clusterManagerInstance.SetAnnotations(annotations) - err = client.Update(ctx, clusterManagerInstance) - if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) - return err - } - - return nil - -} diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index dbdaf153c..a476e202d 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -57,6 +57,7 @@ func TestApplyLicenseManager(t *testing.T) { {MetaName: "*v1.Secret-test-splunk-stack1-license-manager-secret-v1"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-license-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-license-manager"}, + {MetaName: "*v4.ClusterManager-test-"}, {MetaName: "*v4.LicenseManager-test-stack1"}, {MetaName: "*v4.LicenseManager-test-stack1"}, } @@ -73,7 +74,7 @@ func TestApplyLicenseManager(t *testing.T) { {ListOpts: listOpts}} createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[6], funcCalls[8], funcCalls[10]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}} - updateFuncCalls := []spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[9], funcCalls[11], funcCalls[12]} + updateFuncCalls := []spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[9], funcCalls[11], funcCalls[12], funcCalls[13]} updateCalls := 
map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[4]}, "List": {listmockCall[0]}} current := enterpriseApi.LicenseManager{ TypeMeta: metav1.TypeMeta{ @@ -719,6 +720,17 @@ func TestLicenseManagerList(t *testing.T) { } } +func TestChangeClusterManagerAnnotations(t *testing.T) { + ctx := context.TODO() + lm := enterpriseApi.LicenseManager{} + + client := spltest.NewMockClient() + err := changeClusterManagerAnnotations(ctx, client, &lm) + if err != nil { + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + } +} + func TestLicenseManagerWithReadyState(t *testing.T) { mclient := &spltest.MockHTTPClient{} From 35a2eb06ed71fb2e9a53cdd0cd253f4d69a94aee Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 23 Jun 2023 14:40:57 -0700 Subject: [PATCH 08/85] Removed changeAnnotation from licenseMaster --- pkg/splunk/enterprise/clustermanager.go | 2 ++ pkg/splunk/enterprise/licensemaster.go | 37 ------------------------- 2 files changed, 2 insertions(+), 37 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 0967620bb..5d6dd6e1d 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -436,6 +436,8 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, return extraEnv, err } +// changeClusterMasterAnnotations updates the checkUpdateImage field of the CLuster Master Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong. func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { namespacedName := types.NamespacedName{ diff --git a/pkg/splunk/enterprise/licensemaster.go b/pkg/splunk/enterprise/licensemaster.go index 3c3506886..8ff920be8 100644 --- a/pkg/splunk/enterprise/licensemaster.go +++ b/pkg/splunk/enterprise/licensemaster.go @@ -25,7 +25,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -226,39 +225,3 @@ func getLicenseMasterList(ctx context.Context, c splcommon.ControllerClient, cr return numOfObjects, nil } - -// changeClusterMasterAnnotations updates the checkUpdateImage field of the CLuster Master Annotations to trigger the reconcile loop -// on update, and returns error if something is wrong. 
-func changeClusterMasterAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApiV3.LicenseMaster) error { - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.ClusterManagerRef.Name, - } - clusterMasterInstance := &enterpriseApiV3.ClusterMaster{} - err := client.Get(context.TODO(), namespacedName, clusterMasterInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil - } - annotations := clusterMasterInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == clusterMasterInstance.Spec.Image { - return nil - } - } - - annotations["checkUpdateImage"] = clusterMasterInstance.Spec.Image - - clusterMasterInstance.SetAnnotations(annotations) - err = client.Update(ctx, clusterMasterInstance) - if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) - return err - } - - return nil - -} From d9540fd7bf62dd9abbdb5ae36fa10c5c2ac2cf55 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 23 Jun 2023 15:11:11 -0700 Subject: [PATCH 09/85] Added branch in int-tests --- .github/workflows/helm-test-workflow.yml | 1 + .github/workflows/int-test-workflow.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index e68dc44d7..d2e9b7aff 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -2,6 +2,7 @@ name: Helm Test WorkFlow on: push: branches: + - CSPL-2094-LM-upgrade-strategy - develop - main jobs: diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 3dd4eed22..25a85105a 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -2,6 +2,7 @@ name: Integration Test WorkFlow on: push: branches: + - CSPL-2094-LM-upgrade-strategy - develop - main - feature** From 5a17a5ffe71d54d75d5c97e2e53fddde0b98e7e3 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 26 Jun 2023 16:42:05 -0700 Subject: [PATCH 10/85] Completed code coverage tests --- pkg/splunk/enterprise/clustermanager.go | 12 +++- pkg/splunk/enterprise/clustermanager_test.go | 61 ++++++++++++++++++++ pkg/splunk/enterprise/licensemanager_test.go | 11 ---- 3 files changed, 70 insertions(+), 14 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 5d6dd6e1d..7d9db1a62 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -436,19 +436,24 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, return extraEnv, err } -// changeClusterMasterAnnotations updates the checkUpdateImage field of the CLuster Master Annotations to trigger the reconcile loop +// changeClusterMasterAnnotations updates the checkUpdateImage field of the Cluster Master Annotations to trigger the reconcile loop // on update, and returns error if something is wrong. 
func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: cr.Spec.ClusterManagerRef.Name, } clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) + err := client.Get(ctx, namespacedName, clusterManagerInstance) if err != nil && k8serrors.IsNotFound(err) { return nil } + + // fetch and check the annotation fields of the ClusterManager annotations := clusterManagerInstance.GetAnnotations() if annotations == nil { annotations = map[string]string{} @@ -459,12 +464,13 @@ func changeClusterManagerAnnotations(ctx context.Context, client splcommon.Contr } } + // create/update the checkUpdateImage annotation field annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image clusterManagerInstance.SetAnnotations(annotations) err = client.Update(ctx, clusterManagerInstance) if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) + scopedLog.Error(err, "ClusterManager types updated after changing annotations failed with", "error", err) return err } diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 190e23f75..6c994f00b 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1384,6 +1384,67 @@ func TestCheckIfsmartstoreConfigMapUpdatedToPod(t *testing.T) { mockPodExecClient.CheckPodExecCommands(t, "CheckIfsmartstoreConfigMapUpdatedToPod") } +func TestChangeClusterManagerAnnotations(t *testing.T) { + ctx := context.TODO() + lm := &enterpriseApi.LicenseManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-lm", + Namespace: "default", + }, + Spec: enterpriseApi.LicenseManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: "test-cm", + }, + }, + }, + } + cm := &enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "default", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + cm.Spec.Image = "splunk/splunk:latest" + + client := spltest.NewMockClient() + + client.Create(ctx, lm) + client.Create(ctx, cm) + + err := changeClusterManagerAnnotations(ctx, client, lm) + if err != nil { + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + } + clusterManager := &enterpriseApi.ClusterManager{} + namespacedName := types.NamespacedName{ + Name: cm.Name, + Namespace: cm.Namespace, + } + err = client.Get(ctx, namespacedName, clusterManager) + if err != nil { + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + } + + annotations := clusterManager.GetAnnotations() + if annotations["checkUpdateImage"] != cm.Spec.Image { + t.Errorf("changeClusterManagerAnnotations should have set the checkUpdateImage annotation field to the current image") + } + +} + func TestClusterManagerWitReadyState(t *testing.T) { // create directory for app framework newpath := filepath.Join("/tmp", 
"appframework") diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index a476e202d..8c7d597c9 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -720,17 +720,6 @@ func TestLicenseManagerList(t *testing.T) { } } -func TestChangeClusterManagerAnnotations(t *testing.T) { - ctx := context.TODO() - lm := enterpriseApi.LicenseManager{} - - client := spltest.NewMockClient() - err := changeClusterManagerAnnotations(ctx, client, &lm) - if err != nil { - t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) - } -} - func TestLicenseManagerWithReadyState(t *testing.T) { mclient := &spltest.MockHTTPClient{} From 706dc96aa9c9a323fe333a940bc89ec6ed534605 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 15 Jun 2023 13:22:58 -0700 Subject: [PATCH 11/85] Added upgradeScenario and related methods for CM --- pkg/splunk/enterprise/clustermanager.go | 41 +++++++++++++++++++++++++ pkg/splunk/enterprise/licensemanager.go | 9 ++++++ 2 files changed, 50 insertions(+) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 7d9db1a62..dad71072b 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -178,6 +178,14 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, return result, err } + checkUpgradeReady, err := upgradeScenario(ctx, client, cr) + if err != nil { + return result, err + } + if !checkUpgradeReady { + return result, err + } + clusterManagerManager := splctrl.DefaultStatefulSetPodManager{} phase, err := clusterManagerManager.Update(ctx, client, statefulSet, 1) if err != nil { @@ -475,5 +483,38 @@ func changeClusterManagerAnnotations(ctx context.Context, client splcommon.Contr } return nil +} + +func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { + + licenseManagerRef := cr.Spec.LicenseManagerRef + namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: licenseManagerRef.Name} + + // create new object + licenseManager := &enterpriseApi.LicenseManager{} + + // get the license manager referred in cluster manager + err := c.Get(ctx, namespacedName, licenseManager) + if err != nil { + return false, err + } + + lmImage, err := getLicenseManagerCurrentImage(ctx, c, licenseManager) + cmImage, err := getClusterManagerCurrentImage(ctx, c, cr) + + if cr.Spec.Image != cmImage && lmImage == cr.Spec.Image && licenseManager.Status.Phase == enterpriseApi.PhaseReady { + return true, nil + } + + return false, nil +} + +func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (string, error) { + statefulSet, err := getClusterManagerStatefulSet(ctx, c, cr) + if err != nil { + return "", err + } + image := statefulSet.Spec.Template.Spec.InitContainers[0].Image + return image, nil } diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 828a169d5..a274e4914 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -227,3 +227,12 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } +func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (string, error) { + statefulSet, err := 
getLicenseManagerStatefulSet(ctx, c, cr) + if err != nil { + return "", err + } + image := statefulSet.Spec.Template.Spec.InitContainers[0].Image + + return image, nil +} From c5af670586bf74ce306ffcbbeb4579d1511d9c7b Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 15 Jun 2023 15:43:57 -0700 Subject: [PATCH 12/85] Added label selectors to get Current Image --- pkg/splunk/enterprise/clustermanager.go | 40 +++++++++++++++++++++++-- pkg/splunk/enterprise/licensemanager.go | 38 +++++++++++++++++++++-- 2 files changed, 72 insertions(+), 6 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index dad71072b..53494167e 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -19,6 +19,7 @@ import ( "context" "fmt" "reflect" + "strings" "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -32,7 +33,10 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -510,11 +514,41 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente } func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (string, error) { - statefulSet, err := getClusterManagerStatefulSet(ctx, c, cr) + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) if err != nil { return "", err } - image := statefulSet.Spec.Template.Spec.InitContainers[0].Image + labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) + if err != nil { + return "", err + } + + statefulsetPods := &corev1.PodList{} + opts := []rclient.ListOption{ + rclient.InNamespace(cr.GetNamespace()), + rclient.MatchingLabelsSelector{Selector: labelSelector}, + } + + err = c.List(ctx, statefulsetPods, opts...) 
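+	// if listing the StatefulSet's pods fails, return the error to the caller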
+ if err != nil { + return "", err + } + + for _, v := range statefulsetPods.Items { + for _, container := range v.Spec.Containers { + if strings.Contains(container.Name, "splunk") { + image := container.Image + return image, nil + } + + } + } - return image, nil + return "", nil } diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index a274e4914..b81241892 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -19,6 +19,7 @@ import ( "context" "fmt" "reflect" + "strings" "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -26,8 +27,10 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -228,11 +231,40 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (string, error) { - statefulSet, err := getLicenseManagerStatefulSet(ctx, c, cr) + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) + if err != nil { + return "", err + } + labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) if err != nil { return "", err } - image := statefulSet.Spec.Template.Spec.InitContainers[0].Image - return image, nil + statefulsetPods := &corev1.PodList{} + opts := []rclient.ListOption{ + rclient.InNamespace(cr.GetNamespace()), + rclient.MatchingLabelsSelector{Selector: labelSelector}, + } + + err = c.List(ctx, statefulsetPods, opts...) 
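+	// bail out if the pod listing fails; the running image cannot be determined without it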
+ if err != nil { + return "", err + } + + for _, v := range statefulsetPods.Items { + for _, container := range v.Spec.Containers { + if strings.Contains(container.Name, "splunk") { + image := container.Image + return image, nil + } + + } + } + + return "", nil } From 237ecdf7d3e3dd7e57ea0b855b4091833f0ad0ba Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 15 Jun 2023 16:24:18 -0700 Subject: [PATCH 13/85] Changed pod.Spec to pod.Status --- pkg/splunk/enterprise/clustermanager.go | 2 +- pkg/splunk/enterprise/licensemanager.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 53494167e..44fb43f60 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -541,7 +541,7 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl } for _, v := range statefulsetPods.Items { - for _, container := range v.Spec.Containers { + for _, container := range v.Status.ContainerStatuses { if strings.Contains(container.Name, "splunk") { image := container.Image return image, nil diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index b81241892..3f99e56de 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -257,7 +257,7 @@ func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl } for _, v := range statefulsetPods.Items { - for _, container := range v.Spec.Containers { + for _, container := range v.Status.ContainerStatuses { if strings.Contains(container.Name, "splunk") { image := container.Image return image, nil From 5966d8726df92f7b1652e4fe8d0b8b9132149078 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 21 Jun 2023 12:17:17 -0700 Subject: [PATCH 14/85] Added changeAnnotations for MC --- pkg/splunk/enterprise/clustermanager.go | 5 +++ pkg/splunk/enterprise/monitoringconsole.go | 36 ++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 44fb43f60..6cb60e281 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -506,6 +506,7 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente lmImage, err := getLicenseManagerCurrentImage(ctx, c, licenseManager) cmImage, err := getClusterManagerCurrentImage(ctx, c, cr) + // check conditions for upgrade if cr.Spec.Image != cmImage && lmImage == cr.Spec.Image && licenseManager.Status.Phase == enterpriseApi.PhaseReady { return true, nil } @@ -513,6 +514,8 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente return false, nil } +// getClusterManagerCurrentImage gets the image of the pods of the clusterManager before any upgrade takes place, +// returns the image, and error if something goes wring func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (string, error) { namespacedName := types.NamespacedName{ @@ -529,6 +532,7 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl return "", err } + // get a list of all pods in the namespace with matching labels as the statefulset statefulsetPods := &corev1.PodList{} opts := []rclient.ListOption{ rclient.InNamespace(cr.GetNamespace()), @@ -540,6 +544,7 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl 
return "", err } + // find the container with the phrase 'splunk' in it for _, v := range statefulsetPods.Items { for _, container := range v.Status.ContainerStatuses { if strings.Contains(container.Name, "splunk") { diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 8979877de..a1fb3e2b9 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -355,3 +355,39 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor } } } + +// changeMonitoringConsoleAnnotations updates the checkUpdateImage field of the Monitoring Console Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong. +func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.MonitoringConsoleRef.Name, + } + monitoringConsoleInstance := &enterpriseApi.MonitoringConsole{} + err := client.Get(context.TODO(), namespacedName, monitoringConsoleInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := monitoringConsoleInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == monitoringConsoleInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = monitoringConsoleInstance.Spec.Image + + monitoringConsoleInstance.SetAnnotations(annotations) + err = client.Update(ctx, monitoringConsoleInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + + return nil + +} From b827bd9fe73d4ecacc6ebfd459fa0540821e4dee Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 22 Jun 2023 14:27:00 -0700 Subject: [PATCH 15/85] Added kuttl test cases --- env.sh | 7 ------- kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 | 5 +++++ 2 files changed, 5 insertions(+), 7 deletions(-) mode change 100755 => 100644 env.sh create mode 100644 kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 diff --git a/env.sh b/env.sh old mode 100755 new mode 100644 index 5a20de2e2..f1f641af1 --- a/env.sh +++ b/env.sh @@ -1,8 +1 @@ #!/usr/bin/env bash - -export NAMESPACE=test -export HELM_REPO_PATH=../../../../helm-chart -export KUTTL_SPLUNK_OPERATOR_IMAGE=docker.io/tgarg1701/splunk-operator:2.4.0 -export KUTTL_SPLUNK_ENTERPRISE_IMAGE=docker.io/splunk/splunk:9.0.3-a2 -export KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE=docker.io/splunk/splunk:9.0.5 -export AWS_DEFAULT_REGION=us-west-2 diff --git a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 new file mode 100644 index 000000000..95f8297ca --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm uninstall splunk-c3 --namespace ${NAMESPACE} + namespaced: true \ No newline at end of file From a1159a80f5634b45ff56084719ac6e82c0329f31 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 23 Jun 2023 11:03:00 -0700 Subject: [PATCH 16/85] Fixed unit test --- pkg/splunk/enterprise/clustermanager.go | 8 +++++++- pkg/splunk/enterprise/clustermanager_test.go | 2 ++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 
6cb60e281..6fffb032b 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -500,11 +500,17 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente // get the license manager referred in cluster manager err := c.Get(ctx, namespacedName, licenseManager) if err != nil { - return false, err + return true, nil } lmImage, err := getLicenseManagerCurrentImage(ctx, c, licenseManager) + if err != nil { + return false, err + } cmImage, err := getClusterManagerCurrentImage(ctx, c, cr) + if err != nil { + return false, err + } // check conditions for upgrade if cr.Spec.Image != cmImage && lmImage == cr.Spec.Image && licenseManager.Status.Phase == enterpriseApi.PhaseReady { diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 6c994f00b..2bb8d83ab 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -66,6 +66,7 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, {MetaName: "*v4.ClusterManager-test-stack1"}, @@ -81,6 +82,7 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, From 3aa032b210e55268189ba91caa10c135e6f5850a Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 23 Jun 2023 11:23:02 -0700 Subject: [PATCH 17/85] Fixed SmartStore unit test --- pkg/splunk/enterprise/clustermanager_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 2bb8d83ab..4ff101c9c 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -498,6 +498,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.Pod-test-splunk-stack1-cluster-manager-0"}, {MetaName: "*v1.StatefulSet-test-splunk-test-monitoring-console"}, @@ -519,6 +520,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, From f0e73c8e8bac371a6401c5c50325b7fa9df7503e 
Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 26 Jun 2023 13:32:02 -0700 Subject: [PATCH 18/85] Added code coverage test --- pkg/splunk/enterprise/clustermanager_test.go | 146 ++++++++++++++----- 1 file changed, 112 insertions(+), 34 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 4ff101c9c..4c5c1c15f 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1388,65 +1388,143 @@ func TestCheckIfsmartstoreConfigMapUpdatedToPod(t *testing.T) { mockPodExecClient.CheckPodExecCommands(t, "CheckIfsmartstoreConfigMapUpdatedToPod") } -func TestChangeClusterManagerAnnotations(t *testing.T) { +func TestUpgradeScenario(t *testing.T) { + ctx := context.TODO() - lm := &enterpriseApi.LicenseManager{ + cm := enterpriseApi.ClusterManager{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-lm", - Namespace: "default", + Name: "stack1", + Namespace: "test", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "clustermanager", + }, + } + lm := enterpriseApi.LicenseManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "LicenseManager", + }, + } + fmt.Println(ctx, cm, lm) + +} + +func TestGetClusterManagerCurrentImage(t *testing.T) { + + ctx := context.TODO() + current := enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", }, - Spec: enterpriseApi.LicenseManagerSpec{ + Spec: enterpriseApi.ClusterManagerSpec{ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Spec: enterpriseApi.Spec{ ImagePullPolicy: "Always", }, Volumes: []corev1.Volume{}, - ClusterManagerRef: corev1.ObjectReference{ - Name: "test-cm", + MonitoringConsoleRef: corev1.ObjectReference{ + Name: "mcName", }, }, }, } - cm := &enterpriseApi.ClusterManager{ + replicas := int32(1) + statefulset := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "default", + Name: "splunk-test-cluster-manager", + Namespace: "test", }, - Spec: enterpriseApi.ClusterManagerSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", + Spec: appsv1.StatefulSetSpec{ + ServiceName: "splunk-test-cluster-manager-headless", + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, }, - Volumes: []corev1.Volume{}, }, + Replicas: &replicas, }, } - cm.Spec.Image = "splunk/splunk:latest" - + matchlabels := map[string]string{ + "app": "test", + "tier": "splunk", + } + statefulset.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: matchlabels, + } + service := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-cluster-manager-headless", + Namespace: "test", + }, + } + // current.Spec.Image = "splunk/test" client := spltest.NewMockClient() + err := client.Create(ctx, service) + err = client.Create(ctx, statefulset) + err = client.Create(ctx, ¤t) + _, err = ApplyClusterManager(ctx, client, ¤t) + fmt.Println(err) - client.Create(ctx, lm) - client.Create(ctx, cm) - - err := changeClusterManagerAnnotations(ctx, client, lm) - if err != nil { - t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) - } - clusterManager := &enterpriseApi.ClusterManager{} - namespacedName := types.NamespacedName{ - Name: cm.Name, - Namespace: cm.Namespace, + stpod := 
&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-cluster-manager-0", + Namespace: "test", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, } - err = client.Get(ctx, namespacedName, clusterManager) + // simulate create stateful set + err = client.Create(ctx, stpod) if err != nil { - t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() } - annotations := clusterManager.GetAnnotations() - if annotations["checkUpdateImage"] != cm.Spec.Image { - t.Errorf("changeClusterManagerAnnotations should have set the checkUpdateImage annotation field to the current image") + // update statefulset + stpod.Status.Phase = corev1.PodRunning + stpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, } - + err = client.Status().Update(ctx, stpod) + if err != nil { + t.Errorf("Unexpected update statefulset %v", err) + debug.PrintStack() + } + image, err := getClusterManagerCurrentImage(ctx, client, ¤t) + fmt.Println(image) + fmt.Println(err) } func TestClusterManagerWitReadyState(t *testing.T) { From 53f6f68833d2f809f2a37eed83a540276c76a8c0 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Tue, 27 Jun 2023 09:31:40 -0700 Subject: [PATCH 19/85] using fake client instead of mock --- pkg/splunk/enterprise/clustermanager_test.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 4c5c1c15f..f51abbe6c 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1474,7 +1474,11 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { }, } // current.Spec.Image = "splunk/test" - client := spltest.NewMockClient() + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + //client := spltest.NewMockClient() err := client.Create(ctx, service) err = client.Create(ctx, statefulset) err = client.Create(ctx, ¤t) From 8301fd997521803dcb240f89730777e076a9bff5 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Tue, 27 Jun 2023 09:42:42 -0700 Subject: [PATCH 20/85] removed creating statefulset and service --- pkg/splunk/enterprise/clustermanager_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index f51abbe6c..5d9425a8c 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1467,21 +1467,21 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { statefulset.Spec.Selector = &metav1.LabelSelector{ MatchLabels: matchlabels, } - service := &corev1.Service{ + /*service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "splunk-test-cluster-manager-headless", Namespace: "test", }, - } + } */ // current.Spec.Image = "splunk/test" builder := fake.NewClientBuilder() client := builder.Build() utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) //client := spltest.NewMockClient() - err := client.Create(ctx, service) - err = client.Create(ctx, 
statefulset) - err = client.Create(ctx, ¤t) + //err := client.Create(ctx, service) + //err = client.Create(ctx, statefulset) + err := client.Create(ctx, ¤t) _, err = ApplyClusterManager(ctx, client, ¤t) fmt.Println(err) From 8353d25e969b96ab2e4a4a000c53dba63e850298 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 26 Jun 2023 15:55:16 -0700 Subject: [PATCH 21/85] Corrected LMCurrentImage method --- pkg/splunk/enterprise/licensemanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 3f99e56de..cad24ad11 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -233,7 +233,7 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (string, error) { namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), + Name: GetSplunkStatefulsetName(SplunkLicenseManager, cr.GetName()), } statefulSet := &appsv1.StatefulSet{} err := c.Get(ctx, namespacedName, statefulSet) From dab93db40c1fa8fe6051bb8300c639561d82990f Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Tue, 27 Jun 2023 15:08:50 -0700 Subject: [PATCH 22/85] Completed Coverage tests for CM --- pkg/splunk/enterprise/clustermanager.go | 28 +++ pkg/splunk/enterprise/clustermanager_test.go | 231 ++++++++++++++++--- pkg/splunk/enterprise/licensemanager.go | 13 ++ pkg/splunk/enterprise/licensemanager_test.go | 92 ++++++++ pkg/splunk/enterprise/monitoringconsole.go | 7 +- 5 files changed, 334 insertions(+), 37 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 6fffb032b..dd569d333 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -186,6 +186,11 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, if err != nil { return result, err } + + // TODO: Right now if the CM is not ready for upgrade the reconcile loop goes into + // an infite loop and ives Time Out. We still want the other functions to run if + // a proper upgrade does not happen + if !checkUpgradeReady { return result, err } @@ -491,6 +496,10 @@ func changeClusterManagerAnnotations(ctx context.Context, client splcommon.Contr func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("upgradeScenario").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) + licenseManagerRef := cr.Spec.LicenseManagerRef namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: licenseManagerRef.Name} @@ -505,10 +514,14 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente lmImage, err := getLicenseManagerCurrentImage(ctx, c, licenseManager) if err != nil { + eventPublisher.Warning(ctx, "upgradeScenario", fmt.Sprintf("Could not get the License Manager Image. Reason %v", err)) + scopedLog.Error(err, "Unable to licenseManager current image") return false, err } cmImage, err := getClusterManagerCurrentImage(ctx, c, cr) if err != nil { + eventPublisher.Warning(ctx, "upgradeScenario", fmt.Sprintf("Could not get the Cluster Manager Image. 
Reason %v", err)) + scopedLog.Error(err, "Unable to clusterManager current image") return false, err } @@ -517,6 +530,11 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente return true, nil } + // Temporary workaround to keep the clusterManager method working only when the LM is ready + if licenseManager.Status.Phase == enterpriseApi.PhaseReady { + return true, nil + } + return false, nil } @@ -524,6 +542,10 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente // returns the image, and error if something goes wring func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (string, error) { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("getClusterManagerCurrentImage").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), @@ -531,10 +553,14 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl statefulSet := &appsv1.StatefulSet{} err := c.Get(ctx, namespacedName, statefulSet) if err != nil { + eventPublisher.Warning(ctx, "getClusterManagerCurrentImage", fmt.Sprintf("Could not get Stateful Set. Reason %v", err)) + scopedLog.Error(err, "StatefulSet types not found in namespace", "namsespace", cr.GetNamespace()) return "", err } labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) if err != nil { + eventPublisher.Warning(ctx, "getClusterManagerCurrentImage", fmt.Sprintf("Could not get labels. Reason %v", err)) + scopedLog.Error(err, "Unable to get labels") return "", err } @@ -547,6 +573,8 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl err = c.List(ctx, statefulsetPods, opts...) if err != nil { + eventPublisher.Warning(ctx, "getClusterManagerCurrentImage", fmt.Sprintf("Could not get Pod list. 
Reason %v", err)) + scopedLog.Error(err, "Pods types not found in namespace", "namsespace", cr.GetNamespace()) return "", err } diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 5d9425a8c..705823f68 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1391,32 +1391,99 @@ func TestCheckIfsmartstoreConfigMapUpdatedToPod(t *testing.T) { func TestUpgradeScenario(t *testing.T) { ctx := context.TODO() - cm := enterpriseApi.ClusterManager{ + + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + // Create License Manager + lm := enterpriseApi.LicenseManager{ ObjectMeta: metav1.ObjectMeta{ - Name: "stack1", + Name: "test", Namespace: "test", }, - TypeMeta: metav1.TypeMeta{ - Kind: "clustermanager", + Spec: enterpriseApi.LicenseManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + }, }, } - lm := enterpriseApi.LicenseManager{ + + err := client.Create(ctx, &lm) + _, err = ApplyLicenseManager(ctx, client, &lm) + if err != nil { + t.Errorf("applyLicenseManager should not have returned error; err=%v", err) + } + lm.Status.Phase = enterpriseApi.PhaseReady + err = client.Status().Update(ctx, &lm) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + // get StatefulSet labels + + namespacedName := types.NamespacedName{ + Namespace: lm.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkLicenseManager, lm.GetName()), + } + lmstatefulSet := &appsv1.StatefulSet{} + err = client.Get(ctx, namespacedName, lmstatefulSet) + if err != nil { + t.Errorf("Unexpected get statefulset %v", err) + } + labels := lmstatefulSet.Spec.Template.ObjectMeta.Labels + + // create LM pod + lmstpod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: "stack1", + Name: "splunk-test-license-manager-0", Namespace: "test", }, - TypeMeta: metav1.TypeMeta{ - Kind: "LicenseManager", + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, }, } - fmt.Println(ctx, cm, lm) - -} + lmstpod.ObjectMeta.Labels = labels + // simulate create pod + err = client.Create(ctx, lmstpod) + if err != nil { + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() + } -func TestGetClusterManagerCurrentImage(t *testing.T) { + // update pod + lmstpod.Status.Phase = corev1.PodRunning + lmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, + } + err = client.Status().Update(ctx, lmstpod) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } - ctx := context.TODO() - current := enterpriseApi.ClusterManager{ + // Create Cluster Manager + cm := enterpriseApi.ClusterManager{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "test", @@ -1425,16 +1492,21 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Spec: enterpriseApi.Spec{ ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", }, Volumes: []corev1.Volume{}, - MonitoringConsoleRef: corev1.ObjectReference{ - Name: "mcName", + LicenseManagerRef: corev1.ObjectReference{ + Name: "test", }, }, }, } replicas := 
int32(1) - statefulset := &appsv1.StatefulSet{ + labels = map[string]string{ + "app": "test", + "tier": "splunk", + } + cmstatefulset := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: "splunk-test-cluster-manager", Namespace: "test", @@ -1460,30 +1532,114 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { Replicas: &replicas, }, } - matchlabels := map[string]string{ - "app": "test", - "tier": "splunk", + cmstatefulset.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: labels, } - statefulset.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: matchlabels, + + err = client.Create(ctx, &cm) + err = client.Create(ctx, cmstatefulset) + _, err = ApplyClusterManager(ctx, client, &cm) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) } - /*service := &corev1.Service{ + + // Create CM pod + cmstpod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-cluster-manager-headless", + Name: "splunk-test-cluster-manager-0", Namespace: "test", }, - } */ - // current.Spec.Image = "splunk/test" + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + } + cmstpod.ObjectMeta.Labels = labels + // simulate create pod + err = client.Create(ctx, cmstpod) + if err != nil { + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() + } + + // update CM pod + cmstpod.Status.Phase = corev1.PodRunning + cmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, + } + err = client.Status().Update(ctx, cmstpod) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + cm.Spec.Image = "splunk2" + lmstpod.Status.ContainerStatuses[0].Image = "splunk2" + err = client.Status().Update(ctx, lmstpod) + check, err := upgradeScenario(ctx, client, &cm) + + if err != nil { + t.Errorf("Unexpected upgradeScenario error %v", err) + } + + if !check { + t.Errorf("upgradeScenario: CM should be ready for upgrade") + } + +} + +func TestGetClusterManagerCurrentImage(t *testing.T) { + + ctx := context.TODO() + current := enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + }, + }, + } builder := fake.NewClientBuilder() client := builder.Build() utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) - //client := spltest.NewMockClient() - //err := client.Create(ctx, service) - //err = client.Create(ctx, statefulset) err := client.Create(ctx, ¤t) _, err = ApplyClusterManager(ctx, client, ¤t) - fmt.Println(err) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + + namespacedName := types.NamespacedName{ + Namespace: current.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkClusterManager, current.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err = client.Get(ctx, namespacedName, statefulSet) + if err != nil { + t.Errorf("Unexpected get statefulset %v", err) + } + labels := statefulSet.Spec.Template.ObjectMeta.Labels stpod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -1505,7 +1661,8 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { }, }, } - // 
simulate create stateful set + stpod.ObjectMeta.Labels = labels + // simulate create pod err = client.Create(ctx, stpod) if err != nil { t.Errorf("Unexpected create pod failed %v", err) @@ -1523,12 +1680,18 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { } err = client.Status().Update(ctx, stpod) if err != nil { - t.Errorf("Unexpected update statefulset %v", err) + t.Errorf("Unexpected update pod %v", err) debug.PrintStack() } + image, err := getClusterManagerCurrentImage(ctx, client, ¤t) - fmt.Println(image) - fmt.Println(err) + + if err != nil { + t.Errorf("Unexpected getClusterManagerCurrentImage error %v", err) + } + if image != stpod.Status.ContainerStatuses[0].Image { + t.Errorf("getClusterManagerCurrentImage does not return the current pod image") + } } func TestClusterManagerWitReadyState(t *testing.T) { diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index cad24ad11..60d8a95a9 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -231,6 +231,11 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (string, error) { + + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("getLicenseManagerCurrentImage").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(SplunkLicenseManager, cr.GetName()), @@ -238,13 +243,18 @@ func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl statefulSet := &appsv1.StatefulSet{} err := c.Get(ctx, namespacedName, statefulSet) if err != nil { + eventPublisher.Warning(ctx, "getLicenseManagerCurrentImage", fmt.Sprintf("Could not get Stateful Set. Reason %v", err)) + scopedLog.Error(err, "StatefulSet types not found in namespace", "namsespace", cr.GetNamespace()) return "", err } labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) if err != nil { + eventPublisher.Warning(ctx, "getLicenseManagerCurrentImage", fmt.Sprintf("Could not get labels. Reason %v", err)) + scopedLog.Error(err, "Unable to get labels") return "", err } + // get a list of all pods in the namespace with matching labels as the statefulset statefulsetPods := &corev1.PodList{} opts := []rclient.ListOption{ rclient.InNamespace(cr.GetNamespace()), @@ -253,9 +263,12 @@ func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl err = c.List(ctx, statefulsetPods, opts...) if err != nil { + eventPublisher.Warning(ctx, "getLicenseManagerCurrentImage", fmt.Sprintf("Could not get Pod list. 
Reason %v", err)) + scopedLog.Error(err, "Pods types not found in namespace", "namsespace", cr.GetNamespace()) return "", err } + // find the container with the phrase 'splunk' in it for _, v := range statefulsetPods.Items { for _, container := range v.Status.ContainerStatuses { if strings.Contains(container.Name, "splunk") { diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index 8c7d597c9..25ffd6f0b 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -720,6 +720,98 @@ func TestLicenseManagerList(t *testing.T) { } } +func TestGetLicenseManagerCurrentImage(t *testing.T) { + + ctx := context.TODO() + current := enterpriseApi.LicenseManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.LicenseManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + err := client.Create(ctx, ¤t) + _, err = ApplyLicenseManager(ctx, client, ¤t) + if err != nil { + t.Errorf("applyLicenseManager should not have returned error; err=%v", err) + } + + namespacedName := types.NamespacedName{ + Namespace: current.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkLicenseManager, current.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err = client.Get(ctx, namespacedName, statefulSet) + if err != nil { + t.Errorf("Unexpected get statefulset %v", err) + } + labels := statefulSet.Spec.Template.ObjectMeta.Labels + + stpod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-license-manager-0", + Namespace: "test", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + } + stpod.ObjectMeta.Labels = labels + // simulate create pod + err = client.Create(ctx, stpod) + if err != nil { + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() + } + + // update statefulset + stpod.Status.Phase = corev1.PodRunning + stpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, + } + err = client.Status().Update(ctx, stpod) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + image, err := getLicenseManagerCurrentImage(ctx, client, ¤t) + + if err != nil { + t.Errorf("Unexpected getLicenseManagerCurrentImage error %v", err) + } + if image != stpod.Status.ContainerStatuses[0].Image { + t.Errorf("getLicenseManagerCurrentImage does not return the current pod image") + } +} + func TestLicenseManagerWithReadyState(t *testing.T) { mclient := &spltest.MockHTTPClient{} diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index a1fb3e2b9..a042a0c6f 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -358,7 +358,7 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor // changeMonitoringConsoleAnnotations updates the checkUpdateImage field of the Monitoring Console Annotations to trigger the reconcile loop // on update, and returns error if something is wrong. 
-func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { +func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error { namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), @@ -369,17 +369,18 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co if err != nil && k8serrors.IsNotFound(err) { return nil } + image, _ := getClusterManagerCurrentImage(ctx, client, cr) annotations := monitoringConsoleInstance.GetAnnotations() if annotations == nil { annotations = map[string]string{} } if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == monitoringConsoleInstance.Spec.Image { + if annotations["checkUpdateImage"] == image { return nil } } - annotations["checkUpdateImage"] = monitoringConsoleInstance.Spec.Image + annotations["checkUpdateImage"] = image monitoringConsoleInstance.SetAnnotations(annotations) err = client.Update(ctx, monitoringConsoleInstance) From fb778805353597169558fd7a8eb72b71f0238023 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 15 Jun 2023 10:28:17 -0700 Subject: [PATCH 23/85] Refined changeClusterManagerAnnotations --- pkg/splunk/enterprise/licensemanager.go | 56 +++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 60d8a95a9..ce58aa825 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -27,6 +27,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -281,3 +282,58 @@ func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl return "", nil } + +// func checkClusterManagerUpdate(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (bool, error) { + +// namespacedName := types.NamespacedName{ +// Namespace: cr.GetNamespace(), +// Name: cr.Spec.ClusterManagerRef.Name, +// } +// clusterManagerInstance := &enterpriseApi.ClusterManager{} +// err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) +// if err != nil && k8serrors.IsNotFound(err) { +// return false, nil +// } +// if clusterManagerInstance.Spec.Image != clusterManagerInstance.Spec.Image { +// return true, nil +// } + +// return true, err + +// } + +// changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong. 
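The idea behind these annotation helpers is that updating an annotation on a watched custom resource produces a modify event, which requeues that resource's reconcile; comparing the stored value first avoids redundant updates. A minimal sketch of that mechanism, assuming only that the ClusterManager controller watches its own CR; the helper name is illustrative:

package sketch

import (
	"context"

	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// markForUpdateCheck stamps the desired image onto the ClusterManager so the
// next watch event re-runs its reconcile loop.
func markForUpdateCheck(ctx context.Context, c client.Client, cm *enterpriseApi.ClusterManager) error {
	ann := cm.GetAnnotations()
	if ann == nil {
		ann = map[string]string{}
	}
	if ann["checkUpdateImage"] == cm.Spec.Image {
		return nil // already stamped; no update event needed
	}
	ann["checkUpdateImage"] = cm.Spec.Image
	cm.SetAnnotations(ann)
	// The Update call is what generates the event the controller reacts to.
	return c.Update(ctx, cm)
}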
+func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + clusterManagerInstance := &enterpriseApi.ClusterManager{} + err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := clusterManagerInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image + + clusterManagerInstance.SetAnnotations(annotations) + err = client.Update(ctx, clusterManagerInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + + return nil + +} From b6c70d54b97f5b2874055e50eaae66ce807f4b29 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Thu, 15 Jun 2023 15:05:56 -0700 Subject: [PATCH 24/85] test case for upgrade scenario --- .../upgrade/c3-with-operator/01-assert.yaml | 93 +++++++++- .../upgrade/c3-with-operator/02-assert.yaml | 2 +- .../upgrade/c3-with-operator/03-assert.yaml | 35 +--- .../03-upgrade-splunk-image.yaml | 6 + .../upgrade/c3-with-operator/04-assert.yaml | 174 +++++++++++++++++- 5 files changed, 278 insertions(+), 32 deletions(-) create mode 100644 kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml diff --git a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml index 4b09ebf54..dce36af8b 100644 --- a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml @@ -6,4 +6,95 @@ metadata: name: splunk-operator-controller-manager status: readyReplicas: 1 - availableReplicas: 1 \ No newline at end of file + availableReplicas: 1 + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: idxc +status: + phase: Ready + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-idxc-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-idxc-indexer-secret-v1 + +--- +# assert for SearchHeadCluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc +status: + phase: Ready + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-deployer-secret-v1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-search-head-secret-v1 + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + 
name: splunk-shc-search-head +status: + replicas: 3 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-deployer +status: + replicas: 1 diff --git a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml index 731366343..59008dd62 100644 --- a/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/02-assert.yaml @@ -21,4 +21,4 @@ status: apiVersion: v1 kind: Secret metadata: - name: splunk-cm-cluster-manager-secret-v1 \ No newline at end of file + name: splunk-cm-cluster-manager-secret-v1 diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml index c3c560798..84b4ee495 100644 --- a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml @@ -1,40 +1,17 @@ --- -# assert for SearchHeadCluster custom resource to be ready +# assert for indexer cluster custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: SearchHeadCluster +kind: IndexerCluster metadata: - name: shc + name: idxc status: phase: Ready --- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-deployer-secret-v1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-search-head-secret-v1 - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-search-head -status: - replicas: 3 - ---- -# check for statefull set +# check for stateful sets and replicas updated apiVersion: apps/v1 kind: StatefulSet metadata: - name: splunk-shc-deployer + name: splunk-idxc-indexer status: - replicas: 1 \ No newline at end of file + replicas: 4 diff --git a/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml b/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml new file mode 100644 index 000000000..a11eefac7 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - command: helm upgrade splunk-c3 $HELM_REPO_PATH/splunk-enterprise --reuse-values -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE} + namespaced: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml index 4d5aadaf4..4f883ab81 100644 --- a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml @@ -1,3 +1,38 @@ +--- +# assert for splunk operator pod to be ready +apiVersion: apps/v1 +kind: Deployment +metadata: + name: splunk-operator-controller-manager +status: + readyReplicas: 1 + availableReplicas: 1 + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + --- # assert for indexer cluster custom resource to be ready apiVersion: enterprise.splunk.com/v4 
@@ -21,4 +56,141 @@ status: apiVersion: v1 kind: Secret metadata: - name: splunk-idxc-indexer-secret-v1 \ No newline at end of file + name: splunk-idxc-indexer-secret-v1 + +--- +# assert for SearchHeadCluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc +status: + phase: Ready + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-deployer-secret-v1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-search-head-secret-v1 + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-search-head +status: + replicas: 3 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-deployer +status: + replicas: 1 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-idxc-indexer-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-idxc-indexer-1 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-idxc-indexer-2 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for statefull set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-cm-cluster-manager-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-search-head-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-search-head-1 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-search-head-2 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true + +--- +# check for pod set +apiVersion: apps/v1 +kind: Pod +metadata: + name: splunk-shc-deployer-0 +status: + containerStatuses: + - image: splunk/splunk:9.0.5 + name: splunk + ready: true From 6957966f146c2a7ad4b4fab498508a53dd4b8328 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 21 Jun 2023 10:31:37 -0700 Subject: [PATCH 25/85] Modified kuttl cases --- kuttl/kuttl-test-helm-upgrade.yaml | 2 +- .../upgrade/c3-with-operator/01-assert.yaml | 93 +------------------ .../upgrade/c3-with-operator/03-assert.yaml | 17 ---- .../03-upgrade-splunk-image.yaml | 6 -- pkg/splunk/enterprise/licensemanager.go | 19 ---- 5 files changed, 2 insertions(+), 135 deletions(-) delete mode 100644 kuttl/tests/upgrade/c3-with-operator/03-assert.yaml delete mode 100644 kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml diff --git a/kuttl/kuttl-test-helm-upgrade.yaml b/kuttl/kuttl-test-helm-upgrade.yaml index d8ecc7336..a152a8423 100644 --- a/kuttl/kuttl-test-helm-upgrade.yaml +++ b/kuttl/kuttl-test-helm-upgrade.yaml @@ -4,7 +4,7 @@ kind: TestSuite testDirs: - ./kuttl/tests/upgrade parallel: 3 -timeout: 5000 +timeout: 500 startKIND: false artifactsDir: kuttl-artifacts kindNodeCache: false diff --git a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml 
b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml index dce36af8b..4b09ebf54 100644 --- a/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/01-assert.yaml @@ -6,95 +6,4 @@ metadata: name: splunk-operator-controller-manager status: readyReplicas: 1 - availableReplicas: 1 - ---- -# assert for cluster manager custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: ClusterManager -metadata: - name: cm -status: - phase: Ready - ---- -# check if stateful sets are created -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-cm-cluster-manager -status: - replicas: 1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-cm-cluster-manager-secret-v1 - ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: idxc -status: - phase: Ready - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-idxc-indexer -status: - replicas: 3 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-idxc-indexer-secret-v1 - ---- -# assert for SearchHeadCluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: SearchHeadCluster -metadata: - name: shc -status: - phase: Ready - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-deployer-secret-v1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-search-head-secret-v1 - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-search-head -status: - replicas: 3 - ---- -# check for statefull set -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-deployer -status: - replicas: 1 + availableReplicas: 1 \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml deleted file mode 100644 index 84b4ee495..000000000 --- a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: idxc -status: - phase: Ready - ---- -# check for stateful sets and replicas updated -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-idxc-indexer -status: - replicas: 4 diff --git a/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml b/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml deleted file mode 100644 index a11eefac7..000000000 --- a/kuttl/tests/upgrade/c3-with-operator/03-upgrade-splunk-image.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - - command: helm upgrade splunk-c3 $HELM_REPO_PATH/splunk-enterprise --reuse-values -f c3_config.yaml --set splunk-operator.splunkOperator.image.repository=${KUTTL_SPLUNK_OPERATOR_IMAGE} --set splunk-operator.image.repository=${KUTTL_SPLUNK_ENTERPRISE_NEW_IMAGE} - namespaced: true \ No newline at end of file diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index ce58aa825..418bea803 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -283,25 +283,6 @@ func 
getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl return "", nil } -// func checkClusterManagerUpdate(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (bool, error) { - -// namespacedName := types.NamespacedName{ -// Namespace: cr.GetNamespace(), -// Name: cr.Spec.ClusterManagerRef.Name, -// } -// clusterManagerInstance := &enterpriseApi.ClusterManager{} -// err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) -// if err != nil && k8serrors.IsNotFound(err) { -// return false, nil -// } -// if clusterManagerInstance.Spec.Image != clusterManagerInstance.Spec.Image { -// return true, nil -// } - -// return true, err - -// } - // changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop // on update, and returns error if something is wrong. func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { From 70c73c299666a504e05311b222fc91b3878e0c6c Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 22 Jun 2023 10:59:44 -0700 Subject: [PATCH 26/85] Added kuttl tests; Updated LicenseMaster --- kuttl/kuttl-test-helm-upgrade.yaml | 2 +- .../upgrade/c3-with-operator/03-assert.yaml | 40 ++++ .../upgrade/c3-with-operator/04-assert.yaml | 172 ------------------ pkg/splunk/enterprise/licensemaster.go | 37 ++++ 4 files changed, 78 insertions(+), 173 deletions(-) create mode 100644 kuttl/tests/upgrade/c3-with-operator/03-assert.yaml diff --git a/kuttl/kuttl-test-helm-upgrade.yaml b/kuttl/kuttl-test-helm-upgrade.yaml index a152a8423..d8ecc7336 100644 --- a/kuttl/kuttl-test-helm-upgrade.yaml +++ b/kuttl/kuttl-test-helm-upgrade.yaml @@ -4,7 +4,7 @@ kind: TestSuite testDirs: - ./kuttl/tests/upgrade parallel: 3 -timeout: 500 +timeout: 5000 startKIND: false artifactsDir: kuttl-artifacts kindNodeCache: false diff --git a/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml new file mode 100644 index 000000000..c3c560798 --- /dev/null +++ b/kuttl/tests/upgrade/c3-with-operator/03-assert.yaml @@ -0,0 +1,40 @@ +--- +# assert for SearchHeadCluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: SearchHeadCluster +metadata: + name: shc +status: + phase: Ready + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-deployer-secret-v1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-shc-search-head-secret-v1 + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-search-head +status: + replicas: 3 + +--- +# check for statefull set +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-shc-deployer +status: + replicas: 1 \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml index 4f883ab81..368902426 100644 --- a/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/04-assert.yaml @@ -1,38 +1,3 @@ ---- -# assert for splunk operator pod to be ready -apiVersion: apps/v1 -kind: Deployment -metadata: - name: splunk-operator-controller-manager -status: - readyReplicas: 1 - availableReplicas: 1 - ---- -# assert for cluster manager custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: 
ClusterManager -metadata: - name: cm -status: - phase: Ready - ---- -# check if stateful sets are created -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-cm-cluster-manager -status: - replicas: 1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-cm-cluster-manager-secret-v1 - --- # assert for indexer cluster custom resource to be ready apiVersion: enterprise.splunk.com/v4 @@ -57,140 +22,3 @@ apiVersion: v1 kind: Secret metadata: name: splunk-idxc-indexer-secret-v1 - ---- -# assert for SearchHeadCluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: SearchHeadCluster -metadata: - name: shc -status: - phase: Ready - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-deployer-secret-v1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-shc-search-head-secret-v1 - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-search-head -status: - replicas: 3 - ---- -# check for statefull set -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-shc-deployer -status: - replicas: 1 - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-idxc-indexer-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-idxc-indexer-1 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-idxc-indexer-2 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for statefull set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-cm-cluster-manager-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-search-head-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-search-head-1 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-search-head-2 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true - ---- -# check for pod set -apiVersion: apps/v1 -kind: Pod -metadata: - name: splunk-shc-deployer-0 -status: - containerStatuses: - - image: splunk/splunk:9.0.5 - name: splunk - ready: true diff --git a/pkg/splunk/enterprise/licensemaster.go b/pkg/splunk/enterprise/licensemaster.go index 8ff920be8..3c3506886 100644 --- a/pkg/splunk/enterprise/licensemaster.go +++ b/pkg/splunk/enterprise/licensemaster.go @@ -25,6 +25,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -225,3 +226,39 @@ func getLicenseMasterList(ctx context.Context, c splcommon.ControllerClient, cr return numOfObjects, nil } + +// changeClusterMasterAnnotations updates the checkUpdateImage field of the CLuster Master Annotations to trigger the 
reconcile loop +// on update, and returns error if something is wrong. +func changeClusterMasterAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApiV3.LicenseMaster) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + clusterMasterInstance := &enterpriseApiV3.ClusterMaster{} + err := client.Get(context.TODO(), namespacedName, clusterMasterInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := clusterMasterInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == clusterMasterInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = clusterMasterInstance.Spec.Image + + clusterMasterInstance.SetAnnotations(annotations) + err = client.Update(ctx, clusterMasterInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + + return nil + +} From 4a945eb3fa281e5b624ab77ab78e49a10fa10bf5 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 22 Jun 2023 16:40:45 -0700 Subject: [PATCH 27/85] Fixed unit test --- pkg/splunk/enterprise/clustermanager.go | 37 +++++++++++++++++++++++++ pkg/splunk/enterprise/licensemanager.go | 37 ------------------------- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index dd569d333..c08f4ea52 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -34,6 +34,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" rclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -591,3 +592,39 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl return "", nil } + +// changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong +func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + clusterManagerInstance := &enterpriseApi.ClusterManager{} + err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + annotations := clusterManagerInstance.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["checkUpdateImage"]; ok { + if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { + return nil + } + } + + annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image + + clusterManagerInstance.SetAnnotations(annotations) + err = client.Update(ctx, clusterManagerInstance) + if err != nil { + fmt.Println("Error in Change Annotation UPDATE", err) + return err + } + + return nil + +} diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 418bea803..60d8a95a9 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -27,7 +27,6 @@ import ( appsv1 "k8s.io/api/apps/v1" 
corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -282,39 +281,3 @@ func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl return "", nil } - -// changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop -// on update, and returns error if something is wrong. -func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.ClusterManagerRef.Name, - } - clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil - } - annotations := clusterManagerInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { - return nil - } - } - - annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image - - clusterManagerInstance.SetAnnotations(annotations) - err = client.Update(ctx, clusterManagerInstance) - if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) - return err - } - - return nil - -} From a2c9f6df46a5c326553f9ef5b49d21135a94dff2 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 23 Jun 2023 14:40:57 -0700 Subject: [PATCH 28/85] Removed changeAnnotation from licenseMaster --- pkg/splunk/enterprise/licensemaster.go | 37 -------------------------- 1 file changed, 37 deletions(-) diff --git a/pkg/splunk/enterprise/licensemaster.go b/pkg/splunk/enterprise/licensemaster.go index 3c3506886..8ff920be8 100644 --- a/pkg/splunk/enterprise/licensemaster.go +++ b/pkg/splunk/enterprise/licensemaster.go @@ -25,7 +25,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" @@ -226,39 +225,3 @@ func getLicenseMasterList(ctx context.Context, c splcommon.ControllerClient, cr return numOfObjects, nil } - -// changeClusterMasterAnnotations updates the checkUpdateImage field of the CLuster Master Annotations to trigger the reconcile loop -// on update, and returns error if something is wrong. 
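
The changeCluster*Annotations helpers that these commits add, copy and later consolidate all reduce to the same handoff: the upstream controller stamps its current image into a "checkUpdateImage" annotation on the downstream custom resource, so that resource is updated and its own controller reconciles again. A minimal sketch of that handoff, assuming only a controller-runtime client and hypothetical names rather than the operator's exact helper:

	package sketch

	import (
		"context"

		"sigs.k8s.io/controller-runtime/pkg/client"
	)

	// bumpCheckUpdateImage records the upstream image on the downstream object,
	// skipping the write when the annotation already carries that image.
	func bumpCheckUpdateImage(ctx context.Context, c client.Client, obj client.Object, image string) error {
		annotations := obj.GetAnnotations()
		if annotations == nil {
			annotations = map[string]string{}
		}
		if annotations["checkUpdateImage"] == image {
			return nil // nothing changed, avoid a useless update event
		}
		annotations["checkUpdateImage"] = image
		obj.SetAnnotations(annotations)
		return c.Update(ctx, obj)
	}
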
-func changeClusterMasterAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApiV3.LicenseMaster) error { - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.ClusterManagerRef.Name, - } - clusterMasterInstance := &enterpriseApiV3.ClusterMaster{} - err := client.Get(context.TODO(), namespacedName, clusterMasterInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil - } - annotations := clusterMasterInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == clusterMasterInstance.Spec.Image { - return nil - } - } - - annotations["checkUpdateImage"] = clusterMasterInstance.Spec.Image - - clusterMasterInstance.SetAnnotations(annotations) - err = client.Update(ctx, clusterMasterInstance) - if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) - return err - } - - return nil - -} From 86baa2177bb51e5f6b707526903e05d7472fd212 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 26 Jun 2023 16:42:05 -0700 Subject: [PATCH 29/85] Completed code coverage tests --- pkg/splunk/enterprise/clustermanager.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index c08f4ea52..947419f80 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -597,15 +597,20 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl // on update, and returns error if something is wrong func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: cr.Spec.ClusterManagerRef.Name, } clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := client.Get(context.TODO(), namespacedName, clusterManagerInstance) + err := client.Get(ctx, namespacedName, clusterManagerInstance) if err != nil && k8serrors.IsNotFound(err) { return nil } + + // fetch and check the annotation fields of the ClusterManager annotations := clusterManagerInstance.GetAnnotations() if annotations == nil { annotations = map[string]string{} @@ -616,12 +621,13 @@ func changeClusterManagerAnnotations(ctx context.Context, client splcommon.Contr } } + // create/update the checkUpdateImage annotation field annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image clusterManagerInstance.SetAnnotations(annotations) err = client.Update(ctx, clusterManagerInstance) if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) + scopedLog.Error(err, "ClusterManager types updated after changing annotations failed with", "error", err) return err } From b24fca74cc2cbbb92515e4892ff0be5219fce128 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 28 Jun 2023 11:17:03 -0700 Subject: [PATCH 30/85] Resolved all conflict issues --- env.sh | 1 - .../upgrade/c3-with-operator/05-uninstall-c3 | 5 - .../c3-with-operator/05-uninstall-c3.yaml | 2 +- pkg/splunk/enterprise/clustermanager.go | 17 +- pkg/splunk/enterprise/clustermanager_test.go | 163 ++++++++++++++++++ 5 files changed, 174 insertions(+), 14 deletions(-) delete mode 100644 env.sh delete 
mode 100644 kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 diff --git a/env.sh b/env.sh deleted file mode 100644 index f1f641af1..000000000 --- a/env.sh +++ /dev/null @@ -1 +0,0 @@ -#!/usr/bin/env bash diff --git a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 deleted file mode 100644 index 95f8297ca..000000000 --- a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3 +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - - command: helm uninstall splunk-c3 --namespace ${NAMESPACE} - namespaced: true \ No newline at end of file diff --git a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml index abb75c68d..cf9d19cf8 100644 --- a/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/05-uninstall-c3.yaml @@ -1,5 +1,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - command: helm uninstall splunk-c3--namespace ${NAMESPACE} + - command: helm uninstall splunk-c3 --namespace ${NAMESPACE} namespaced: true diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 947419f80..077a566e3 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -191,7 +191,6 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, // TODO: Right now if the CM is not ready for upgrade the reconcile loop goes into // an infite loop and ives Time Out. We still want the other functions to run if // a proper upgrade does not happen - if !checkUpgradeReady { return result, err } @@ -595,39 +594,43 @@ func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerCl // changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop // on update, and returns error if something is wrong -func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { +func changeClusterManagerAnnotations(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: cr.Spec.ClusterManagerRef.Name, } clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := client.Get(ctx, namespacedName, clusterManagerInstance) + err := c.Get(ctx, namespacedName, clusterManagerInstance) if err != nil && k8serrors.IsNotFound(err) { return nil } + image, _ := getLicenseManagerCurrentImage(ctx, c, cr) + // fetch and check the annotation fields of the ClusterManager annotations := clusterManagerInstance.GetAnnotations() if annotations == nil { annotations = map[string]string{} } if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { + if annotations["checkUpdateImage"] == image { return nil } } // create/update the checkUpdateImage annotation field - annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image + annotations["checkUpdateImage"] = image clusterManagerInstance.SetAnnotations(annotations) - err = client.Update(ctx, clusterManagerInstance) + err = c.Update(ctx, 
clusterManagerInstance) if err != nil { - scopedLog.Error(err, "ClusterManager types updated after changing annotations failed with", "error", err) + eventPublisher.Warning(ctx, "changeClusterManagerAnnotations", fmt.Sprintf("Could not update annotations. Reason %v", err)) + scopedLog.Error(err, "ClusterManager types update after changing annotations failed with", "error", err) return err } diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 705823f68..c694ca9bd 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1694,6 +1694,169 @@ func TestGetClusterManagerCurrentImage(t *testing.T) { } } +func TestChangeClusterManagerAnnotations(t *testing.T) { + ctx := context.TODO() + + // define LM and CM + lm := &enterpriseApi.LicenseManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-lm", + Namespace: "test", + }, + Spec: enterpriseApi.LicenseManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: "test-cm", + }, + }, + }, + } + replicas := int32(1) + labels := map[string]string{ + "app": "test", + "tier": "splunk", + } + lmstatefulset := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-lm-license-manager", + Namespace: "test", + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "splunk-test-lm-license-manager-headless", + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + }, + Replicas: &replicas, + }, + } + lmstatefulset.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: labels, + } + + cm := &enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + lm.Spec.Image = "splunk/splunk:latest" + + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + // Create the instances + client.Create(ctx, lm) + client.Create(ctx, lmstatefulset) + _, err := ApplyLicenseManager(ctx, client, lm) + if err != nil { + t.Errorf("applyLicenseManager should not have returned error; err=%v", err) + } + lm.Status.Phase = enterpriseApi.PhaseReady + err = client.Status().Update(ctx, lm) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + client.Create(ctx, cm) + _, err = ApplyClusterManager(ctx, client, cm) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + + // create LM pod + lmstpod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-license-manager-0", + Namespace: "test", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + } + lmstpod.ObjectMeta.Labels = labels + // simulate create pod + err = client.Create(ctx, lmstpod) + if err != nil { + t.Errorf("Unexpected create pod failed %v", err) + 
debug.PrintStack() + } + + // update pod + lmstpod.Status.Phase = corev1.PodRunning + lmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, + } + err = client.Status().Update(ctx, lmstpod) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + err = changeClusterManagerAnnotations(ctx, client, lm) + if err != nil { + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + } + clusterManager := &enterpriseApi.ClusterManager{} + namespacedName := types.NamespacedName{ + Name: cm.Name, + Namespace: cm.Namespace, + } + err = client.Get(ctx, namespacedName, clusterManager) + if err != nil { + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + } + + annotations := clusterManager.GetAnnotations() + if annotations["checkUpdateImage"] != lm.Spec.Image { + t.Errorf("changeClusterManagerAnnotations should have set the checkUpdateImage annotation field to the current image") + } + +} + func TestClusterManagerWitReadyState(t *testing.T) { // create directory for app framework newpath := filepath.Join("/tmp", "appframework") From 1d0cc579fab6583c264a97ecc034e9a33132fd43 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 28 Jun 2023 11:37:36 -0700 Subject: [PATCH 31/85] Added comments --- pkg/splunk/enterprise/clustermanager.go | 43 +------------------- pkg/splunk/enterprise/clustermanager_test.go | 2 +- 2 files changed, 2 insertions(+), 43 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 077a566e3..7b037d73e 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -34,7 +34,6 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" rclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -453,47 +452,7 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, return extraEnv, err } -// changeClusterMasterAnnotations updates the checkUpdateImage field of the Cluster Master Annotations to trigger the reconcile loop -// on update, and returns error if something is wrong. 
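
Writing the image into an annotation works as an upgrade trigger because the downstream controller watches its own custom resource type: a metadata-only change such as a new annotation value is still an update event and re-enqueues a reconcile request, unless predicates filter it out. A generic controller-runtime sketch of that wiring, using placeholder names rather than the operator's actual controller setup:

	package sketch

	import (
		ctrl "sigs.k8s.io/controller-runtime"
		"sigs.k8s.io/controller-runtime/pkg/client"
		"sigs.k8s.io/controller-runtime/pkg/reconcile"
	)

	// setupWatch registers r for create/update/delete events on obj's kind,
	// which is what lets an annotation bump made by another controller kick
	// off a fresh reconcile of the annotated resource.
	func setupWatch(mgr ctrl.Manager, r reconcile.Reconciler, obj client.Object) error {
		return ctrl.NewControllerManagedBy(mgr).
			For(obj).
			Complete(r)
	}
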
-func changeClusterManagerAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { - - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.ClusterManagerRef.Name, - } - clusterManagerInstance := &enterpriseApi.ClusterManager{} - err := client.Get(ctx, namespacedName, clusterManagerInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil - } - - // fetch and check the annotation fields of the ClusterManager - annotations := clusterManagerInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == clusterManagerInstance.Spec.Image { - return nil - } - } - - // create/update the checkUpdateImage annotation field - annotations["checkUpdateImage"] = clusterManagerInstance.Spec.Image - - clusterManagerInstance.SetAnnotations(annotations) - err = client.Update(ctx, clusterManagerInstance) - if err != nil { - scopedLog.Error(err, "ClusterManager types updated after changing annotations failed with", "error", err) - return err - } - - return nil -} - +// upgradeScenario checks if it is suitable to update the clusterManager based on the Status of the licenseManager, returns bool, err accordingly func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { reqLogger := log.FromContext(ctx) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index c694ca9bd..afc627149 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1795,7 +1795,7 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { // create LM pod lmstpod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-license-manager-0", + Name: "splunk-test-lm-license-manager-0", Namespace: "test", }, Spec: corev1.PodSpec{ From 10cc0b6bd950dad46c98587542ddcb2a960b1d53 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 30 Jun 2023 09:52:49 -0700 Subject: [PATCH 32/85] Updated upgradeScenario to check if statefulSet exists --- pkg/splunk/enterprise/clustermanager.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 7b037d73e..eb9534def 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -459,14 +459,26 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente scopedLog := reqLogger.WithName("upgradeScenario").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(c, cr) + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), + } + + // check if the stateful set is created at this instance + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) + if err != nil && k8serrors.IsNotFound(err) { + return true, nil + } + licenseManagerRef := cr.Spec.LicenseManagerRef - namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: licenseManagerRef.Name} + namespacedName = types.NamespacedName{Namespace: 
cr.GetNamespace(), Name: licenseManagerRef.Name} // create new object licenseManager := &enterpriseApi.LicenseManager{} // get the license manager referred in cluster manager - err := c.Get(ctx, namespacedName, licenseManager) + err = c.Get(ctx, namespacedName, licenseManager) if err != nil { return true, nil } From 27ddd678f2b2d3e082f6898f54283f5da4b6e3c0 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 30 Jun 2023 10:18:23 -0700 Subject: [PATCH 33/85] Fixed Unit tests --- pkg/splunk/enterprise/clustermanager_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index afc627149..99a524e82 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -66,7 +66,7 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v4.LicenseManager-test-"}, + {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, {MetaName: "*v4.ClusterManager-test-stack1"}, @@ -82,6 +82,7 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, @@ -498,6 +499,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.Pod-test-splunk-stack1-cluster-manager-0"}, @@ -520,6 +522,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, + {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, From 3cfce1d62e0f430e8291f9120842ba629073d08c Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 30 Jun 2023 16:08:59 -0700 Subject: [PATCH 34/85] Allow update if no change in image --- pkg/splunk/enterprise/clustermanager.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index eb9534def..01b5686d9 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -506,6 +506,11 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente return true, 
nil } + // want ordered upgrade only for version/image upgrade + if cr.Spec.Image == cmImage { + return true, nil + } + return false, nil } From d8565e3f2f21fd7b30f476b579ce37f7717c6757 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 29 Jun 2023 13:45:35 -0700 Subject: [PATCH 35/85] Added methods and tests for MC --- pkg/splunk/enterprise/clustermanager.go | 4 +- pkg/splunk/enterprise/clustermanager_test.go | 4 +- pkg/splunk/enterprise/monitoringconsole.go | 100 ++++ .../enterprise/monitoringconsole_test.go | 469 ++++++++++++++++++ 4 files changed, 573 insertions(+), 4 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 01b5686d9..144e417d7 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -182,7 +182,7 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, return result, err } - checkUpgradeReady, err := upgradeScenario(ctx, client, cr) + checkUpgradeReady, err := upgradeScenarioClusterManager(ctx, client, cr) if err != nil { return result, err } @@ -453,7 +453,7 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, } // upgradeScenario checks if it is suitable to update the clusterManager based on the Status of the licenseManager, returns bool, err accordingly -func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { +func upgradeScenarioClusterManager(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("upgradeScenario").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 99a524e82..2d3245cfd 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1391,7 +1391,7 @@ func TestCheckIfsmartstoreConfigMapUpdatedToPod(t *testing.T) { mockPodExecClient.CheckPodExecCommands(t, "CheckIfsmartstoreConfigMapUpdatedToPod") } -func TestUpgradeScenario(t *testing.T) { +func TestUpgradeScenarioClusterManager(t *testing.T) { ctx := context.TODO() @@ -1593,7 +1593,7 @@ func TestUpgradeScenario(t *testing.T) { cm.Spec.Image = "splunk2" lmstpod.Status.ContainerStatuses[0].Image = "splunk2" err = client.Status().Update(ctx, lmstpod) - check, err := upgradeScenario(ctx, client, &cm) + check, err := upgradeScenarioClusterManager(ctx, client, &cm) if err != nil { t.Errorf("Unexpected upgradeScenario error %v", err) diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index a042a0c6f..42bbb1407 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -34,6 +34,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -356,6 +357,105 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor } } +// upgradeScenario checks if it is suitable to update the clusterManager based on the Status of the licenseManager, returns bool, err accordingly +func upgradeScenarioMonitoringConsole(ctx context.Context, c splcommon.ControllerClient, cr 
*enterpriseApi.MonitoringConsole) (bool, error) { + + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("upgradeScenarioMonitoringConsole").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) + + clusterManagerRef := cr.Spec.ClusterManagerRef + namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: clusterManagerRef.Name} + + // create new object + clusterManager := &enterpriseApi.ClusterManager{} + + // get the license manager referred in cluster manager + err := c.Get(ctx, namespacedName, clusterManager) + if err != nil { + return true, nil + } + + cmImage, err := getClusterManagerCurrentImage(ctx, c, clusterManager) + if err != nil { + eventPublisher.Warning(ctx, "upgradeScenarioMonitoringConsole", fmt.Sprintf("Could not get the Cluster Manager Image. Reason %v", err)) + scopedLog.Error(err, "Unable to get clusterManager current image") + return false, err + } + mcImage, err := getMonitoringConsoleCurrentImage(ctx, c, cr) + if err != nil { + eventPublisher.Warning(ctx, "upgradeScenarioMonitoringConsole", fmt.Sprintf("Could not get the Monitoring Console Image. Reason %v", err)) + scopedLog.Error(err, "Unable to get monitoringConsole current image") + return false, err + } + + // check conditions for upgrade + if cr.Spec.Image != mcImage && cmImage == cr.Spec.Image && clusterManager.Status.Phase == enterpriseApi.PhaseReady { + return true, nil + } + + // Temporary workaround to keep the clusterManager method working only when the LM is ready + if clusterManager.Status.Phase == enterpriseApi.PhaseReady { + return true, nil + } + + return false, nil +} + +// getClusterManagerCurrentImage gets the image of the pods of the clusterManager before any upgrade takes place, +// returns the image, and error if something goes wring +func getMonitoringConsoleCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.MonitoringConsole) (string, error) { + + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("getMonitoringConsoleCurrentImage").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(c, cr) + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkMonitoringConsole, cr.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) + if err != nil { + eventPublisher.Warning(ctx, "getMonitoringConsoleCurrentImage", fmt.Sprintf("Could not get Stateful Set. Reason %v", err)) + scopedLog.Error(err, "StatefulSet types not found in namespace", "namsespace", cr.GetNamespace()) + return "", err + } + labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) + if err != nil { + eventPublisher.Warning(ctx, "getMonitoringConsoleCurrentImage", fmt.Sprintf("Could not get labels. Reason %v", err)) + scopedLog.Error(err, "Unable to get labels") + return "", err + } + + // get a list of all pods in the namespace with matching labels as the statefulset + statefulsetPods := &corev1.PodList{} + opts := []rclient.ListOption{ + rclient.InNamespace(cr.GetNamespace()), + rclient.MatchingLabelsSelector{Selector: labelSelector}, + } + + err = c.List(ctx, statefulsetPods, opts...) + if err != nil { + eventPublisher.Warning(ctx, "getMonitoringConsoleCurrentImage", fmt.Sprintf("Could not get Pod list. 
Reason %v", err)) + scopedLog.Error(err, "Pods types not found in namespace", "namsespace", cr.GetNamespace()) + return "", err + } + + // find the container with the phrase 'splunk' in it + for _, v := range statefulsetPods.Items { + for _, container := range v.Status.ContainerStatuses { + if strings.Contains(container.Name, "splunk") { + image := container.Image + return image, nil + } + + } + } + + return "", nil +} + // changeMonitoringConsoleAnnotations updates the checkUpdateImage field of the Monitoring Console Annotations to trigger the reconcile loop // on update, and returns error if something is wrong. func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error { diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go index 72efd15a7..f41735fef 100644 --- a/pkg/splunk/enterprise/monitoringconsole_test.go +++ b/pkg/splunk/enterprise/monitoringconsole_test.go @@ -1100,3 +1100,472 @@ func TestGetMonitoringConsoleList(t *testing.T) { t.Errorf("Got wrong number of IndexerCluster objects. Expected=%d, Got=%d", 1, numOfObjects) } } + +func TestUpgradeScenarioMonitoringConsole(t *testing.T) { + + ctx := context.TODO() + + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + // Create License Manager + cm := enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + + err := client.Create(ctx, &cm) + _, err = ApplyClusterManager(ctx, client, &cm) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + cm.Status.Phase = enterpriseApi.PhaseReady + err = client.Status().Update(ctx, &cm) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + // get StatefulSet labels + + namespacedName := types.NamespacedName{ + Namespace: cm.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkClusterManager, cm.GetName()), + } + cmstatefulSet := &appsv1.StatefulSet{} + err = client.Get(ctx, namespacedName, cmstatefulSet) + if err != nil { + t.Errorf("Unexpected get statefulset %v", err) + } + labels := cmstatefulSet.Spec.Template.ObjectMeta.Labels + + // create LM pod + cmstpod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-cluster-manager-0", + Namespace: "test", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + } + cmstpod.ObjectMeta.Labels = labels + // simulate create pod + err = client.Create(ctx, cmstpod) + if err != nil { + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() + } + + // update pod + cmstpod.Status.Phase = corev1.PodRunning + cmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, + } + err = client.Status().Update(ctx, cmstpod) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + // Create Cluster Manager + mc := enterpriseApi.MonitoringConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: 
"test", + }, + Spec: enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + LicenseManagerRef: corev1.ObjectReference{ + Name: "test", + }, + }, + }, + } + replicas := int32(1) + labels = map[string]string{ + "app": "test", + "tier": "splunk", + } + mcstatefulset := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-monitoring-console", + Namespace: "test", + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "splunk-test-monitoring-console-headless", + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + }, + Replicas: &replicas, + }, + } + mcstatefulset.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: labels, + } + + err = client.Create(ctx, &mc) + err = client.Create(ctx, mcstatefulset) + _, err = ApplyMonitoringConsole(ctx, client, &mc) + if err != nil { + t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err) + } + + // Create CM pod + mcstpod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-monitoring-console-0", + Namespace: "test", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + } + mcstpod.ObjectMeta.Labels = labels + // simulate create pod + err = client.Create(ctx, mcstpod) + if err != nil { + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() + } + + // update CM pod + mcstpod.Status.Phase = corev1.PodRunning + mcstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, + } + err = client.Status().Update(ctx, mcstpod) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + mc.Spec.Image = "splunk2" + cmstpod.Status.ContainerStatuses[0].Image = "splunk2" + err = client.Status().Update(ctx, cmstpod) + check, err := upgradeScenarioMonitoringConsole(ctx, client, &mc) + + if err != nil { + t.Errorf("Unexpected upgradeScenario error %v", err) + } + + if !check { + t.Errorf("upgradeScenario: MC should be ready for upgrade") + } + +} + +func TestGetMonitoringConsoleCurrentImage(t *testing.T) { + + ctx := context.TODO() + current := enterpriseApi.MonitoringConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + err := client.Create(ctx, ¤t) + _, err = ApplyMonitoringConsole(ctx, client, ¤t) + if err != nil { + t.Errorf("applyMonitoringConsol should not have returned error; err=%v", err) + } + + namespacedName := types.NamespacedName{ + Namespace: current.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkMonitoringConsole, current.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err = client.Get(ctx, namespacedName, statefulSet) + if err != nil { + t.Errorf("Unexpected get statefulset %v", 
err) + } + labels := statefulSet.Spec.Template.ObjectMeta.Labels + + stpod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-monitoring-console-0", + Namespace: "test", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + } + stpod.ObjectMeta.Labels = labels + // simulate create pod + err = client.Create(ctx, stpod) + if err != nil { + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() + } + + // update statefulset + stpod.Status.Phase = corev1.PodRunning + stpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, + } + err = client.Status().Update(ctx, stpod) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + image, err := getMonitoringConsoleCurrentImage(ctx, client, ¤t) + + if err != nil { + t.Errorf("Unexpected geMonitoringConsoleCurrentImage error %v", err) + } + if image != stpod.Status.ContainerStatuses[0].Image { + t.Errorf("getMonitoringConsoleCurrentImage does not return the current pod image") + } +} + +func TestChangeMonitoringConsoleAnnotations(t *testing.T) { + ctx := context.TODO() + + // define LM and CM + cm := &enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: "test-mc", + }, + }, + }, + } + replicas := int32(1) + labels := map[string]string{ + "app": "test", + "tier": "splunk", + } + cmstatefulset := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-cm-cluster-manager", + Namespace: "test", + }, + Spec: appsv1.StatefulSetSpec{ + ServiceName: "splunk-test-cm-cluster-manager-headless", + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + }, + Replicas: &replicas, + }, + } + cmstatefulset.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: labels, + } + + mc := &enterpriseApi.MonitoringConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cm", + Namespace: "test", + }, + Spec: enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + cm.Spec.Image = "splunk/splunk:latest" + + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + // Create the instances + client.Create(ctx, cm) + client.Create(ctx, cmstatefulset) + _, err := ApplyClusterManager(ctx, client, cm) + if err != nil { + t.Errorf("applyLicenseManager should not have returned error; err=%v", err) + } + cm.Status.Phase = enterpriseApi.PhaseReady + err = client.Status().Update(ctx, cm) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + client.Create(ctx, mc) + _, err = ApplyMonitoringConsole(ctx, client, mc) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } 
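
	// Note on the ordering this test exercises (names below are illustrative,
	// not the committed code): the upgradeScenario* helpers in the surrounding
	// patches gate each tier so the image rollout proceeds LicenseManager ->
	// ClusterManager -> MonitoringConsole. Stripped of logging and events, the
	// per-tier check is roughly:
	//
	//	proceed := !statefulSetExists ||            // first-time deployment
	//		cr.Spec.Image == currentPodImage || // no image change requested
	//		(upstream.Status.Phase == enterpriseApi.PhaseReady &&
	//			upstreamImage == cr.Spec.Image) // upstream already upgraded
	//
	// where "upstream" is the CR named by LicenseManagerRef for a ClusterManager
	// and by ClusterManagerRef for a MonitoringConsole; the committed helpers
	// also keep a temporary workaround that proceeds whenever the upstream
	// reports PhaseReady.
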
+ + // create LM pod + cmstpod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-test-cm-cluster-manager-0", + Namespace: "test", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "splunk", + Image: "splunk/splunk:latest", + Env: []corev1.EnvVar{ + { + Name: "test", + Value: "test", + }, + }, + }, + }, + }, + } + cmstpod.ObjectMeta.Labels = labels + // simulate create pod + err = client.Create(ctx, cmstpod) + if err != nil { + t.Errorf("Unexpected create pod failed %v", err) + debug.PrintStack() + } + + // update pod + cmstpod.Status.Phase = corev1.PodRunning + cmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Image: "splunk/splunk:latest", + Name: "splunk", + Ready: true, + }, + } + err = client.Status().Update(ctx, cmstpod) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + + err = changeMonitoringConsoleAnnotations(ctx, client, cm) + if err != nil { + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + } + monitoringConsole := &enterpriseApi.MonitoringConsole{} + namespacedName := types.NamespacedName{ + Name: cm.Name, + Namespace: cm.Namespace, + } + err = client.Get(ctx, namespacedName, monitoringConsole) + if err != nil { + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + } + + annotations := monitoringConsole.GetAnnotations() + if annotations["checkUpdateImage"] != cm.Spec.Image { + t.Errorf("changeClusterManagerAnnotations should have set the checkUpdateImage annotation field to the current image") + } + +} From 5093bc7d501fc1fdb664ca85eae3e4d153f35bb6 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 6 Jul 2023 10:25:36 -0700 Subject: [PATCH 36/85] Check if statefulSet exists for MC --- pkg/splunk/enterprise/monitoringconsole.go | 43 ++++++++++++++++--- .../enterprise/monitoringconsole_test.go | 20 +++++---- 2 files changed, 49 insertions(+), 14 deletions(-) diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 42bbb1407..4074fc388 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -137,6 +137,18 @@ func ApplyMonitoringConsole(ctx context.Context, client splcommon.ControllerClie return result, err } + checkUpgradeReady, err := upgradeScenarioMonitoringConsole(ctx, client, cr) + if err != nil { + return result, err + } + + // TODO: Right now if the MC is not ready for upgrade the reconcile loop goes into + // an infite loop and gives Time Out. 
We still want the other functions to run if + // a proper upgrade does not happen + if !checkUpgradeReady { + return result, err + } + mgr := splctrl.DefaultStatefulSetPodManager{} phase, err := mgr.Update(ctx, client, statefulSet, 1) if err != nil { @@ -364,14 +376,26 @@ func upgradeScenarioMonitoringConsole(ctx context.Context, c splcommon.Controlle scopedLog := reqLogger.WithName("upgradeScenarioMonitoringConsole").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(c, cr) + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), + } + + // check if the stateful set is created at this instance + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) + if err != nil && k8serrors.IsNotFound(err) { + return true, nil + } + clusterManagerRef := cr.Spec.ClusterManagerRef - namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: clusterManagerRef.Name} + namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: clusterManagerRef.Name} // create new object clusterManager := &enterpriseApi.ClusterManager{} // get the license manager referred in cluster manager - err := c.Get(ctx, namespacedName, clusterManager) + err = c.Get(ctx, namespacedName, clusterManager) if err != nil { return true, nil } @@ -394,7 +418,7 @@ func upgradeScenarioMonitoringConsole(ctx context.Context, c splcommon.Controlle return true, nil } - // Temporary workaround to keep the clusterManager method working only when the LM is ready + // Temporary workaround to keep the monitoringConsole method working only when the CM is ready if clusterManager.Status.Phase == enterpriseApi.PhaseReady { return true, nil } @@ -460,16 +484,23 @@ func getMonitoringConsoleCurrentImage(ctx context.Context, c splcommon.Controlle // on update, and returns error if something is wrong. func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("changeMonitoringConsoleAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(client, cr) + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: cr.Spec.MonitoringConsoleRef.Name, } monitoringConsoleInstance := &enterpriseApi.MonitoringConsole{} - err := client.Get(context.TODO(), namespacedName, monitoringConsoleInstance) + err := client.Get(ctx, namespacedName, monitoringConsoleInstance) if err != nil && k8serrors.IsNotFound(err) { return nil } + image, _ := getClusterManagerCurrentImage(ctx, client, cr) + + // fetch and check the annotation fields of the ClusterManager annotations := monitoringConsoleInstance.GetAnnotations() if annotations == nil { annotations = map[string]string{} @@ -480,12 +511,14 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } } + // create/update the checkUpdateImage annotation field annotations["checkUpdateImage"] = image monitoringConsoleInstance.SetAnnotations(annotations) err = client.Update(ctx, monitoringConsoleInstance) if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) + eventPublisher.Warning(ctx, "changeMonitoringConsoleAnnotations", fmt.Sprintf("Could not update annotations. 
Reason %v", err)) + scopedLog.Error(err, "MonitoringConsole types update after changing annotations failed with", "error", err) return err } diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go index f41735fef..73439e345 100644 --- a/pkg/splunk/enterprise/monitoringconsole_test.go +++ b/pkg/splunk/enterprise/monitoringconsole_test.go @@ -71,6 +71,7 @@ func TestApplyMonitoringConsole(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"}, + {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-monitoring-console"}, {MetaName: "*v4.MonitoringConsole-test-stack1"}, {MetaName: "*v4.MonitoringConsole-test-stack1"}, @@ -88,6 +89,7 @@ func TestApplyMonitoringConsole(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"}, + {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-monitoring-console"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-monitoring-console"}, {MetaName: "*v4.MonitoringConsole-test-stack1"}, @@ -1422,7 +1424,7 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) { ImagePullPolicy: "Always", }, Volumes: []corev1.Volume{}, - ClusterManagerRef: corev1.ObjectReference{ + MonitoringConsoleRef: corev1.ObjectReference{ Name: "test-mc", }, }, @@ -1468,7 +1470,7 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) { mc := &enterpriseApi.MonitoringConsole{ ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", + Name: "test-mc", Namespace: "test", }, Spec: enterpriseApi.MonitoringConsoleSpec{ @@ -1491,7 +1493,7 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) { client.Create(ctx, cmstatefulset) _, err := ApplyClusterManager(ctx, client, cm) if err != nil { - t.Errorf("applyLicenseManager should not have returned error; err=%v", err) + t.Errorf("applyClusterManager should not have returned error; err=%v", err) } cm.Status.Phase = enterpriseApi.PhaseReady err = client.Status().Update(ctx, cm) @@ -1502,7 +1504,7 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) { client.Create(ctx, mc) _, err = ApplyMonitoringConsole(ctx, client, mc) if err != nil { - t.Errorf("applyClusterManager should not have returned error; err=%v", err) + t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err) } // create LM pod @@ -1551,21 +1553,21 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) { err = changeMonitoringConsoleAnnotations(ctx, client, cm) if err != nil { - t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + t.Errorf("changeMonitoringConsoleAnnotations should not have returned error=%v", err) } monitoringConsole := &enterpriseApi.MonitoringConsole{} namespacedName := types.NamespacedName{ - Name: cm.Name, - Namespace: cm.Namespace, + Name: mc.Name, + Namespace: mc.Namespace, } err = client.Get(ctx, namespacedName, monitoringConsole) if err != nil { - t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) + t.Errorf("changeMonitoringConsoleAnnotations should not have returned error=%v", err) } annotations := monitoringConsole.GetAnnotations() if annotations["checkUpdateImage"] != 
cm.Spec.Image { - t.Errorf("changeClusterManagerAnnotations should have set the checkUpdateImage annotation field to the current image") + t.Errorf("changeMonitoringConsoleAnnotations should have set the checkUpdateImage annotation field to the current image") } } From ffe679fe7c863c0ca1aec643721a0fe4677f49ba Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 7 Jul 2023 09:34:13 -0700 Subject: [PATCH 37/85] Added gateway and clusterManager health model --- pkg/splunk/model/error_types.go | 15 +++++ .../services/cluster/manager/health_types.go | 64 ++++++++++++++++++ .../model/services/cluster/url_types.go | 31 +++++++++ pkg/splunk/model/types.go | 55 +++++++++++++++ pkg/splunk/services/gateway.go | 28 ++++++++ .../implementation/cluster_manager_impl.go | 67 +++++++++++++++++++ 6 files changed, 260 insertions(+) create mode 100644 pkg/splunk/model/error_types.go create mode 100644 pkg/splunk/model/services/cluster/manager/health_types.go create mode 100644 pkg/splunk/model/services/cluster/url_types.go create mode 100644 pkg/splunk/model/types.go create mode 100644 pkg/splunk/services/gateway.go create mode 100644 pkg/splunk/services/implementation/cluster_manager_impl.go diff --git a/pkg/splunk/model/error_types.go b/pkg/splunk/model/error_types.go new file mode 100644 index 000000000..578b2b004 --- /dev/null +++ b/pkg/splunk/model/error_types.go @@ -0,0 +1,15 @@ +package model + +type SplunkError struct { + Messages []struct { + Type string `json:"type,omitempty"` + Text string `json:"text,omitempty"` + } `json:"messages,omitempty"` +} + +func (s *SplunkError) Error() string { + if len(s.Messages) > 0 { + return s.Messages[0].Text + } + return "unknown error" +} diff --git a/pkg/splunk/model/services/cluster/manager/health_types.go b/pkg/splunk/model/services/cluster/manager/health_types.go new file mode 100644 index 000000000..eeb1cdc60 --- /dev/null +++ b/pkg/splunk/model/services/cluster/manager/health_types.go @@ -0,0 +1,64 @@ +package manager + +import ( + "time" +) + +// Description: Performs health checks to determine the cluster health and search impact, prior to a rolling upgrade of the indexer cluster. 
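// A typical "entry" in the response carries a "content" object that maps onto the
// ClusterManagerHealthContent struct below; purely for illustration (hypothetical values),
// a healthy cluster would report string flags such as:
//
//	{"all_peers_are_up": "1", "replication_factor_met": "1", "search_factor_met": "1"}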
+// Rest End Point API: services/cluster/manager/health +type ClusterManagerHealthContent struct { + AllDataIsSearchable string `json:"all_data_is_searchable,omitempty"` + AllPeersAreUp string `json:"all_peers_are_up,omitempty"` + CmVersionIsCompatible string `json:"cm_version_is_compatible,omitempty"` + EaiAcl interface{} `json:"eai:acl,omitempty"` + Multisite string `json:"multisite,omitempty"` + NoFixupTasksInProgress string `json:"no_fixup_tasks_in_progress,omitempty"` + PreFlightCheck string `json:"pre_flight_check,omitempty"` + ReadyForSearchableRollingRestart string `json:"ready_for_searchable_rolling_restart,omitempty"` + ReplicationFactorMet string `json:"replication_factor_met,omitempty"` + SearchFactorMet string `json:"search_factor_met,omitempty"` + SiteReplicationFactorMet string `json:"site_replication_factor_met,omitempty"` + SiteSearchFactorMet string `json:"site_search_factor_met,omitempty"` + SplunkVersionPeerCount string `json:"splunk_version_peer_count,omitempty"` +} + +type ClusterManagerHealthHeader struct { + Links struct { + } `json:"links,omitempty"` + Origin string `json:"origin,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Generator struct { + Build string `json:"build,omitempty"` + Version string `json:"version,omitempty"` + } `json:"generator,omitempty"` + Entry []struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Links struct { + Alternate string `json:"alternate,omitempty"` + List string `json:"list,omitempty"` + } `json:"links,omitempty"` + Author string `json:"author,omitempty"` + Acl struct { + App string `json:"app,omitempty"` + CanList bool `json:"can_list,omitempty"` + CanWrite bool `json:"can_write,omitempty"` + Modifiable bool `json:"modifiable,omitempty"` + Owner string `json:"owner,omitempty"` + Perms struct { + Read []string `json:"read,omitempty"` + Write []string `json:"write,omitempty"` + } `json:"perms,omitempty"` + Removable bool `json:"removable,omitempty"` + Sharing string `json:"sharing,omitempty"` + } `json:"acl,omitempty"` + Content ClusterManagerHealthContent `json:"content,omitempty"` + } `json:"entry,omitempty"` + Paging struct { + Total int `json:"total,omitempty"` + PerPage int `json:"perPage,omitempty"` + Offset int `json:"offset,omitempty"` + } `json:"paging,omitempty"` + Messages []interface{} `json:"messages,omitempty"` +} diff --git a/pkg/splunk/model/services/cluster/url_types.go b/pkg/splunk/model/services/cluster/url_types.go new file mode 100644 index 000000000..24081e9c2 --- /dev/null +++ b/pkg/splunk/model/services/cluster/url_types.go @@ -0,0 +1,31 @@ +package cluster + +const ( + GetClusterConfigUrl = "/services/cluster/config" + + GetClusterManagerBucketUrl = "/services/cluster/manager/buckets" + + GetClusterManagerHealthUrl = "/services/cluster/manager/health" + + GetClusterManagerGenerationUrl = "/services/cluster/manager/generation" + + GetClusterManagerIndexesUrl = "/services/cluster/manager/indexes" + + GetClusterManagerPeersUrl = "/services/cluster/manager/peers" + + GetClusterManagerInfoUrl = "/services/cluster/manager/info" + + GetClusterManagerRedundancyUrl = "/services/cluster/manager/redundancy" + + GetClusterManagerSitesUrl = "/services/cluster/manager/sites" + + GetClusterManagerSearchHeadUrl = "/services/cluster/manager/searchheads" + + GetClusterPeerBucketsUrl = "/services/cluster/peer/buckets" + + GetClusterPeerInfoUrl = "/services/cluster/peer/info" + + GetLicenseManagerLocalPeers = 
"/services/licenser/localslave" + + GetSearchHeadCaptainInfoUrl = "/services/shcluster/captain/info" +) diff --git a/pkg/splunk/model/types.go b/pkg/splunk/model/types.go new file mode 100644 index 000000000..ede3fc43e --- /dev/null +++ b/pkg/splunk/model/types.go @@ -0,0 +1,55 @@ +package model + +import ( + "github.com/go-logr/logr" + "github.com/go-resty/resty/v2" +) + +// SplunkCredentials contains the information necessary to communicate with +// the Splunk service +type SplunkCredentials struct { + + // Address holds the URL for splunk service + Address string `json:"address"` + + //Port port to connect + Port int32 `json:"port"` + + //Namespace where the splunk services are created + Namespace string `json:"namespace,omitempty"` + + //ServicesNamespace optional for services endpoints + ServicesNamespace string `json:"servicesNs,omitempty"` + + //User optional for services endpoints + User string `json:"user,omitempty"` + + //App optional for services endpoints + App string `json:"app,omitempty"` + + //CredentialsName The name of the secret containing the Splunk credentials (requires + // keys "username" and "password"). + // TODO FIXME need to change this to map as key value + CredentialsName string `json:"credentialsName"` + + //TrustedCAFile Server trusted CA file + TrustedCAFile string `json:"trustedCAFile,omitempty"` + + //ClientCertificateFile client certification if we are using to connect to server + ClientCertificateFile string `json:"clientCertificationFile,omitempty"` + + //ClientPrivateKeyFile client private key if we are using to connect to server + ClientPrivateKeyFile string `json:"clientPrivateKeyFile,omitempty"` + + // DisableCertificateVerification disables verification of splunk + // certificates when using HTTPS to connect to the Splunk. + DisableCertificateVerification bool `json:"disableCertificateVerification,omitempty"` +} + +type splunkGatewayFactory struct { + log logr.Logger + //credentials to log on to splunk + credentials *SplunkCredentials + // client for talking to splunk + client *resty.Client +} diff --git a/pkg/splunk/services/gateway.go b/pkg/splunk/services/gateway.go new file mode 100644 index 000000000..ddcba5705 --- /dev/null +++ b/pkg/splunk/services/gateway.go @@ -0,0 +1,28 @@ +package indexer + +import ( + "context" + + splunkmodel "github.com/splunk/splunk-operator/pkg/splunk/model" + managermodel "github.com/splunk/splunk-operator/pkg/splunk/model/services/cluster/manager" +) + +// EventPublisher is a function type for publishing events associated +// with gateway functions. +type EventPublisher func(ctx context.Context, eventType, reason, message string) + +// Factory is the interface for creating new Gateway objects. +type Factory interface { + NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher EventPublisher) (Gateway, error) +} + +// Gateway holds the state information for talking to +// splunk gateway backend. +type Gateway interface { + + // Performs health checks to determine the cluster health and search impact, prior to a rolling upgrade of the indexer cluster. + // Authentication and Authorization: + // Requires the admin role or list_indexer_cluster capability. 
+ // endpoint: https://:/services/cluster/manager/health + GetClusterManagerHealth(ctx context.Context) (*[]managermodel.ClusterManagerHealthContent, error) +} diff --git a/pkg/splunk/services/implementation/cluster_manager_impl.go b/pkg/splunk/services/implementation/cluster_manager_impl.go new file mode 100644 index 000000000..5c0c6fa38 --- /dev/null +++ b/pkg/splunk/services/implementation/cluster_manager_impl.go @@ -0,0 +1,67 @@ +package impl + +import ( + "context" + "net/http" + + "github.com/go-logr/logr" + "github.com/go-resty/resty/v2" + splunkmodel "github.com/splunk/splunk-operator/pkg/splunk/model" + clustermodel "github.com/splunk/splunk-operator/pkg/splunk/model/services/cluster" + managermodel "github.com/splunk/splunk-operator/pkg/splunk/model/services/cluster/manager" + gateway "github.com/splunk/splunk-operator/pkg/splunk/services" +) + +// splunkGateway implements the gateway.Gateway interface +// and uses gateway to manage the host. +type splunkGateway struct { + // a logger configured for this host + log logr.Logger + // a debug logger configured for this host + debugLog logr.Logger + // an event publisher for recording significant events + publisher gateway.EventPublisher + // client for talking to splunk + client *resty.Client + // credentials + credentials *splunkmodel.SplunkCredentials +} + +// Performs health checks to determine the cluster health and search impact, prior to a rolling upgrade of the indexer cluster. +// Authentication and Authorization: +// +// Requires the admin role or list_indexer_cluster capability. +// +// endpoint: https://:/services/cluster/manager/health +func (p *splunkGateway) GetClusterManagerHealth(context context.Context) (*[]managermodel.ClusterManagerHealthContent, error) { + url := clustermodel.GetClusterManagerHealthUrl + + p.log.Info("getting cluster manager health information") + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &managermodel.ClusterManagerHealthHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). 
+ Get(url) + if err != nil { + p.log.Error(err, "get cluster manager health failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []managermodel.ClusterManagerHealthContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, err +} From 0378e943eb726c287b6ccd646c3deab06a054b60 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 10 Jul 2023 12:58:00 -0700 Subject: [PATCH 38/85] Added common APIs, changed upgrade condition --- Makefile | 2 +- pkg/splunk/enterprise/clustermanager.go | 129 ++------- pkg/splunk/enterprise/clustermanager_test.go | 264 ++----------------- pkg/splunk/enterprise/licensemanager.go | 54 ---- pkg/splunk/enterprise/licensemanager_test.go | 92 ------- pkg/splunk/enterprise/monitoringconsole.go | 24 +- pkg/splunk/enterprise/util.go | 47 ++++ pkg/splunk/enterprise/util_test.go | 40 +++ 8 files changed, 140 insertions(+), 512 deletions(-) diff --git a/Makefile b/Makefile index aef47f310..dd59513ae 100644 --- a/Makefile +++ b/Makefile @@ -137,7 +137,7 @@ build: setup/ginkgo manifests generate fmt vet ## Build manager binary. run: manifests generate fmt vet ## Run a controller from your host. go run ./main.go -docker-build: #test ## Build docker image with the manager. +docker-build: test ## Build docker image with the manager. docker build -t ${IMG} . docker-push: ## Push docker image with the manager. diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index eb9534def..3fb52fab3 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -19,7 +19,6 @@ import ( "context" "fmt" "reflect" - "strings" "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -34,9 +33,7 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -182,15 +179,8 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, return result, err } - checkUpgradeReady, err := upgradeScenario(ctx, client, cr) - if err != nil { - return result, err - } - - // TODO: Right now if the CM is not ready for upgrade the reconcile loop goes into - // an infite loop and ives Time Out. 
We still want the other functions to run if - // a proper upgrade does not happen - if !checkUpgradeReady { + continueReconcile, err := isClusterManagerReadyForUpgrade(ctx, client, cr) + if err != nil || !continueReconcile { return result, err } @@ -452,11 +442,11 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, return extraEnv, err } -// upgradeScenario checks if it is suitable to update the clusterManager based on the Status of the licenseManager, returns bool, err accordingly -func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { +// isClusterManagerReadyForUpgrade checks if it is suitable to update the clusterManager based on the Status of the licenseManager, returns bool, err accordingly +func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("upgradeScenario").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + scopedLog := reqLogger.WithName("isClusterManagerReadyForUpgrade").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(c, cr) namespacedName := types.NamespacedName{ @@ -472,6 +462,10 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente } licenseManagerRef := cr.Spec.LicenseManagerRef + if licenseManagerRef.Name == "" { + return true, nil + } + namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: licenseManagerRef.Name} // create new object @@ -480,90 +474,35 @@ func upgradeScenario(ctx context.Context, c splcommon.ControllerClient, cr *ente // get the license manager referred in cluster manager err = c.Get(ctx, namespacedName, licenseManager) if err != nil { - return true, nil + eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not find the License Manager. Reason %v", err)) + scopedLog.Error(err, "Unable to get licenseManager") + return true, err } - lmImage, err := getLicenseManagerCurrentImage(ctx, c, licenseManager) - if err != nil { - eventPublisher.Warning(ctx, "upgradeScenario", fmt.Sprintf("Could not get the License Manager Image. Reason %v", err)) - scopedLog.Error(err, "Unable to licenseManager current image") - return false, err - } - cmImage, err := getClusterManagerCurrentImage(ctx, c, cr) + cmImage, err := getCurrentImage(ctx, c, cr, SplunkClusterManager) if err != nil { - eventPublisher.Warning(ctx, "upgradeScenario", fmt.Sprintf("Could not get the Cluster Manager Image. Reason %v", err)) - scopedLog.Error(err, "Unable to clusterManager current image") + eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not get the Cluster Manager Image. 
Reason %v", err)) + scopedLog.Error(err, "Unable to get clusterManager current image") return false, err } // check conditions for upgrade - if cr.Spec.Image != cmImage && lmImage == cr.Spec.Image && licenseManager.Status.Phase == enterpriseApi.PhaseReady { - return true, nil - } - - // Temporary workaround to keep the clusterManager method working only when the LM is ready - if licenseManager.Status.Phase == enterpriseApi.PhaseReady { - return true, nil - } - - return false, nil -} - -// getClusterManagerCurrentImage gets the image of the pods of the clusterManager before any upgrade takes place, -// returns the image, and error if something goes wring -func getClusterManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (string, error) { - - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("getClusterManagerCurrentImage").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) - eventPublisher, _ := newK8EventPublisher(c, cr) - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), - } - statefulSet := &appsv1.StatefulSet{} - err := c.Get(ctx, namespacedName, statefulSet) - if err != nil { - eventPublisher.Warning(ctx, "getClusterManagerCurrentImage", fmt.Sprintf("Could not get Stateful Set. Reason %v", err)) - scopedLog.Error(err, "StatefulSet types not found in namespace", "namsespace", cr.GetNamespace()) - return "", err - } - labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) - if err != nil { - eventPublisher.Warning(ctx, "getClusterManagerCurrentImage", fmt.Sprintf("Could not get labels. Reason %v", err)) - scopedLog.Error(err, "Unable to get labels") - return "", err - } - - // get a list of all pods in the namespace with matching labels as the statefulset - statefulsetPods := &corev1.PodList{} - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), - rclient.MatchingLabelsSelector{Selector: labelSelector}, - } - - err = c.List(ctx, statefulsetPods, opts...) - if err != nil { - eventPublisher.Warning(ctx, "getClusterManagerCurrentImage", fmt.Sprintf("Could not get Pod list. 
Reason %v", err)) - scopedLog.Error(err, "Pods types not found in namespace", "namsespace", cr.GetNamespace()) - return "", err + annotations := cr.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} } - - // find the container with the phrase 'splunk' in it - for _, v := range statefulsetPods.Items { - for _, container := range v.Status.ContainerStatuses { - if strings.Contains(container.Name, "splunk") { - image := container.Image - return image, nil - } - + if _, ok := annotations["splunk/image-tag"]; ok { + if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || licenseManager.Spec.Image != annotations["splunk/image-tag"]) { + return false, nil } + } else { + return false, nil } - return "", nil + return true, nil } -// changeClusterManagerAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop +// changeClusterManagerAnnotations updates the checkUpdateImage field of the clusterManager annotations to trigger the reconcile loop // on update, and returns error if something is wrong func changeClusterManagerAnnotations(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { @@ -581,24 +520,10 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller return nil } - image, _ := getLicenseManagerCurrentImage(ctx, c, cr) - - // fetch and check the annotation fields of the ClusterManager - annotations := clusterManagerInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == image { - return nil - } - } + image, _ := getCurrentImage(ctx, c, cr, SplunkLicenseManager) - // create/update the checkUpdateImage annotation field - annotations["checkUpdateImage"] = image + err = changeAnnotations(ctx, c, image, clusterManagerInstance) - clusterManagerInstance.SetAnnotations(annotations) - err = c.Update(ctx, clusterManagerInstance) if err != nil { eventPublisher.Warning(ctx, "changeClusterManagerAnnotations", fmt.Sprintf("Could not update annotations. 
Reason %v", err)) scopedLog.Error(err, "ClusterManager types update after changing annotations failed with", "error", err) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 99a524e82..86c1507a2 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -83,7 +83,6 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, @@ -500,7 +499,6 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.Pod-test-splunk-stack1-cluster-manager-0"}, {MetaName: "*v1.StatefulSet-test-splunk-test-monitoring-console"}, @@ -523,7 +521,6 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v4.LicenseManager-test-"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, @@ -1391,7 +1388,7 @@ func TestCheckIfsmartstoreConfigMapUpdatedToPod(t *testing.T) { mockPodExecClient.CheckPodExecCommands(t, "CheckIfsmartstoreConfigMapUpdatedToPod") } -func TestUpgradeScenario(t *testing.T) { +func TestIsClusterManagerReadyForUpgrade(t *testing.T) { ctx := context.TODO() @@ -1412,6 +1409,9 @@ func TestUpgradeScenario(t *testing.T) { Image: "splunk/splunk:latest", }, Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: "test", + }, }, }, } @@ -1428,63 +1428,6 @@ func TestUpgradeScenario(t *testing.T) { debug.PrintStack() } - // get StatefulSet labels - - namespacedName := types.NamespacedName{ - Namespace: lm.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkLicenseManager, lm.GetName()), - } - lmstatefulSet := &appsv1.StatefulSet{} - err = client.Get(ctx, namespacedName, lmstatefulSet) - if err != nil { - t.Errorf("Unexpected get statefulset %v", err) - } - labels := lmstatefulSet.Spec.Template.ObjectMeta.Labels - - // create LM pod - lmstpod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-license-manager-0", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - } - lmstpod.ObjectMeta.Labels = labels - // simulate create pod - err = client.Create(ctx, lmstpod) - if err != nil { - t.Errorf("Unexpected create pod failed %v", err) - debug.PrintStack() - } - - // update pod - lmstpod.Status.Phase = corev1.PodRunning - lmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Image: 
"splunk/splunk:latest", - Name: "splunk", - Ready: true, - }, - } - err = client.Status().Update(ctx, lmstpod) - if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() - } - // Create Cluster Manager cm := enterpriseApi.ClusterManager{ ObjectMeta: metav1.ObjectMeta{ @@ -1505,10 +1448,7 @@ func TestUpgradeScenario(t *testing.T) { }, } replicas := int32(1) - labels = map[string]string{ - "app": "test", - "tier": "splunk", - } + cmstatefulset := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: "splunk-test-cluster-manager", @@ -1535,9 +1475,6 @@ func TestUpgradeScenario(t *testing.T) { Replicas: &replicas, }, } - cmstatefulset.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: labels, - } err = client.Create(ctx, &cm) err = client.Create(ctx, cmstatefulset) @@ -1546,54 +1483,21 @@ func TestUpgradeScenario(t *testing.T) { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } - // Create CM pod - cmstpod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-cluster-manager-0", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - } - cmstpod.ObjectMeta.Labels = labels - // simulate create pod - err = client.Create(ctx, cmstpod) - if err != nil { - t.Errorf("Unexpected create pod failed %v", err) - debug.PrintStack() - } + cm.Spec.Image = "splunk2" + lm.Spec.Image = "splunk2" + _, err = ApplyLicenseManager(ctx, client, &lm) - // update CM pod - cmstpod.Status.Phase = corev1.PodRunning - cmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Image: "splunk/splunk:latest", - Name: "splunk", - Ready: true, - }, + clusterManager := &enterpriseApi.ClusterManager{} + namespacedName := types.NamespacedName{ + Name: cm.Name, + Namespace: cm.Namespace, } - err = client.Status().Update(ctx, cmstpod) + err = client.Get(ctx, namespacedName, clusterManager) if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() + t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) } - cm.Spec.Image = "splunk2" - lmstpod.Status.ContainerStatuses[0].Image = "splunk2" - err = client.Status().Update(ctx, lmstpod) - check, err := upgradeScenario(ctx, client, &cm) + check, err := isClusterManagerReadyForUpgrade(ctx, client, clusterManager) if err != nil { t.Errorf("Unexpected upgradeScenario error %v", err) @@ -1605,98 +1509,6 @@ func TestUpgradeScenario(t *testing.T) { } -func TestGetClusterManagerCurrentImage(t *testing.T) { - - ctx := context.TODO() - current := enterpriseApi.ClusterManager{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: enterpriseApi.ClusterManagerSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", - Image: "splunk/splunk:latest", - }, - Volumes: []corev1.Volume{}, - }, - }, - } - builder := fake.NewClientBuilder() - client := builder.Build() - utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) - - err := client.Create(ctx, ¤t) - _, err = ApplyClusterManager(ctx, client, ¤t) - if err != nil { - t.Errorf("applyClusterManager should not have returned error; err=%v", err) - } - - namespacedName := types.NamespacedName{ - Namespace: current.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkClusterManager, current.GetName()), - } - statefulSet := &appsv1.StatefulSet{} - err = 
client.Get(ctx, namespacedName, statefulSet) - if err != nil { - t.Errorf("Unexpected get statefulset %v", err) - } - labels := statefulSet.Spec.Template.ObjectMeta.Labels - - stpod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-cluster-manager-0", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - } - stpod.ObjectMeta.Labels = labels - // simulate create pod - err = client.Create(ctx, stpod) - if err != nil { - t.Errorf("Unexpected create pod failed %v", err) - debug.PrintStack() - } - - // update statefulset - stpod.Status.Phase = corev1.PodRunning - stpod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Image: "splunk/splunk:latest", - Name: "splunk", - Ready: true, - }, - } - err = client.Status().Update(ctx, stpod) - if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() - } - - image, err := getClusterManagerCurrentImage(ctx, client, ¤t) - - if err != nil { - t.Errorf("Unexpected getClusterManagerCurrentImage error %v", err) - } - if image != stpod.Status.ContainerStatuses[0].Image { - t.Errorf("getClusterManagerCurrentImage does not return the current pod image") - } -} - func TestChangeClusterManagerAnnotations(t *testing.T) { ctx := context.TODO() @@ -1795,50 +1607,6 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } - // create LM pod - lmstpod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-lm-license-manager-0", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - } - lmstpod.ObjectMeta.Labels = labels - // simulate create pod - err = client.Create(ctx, lmstpod) - if err != nil { - t.Errorf("Unexpected create pod failed %v", err) - debug.PrintStack() - } - - // update pod - lmstpod.Status.Phase = corev1.PodRunning - lmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Image: "splunk/splunk:latest", - Name: "splunk", - Ready: true, - }, - } - err = client.Status().Update(ctx, lmstpod) - if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() - } - err = changeClusterManagerAnnotations(ctx, client, lm) if err != nil { t.Errorf("changeClusterManagerAnnotations should not have returned error=%v", err) @@ -1854,7 +1622,7 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { } annotations := clusterManager.GetAnnotations() - if annotations["checkUpdateImage"] != lm.Spec.Image { + if annotations["splunk/image-tag"] != lm.Spec.Image { t.Errorf("changeClusterManagerAnnotations should have set the checkUpdateImage annotation field to the current image") } diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 60d8a95a9..828a169d5 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -19,7 +19,6 @@ import ( "context" "fmt" "reflect" - "strings" "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -27,10 +26,8 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" - rclient 
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -230,54 +227,3 @@ func getLicenseManagerList(ctx context.Context, c splcommon.ControllerClient, cr return objectList, nil } -func getLicenseManagerCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (string, error) { - - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("getLicenseManagerCurrentImage").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) - eventPublisher, _ := newK8EventPublisher(c, cr) - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkLicenseManager, cr.GetName()), - } - statefulSet := &appsv1.StatefulSet{} - err := c.Get(ctx, namespacedName, statefulSet) - if err != nil { - eventPublisher.Warning(ctx, "getLicenseManagerCurrentImage", fmt.Sprintf("Could not get Stateful Set. Reason %v", err)) - scopedLog.Error(err, "StatefulSet types not found in namespace", "namsespace", cr.GetNamespace()) - return "", err - } - labelSelector, err := metav1.LabelSelectorAsSelector(statefulSet.Spec.Selector) - if err != nil { - eventPublisher.Warning(ctx, "getLicenseManagerCurrentImage", fmt.Sprintf("Could not get labels. Reason %v", err)) - scopedLog.Error(err, "Unable to get labels") - return "", err - } - - // get a list of all pods in the namespace with matching labels as the statefulset - statefulsetPods := &corev1.PodList{} - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), - rclient.MatchingLabelsSelector{Selector: labelSelector}, - } - - err = c.List(ctx, statefulsetPods, opts...) - if err != nil { - eventPublisher.Warning(ctx, "getLicenseManagerCurrentImage", fmt.Sprintf("Could not get Pod list. 
Reason %v", err)) - scopedLog.Error(err, "Pods types not found in namespace", "namsespace", cr.GetNamespace()) - return "", err - } - - // find the container with the phrase 'splunk' in it - for _, v := range statefulsetPods.Items { - for _, container := range v.Status.ContainerStatuses { - if strings.Contains(container.Name, "splunk") { - image := container.Image - return image, nil - } - - } - } - - return "", nil -} diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index 25ffd6f0b..8c7d597c9 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -720,98 +720,6 @@ func TestLicenseManagerList(t *testing.T) { } } -func TestGetLicenseManagerCurrentImage(t *testing.T) { - - ctx := context.TODO() - current := enterpriseApi.LicenseManager{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: enterpriseApi.LicenseManagerSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", - Image: "splunk/splunk:latest", - }, - Volumes: []corev1.Volume{}, - }, - }, - } - builder := fake.NewClientBuilder() - client := builder.Build() - utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) - - err := client.Create(ctx, ¤t) - _, err = ApplyLicenseManager(ctx, client, ¤t) - if err != nil { - t.Errorf("applyLicenseManager should not have returned error; err=%v", err) - } - - namespacedName := types.NamespacedName{ - Namespace: current.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkLicenseManager, current.GetName()), - } - statefulSet := &appsv1.StatefulSet{} - err = client.Get(ctx, namespacedName, statefulSet) - if err != nil { - t.Errorf("Unexpected get statefulset %v", err) - } - labels := statefulSet.Spec.Template.ObjectMeta.Labels - - stpod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-license-manager-0", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - } - stpod.ObjectMeta.Labels = labels - // simulate create pod - err = client.Create(ctx, stpod) - if err != nil { - t.Errorf("Unexpected create pod failed %v", err) - debug.PrintStack() - } - - // update statefulset - stpod.Status.Phase = corev1.PodRunning - stpod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Image: "splunk/splunk:latest", - Name: "splunk", - Ready: true, - }, - } - err = client.Status().Update(ctx, stpod) - if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() - } - - image, err := getLicenseManagerCurrentImage(ctx, client, ¤t) - - if err != nil { - t.Errorf("Unexpected getLicenseManagerCurrentImage error %v", err) - } - if image != stpod.Status.ContainerStatuses[0].Image { - t.Errorf("getLicenseManagerCurrentImage does not return the current pod image") - } -} - func TestLicenseManagerWithReadyState(t *testing.T) { mclient := &spltest.MockHTTPClient{} diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index a042a0c6f..06a7c95b9 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -360,32 +360,26 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor // on update, and returns error if something is wrong. 
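// Rough sketch of the intended effect, assuming the shared getCurrentImage and
// changeAnnotations helpers in util.go: the referenced MonitoringConsole is
// annotated with the current ClusterManager image, e.g.
//
//	metadata:
//	  annotations:
//	    splunk/image-tag: <cluster manager image>
//
// which in turn retriggers the MonitoringConsole reconcile loop.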
func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("changeMonitoringConsoleAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(client, cr) + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: cr.Spec.MonitoringConsoleRef.Name, } monitoringConsoleInstance := &enterpriseApi.MonitoringConsole{} - err := client.Get(context.TODO(), namespacedName, monitoringConsoleInstance) + err := client.Get(ctx, namespacedName, monitoringConsoleInstance) if err != nil && k8serrors.IsNotFound(err) { return nil } - image, _ := getClusterManagerCurrentImage(ctx, client, cr) - annotations := monitoringConsoleInstance.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["checkUpdateImage"]; ok { - if annotations["checkUpdateImage"] == image { - return nil - } - } + image, _ := getCurrentImage(ctx, client, cr, SplunkClusterManager) - annotations["checkUpdateImage"] = image + err = changeAnnotations(ctx, client, image, monitoringConsoleInstance) - monitoringConsoleInstance.SetAnnotations(annotations) - err = client.Update(ctx, monitoringConsoleInstance) if err != nil { - fmt.Println("Error in Change Annotation UPDATE", err) + eventPublisher.Warning(ctx, "changeMonitoringConsoleAnnotations", fmt.Sprintf("Could not update annotations. Reason %v", err)) + scopedLog.Error(err, "MonitoringConsole types update after changing annotations failed with", "error", err) return err } diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 35c1f3cbf..b180e3271 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2272,3 +2272,50 @@ func getApplicablePodNameForK8Probes(cr splcommon.MetaObject, ordinalIdx int32) } return fmt.Sprintf("splunk-%s-%s-%d", cr.GetName(), podType, ordinalIdx) } + +// getClusterManagerCurrentImage gets the image of the pods of the clusterManager before any upgrade takes place, +// returns the image, and error if something goes wrong +func getCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, instanceType InstanceType) (string, error) { + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: GetSplunkStatefulsetName(instanceType, cr.GetName()), + } + statefulSet := &appsv1.StatefulSet{} + err := c.Get(ctx, namespacedName, statefulSet) + if err != nil { + return "", err + } + + image := statefulSet.Spec.Template.Spec.Containers[0].Image + + return image, nil + +} + +// changeAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop +// on update, and returns error if something is wrong +func changeAnnotations(ctx context.Context, c splcommon.ControllerClient, image string, cr splcommon.MetaObject) error { + + annotations := cr.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + if _, ok := annotations["splunk/image-tag"]; ok { + if annotations["splunk/image-tag"] == image { + return nil + } + } + + // create/update the checkUpdateImage annotation field + annotations["splunk/image-tag"] = image + + cr.SetAnnotations(annotations) + err := c.Update(ctx, cr) + if err != nil { + return err + } + + return nil + +} diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index 
a15d5913e..64587db8a 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -3149,3 +3149,43 @@ func TestGetLicenseMasterURL(t *testing.T) { t.Errorf("Expected a valid return value") } } +func TestGetCurrentImage(t *testing.T) { + + ctx := context.TODO() + current := enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + err := client.Create(ctx, ¤t) + _, err = ApplyClusterManager(ctx, client, ¤t) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + + instanceType := SplunkClusterManager + + image, err := getCurrentImage(ctx, client, ¤t, instanceType) + + if err != nil { + t.Errorf("Unexpected getCurrentImage error %v", err) + } + if image != current.Spec.Image { + t.Errorf("getCurrentImage does not return the current statefulset image") + } + +} From 6904df6bdfe6b50476a5f6292cc389359e7ecd89 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 10 Jul 2023 14:27:35 -0700 Subject: [PATCH 39/85] Added provisioner --- pkg/{ => gateway}/splunk/model/error_types.go | 0 .../services/cluster/manager/health_types.go | 0 .../model/services/cluster/url_types.go | 0 pkg/{ => gateway}/splunk/model/types.go | 0 .../splunk/services/fixture/fixture.go | 103 ++++++++++ pkg/{ => gateway}/splunk/services/gateway.go | 4 +- .../implementation/cluster_manager_impl.go | 8 +- .../splunk/services/implementation/factory.go | 88 +++++++++ .../splunk/implementation/factory.go | 82 ++++++++ .../splunk/implementation/splunk.go | 179 ++++++++++++++++++ pkg/provisioner/splunk/provisioner.go | 26 +++ pkg/splunk/manager.go | 17 ++ 12 files changed, 501 insertions(+), 6 deletions(-) rename pkg/{ => gateway}/splunk/model/error_types.go (100%) rename pkg/{ => gateway}/splunk/model/services/cluster/manager/health_types.go (100%) rename pkg/{ => gateway}/splunk/model/services/cluster/url_types.go (100%) rename pkg/{ => gateway}/splunk/model/types.go (100%) create mode 100644 pkg/gateway/splunk/services/fixture/fixture.go rename pkg/{ => gateway}/splunk/services/gateway.go (84%) rename pkg/{ => gateway}/splunk/services/implementation/cluster_manager_impl.go (85%) create mode 100644 pkg/gateway/splunk/services/implementation/factory.go create mode 100644 pkg/provisioner/splunk/implementation/factory.go create mode 100644 pkg/provisioner/splunk/implementation/splunk.go create mode 100644 pkg/provisioner/splunk/provisioner.go create mode 100644 pkg/splunk/manager.go diff --git a/pkg/splunk/model/error_types.go b/pkg/gateway/splunk/model/error_types.go similarity index 100% rename from pkg/splunk/model/error_types.go rename to pkg/gateway/splunk/model/error_types.go diff --git a/pkg/splunk/model/services/cluster/manager/health_types.go b/pkg/gateway/splunk/model/services/cluster/manager/health_types.go similarity index 100% rename from pkg/splunk/model/services/cluster/manager/health_types.go rename to pkg/gateway/splunk/model/services/cluster/manager/health_types.go diff --git a/pkg/splunk/model/services/cluster/url_types.go b/pkg/gateway/splunk/model/services/cluster/url_types.go similarity index 100% rename from 
pkg/splunk/model/services/cluster/url_types.go rename to pkg/gateway/splunk/model/services/cluster/url_types.go diff --git a/pkg/splunk/model/types.go b/pkg/gateway/splunk/model/types.go similarity index 100% rename from pkg/splunk/model/types.go rename to pkg/gateway/splunk/model/types.go diff --git a/pkg/gateway/splunk/services/fixture/fixture.go b/pkg/gateway/splunk/services/fixture/fixture.go new file mode 100644 index 000000000..eb84836ee --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/fixture.go @@ -0,0 +1,103 @@ +package fixture + +import ( + "context" + //"encoding/json" + "io/ioutil" + "net/http" + + "github.com/go-logr/logr" + "github.com/go-resty/resty/v2" + "github.com/jarcoal/httpmock" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + clustermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster" + managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" + + // peermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/peer" + // searchheadmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/searchhead" + // commonmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/common" + // lmmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license-manager" + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" + logz "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var log = logz.New().WithName("gateway").WithName("fixture") + +// fixtureGateway implements the gateway.fixtureGateway interface +// and uses splunk to manage the host. +type fixtureGateway struct { + // client for talking to splunk + client *resty.Client + // the splunk credentials + credentials splunkmodel.SplunkCredentials + // a logger configured for this host + log logr.Logger + // an event publisher for recording significant events + publisher gateway.EventPublisher + // state of the splunk + state *Fixture +} + +// Fixture contains persistent state for a particular splunk instance +type Fixture struct { +} + +// NewGateway returns a new Fixture Gateway +func (f *Fixture) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher gateway.EventPublisher) (gateway.Gateway, error) { + p := &fixtureGateway{ + log: log.WithValues("splunk", sad.Address), + publisher: publisher, + state: f, + client: resty.New(), + } + return p, nil +} + +// GetClusterManagerHealth Performs health checks to determine the cluster health and search impact, prior to a rolling upgrade of the indexer cluster. +// Authentication and Authorization: +// +// Requires the admin role or list_indexer_cluster capability. +// +// endpoint: https://:/services/cluster/manager/health +func (p *fixtureGateway) GetClusterManagerHealth(ctx context.Context) (*[]managermodel.ClusterManagerHealthContent, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. 
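	// The fixture file is expected to hold the JSON envelope normally returned by the
	// health endpoint; a minimal, hypothetical example would be:
	//
	//	{"entry": [{"content": {"all_peers_are_up": "1"}}]}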
+ content, err := ioutil.ReadFile("cluster_config.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := clustermodel.GetClusterManagerHealthUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &managermodel.ClusterManagerHealthHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []managermodel.ClusterManagerHealthContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, nil +} diff --git a/pkg/splunk/services/gateway.go b/pkg/gateway/splunk/services/gateway.go similarity index 84% rename from pkg/splunk/services/gateway.go rename to pkg/gateway/splunk/services/gateway.go index ddcba5705..e8c8aa999 100644 --- a/pkg/splunk/services/gateway.go +++ b/pkg/gateway/splunk/services/gateway.go @@ -3,8 +3,8 @@ package indexer import ( "context" - splunkmodel "github.com/splunk/splunk-operator/pkg/splunk/model" - managermodel "github.com/splunk/splunk-operator/pkg/splunk/model/services/cluster/manager" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" ) // EventPublisher is a function type for publishing events associated diff --git a/pkg/splunk/services/implementation/cluster_manager_impl.go b/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go similarity index 85% rename from pkg/splunk/services/implementation/cluster_manager_impl.go rename to pkg/gateway/splunk/services/implementation/cluster_manager_impl.go index 5c0c6fa38..ecb797bef 100644 --- a/pkg/splunk/services/implementation/cluster_manager_impl.go +++ b/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go @@ -6,10 +6,10 @@ import ( "github.com/go-logr/logr" "github.com/go-resty/resty/v2" - splunkmodel "github.com/splunk/splunk-operator/pkg/splunk/model" - clustermodel "github.com/splunk/splunk-operator/pkg/splunk/model/services/cluster" - managermodel "github.com/splunk/splunk-operator/pkg/splunk/model/services/cluster/manager" - gateway "github.com/splunk/splunk-operator/pkg/splunk/services" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + clustermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster" + managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" ) // splunkGateway implements the gateway.Gateway interface diff --git a/pkg/gateway/splunk/services/implementation/factory.go b/pkg/gateway/splunk/services/implementation/factory.go new file mode 100644 index 000000000..3cb48e6f6 --- /dev/null +++ 
b/pkg/gateway/splunk/services/implementation/factory.go @@ -0,0 +1,88 @@ +package impl + +import ( + "context" + "crypto/tls" + "fmt" + + "github.com/go-logr/logr" + "github.com/go-resty/resty/v2" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + + //model "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" + //cmmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/cluster-manager/model" + "time" + + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type splunkGatewayFactory struct { + log logr.Logger + //credentials to log on to splunk + credentials *splunkmodel.SplunkCredentials + // client for talking to splunk + client *resty.Client +} + +// NewGatewayFactory new gateway factory to create gateway interface +func NewGatewayFactory() gateway.Factory { + factory := splunkGatewayFactory{} + err := factory.init() + if err != nil { + return nil // FIXME we have to throw some kind of exception or error here + } + return factory +} + +func (f *splunkGatewayFactory) init() error { + return nil +} + +func (f splunkGatewayFactory) splunkGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher gateway.EventPublisher) (*splunkGateway, error) { + gatewayLogger := log.FromContext(ctx) + reqLogger := log.FromContext(ctx) + f.log = reqLogger.WithName("splunkGateway") + + f.client = resty.New() + // Enable debug mode + f.client.SetDebug(true) + // or One can disable security check (https) + f.client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: sad.DisableCertificateVerification}) + // Set client timeout as per your need + f.client.SetTimeout(1 * time.Minute) + namespace := "default" + if len(sad.Namespace) > 0 { + namespace = sad.Namespace + } + //splunkURL := fmt.Sprintf("https://%s:%d/%s", sad.Address, sad.Port, sad.ServicesNamespace) + splunkURL := fmt.Sprintf("https://%s.%s:%d", sad.Address, namespace, sad.Port) + f.client.SetBaseURL(splunkURL) + f.client.SetBasicAuth("admin", sad.CredentialsName) + f.client.SetHeader("Content-Type", "application/json") + f.client.SetHeader("Accept", "application/json") + f.credentials = sad + + gatewayLogger.Info("new splunk manager created to access rest endpoint") + newGateway := &splunkGateway{ + credentials: f.credentials, + client: f.client, + log: f.log, + debugLog: f.log, + publisher: publisher, + } + f.log.Info("splunk settings", + "endpoint", f.credentials.Address, + "CACertFile", f.credentials.TrustedCAFile, + "ClientCertFile", f.credentials.ClientCertificateFile, + "ClientPrivKeyFile", f.credentials.ClientPrivateKeyFile, + "TLSInsecure", f.credentials.DisableCertificateVerification, + ) + return newGateway, nil +} + +// NewGateway returns a new Splunk Gateway using global +// configuration for finding the Splunk services. 
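// A hypothetical wiring example (sad and publisher are assumed to be supplied by the
// caller; this is only a sketch of how the factory is meant to be used):
//
//	factory := NewGatewayFactory()
//	sad := &splunkmodel.SplunkCredentials{Address: "splunk-cm-service", Port: 8089, CredentialsName: "admin-password"}
//	publisher := func(ctx context.Context, eventType, reason, message string) {}
//	gw, err := factory.NewGateway(ctx, sad, publisher)
//	if err == nil {
//		health, _ := gw.GetClusterManagerHealth(ctx)
//		_ = health
//	}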
+func (f splunkGatewayFactory) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher gateway.EventPublisher) (gateway.Gateway, error) { + return f.splunkGateway(ctx, sad, publisher) +} diff --git a/pkg/provisioner/splunk/implementation/factory.go b/pkg/provisioner/splunk/implementation/factory.go new file mode 100644 index 000000000..21cb14d42 --- /dev/null +++ b/pkg/provisioner/splunk/implementation/factory.go @@ -0,0 +1,82 @@ +package impl + +import ( + "context" + + "github.com/go-logr/logr" + + //model "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" + "github.com/splunk/splunk-operator/pkg/gateway/splunk/services/fixture" + splunkgatewayimpl "github.com/splunk/splunk-operator/pkg/gateway/splunk/services/implementation" + provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" + + //cmmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/cluster-manager/model" + + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type splunkProvisionerFactory struct { + log logr.Logger + //credentials to log on to splunk + credentials *splunkmodel.SplunkCredentials + // Gateway Factory + gatewayFactory gateway.Factory +} + +// NewProvisionerFactory new provisioner factory to create provisioner interface +func NewProvisionerFactory(runInTestMode bool) provisioner.Factory { + factory := splunkProvisionerFactory{} + + err := factory.init(runInTestMode) + if err != nil { + return nil // FIXME we have to throw some kind of exception or error here + } + return factory +} + +func (f *splunkProvisionerFactory) init(runInTestMode bool) error { + if runInTestMode { + f.gatewayFactory = &fixture.Fixture{} + } else { + f.gatewayFactory = splunkgatewayimpl.NewGatewayFactory() + } + return nil +} + +func (f splunkProvisionerFactory) splunkProvisioner(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher gateway.EventPublisher) (*splunkProvisioner, error) { + provisionerLogger := log.FromContext(ctx) + reqLogger := log.FromContext(ctx) + f.log = reqLogger.WithName("splunkProvisioner") + + f.credentials = sad + + provisionerLogger.Info("new splunk manager created to access rest endpoint") + gateway, err := f.gatewayFactory.NewGateway(ctx, sad, publisher) + if err != nil { + return nil, err + } + newProvisioner := &splunkProvisioner{ + credentials: f.credentials, + log: f.log, + debugLog: f.log, + publisher: publisher, + gateway: gateway, + } + + f.log.Info("splunk settings", + "endpoint", f.credentials.Address, + "CACertFile", f.credentials.TrustedCAFile, + "ClientCertFile", f.credentials.ClientCertificateFile, + "ClientPrivKeyFile", f.credentials.ClientPrivateKeyFile, + "TLSInsecure", f.credentials.DisableCertificateVerification, + ) + return newProvisioner, nil +} + +// NewProvisioner returns a new Splunk Provisioner using global +// configuration for finding the Splunk services. 
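// A hypothetical wiring example (sad and publisher are assumed to come from the caller;
// passing true instead of false backs the gateway with the test fixture):
//
//	factory := NewProvisionerFactory(false)
//	prov, err := factory.NewProvisioner(ctx, sad, publisher)
//	if err == nil {
//		conditions := []metav1.Condition{}
//		err = prov.SetClusterManagerStatus(ctx, &conditions)
//	}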
+func (f splunkProvisionerFactory) NewProvisioner(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher gateway.EventPublisher) (provisioner.Provisioner, error) { + return f.splunkProvisioner(ctx, sad, publisher) +} diff --git a/pkg/provisioner/splunk/implementation/splunk.go b/pkg/provisioner/splunk/implementation/splunk.go new file mode 100644 index 000000000..b73fdd521 --- /dev/null +++ b/pkg/provisioner/splunk/implementation/splunk.go @@ -0,0 +1,179 @@ +package impl + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// splunkProvisioner implements the provisioner.Provisioner interface +// and uses provisioner to manage the host. +type splunkProvisioner struct { + // a logger configured for this host + log logr.Logger + // a debug logger configured for this host + debugLog logr.Logger + // an event publisher for recording significant events + publisher gateway.EventPublisher + // credentials + credentials *splunkmodel.SplunkCredentials + // gateway factory + gateway gateway.Gateway +} + +// var callGetClusterManagerInfo = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerInfoContent, error) { +// cminfo, err := p.gateway.GetClusterManagerInfo(ctx) +// if err != nil { +// return nil, err +// } else if cminfo == nil { +// return nil, fmt.Errorf("cluster manager info data is empty") +// } +// return cminfo, err +// } + +var callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerHealthContent, error) { + healthList, err := p.gateway.GetClusterManagerHealth(ctx) + if err != nil { + return nil, err + } else if healthList == nil { + return nil, fmt.Errorf("health data is empty") + } + return healthList, err +} + +// var callGetClusterManagerSearchHeadStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.SearchHeadContent, error) { +// sclist, err := p.gateway.GetClusterManagerSearchHeadStatus(ctx) +// if err != nil { +// return nil, err +// } else if sclist == nil { +// return nil, fmt.Errorf("search head list is empty") +// } +// return sclist, err +// } + +// var callGetClusterManagerPeersStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) { +// peerlist, err := p.gateway.GetClusterManagerPeers(ctx) +// if err != nil { +// return nil, err +// } else if peerlist == nil { +// return nil, fmt.Errorf("peer list is empty") +// } +// return peerlist, err +// } + +// var callGetClusterManagerSitesStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) { +// peerlist, err := p.gateway.GetClusterManagerPeers(ctx) +// if err != nil { +// return nil, err +// } else if peerlist == nil { +// return nil, fmt.Errorf("peer list is empty") +// } +// return peerlist, err +// } + +// SetClusterManagerStatus Access cluster node configuration details. 
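// For every health entry returned by the gateway, this appends (via meta.SetStatusCondition)
// a condition along the lines of
//
//	metav1.Condition{Type: "Health", Reason: "PeersStatus", Status: metav1.ConditionTrue}
//
// with Status set to False when the cluster manager reports that not all peers are up.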
+func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) error { + + // peerlistptr, err := callGetClusterManagerPeersStatus(ctx, p) + // if err != nil { + // return err + // } else { + // peerlist := *peerlistptr + // for _, peer := range peerlist { + // condition := metav1.Condition{ + // Type: "Peers", + // Message: fmt.Sprintf("%s with %s is %s ", peer.Site, peer.Label, peer.Status), + // Reason: peer.Site, + // } + // if peer.Status == "Up" { + // condition.Status = metav1.ConditionTrue + // } else { + // condition.Status = metav1.ConditionFalse + + // } + // // set condition to existing conditions list + // meta.SetStatusCondition(conditions, condition) + // } + // } + + // cminfolistptr, err := callGetClusterManagerInfo(ctx, p) + // if err != nil { + // return err + // } + // cminfolist := *cminfolistptr + // if cminfolist[0].Multisite { + // var site string + // multiSiteStatus := metav1.ConditionTrue + // message := "multisite is up" + // peerlist := *peerlistptr + // for _, peer := range peerlist { + // if !strings.Contains(peer.Status, "Up") { + // site = peer.Site + // multiSiteStatus = metav1.ConditionFalse + // message = fmt.Sprintf("site %s with label %s status is %s", peer.Site, peer.Label, peer.Status) + // break + // } // set condition to existing conditions list + // } + // condition := metav1.Condition{ + // Type: "Multisite", + // Message: message, + // Reason: site, + // Status: multiSiteStatus, + // } + // meta.SetStatusCondition(conditions, condition) + // } + + // business logic starts here + //healthList, err := callGetClusterManagerHealth(ctx, p) + healthList, err := callGetClusterManagerHealth(ctx, p) + if err != nil { + return err + } else { + hllist := *healthList + // prepare fields for conditions + for _, health := range hllist { + condition := metav1.Condition{ + Type: "Health", + Message: "all the peers of indexer cluster status", + Reason: "PeersStatus", + } + if health.AllPeersAreUp == "1" { + condition.Status = metav1.ConditionTrue + } else { + condition.Status = metav1.ConditionFalse + } + // set condition to existing conditions list + meta.SetStatusCondition(conditions, condition) + } + } + + // sclistptr, err := callGetClusterManagerSearchHeadStatus(ctx, p) + // if err != nil { + // return err + // } else { + // sclist := *sclistptr + // for _, sc := range sclist { + // condition := metav1.Condition{ + // Type: "SearchHead", + // Message: sc.Label, + // Reason: sc.Site, + // } + // if sc.Label == "Connected" { + // condition.Status = metav1.ConditionTrue + // } else { + // condition.Status = metav1.ConditionFalse + + // } + // // set condition to existing conditions list + // meta.SetStatusCondition(conditions, condition) + // } + // } + + return nil +} diff --git a/pkg/provisioner/splunk/provisioner.go b/pkg/provisioner/splunk/provisioner.go new file mode 100644 index 000000000..ff7bcbda1 --- /dev/null +++ b/pkg/provisioner/splunk/provisioner.go @@ -0,0 +1,26 @@ +package indexer + +import ( + "context" + + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EventPublisher is a function type for publishing events associated +// with gateway functions. +type EventPublisher func(ctx context.Context, eventType, reason, message string) + +// Factory is the interface for creating new Provisioner objects. 
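+// The concrete implementation lives in the impl package (NewProvisionerFactory),
+// which builds provisioners on top of either the real Splunk gateway or the
+// fixture gateway when created in test mode.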
+type Factory interface { + NewProvisioner(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher gateway.EventPublisher) (Provisioner, error) +} + +// Provisioner holds the state information for talking to +// splunk provisioner backend. +type Provisioner interface { + + // SetClusterManagerStatus set cluster manager status + SetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) error +} diff --git a/pkg/splunk/manager.go b/pkg/splunk/manager.go new file mode 100644 index 000000000..07e7ba442 --- /dev/null +++ b/pkg/splunk/manager.go @@ -0,0 +1,17 @@ +package splunk + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + enterpriseApi "github.com/splunk/splunk-operator/api/v3" + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" +) + +type Factory func(client splcommon.ControllerClient, cr *enterpriseApi.ClusterMaster, gatewayFactory gateway.Factory) (SplunkManager, error) + +type SplunkManager interface { + ApplyClusterManager(ctx context.Context) (reconcile.Result, error) +} From a116e9cf7e9cf1a80b78074573c9885b2753c8df Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 10 Jul 2023 14:56:15 -0700 Subject: [PATCH 40/85] Added only warning if annotation not found --- pkg/splunk/enterprise/clustermanager.go | 3 +- pkg/splunk/enterprise/clustermanager_test.go | 67 -------------------- 2 files changed, 1 insertion(+), 69 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 3fb52fab3..ebb7eee36 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -179,6 +179,7 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, return result, err } + // check if the ClusterManager is ready for version upgrade, if required continueReconcile, err := isClusterManagerReadyForUpgrade(ctx, client, cr) if err != nil || !continueReconcile { return result, err @@ -495,8 +496,6 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || licenseManager.Spec.Image != annotations["splunk/image-tag"]) { return false, nil } - } else { - return false, nil } return true, nil diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 86c1507a2..b7f43356b 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1447,37 +1447,8 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) { }, }, } - replicas := int32(1) - - cmstatefulset := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-cluster-manager", - Namespace: "test", - }, - Spec: appsv1.StatefulSetSpec{ - ServiceName: "splunk-test-cluster-manager-headless", - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - }, - Replicas: &replicas, - }, - } err = client.Create(ctx, &cm) - err = client.Create(ctx, cmstatefulset) _, err = ApplyClusterManager(ctx, client, &cm) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) @@ -1530,43 +1501,6 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { }, }, } - replicas := int32(1) - 
labels := map[string]string{ - "app": "test", - "tier": "splunk", - } - lmstatefulset := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-lm-license-manager", - Namespace: "test", - }, - Spec: appsv1.StatefulSetSpec{ - ServiceName: "splunk-test-lm-license-manager-headless", - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - }, - Replicas: &replicas, - }, - } - lmstatefulset.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: labels, - } cm := &enterpriseApi.ClusterManager{ ObjectMeta: metav1.ObjectMeta{ @@ -1590,7 +1524,6 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { // Create the instances client.Create(ctx, lm) - client.Create(ctx, lmstatefulset) _, err := ApplyLicenseManager(ctx, client, lm) if err != nil { t.Errorf("applyLicenseManager should not have returned error; err=%v", err) From 3695b41f86aa8659c209e2a5d8a0aab6c88e7dbd Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 10 Jul 2023 14:59:37 -0700 Subject: [PATCH 41/85] Add warning --- pkg/splunk/enterprise/clustermanager.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index ebb7eee36..2589caca1 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -496,6 +496,8 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || licenseManager.Spec.Image != annotations["splunk/image-tag"]) { return false, nil } + } else { + eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not find the annotations. 
Reason %v", err)) } return true, nil From 43dfc5309bb56c1dc2901a71eeb29628fc17f32f Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 10 Jul 2023 16:48:56 -0700 Subject: [PATCH 42/85] Added provisioner to controller --- controllers/clustermanager_controller.go | 11 +++++++---- pkg/splunk/enterprise/clustermanager.go | 3 ++- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/controllers/clustermanager_controller.go b/controllers/clustermanager_controller.go index 3f3b11dee..6175ac927 100644 --- a/controllers/clustermanager_controller.go +++ b/controllers/clustermanager_controller.go @@ -18,11 +18,13 @@ package controllers import ( "context" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" "time" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/pkg/errors" common "github.com/splunk/splunk-operator/controllers/common" + provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -41,7 +43,8 @@ import ( // ClusterManagerReconciler reconciles a ClusterManager object type ClusterManagerReconciler struct { client.Client - Scheme *runtime.Scheme + Scheme *runtime.Scheme + ProvisionerFactory provisioner.Factory } //+kubebuilder:rbac:groups=enterprise.splunk.com,resources=clustermanagers,verbs=get;list;watch;create;update;patch;delete @@ -102,7 +105,7 @@ func (r *ClusterManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - result, err := ApplyClusterManager(ctx, r.Client, instance) + result, err := ApplyClusterManager(ctx, r.Client, instance, r.ProvisionerFactory) if result.Requeue && result.RequeueAfter != 0 { reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) } @@ -112,7 +115,7 @@ func (r *ClusterManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque // ApplyClusterManager adding to handle unit test case var ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) { - return enterprise.ApplyClusterManager(ctx, client, instance) + return enterprise.ApplyClusterManager(ctx, client, instance, r.ProvisionerFactory) } // SetupWithManager sets up the controller with the Manager. diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 144e417d7..d2af4fb52 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -26,6 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/go-logr/logr" + provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" @@ -42,7 +43,7 @@ import ( ) // ApplyClusterManager reconciles the state of a Splunk Enterprise cluster manager. 
-func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) { +func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { // unless modified, reconcile for this object will be requeued after 5 seconds result := reconcile.Result{ From 80e6accb84c787df75454cf9c8336ce735da4703 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 10 Jul 2023 17:19:32 -0700 Subject: [PATCH 43/85] Updated upgradeCondition --- pkg/splunk/enterprise/clustermanager.go | 44 +++++++++----------- pkg/splunk/enterprise/clustermanager_test.go | 11 +---- pkg/splunk/enterprise/monitoringconsole.go | 4 +- pkg/splunk/enterprise/util.go | 11 +++-- 4 files changed, 27 insertions(+), 43 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 2589caca1..59f9c4ae1 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -443,13 +443,19 @@ func VerifyCMisMultisite(ctx context.Context, cr *enterpriseApi.ClusterManager, return extraEnv, err } -// isClusterManagerReadyForUpgrade checks if it is suitable to update the clusterManager based on the Status of the licenseManager, returns bool, err accordingly +// isClusterManagerReadyForUpgrade checks if ClusterManager can be upgraded if a version upgrade is in-progress +// No-operation otherwise; returns bool, err accordingly func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (bool, error) { - reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("isClusterManagerReadyForUpgrade").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(c, cr) + // check if a LicenseManager is attached to the instance + licenseManagerRef := cr.Spec.LicenseManagerRef + if licenseManagerRef.Name == "" { + return true, nil + } + namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(SplunkClusterManager, cr.GetName()), @@ -462,14 +468,7 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller return true, nil } - licenseManagerRef := cr.Spec.LicenseManagerRef - if licenseManagerRef.Name == "" { - return true, nil - } - namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: licenseManagerRef.Name} - - // create new object licenseManager := &enterpriseApi.LicenseManager{} // get the license manager referred in cluster manager @@ -480,6 +479,13 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller return true, err } + lmImage, err := getCurrentImage(ctx, c, cr, SplunkLicenseManager) + if err != nil { + eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not get the License Manager Image. Reason %v", err)) + scopedLog.Error(err, "Unable to get licenseManager current image") + return false, err + } + cmImage, err := getCurrentImage(ctx, c, cr, SplunkClusterManager) if err != nil { eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not get the Cluster Manager Image. 
Reason %v", err)) @@ -487,26 +493,17 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller return false, err } - // check conditions for upgrade - annotations := cr.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} - } - if _, ok := annotations["splunk/image-tag"]; ok { - if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || licenseManager.Spec.Image != annotations["splunk/image-tag"]) { - return false, nil - } - } else { - eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not find the annotations. Reason %v", err)) + // check if an image upgrade is happening and whether the ClusterManager is ready for the upgrade + if (cr.Spec.Image != cmImage) && (licenseManager.Status.Phase != enterpriseApi.PhaseReady || lmImage != cr.Spec.Image) { + return false, nil } return true, nil } -// changeClusterManagerAnnotations updates the checkUpdateImage field of the clusterManager annotations to trigger the reconcile loop +// changeClusterManagerAnnotations updates the splunk/image-tag field of the ClusterManager annotations to trigger the reconcile loop // on update, and returns error if something is wrong func changeClusterManagerAnnotations(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) error { - reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(c, cr) @@ -520,9 +517,7 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller if err != nil && k8serrors.IsNotFound(err) { return nil } - image, _ := getCurrentImage(ctx, c, cr, SplunkLicenseManager) - err = changeAnnotations(ctx, c, image, clusterManagerInstance) if err != nil { @@ -532,5 +527,4 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller } return nil - } diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index b7f43356b..fcec6a19a 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -67,7 +67,6 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, {MetaName: "*v4.ClusterManager-test-stack1"}, } @@ -84,7 +83,6 @@ func TestApplyClusterManager(t *testing.T) { {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, {MetaName: "*v4.ClusterManager-test-stack1"}, } @@ -499,7 +497,6 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermanager-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.Pod-test-splunk-stack1-cluster-manager-0"}, {MetaName: 
"*v1.StatefulSet-test-splunk-test-monitoring-console"}, {MetaName: "*v4.ClusterManager-test-stack1"}, @@ -522,7 +519,6 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, - {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v4.ClusterManager-test-stack1"}, {MetaName: "*v4.ClusterManager-test-stack1"}, } @@ -1389,7 +1385,6 @@ func TestCheckIfsmartstoreConfigMapUpdatedToPod(t *testing.T) { } func TestIsClusterManagerReadyForUpgrade(t *testing.T) { - ctx := context.TODO() builder := fake.NewClientBuilder() @@ -1424,7 +1419,7 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) { lm.Status.Phase = enterpriseApi.PhaseReady err = client.Status().Update(ctx, &lm) if err != nil { - t.Errorf("Unexpected update pod %v", err) + t.Errorf("Unexpected status update %v", err) debug.PrintStack() } @@ -1475,9 +1470,8 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) { } if !check { - t.Errorf("upgradeScenario: CM should be ready for upgrade") + t.Errorf("isClusterManagerReadyForUpgrade: CM should be ready for upgrade") } - } func TestChangeClusterManagerAnnotations(t *testing.T) { @@ -1558,7 +1552,6 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { if annotations["splunk/image-tag"] != lm.Spec.Image { t.Errorf("changeClusterManagerAnnotations should have set the checkUpdateImage annotation field to the current image") } - } func TestClusterManagerWitReadyState(t *testing.T) { diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 06a7c95b9..d8197bf04 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -356,10 +356,9 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor } } -// changeMonitoringConsoleAnnotations updates the checkUpdateImage field of the Monitoring Console Annotations to trigger the reconcile loop +// changeMonitoringConsoleAnnotations updates the splunk/image-tag field of the MonitoringConsole annotations to trigger the reconcile loop // on update, and returns error if something is wrong. 
func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error { - reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("changeMonitoringConsoleAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(client, cr) @@ -374,7 +373,6 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co return nil } image, _ := getCurrentImage(ctx, client, cr, SplunkClusterManager) - err = changeAnnotations(ctx, client, image, monitoringConsoleInstance) if err != nil { diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index b180e3271..ff40a7af7 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2273,10 +2273,8 @@ func getApplicablePodNameForK8Probes(cr splcommon.MetaObject, ordinalIdx int32) return fmt.Sprintf("splunk-%s-%s-%d", cr.GetName(), podType, ordinalIdx) } -// getClusterManagerCurrentImage gets the image of the pods of the clusterManager before any upgrade takes place, -// returns the image, and error if something goes wrong +// getCurrentImage gets the image of the statefulset, returns the image, and error if something goes wrong func getCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, instanceType InstanceType) (string, error) { - namespacedName := types.NamespacedName{ Namespace: cr.GetNamespace(), Name: GetSplunkStatefulsetName(instanceType, cr.GetName()), @@ -2287,16 +2285,17 @@ func getCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr splco return "", err } + if statefulSet.Spec.Template.Spec.Containers == nil { + return "", nil + } image := statefulSet.Spec.Template.Spec.Containers[0].Image return image, nil } -// changeAnnotations updates the checkUpdateImage field of the CLuster Manager Annotations to trigger the reconcile loop -// on update, and returns error if something is wrong +// changeAnnotations updates the splunk/image-tag field to trigger the reconcile loop, and returns error if something is wrong func changeAnnotations(ctx context.Context, c splcommon.ControllerClient, image string, cr splcommon.MetaObject) error { - annotations := cr.GetAnnotations() if annotations == nil { annotations = map[string]string{} From d9f912dc53280535223f277ee5e0f085f8165022 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Tue, 11 Jul 2023 13:19:49 -0700 Subject: [PATCH 44/85] Added Warning Phase --- api/v4/common_types.go | 3 + controllers/clustermanager_controller.go | 4 +- .../services/cluster/manager/info_types.go | 106 ++++++++++++++++++ .../splunk/implementation/splunk.go | 40 ++----- pkg/provisioner/splunk/provisioner.go | 4 +- pkg/splunk/enterprise/clustermanager.go | 51 +++++++++ 6 files changed, 171 insertions(+), 37 deletions(-) create mode 100644 pkg/gateway/splunk/model/services/cluster/manager/info_types.go diff --git a/api/v4/common_types.go b/api/v4/common_types.go index 968ecd8ed..0ca140367 100644 --- a/api/v4/common_types.go +++ b/api/v4/common_types.go @@ -139,6 +139,9 @@ const ( // PhaseError means an error occured with custom resource management PhaseError Phase = "Error" + + // PhaseWarning means an error occured with in the process of version upgrade + PhaseWarning Phase = "Warning" ) // Probe defines set of configurable values for Startup, Readiness, and Liveness probes diff --git a/controllers/clustermanager_controller.go b/controllers/clustermanager_controller.go index 6175ac927..b3b18989f 
100644 --- a/controllers/clustermanager_controller.go +++ b/controllers/clustermanager_controller.go @@ -114,8 +114,8 @@ func (r *ClusterManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque } // ApplyClusterManager adding to handle unit test case -var ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) { - return enterprise.ApplyClusterManager(ctx, client, instance, r.ProvisionerFactory) +var ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { + return enterprise.ApplyClusterManager(ctx, client, instance, provisionerFactory) } // SetupWithManager sets up the controller with the Manager. diff --git a/pkg/gateway/splunk/model/services/cluster/manager/info_types.go b/pkg/gateway/splunk/model/services/cluster/manager/info_types.go new file mode 100644 index 000000000..b5ed1c6ae --- /dev/null +++ b/pkg/gateway/splunk/model/services/cluster/manager/info_types.go @@ -0,0 +1,106 @@ +package manager + +import "time" + +// Description: Access information about cluster manager node. +// Rest End Point API: services/cluster/manager/info + +type ClusterManagerInfoContent struct { + ActiveBundle struct { + BundlePath string `json:"bundle_path"` + Checksum string `json:"checksum"` + Timestamp int `json:"timestamp"` + } `json:"active_bundle"` + ApplyBundleStatus struct { + InvalidBundle struct { + BundlePath string `json:"bundle_path"` + BundleValidationErrorsOnMaster []interface{} `json:"bundle_validation_errors_on_master"` + Checksum string `json:"checksum"` + Timestamp int `json:"timestamp"` + } `json:"invalid_bundle"` + ReloadBundleIssued bool `json:"reload_bundle_issued"` + Status string `json:"status"` + } `json:"apply_bundle_status"` + AvailableSites string `json:"available_sites"` + BackupAndRestorePrimaries bool `json:"backup_and_restore_primaries"` + ControlledRollingRestartFlag bool `json:"controlled_rolling_restart_flag"` + EaiAcl interface{} `json:"eai:acl"` + ForwarderSiteFailover string `json:"forwarder_site_failover"` + IndexingReadyFlag bool `json:"indexing_ready_flag"` + InitializedFlag bool `json:"initialized_flag"` + Label string `json:"label"` + LastCheckRestartBundleResult bool `json:"last_check_restart_bundle_result"` + LastDryRunBundle struct { + BundlePath string `json:"bundle_path"` + Checksum string `json:"checksum"` + Timestamp int `json:"timestamp"` + } `json:"last_dry_run_bundle"` + LastValidatedBundle struct { + BundlePath string `json:"bundle_path"` + Checksum string `json:"checksum"` + IsValidBundle bool `json:"is_valid_bundle"` + Timestamp int `json:"timestamp"` + } `json:"last_validated_bundle"` + LatestBundle struct { + BundlePath string `json:"bundle_path"` + Checksum string `json:"checksum"` + Timestamp int `json:"timestamp"` + } `json:"latest_bundle"` + MaintenanceMode bool `json:"maintenance_mode"` + Multisite bool `json:"multisite"` + PreviousActiveBundle struct { + BundlePath string `json:"bundle_path"` + Checksum string `json:"checksum"` + Timestamp int `json:"timestamp"` + } `json:"previous_active_bundle"` + PrimariesBackupStatus string `json:"primaries_backup_status"` + QuietPeriodFlag bool `json:"quiet_period_flag"` + RollingRestartFlag bool `json:"rolling_restart_flag"` + RollingRestartOrUpgrade bool `json:"rolling_restart_or_upgrade"` + ServiceReadyFlag bool `json:"service_ready_flag"` + SiteReplicationFactor string 
`json:"site_replication_factor"` + SiteSearchFactor string `json:"site_search_factor"` + StartTime int `json:"start_time"` + SummaryReplication string `json:"summary_replication"` +} + +type ClusterManagerInfoHeader struct { + Links struct { + } `json:"links"` + Origin string `json:"origin"` + Updated time.Time `json:"updated"` + Generator struct { + Build string `json:"build"` + Version string `json:"version"` + } `json:"generator"` + Entry []struct { + Name string `json:"name"` + ID string `json:"id"` + Updated time.Time `json:"updated"` + Links struct { + Alternate string `json:"alternate"` + List string `json:"list"` + } `json:"links"` + Author string `json:"author"` + Acl struct { + App string `json:"app"` + CanList bool `json:"can_list"` + CanWrite bool `json:"can_write"` + Modifiable bool `json:"modifiable"` + Owner string `json:"owner"` + Perms struct { + Read []string `json:"read"` + Write []string `json:"write"` + } `json:"perms"` + Removable bool `json:"removable"` + Sharing string `json:"sharing"` + } `json:"acl"` + Content ClusterManagerInfoContent `json:"content"` + } `json:"entry"` + Paging struct { + Total int `json:"total"` + PerPage int `json:"perPage"` + Offset int `json:"offset"` + } `json:"paging"` + Messages []interface{} `json:"messages"` +} diff --git a/pkg/provisioner/splunk/implementation/splunk.go b/pkg/provisioner/splunk/implementation/splunk.go index b73fdd521..a960d46a0 100644 --- a/pkg/provisioner/splunk/implementation/splunk.go +++ b/pkg/provisioner/splunk/implementation/splunk.go @@ -8,8 +8,8 @@ import ( splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + "github.com/splunk/splunk-operator/pkg/splunk/enterprise" ) // splunkProvisioner implements the provisioner.Provisioner interface @@ -78,7 +78,7 @@ var callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner // } // SetClusterManagerStatus Access cluster node configuration details. 
-func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) error { +func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, cr splcommon.MetaObject) error { // peerlistptr, err := callGetClusterManagerPeersStatus(ctx, p) // if err != nil { @@ -138,42 +138,16 @@ func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, conditi hllist := *healthList // prepare fields for conditions for _, health := range hllist { - condition := metav1.Condition{ - Type: "Health", - Message: "all the peers of indexer cluster status", - Reason: "PeersStatus", - } if health.AllPeersAreUp == "1" { - condition.Status = metav1.ConditionTrue + continue } else { - condition.Status = metav1.ConditionFalse + cr.Status.Phase = enterprise.PhaseWarning } + // set condition to existing conditions list - meta.SetStatusCondition(conditions, condition) + } } - // sclistptr, err := callGetClusterManagerSearchHeadStatus(ctx, p) - // if err != nil { - // return err - // } else { - // sclist := *sclistptr - // for _, sc := range sclist { - // condition := metav1.Condition{ - // Type: "SearchHead", - // Message: sc.Label, - // Reason: sc.Site, - // } - // if sc.Label == "Connected" { - // condition.Status = metav1.ConditionTrue - // } else { - // condition.Status = metav1.ConditionFalse - - // } - // // set condition to existing conditions list - // meta.SetStatusCondition(conditions, condition) - // } - // } - return nil } diff --git a/pkg/provisioner/splunk/provisioner.go b/pkg/provisioner/splunk/provisioner.go index ff7bcbda1..3835da614 100644 --- a/pkg/provisioner/splunk/provisioner.go +++ b/pkg/provisioner/splunk/provisioner.go @@ -5,7 +5,7 @@ import ( splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" ) // EventPublisher is a function type for publishing events associated @@ -22,5 +22,5 @@ type Factory interface { type Provisioner interface { // SetClusterManagerStatus set cluster manager status - SetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) error + SetClusterManagerStatus(ctx context.Context, cr splcommon.MetaObject) error } diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index d2af4fb52..26dcdece3 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -22,10 +22,12 @@ import ( "strings" "time" + "github.com/pkg/errors" enterpriseApi "github.com/splunk/splunk-operator/api/v4" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/go-logr/logr" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" @@ -241,6 +243,10 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig) result = *finalResult + err = SetClusterManagerStatus(ctx, client, cr, provisionerFactory) + if err != nil { + scopedLog.Error(err, "error while setting cluster health") + } } // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. 
// Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. @@ -251,6 +257,51 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, return result, nil } +// SetClusterManagerStatus +func SetClusterManagerStatus(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) error { + eventPublisher, _ := newK8EventPublisher(client, cr) + + defaultSecretObjName := splcommon.GetNamespaceScopedSecretName(cr.GetNamespace()) + defaultSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), defaultSecretObjName) + if err != nil { + eventPublisher.Warning(ctx, "PushManagerAppsBundle", fmt.Sprintf("Could not access default secret object to fetch admin password. Reason %v", err)) + return fmt.Errorf("Could not access default secret object to fetch admin password. Reason %v", err) + } + + //Get the admin password from the secret object + adminPwd, foundSecret := defaultSecret.Data["password"] + if !foundSecret { + eventPublisher.Warning(ctx, "PushManagerAppsBundle", fmt.Sprintf("Could not find admin password ")) + return fmt.Errorf("Could not find admin password ") + } + + service := getSplunkService(ctx, cr, &cr.Spec.CommonSplunkSpec, SplunkClusterManager, false) + + sad := &splunkmodel.SplunkCredentials{ + Address: service.Name, + Port: 8089, + ServicesNamespace: "-", + User: "admin", + App: "-", + CredentialsName: string(adminPwd[:]), + TrustedCAFile: "", + ClientCertificateFile: "", + ClientPrivateKeyFile: "", + DisableCertificateVerification: true, + Namespace: cr.Namespace, + } + prov, err := provisionerFactory.NewProvisioner(ctx, sad, eventPublisher.publishEvent) + if err != nil { + return errors.Wrap(err, "failed to create gateway") + } + err = prov.SetClusterManagerStatus(ctx, cr) + if err != nil { + return errors.Wrap(err, "failed to update cluster manager health status") + } + + return nil +} + // clusterManagerPodManager is used to manage the cluster manager pod type clusterManagerPodManager struct { log logr.Logger From 3db7d4c6364037ad27d545744ef323fb2e1feb46 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Tue, 11 Jul 2023 14:10:19 -0700 Subject: [PATCH 45/85] updated changeAnnotation to work with no ref --- pkg/splunk/enterprise/clustermanager.go | 43 ++++++++++++++++---- pkg/splunk/enterprise/clustermanager_test.go | 6 +-- pkg/splunk/enterprise/monitoringconsole.go | 43 ++++++++++++++++---- pkg/splunk/enterprise/util.go | 8 ++-- 4 files changed, 77 insertions(+), 23 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 59f9c4ae1..53cb8e83c 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -23,6 +23,7 @@ import ( enterpriseApi "github.com/splunk/splunk-operator/api/v4" "sigs.k8s.io/controller-runtime/pkg/client" + rclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/go-logr/logr" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" @@ -508,17 +509,43 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(c, cr) - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.ClusterManagerRef.Name, - } clusterManagerInstance := &enterpriseApi.ClusterManager{} - 
err := c.Get(ctx, namespacedName, clusterManagerInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil + if len(cr.Spec.ClusterManagerRef.Name) > 0 { + // if the LicenseManager holds the ClusterManagerRef + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + err := c.Get(ctx, namespacedName, clusterManagerInstance) + if err != nil { + return err + } + } else { + // List out all the ClusterManager instances in the namespace + opts := []rclient.ListOption{ + rclient.InNamespace(cr.GetNamespace()), + } + objectList := enterpriseApi.ClusterManagerList{} + err := c.List(ctx, &objectList, opts...) + if err != nil { + return err + } + + // check with instance has the required LicenseManagerRef + for _, cm := range objectList.Items { + if cm.Spec.LicenseManagerRef.Name == cr.GetName() { + clusterManagerInstance = &cm + break + } + } + + if len(clusterManagerInstance.GetName()) == 0 { + return nil + } } + image, _ := getCurrentImage(ctx, c, cr, SplunkLicenseManager) - err = changeAnnotations(ctx, c, image, clusterManagerInstance) + err := changeAnnotations(ctx, c, image, clusterManagerInstance) if err != nil { eventPublisher.Warning(ctx, "changeClusterManagerAnnotations", fmt.Sprintf("Could not update annotations. Reason %v", err)) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index fcec6a19a..63314c870 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -1489,9 +1489,6 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { ImagePullPolicy: "Always", }, Volumes: []corev1.Volume{}, - ClusterManagerRef: corev1.ObjectReference{ - Name: "test-cm", - }, }, }, } @@ -1507,6 +1504,9 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { ImagePullPolicy: "Always", }, Volumes: []corev1.Volume{}, + LicenseManagerRef: corev1.ObjectReference{ + Name: "test-lm", + }, }, }, } diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index d8197bf04..ffd82fc3c 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -34,6 +34,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -363,17 +364,43 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co scopedLog := reqLogger.WithName("changeMonitoringConsoleAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) eventPublisher, _ := newK8EventPublisher(client, cr) - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: cr.Spec.MonitoringConsoleRef.Name, - } monitoringConsoleInstance := &enterpriseApi.MonitoringConsole{} - err := client.Get(ctx, namespacedName, monitoringConsoleInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil + if len(cr.Spec.MonitoringConsoleRef.Name) > 0 { + // if the ClusterManager holds the MonitoringConsoleRef + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.MonitoringConsoleRef.Name, + } + err := client.Get(ctx, namespacedName, monitoringConsoleInstance) + if err != nil { + return err + } + } else { + // List out all the MonitoringConsole instances in the namespace + opts := 
[]rclient.ListOption{ + rclient.InNamespace(cr.GetNamespace()), + } + objectList := enterpriseApi.MonitoringConsoleList{} + err := client.List(ctx, &objectList, opts...) + if err != nil { + return err + } + + // check with instance has the required ClusterManagerRef + for _, mc := range objectList.Items { + if mc.Spec.ClusterManagerRef.Name == cr.GetName() { + monitoringConsoleInstance = &mc + break + } + } + + if len(monitoringConsoleInstance.GetName()) == 0 { + return nil + } } + image, _ := getCurrentImage(ctx, client, cr, SplunkClusterManager) - err = changeAnnotations(ctx, client, image, monitoringConsoleInstance) + err := changeAnnotations(ctx, client, image, monitoringConsoleInstance) if err != nil { eventPublisher.Warning(ctx, "changeMonitoringConsoleAnnotations", fmt.Sprintf("Could not update annotations. Reason %v", err)) diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index ff40a7af7..ceba58ace 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2285,12 +2285,12 @@ func getCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr splco return "", err } - if statefulSet.Spec.Template.Spec.Containers == nil { - return "", nil + if len(statefulSet.Spec.Template.Spec.Containers) > 0 { + return statefulSet.Spec.Template.Spec.Containers[0].Image, nil } - image := statefulSet.Spec.Template.Spec.Containers[0].Image + err = fmt.Errorf("Unable to get image from statefulset of type %s.", instanceType.ToString()) - return image, nil + return "", err } From 1c1531a905592faa53f7902eaebd1bba8d6fd9ee Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Tue, 11 Jul 2023 14:41:47 -0700 Subject: [PATCH 46/85] Fixed unit tests --- pkg/splunk/enterprise/clustermanager.go | 4 ++-- pkg/splunk/enterprise/licensemanager_test.go | 15 +++++++++------ pkg/splunk/enterprise/monitoringconsole.go | 4 ++-- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 53cb8e83c..392c45f05 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -527,8 +527,8 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller } objectList := enterpriseApi.ClusterManagerList{} err := c.List(ctx, &objectList, opts...) 
- if err != nil { - return err + if err != nil && k8serrors.IsNotFound(err) { + return nil } // check with instance has the required LicenseManagerRef diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index 8c7d597c9..4199ae504 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -57,7 +57,6 @@ func TestApplyLicenseManager(t *testing.T) { {MetaName: "*v1.Secret-test-splunk-stack1-license-manager-secret-v1"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-license-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-license-manager"}, - {MetaName: "*v4.ClusterManager-test-"}, {MetaName: "*v4.LicenseManager-test-stack1"}, {MetaName: "*v4.LicenseManager-test-stack1"}, } @@ -70,12 +69,16 @@ func TestApplyLicenseManager(t *testing.T) { client.InNamespace("test"), client.MatchingLabels(labels), } + listOpts1 := []client.ListOption{ + client.InNamespace("test"), + } listmockCall := []spltest.MockFuncCall{ - {ListOpts: listOpts}} - - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[6], funcCalls[8], funcCalls[10]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}} - updateFuncCalls := []spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[9], funcCalls[11], funcCalls[12], funcCalls[13]} - updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[4]}, "List": {listmockCall[0]}} + {ListOpts: listOpts}, + {ListOpts: listOpts1}, + } + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[6], funcCalls[8], funcCalls[10]}, "Update": {funcCalls[0]}, "List": {listmockCall[0], listmockCall[1]}} + updateFuncCalls := []spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[9], funcCalls[11], funcCalls[12]} + updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[4]}, "List": {listmockCall[0], listmockCall[1]}} current := enterpriseApi.LicenseManager{ TypeMeta: metav1.TypeMeta{ Kind: "LicenseManager", diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index ffd82fc3c..7ac5f622e 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -382,8 +382,8 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } objectList := enterpriseApi.MonitoringConsoleList{} err := client.List(ctx, &objectList, opts...) 
- if err != nil { - return err + if err != nil && k8serrors.IsNotFound(err) { + return nil } // check with instance has the required ClusterManagerRef From f9c171f0873b8287c2c77eead77d1c66474c0b0b Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Tue, 11 Jul 2023 14:52:43 -0700 Subject: [PATCH 47/85] Handled not found error --- pkg/splunk/enterprise/clustermanager.go | 4 ++-- pkg/splunk/enterprise/monitoringconsole.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 392c45f05..748d608a4 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -517,8 +517,8 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller Name: cr.Spec.ClusterManagerRef.Name, } err := c.Get(ctx, namespacedName, clusterManagerInstance) - if err != nil { - return err + if err != nil && k8serrors.IsNotFound(err) { + return nil } } else { // List out all the ClusterManager instances in the namespace diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 7ac5f622e..7482afc56 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -372,8 +372,8 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co Name: cr.Spec.MonitoringConsoleRef.Name, } err := client.Get(ctx, namespacedName, monitoringConsoleInstance) - if err != nil { - return err + if err != nil && k8serrors.IsNotFound(err) { + return nil } } else { // List out all the MonitoringConsole instances in the namespace From e464fc48259358352b79918dc4ce33dd172ea32f Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Tue, 11 Jul 2023 16:44:24 -0700 Subject: [PATCH 48/85] Added info and peers endpoint --- .../services/cluster/manager/peers_types.go | 149 +++++++++++++++++ .../splunk/services/fixture/fixture.go | 89 ++++++++++ pkg/gateway/splunk/services/gateway.go | 9 + .../implementation/cluster_manager_impl.go | 68 ++++++++ .../splunk/implementation/splunk.go | 156 +++++++++--------- 5 files changed, 394 insertions(+), 77 deletions(-) create mode 100644 pkg/gateway/splunk/model/services/cluster/manager/peers_types.go diff --git a/pkg/gateway/splunk/model/services/cluster/manager/peers_types.go b/pkg/gateway/splunk/model/services/cluster/manager/peers_types.go new file mode 100644 index 000000000..3d5660805 --- /dev/null +++ b/pkg/gateway/splunk/model/services/cluster/manager/peers_types.go @@ -0,0 +1,149 @@ +package manager + +import "time" + +// Description: Access cluster manager peers. 
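+// ClusterManagerPeerContent below models the per-peer "content" object of the
+// JSON response, while ClusterManagerPeerHeader wraps the full envelope
+// (entry, paging, messages) returned with output_mode=json.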
+// Rest End Point API: services/cluster/manager/peers +type LastDryRunBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` +} + +type LastValidatedBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + IsValidBundle bool `json:"is_valid_bundle,omitempty"` + Timestamp int `json:"timestamp,omitempty"` +} + +type LatestBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` +} + +type PreviousActiveBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` +} + +type ClusterManagerPeerContent struct { + ActiveBundleID string `json:"active_bundle_id"` + ApplyBundleStatus struct { + InvalidBundle struct { + BundleValidationErrors []interface{} `json:"bundle_validation_errors"` + InvalidBundleID string `json:"invalid_bundle_id"` + } `json:"invalid_bundle"` + ReasonsForRestart []interface{} `json:"reasons_for_restart"` + RestartRequiredForApplyBundle bool `json:"restart_required_for_apply_bundle"` + Status string `json:"status"` + } `json:"apply_bundle_status"` + BaseGenerationID int `json:"base_generation_id"` + BatchedReplicationCount int `json:"batched_replication_count"` + BucketCount int `json:"bucket_count"` + BucketCountByIndex struct { + Audit int `json:"_audit"` + Internal int `json:"_internal"` + Telemetry int `json:"_telemetry"` + } `json:"bucket_count_by_index"` + BucketsRfByOriginSite struct { + Default int `json:"default"` + Site1 int `json:"site1"` + Site2 int `json:"site2"` + } `json:"buckets_rf_by_origin_site"` + BucketsSfByOriginSite struct { + Default int `json:"default"` + Site1 int `json:"site1"` + Site2 int `json:"site2"` + } `json:"buckets_sf_by_origin_site"` + EaiAcl interface{} `json:"eai:acl"` + FixupSet []interface{} `json:"fixup_set"` + HeartbeatStarted bool `json:"heartbeat_started"` + HostPortPair string `json:"host_port_pair"` + IndexingDiskSpace int64 `json:"indexing_disk_space"` + IsSearchable bool `json:"is_searchable"` + IsValidBundle bool `json:"is_valid_bundle"` + Label string `json:"label"` + LastDryRunBundle string `json:"last_dry_run_bundle"` + LastHeartbeat int `json:"last_heartbeat"` + LastValidatedBundle string `json:"last_validated_bundle"` + LatestBundleID string `json:"latest_bundle_id"` + MergingMode bool `json:"merging_mode"` + PeerRegisteredSummaries bool `json:"peer_registered_summaries"` + PendingJobCount int `json:"pending_job_count"` + PrimaryCount int `json:"primary_count"` + PrimaryCountRemote int `json:"primary_count_remote"` + RegisterSearchAddress string `json:"register_search_address"` + ReplicationCount int `json:"replication_count"` + ReplicationPort int `json:"replication_port"` + ReplicationUseSsl bool `json:"replication_use_ssl"` + RestartRequiredForApplyingDryRunBundle bool `json:"restart_required_for_applying_dry_run_bundle"` + SearchStateCounter struct { + PendingSearchable int `json:"PendingSearchable"` + PendingUnsearchable int `json:"PendingUnsearchable"` + Searchable int `json:"Searchable"` + SearchablePendingMask int `json:"SearchablePendingMask"` + Unknown int `json:"Unknown"` + Unsearchable int `json:"Unsearchable"` + } `json:"search_state_counter"` + Site string `json:"site"` + SplunkVersion string `json:"splunk_version"` + Status string `json:"status"` + 
StatusCounter struct { + Complete int `json:"Complete"` + NonStreamingTarget int `json:"NonStreamingTarget"` + PendingDiscard int `json:"PendingDiscard"` + PendingTruncate int `json:"PendingTruncate"` + StreamingError int `json:"StreamingError"` + StreamingSource int `json:"StreamingSource"` + StreamingTarget int `json:"StreamingTarget"` + Unset int `json:"Unset"` + } `json:"status_counter"` + SummaryReplicationCount int `json:"summary_replication_count"` + TransientJobCount int `json:"transient_job_count"` +} + +type ClusterManagerPeerHeader struct { + Links struct { + Create string `json:"create"` + } `json:"links"` + Origin string `json:"origin"` + Updated time.Time `json:"updated"` + Generator struct { + Build string `json:"build"` + Version string `json:"version"` + } `json:"generator"` + Entry []struct { + Name string `json:"name"` + ID string `json:"id"` + Updated time.Time `json:"updated"` + Links struct { + Alternate string `json:"alternate"` + List string `json:"list"` + Edit string `json:"edit"` + } `json:"links"` + Author string `json:"author"` + Acl struct { + App string `json:"app"` + CanList bool `json:"can_list"` + CanWrite bool `json:"can_write"` + Modifiable bool `json:"modifiable"` + Owner string `json:"owner"` + Perms struct { + Read []string `json:"read"` + Write []string `json:"write"` + } `json:"perms"` + Removable bool `json:"removable"` + Sharing string `json:"sharing"` + } `json:"acl"` + Content ClusterManagerPeerContent `json:"content"` + } `json:"entry"` + Paging struct { + Total int `json:"total"` + PerPage int `json:"perPage"` + Offset int `json:"offset"` + } `json:"paging"` + Messages []interface{} `json:"messages"` +} diff --git a/pkg/gateway/splunk/services/fixture/fixture.go b/pkg/gateway/splunk/services/fixture/fixture.go index eb84836ee..cbb1987ab 100644 --- a/pkg/gateway/splunk/services/fixture/fixture.go +++ b/pkg/gateway/splunk/services/fixture/fixture.go @@ -53,6 +53,95 @@ func (f *Fixture) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredent return p, nil } +// GetClusterManagerInfo Access information about cluster manager node. +// get List cluster manager node details. +// endpoint: https://:/services/cluster/manager/info +func (p *fixtureGateway) GetClusterManagerInfo(ctx context.Context) (*[]managermodel.ClusterManagerInfoContent, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + content, err := ioutil.ReadFile("cluster_config.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := clustermodel.GetClusterManagerInfoUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &managermodel.ClusterManagerInfoHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). 
+ Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []managermodel.ClusterManagerInfoContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, nil +} + +// GetClusterManagerPeersAccess cluster manager peers. +// endpoint: https://:/services/cluster/manager/peers +func (p *fixtureGateway) GetClusterManagerPeers(ctx context.Context) (*[]managermodel.ClusterManagerPeerContent, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + content, err := ioutil.ReadFile("cluster_config.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := clustermodel.GetClusterManagerPeersUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &managermodel.ClusterManagerPeerHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []managermodel.ClusterManagerPeerContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, nil +} + // GetClusterManagerHealth Performs health checks to determine the cluster health and search impact, prior to a rolling upgrade of the indexer cluster. // Authentication and Authorization: // diff --git a/pkg/gateway/splunk/services/gateway.go b/pkg/gateway/splunk/services/gateway.go index e8c8aa999..6e15a549a 100644 --- a/pkg/gateway/splunk/services/gateway.go +++ b/pkg/gateway/splunk/services/gateway.go @@ -25,4 +25,13 @@ type Gateway interface { // Requires the admin role or list_indexer_cluster capability. // endpoint: https://:/services/cluster/manager/health GetClusterManagerHealth(ctx context.Context) (*[]managermodel.ClusterManagerHealthContent, error) + + // Access information about cluster manager node. + // get List cluster manager node details. + // endpoint: https://:/services/cluster/manager/info + GetClusterManagerInfo(ctx context.Context) (*[]managermodel.ClusterManagerInfoContent, error) + + // Access cluster manager peers. 
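+	// An illustrative call through a Gateway built by the factory (error
+	// handling elided; peers is a *[]managermodel.ClusterManagerPeerContent):
+	//
+	//	peers, _ := gw.GetClusterManagerPeers(ctx)
+	//	for _, peer := range *peers {
+	//		fmt.Println(peer.Label, peer.Status)
+	//	}
+	//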
+    // endpoint: https://:/services/cluster/manager/peers
+    GetClusterManagerPeers(ctx context.Context) (*[]managermodel.ClusterManagerPeerContent, error)
 }
diff --git a/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go b/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go
index ecb797bef..5defd0869 100644
--- a/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go
+++ b/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go
@@ -27,6 +27,74 @@ type splunkGateway struct {
     credentials *splunkmodel.SplunkCredentials
 }
 
+// Access information about cluster manager node.
+// get List cluster manager node details.
+// endpoint: https://:/services/cluster/manager/info
+func (p *splunkGateway) GetClusterManagerInfo(context context.Context) (*[]managermodel.ClusterManagerInfoContent, error) {
+    url := clustermodel.GetClusterManagerInfoUrl
+
+    // fetch the config header into struct
+    splunkError := &splunkmodel.SplunkError{}
+    envelop := &managermodel.ClusterManagerInfoHeader{}
+    resp, err := p.client.R().
+        SetResult(envelop).
+        SetError(&splunkError).
+        ForceContentType("application/json").
+        Get(url)
+    if err != nil {
+        p.log.Error(err, "get cluster manager info failed")
+    }
+    if resp.StatusCode() != http.StatusOK {
+        p.log.Info("response failure set to", "result", err)
+    }
+    if resp.StatusCode() > 400 {
+        if len(splunkError.Messages) > 0 {
+            p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+        }
+        return nil, splunkError
+    }
+
+    contentList := []managermodel.ClusterManagerInfoContent{}
+    for _, entry := range envelop.Entry {
+        contentList = append(contentList, entry.Content)
+    }
+    return &contentList, err
+}
+
+// Access cluster manager peers.
+// endpoint: https://:/services/cluster/manager/peers
+func (p *splunkGateway) GetClusterManagerPeers(context context.Context) (*[]managermodel.ClusterManagerPeerContent, error) {
+    url := clustermodel.GetClusterManagerPeersUrl
+
+    // fetch the config header into struct
+    splunkError := &splunkmodel.SplunkError{}
+    envelop := &managermodel.ClusterManagerPeerHeader{}
+    resp, err := p.client.R().
+        SetResult(envelop).
+        SetError(&splunkError).
+        ForceContentType("application/json").
+        SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+        Get(url)
+    if err != nil {
+        p.log.Error(err, "get cluster manager peers failed")
+    }
+    if resp.StatusCode() != http.StatusOK {
+        p.log.Info("response failure set to", "result", err)
+    }
+    if resp.StatusCode() > 400 {
+        if len(splunkError.Messages) > 0 {
+            p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+        }
+        return nil, splunkError
+    }
+
+    contentList := []managermodel.ClusterManagerPeerContent{}
+    for _, entry := range envelop.Entry {
+        contentList = append(contentList, entry.Content)
+    }
+    return &contentList, err
+}
+
 // Performs health checks to determine the cluster health and search impact, prior to a rolling upgrade of the indexer cluster.
// Authentication and Authorization: // diff --git a/pkg/provisioner/splunk/implementation/splunk.go b/pkg/provisioner/splunk/implementation/splunk.go index a960d46a0..822d06a1b 100644 --- a/pkg/provisioner/splunk/implementation/splunk.go +++ b/pkg/provisioner/splunk/implementation/splunk.go @@ -3,13 +3,15 @@ package impl import ( "context" "fmt" + "strings" "github.com/go-logr/logr" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" - "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // splunkProvisioner implements the provisioner.Provisioner interface @@ -27,15 +29,15 @@ type splunkProvisioner struct { gateway gateway.Gateway } -// var callGetClusterManagerInfo = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerInfoContent, error) { -// cminfo, err := p.gateway.GetClusterManagerInfo(ctx) -// if err != nil { -// return nil, err -// } else if cminfo == nil { -// return nil, fmt.Errorf("cluster manager info data is empty") -// } -// return cminfo, err -// } +var callGetClusterManagerInfo = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerInfoContent, error) { + cminfo, err := p.gateway.GetClusterManagerInfo(ctx) + if err != nil { + return nil, err + } else if cminfo == nil { + return nil, fmt.Errorf("cluster manager info data is empty") + } + return cminfo, err +} var callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerHealthContent, error) { healthList, err := p.gateway.GetClusterManagerHealth(ctx) @@ -57,77 +59,77 @@ var callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner // return sclist, err // } -// var callGetClusterManagerPeersStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) { -// peerlist, err := p.gateway.GetClusterManagerPeers(ctx) -// if err != nil { -// return nil, err -// } else if peerlist == nil { -// return nil, fmt.Errorf("peer list is empty") -// } -// return peerlist, err -// } +var callGetClusterManagerPeersStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) { + peerlist, err := p.gateway.GetClusterManagerPeers(ctx) + if err != nil { + return nil, err + } else if peerlist == nil { + return nil, fmt.Errorf("peer list is empty") + } + return peerlist, err +} -// var callGetClusterManagerSitesStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) { -// peerlist, err := p.gateway.GetClusterManagerPeers(ctx) -// if err != nil { -// return nil, err -// } else if peerlist == nil { -// return nil, fmt.Errorf("peer list is empty") -// } -// return peerlist, err -// } +var callGetClusterManagerSitesStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) { + peerlist, err := p.gateway.GetClusterManagerPeers(ctx) + if err != nil { + return nil, err + } else if peerlist == nil { + return nil, fmt.Errorf("peer list is empty") + } + return peerlist, err +} // SetClusterManagerStatus Access cluster node configuration details. 
func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, cr splcommon.MetaObject) error { - // peerlistptr, err := callGetClusterManagerPeersStatus(ctx, p) - // if err != nil { - // return err - // } else { - // peerlist := *peerlistptr - // for _, peer := range peerlist { - // condition := metav1.Condition{ - // Type: "Peers", - // Message: fmt.Sprintf("%s with %s is %s ", peer.Site, peer.Label, peer.Status), - // Reason: peer.Site, - // } - // if peer.Status == "Up" { - // condition.Status = metav1.ConditionTrue - // } else { - // condition.Status = metav1.ConditionFalse - - // } - // // set condition to existing conditions list - // meta.SetStatusCondition(conditions, condition) - // } - // } - - // cminfolistptr, err := callGetClusterManagerInfo(ctx, p) - // if err != nil { - // return err - // } - // cminfolist := *cminfolistptr - // if cminfolist[0].Multisite { - // var site string - // multiSiteStatus := metav1.ConditionTrue - // message := "multisite is up" - // peerlist := *peerlistptr - // for _, peer := range peerlist { - // if !strings.Contains(peer.Status, "Up") { - // site = peer.Site - // multiSiteStatus = metav1.ConditionFalse - // message = fmt.Sprintf("site %s with label %s status is %s", peer.Site, peer.Label, peer.Status) - // break - // } // set condition to existing conditions list - // } - // condition := metav1.Condition{ - // Type: "Multisite", - // Message: message, - // Reason: site, - // Status: multiSiteStatus, - // } - // meta.SetStatusCondition(conditions, condition) - // } + peerlistptr, err := callGetClusterManagerPeersStatus(ctx, p) + if err != nil { + return err + } else { + peerlist := *peerlistptr + for _, peer := range peerlist { + condition := metav1.Condition{ + Type: "Peers", + Message: fmt.Sprintf("%s with %s is %s ", peer.Site, peer.Label, peer.Status), + Reason: peer.Site, + } + if peer.Status == "Up" { + condition.Status = metav1.ConditionTrue + } else { + condition.Status = metav1.ConditionFalse + + } + // set condition to existing conditions list + // meta.SetStatusCondition(conditions, condition) + } + } + + cminfolistptr, err := callGetClusterManagerInfo(ctx, p) + if err != nil { + return err + } + cminfolist := *cminfolistptr + if cminfolist[0].Multisite { + var site string + multiSiteStatus := metav1.ConditionTrue + message := "multisite is up" + peerlist := *peerlistptr + for _, peer := range peerlist { + if !strings.Contains(peer.Status, "Up") { + site = peer.Site + multiSiteStatus = metav1.ConditionFalse + message = fmt.Sprintf("site %s with label %s status is %s", peer.Site, peer.Label, peer.Status) + break + } // set condition to existing conditions list + } + condition := metav1.Condition{ + Type: "Multisite", + Message: message, + Reason: site, + Status: multiSiteStatus, + } + // meta.SetStatusCondition(conditions, condition) + } // business logic starts here //healthList, err := callGetClusterManagerHealth(ctx, p) @@ -141,7 +143,7 @@ func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, cr splc if health.AllPeersAreUp == "1" { continue } else { - cr.Status.Phase = enterprise.PhaseWarning + cr.Status.Phase = enterpriseApi.PhaseWarning } // set condition to existing conditions list From 6c6b99527b0a1b8af8b535d01362d507d2c630cd Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 12 Jul 2023 10:48:48 -0700 Subject: [PATCH 49/85] Added MC functions --- pkg/splunk/enterprise/clustermanager.go | 5 + pkg/splunk/enterprise/monitoringconsole.go | 64 +++++++ .../enterprise/monitoringconsole_test.go | 170 
++++++++++
 3 files changed, 239 insertions(+)

diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go
index 748d608a4..962ba8d7b 100644
--- a/pkg/splunk/enterprise/clustermanager.go
+++ b/pkg/splunk/enterprise/clustermanager.go
@@ -232,6 +232,11 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient,
         finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig)
         result = *finalResult
+
+        err = changeMonitoringConsoleAnnotations(ctx, client, cr)
+        if err != nil {
+            return result, err
+        }
     }
     // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration.
     // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter.
diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go
index 7482afc56..344864b22 100644
--- a/pkg/splunk/enterprise/monitoringconsole.go
+++ b/pkg/splunk/enterprise/monitoringconsole.go
@@ -137,6 +137,12 @@ func ApplyMonitoringConsole(ctx context.Context, client splcommon.ControllerClie
         return result, err
     }
 
+    // check if the Monitoring Console is ready for version upgrade, if required
+    continueReconcile, err := isMonitoringConsoleReadyForUpgrade(ctx, client, cr)
+    if err != nil || !continueReconcile {
+        return result, err
+    }
+
     mgr := splctrl.DefaultStatefulSetPodManager{}
     phase, err := mgr.Update(ctx, client, statefulSet, 1)
     if err != nil {
@@ -357,6 +363,64 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor
     }
 }
 
+// isMonitoringConsoleReadyForUpgrade checks if the MonitoringConsole can be upgraded when a version upgrade is in-progress
+// No-operation otherwise; returns bool, err accordingly
+func isMonitoringConsoleReadyForUpgrade(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.MonitoringConsole) (bool, error) {
+    reqLogger := log.FromContext(ctx)
+    scopedLog := reqLogger.WithName("isMonitoringConsoleReadyForUpgrade").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
+    eventPublisher, _ := newK8EventPublisher(c, cr)
+
+    // check if a ClusterManager is attached to the instance
+    clusterManagerRef := cr.Spec.ClusterManagerRef
+    if clusterManagerRef.Name == "" {
+        return true, nil
+    }
+
+    namespacedName := types.NamespacedName{
+        Namespace: cr.GetNamespace(),
+        Name:      GetSplunkStatefulsetName(SplunkMonitoringConsole, cr.GetName()),
+    }
+
+    // check if the stateful set is created for this instance
+    statefulSet := &appsv1.StatefulSet{}
+    err := c.Get(ctx, namespacedName, statefulSet)
+    if err != nil && k8serrors.IsNotFound(err) {
+        return true, nil
+    }
+
+    namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: clusterManagerRef.Name}
+    clusterManager := &enterpriseApi.ClusterManager{}
+
+    // get the cluster manager referred to by the monitoring console
+    err = c.Get(ctx, namespacedName, clusterManager)
+    if err != nil {
+        eventPublisher.Warning(ctx, "isMonitoringConsoleReadyForUpgrade", fmt.Sprintf("Could not find the Cluster Manager. Reason %v", err))
+        scopedLog.Error(err, "Unable to get clusterManager")
+        return true, err
+    }
+
+    cmImage, err := getCurrentImage(ctx, c, cr, SplunkClusterManager)
+    if err != nil {
+        eventPublisher.Warning(ctx, "isMonitoringConsoleReadyForUpgrade", fmt.Sprintf("Could not get the Cluster Manager Image.
Reason %v", err)) + scopedLog.Error(err, "Unable to get clusterManager current image") + return false, err + } + + mcImage, err := getCurrentImage(ctx, c, cr, SplunkMonitoringConsole) + if err != nil { + eventPublisher.Warning(ctx, "isMonitoringConsolerReadyForUpgrade", fmt.Sprintf("Could not get the Monitoring Console Image. Reason %v", err)) + scopedLog.Error(err, "Unable to get monitoring console current image") + return false, err + } + + // check if an image upgrade is happening and whether the ClusterManager is ready for the upgrade + if (cr.Spec.Image != mcImage) && (clusterManager.Status.Phase != enterpriseApi.PhaseReady || cmImage != cr.Spec.Image) { + return false, nil + } + + return true, nil +} + // changeMonitoringConsoleAnnotations updates the splunk/image-tag field of the MonitoringConsole annotations to trigger the reconcile loop // on update, and returns error if something is wrong. func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error { diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go index 72efd15a7..e72750ec1 100644 --- a/pkg/splunk/enterprise/monitoringconsole_test.go +++ b/pkg/splunk/enterprise/monitoringconsole_test.go @@ -1100,3 +1100,173 @@ func TestGetMonitoringConsoleList(t *testing.T) { t.Errorf("Got wrong number of IndexerCluster objects. Expected=%d, Got=%d", 1, numOfObjects) } } + +func TestIsMonitoringConsoleReadyForUpgrade(t *testing.T) { + ctx := context.TODO() + + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + // Create Cluster Manager + cm := enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + MonitoringConsoleRef: corev1.ObjectReference{ + Name: "test", + }, + }, + }, + } + + err := client.Create(ctx, &cm) + _, err = ApplyClusterManager(ctx, client, &cm) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + cm.Status.Phase = enterpriseApi.PhaseReady + err = client.Status().Update(ctx, &cm) + if err != nil { + t.Errorf("Unexpected status update %v", err) + debug.PrintStack() + } + + // Create Monitoring Console + mc := enterpriseApi.MonitoringConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + Image: "splunk/splunk:latest", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: "test", + }, + }, + }, + } + + err = client.Create(ctx, &mc) + _, err = ApplyMonitoringConsole(ctx, client, &mc) + if err != nil { + t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err) + } + + mc.Spec.Image = "splunk2" + cm.Spec.Image = "splunk2" + _, err = ApplyClusterManager(ctx, client, &cm) + + monitoringConsole := &enterpriseApi.MonitoringConsole{} + namespacedName := types.NamespacedName{ + Name: cm.Name, + Namespace: cm.Namespace, + } + err = client.Get(ctx, namespacedName, monitoringConsole) + if err != nil { + t.Errorf("isMonitoringConsoleReadyForUpgrade should not have returned 
error=%v", err) + } + + check, err := isMonitoringConsoleReadyForUpgrade(ctx, client, monitoringConsole) + + if err != nil { + t.Errorf("Unexpected upgradeScenario error %v", err) + } + + if !check { + t.Errorf("isMonitoringConsoleReadyForUpgrade: MC should be ready for upgrade") + } +} + +func TestChangeMonitoringConsoleAnnotations(t *testing.T) { + ctx := context.TODO() + + builder := fake.NewClientBuilder() + client := builder.Build() + utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) + + // define CM and MC + cm := &enterpriseApi.ClusterManager{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterManagerSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + }, + }, + } + + mc := &enterpriseApi.MonitoringConsole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test", + }, + Spec: enterpriseApi.MonitoringConsoleSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: enterpriseApi.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + ClusterManagerRef: corev1.ObjectReference{ + Name: "test", + }, + }, + }, + } + cm.Spec.Image = "splunk/splunk:latest" + + // Create the instances + client.Create(ctx, cm) + _, err := ApplyClusterManager(ctx, client, cm) + if err != nil { + t.Errorf("applyClusterManager should not have returned error; err=%v", err) + } + cm.Status.Phase = enterpriseApi.PhaseReady + err = client.Status().Update(ctx, cm) + if err != nil { + t.Errorf("Unexpected update pod %v", err) + debug.PrintStack() + } + client.Create(ctx, mc) + _, err = ApplyMonitoringConsole(ctx, client, mc) + if err != nil { + t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err) + } + + err = changeMonitoringConsoleAnnotations(ctx, client, cm) + if err != nil { + t.Errorf("changeMonitoringConsoleAnnotations should not have returned error=%v", err) + } + monitoringConsole := &enterpriseApi.MonitoringConsole{} + namespacedName := types.NamespacedName{ + Name: cm.Name, + Namespace: cm.Namespace, + } + err = client.Get(ctx, namespacedName, monitoringConsole) + if err != nil { + t.Errorf("changeMonitoringConsoleAnnotations should not have returned error=%v", err) + } + + annotations := monitoringConsole.GetAnnotations() + if annotations["splunk/image-tag"] != cm.Spec.Image { + t.Errorf("changeMonitoringConsoleAnnotations should have set the checkUpdateImage annotation field to the current image") + } +} From e2e443383c03f4f300a7727d6b72532c37ca771e Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 12 Jul 2023 11:10:41 -0700 Subject: [PATCH 50/85] Removed blank lines; handled errors in changeAnnotation --- pkg/splunk/enterprise/clustermanager.go | 26 ++++++++++++++------- pkg/splunk/enterprise/monitoringconsole.go | 27 ++++++++++++++-------- pkg/splunk/enterprise/util.go | 6 +---- 3 files changed, 37 insertions(+), 22 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 748d608a4..5620ddc2e 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -517,8 +517,11 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller Name: cr.Spec.ClusterManagerRef.Name, } err := c.Get(ctx, namespacedName, clusterManagerInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil + if err != nil { + if err.Error() == "NotFound" || 
k8serrors.IsNotFound(err) { + return nil + } + return err } } else { // List out all the ClusterManager instances in the namespace @@ -527,11 +530,14 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller } objectList := enterpriseApi.ClusterManagerList{} err := c.List(ctx, &objectList, opts...) - if err != nil && k8serrors.IsNotFound(err) { - return nil + if err != nil { + if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + return nil + } + return err } - // check with instance has the required LicenseManagerRef + // check if instance has the required LicenseManagerRef for _, cm := range objectList.Items { if cm.Spec.LicenseManagerRef.Name == cr.GetName() { clusterManagerInstance = &cm @@ -544,9 +550,13 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller } } - image, _ := getCurrentImage(ctx, c, cr, SplunkLicenseManager) - err := changeAnnotations(ctx, c, image, clusterManagerInstance) - + image, err := getCurrentImage(ctx, c, cr, SplunkLicenseManager) + if err != nil { + eventPublisher.Warning(ctx, "changeClusterManagerAnnotations", fmt.Sprintf("Could not get the LicenseManager Image. Reason %v", err)) + scopedLog.Error(err, "Get LicenseManager Image failed with", "error", err) + return err + } + err = changeAnnotations(ctx, c, image, clusterManagerInstance) if err != nil { eventPublisher.Warning(ctx, "changeClusterManagerAnnotations", fmt.Sprintf("Could not update annotations. Reason %v", err)) scopedLog.Error(err, "ClusterManager types update after changing annotations failed with", "error", err) diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 7482afc56..375832a4e 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -372,8 +372,11 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co Name: cr.Spec.MonitoringConsoleRef.Name, } err := client.Get(ctx, namespacedName, monitoringConsoleInstance) - if err != nil && k8serrors.IsNotFound(err) { - return nil + if err != nil { + if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + return nil + } + return err } } else { // List out all the MonitoringConsole instances in the namespace @@ -382,11 +385,14 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } objectList := enterpriseApi.MonitoringConsoleList{} err := client.List(ctx, &objectList, opts...) - if err != nil && k8serrors.IsNotFound(err) { - return nil + if err != nil { + if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + return nil + } + return err } - // check with instance has the required ClusterManagerRef + // check if instance has the required ClusterManagerRef for _, mc := range objectList.Items { if mc.Spec.ClusterManagerRef.Name == cr.GetName() { monitoringConsoleInstance = &mc @@ -399,9 +405,13 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } } - image, _ := getCurrentImage(ctx, client, cr, SplunkClusterManager) - err := changeAnnotations(ctx, client, image, monitoringConsoleInstance) - + image, err := getCurrentImage(ctx, client, cr, SplunkClusterManager) + if err != nil { + eventPublisher.Warning(ctx, "changeMonitoringConsoleAnnotations", fmt.Sprintf("Could not get the ClusterManager Image. 
Reason %v", err)) + scopedLog.Error(err, "Get ClusterManager Image failed with", "error", err) + return err + } + err = changeAnnotations(ctx, client, image, monitoringConsoleInstance) if err != nil { eventPublisher.Warning(ctx, "changeMonitoringConsoleAnnotations", fmt.Sprintf("Could not update annotations. Reason %v", err)) scopedLog.Error(err, "MonitoringConsole types update after changing annotations failed with", "error", err) @@ -409,5 +419,4 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } return nil - } diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index ceba58ace..2876565a5 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2288,10 +2288,7 @@ func getCurrentImage(ctx context.Context, c splcommon.ControllerClient, cr splco if len(statefulSet.Spec.Template.Spec.Containers) > 0 { return statefulSet.Spec.Template.Spec.Containers[0].Image, nil } - err = fmt.Errorf("Unable to get image from statefulset of type %s.", instanceType.ToString()) - - return "", err - + return "", fmt.Errorf("Unable to get image from statefulset of type %s.", instanceType.ToString()) } // changeAnnotations updates the splunk/image-tag field to trigger the reconcile loop, and returns error if something is wrong @@ -2316,5 +2313,4 @@ func changeAnnotations(ctx context.Context, c splcommon.ControllerClient, image } return nil - } From fe1d66f0a451c61120925d4398d4b349c72089d6 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 12 Jul 2023 11:20:31 -0700 Subject: [PATCH 51/85] Only call changeAnnotation if LM is ready --- pkg/splunk/enterprise/licensemanager.go | 10 ++++++---- pkg/splunk/enterprise/licensemanager_test.go | 8 ++------ 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 828a169d5..ad572de10 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -172,6 +172,12 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig) result = *finalResult + + // trigger ClusterManager reconcile by changing the splunk/image-tag annotation + err = changeClusterManagerAnnotations(ctx, client, cr) + if err != nil { + return result, err + } } // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. 
@@ -179,10 +185,6 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, result.RequeueAfter = 0 } - err = changeClusterManagerAnnotations(ctx, client, cr) - if err != nil { - return result, err - } return result, nil } diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index 4199ae504..2979fcd1b 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -69,16 +69,12 @@ func TestApplyLicenseManager(t *testing.T) { client.InNamespace("test"), client.MatchingLabels(labels), } - listOpts1 := []client.ListOption{ - client.InNamespace("test"), - } listmockCall := []spltest.MockFuncCall{ {ListOpts: listOpts}, - {ListOpts: listOpts1}, } - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[6], funcCalls[8], funcCalls[10]}, "Update": {funcCalls[0]}, "List": {listmockCall[0], listmockCall[1]}} + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[6], funcCalls[8], funcCalls[10]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}} updateFuncCalls := []spltest.MockFuncCall{funcCalls[0], funcCalls[1], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[9], funcCalls[11], funcCalls[12]} - updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[4]}, "List": {listmockCall[0], listmockCall[1]}} + updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[4]}, "List": {listmockCall[0]}} current := enterpriseApi.LicenseManager{ TypeMeta: metav1.TypeMeta{ Kind: "LicenseManager", From 451588009e432909a2792d928e9edd1d054e5a91 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Wed, 12 Jul 2023 14:35:00 -0700 Subject: [PATCH 52/85] Removed redundant checks --- pkg/splunk/enterprise/clustermanager.go | 9 ++++++--- pkg/splunk/enterprise/monitoringconsole.go | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 5620ddc2e..167ef3f52 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -475,9 +475,12 @@ func isClusterManagerReadyForUpgrade(ctx context.Context, c splcommon.Controller // get the license manager referred in cluster manager err = c.Get(ctx, namespacedName, licenseManager) if err != nil { + if k8serrors.IsNotFound(err) { + return true, nil + } eventPublisher.Warning(ctx, "isClusterManagerReadyForUpgrade", fmt.Sprintf("Could not find the License Manager. Reason %v", err)) scopedLog.Error(err, "Unable to get licenseManager") - return true, err + return false, err } lmImage, err := getCurrentImage(ctx, c, cr, SplunkLicenseManager) @@ -518,7 +521,7 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller } err := c.Get(ctx, namespacedName, clusterManagerInstance) if err != nil { - if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + if k8serrors.IsNotFound(err) { return nil } return err @@ -531,7 +534,7 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller objectList := enterpriseApi.ClusterManagerList{} err := c.List(ctx, &objectList, opts...) 
if err != nil { - if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + if err.Error() == "NotFound" { return nil } return err diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 375832a4e..9b9b1f534 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -373,7 +373,7 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } err := client.Get(ctx, namespacedName, monitoringConsoleInstance) if err != nil { - if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + if k8serrors.IsNotFound(err) { return nil } return err @@ -386,7 +386,7 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co objectList := enterpriseApi.MonitoringConsoleList{} err := client.List(ctx, &objectList, opts...) if err != nil { - if err.Error() == "NotFound" || k8serrors.IsNotFound(err) { + if err.Error() == "NotFound" { return nil } return err From 8423f9a71aaa298a0785e99aa5e7c8901962478c Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 13 Jul 2023 13:57:37 -0700 Subject: [PATCH 53/85] Added tests, conditions --- api/v4/clustermanager_types.go | 3 + api/v4/common_types.go | 3 - go.mod | 2 + go.sum | 4 + .../services/implementation/splunk_test.go | 124 ++++++++++++++++++ .../splunk/implementation/splunk.go | 31 ++--- .../splunk/implementation/splunk_test.go | 58 ++++++++ pkg/provisioner/splunk/provisioner.go | 4 +- pkg/splunk/enterprise/clustermanager.go | 2 +- 9 files changed, 206 insertions(+), 25 deletions(-) create mode 100644 pkg/gateway/splunk/services/implementation/splunk_test.go create mode 100644 pkg/provisioner/splunk/implementation/splunk_test.go diff --git a/api/v4/clustermanager_types.go b/api/v4/clustermanager_types.go index b9a8ceaca..dd9b5a53d 100644 --- a/api/v4/clustermanager_types.go +++ b/api/v4/clustermanager_types.go @@ -67,6 +67,9 @@ type ClusterManagerStatus struct { // Telemetry App installation flag TelAppInstalled bool `json:"telAppInstalled"` + + // Conditions represent the latest available observations of an object's state + Conditions []metav1.Condition `json:"conditions"` } // BundlePushInfo Indicates if bundle push required diff --git a/api/v4/common_types.go b/api/v4/common_types.go index 0ca140367..968ecd8ed 100644 --- a/api/v4/common_types.go +++ b/api/v4/common_types.go @@ -139,9 +139,6 @@ const ( // PhaseError means an error occured with custom resource management PhaseError Phase = "Error" - - // PhaseWarning means an error occured with in the process of version upgrade - PhaseWarning Phase = "Warning" ) // Probe defines set of configurable values for Startup, Readiness, and Liveness probes diff --git a/go.mod b/go.mod index fb6be702c..0c13904e7 100644 --- a/go.mod +++ b/go.mod @@ -33,6 +33,7 @@ require ( github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-resty/resty/v2 v2.7.0 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -42,6 +43,7 @@ require ( github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/uuid v1.3.0 // indirect github.com/imdario/mergo v0.3.12 // indirect + github.com/jarcoal/httpmock v1.3.0 github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // 
indirect github.com/json-iterator/go v1.1.12 // indirect diff --git a/go.sum b/go.sum index fc44186f5..87ac9c3e5 100644 --- a/go.sum +++ b/go.sum @@ -104,6 +104,8 @@ github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXym github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= +github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= @@ -184,6 +186,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= +github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= diff --git a/pkg/gateway/splunk/services/implementation/splunk_test.go b/pkg/gateway/splunk/services/implementation/splunk_test.go new file mode 100644 index 000000000..008be0729 --- /dev/null +++ b/pkg/gateway/splunk/services/implementation/splunk_test.go @@ -0,0 +1,124 @@ +package impl + +import ( + "context" + "fmt" + "time" + + "github.com/go-resty/resty/v2" + "github.com/jarcoal/httpmock" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + clustermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster" + + //managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" + //peermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/peer" + "io/ioutil" + "testing" + + logz "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var slog = logz.New().WithName("gateway").WithName("fixture") + +func setCreds(t *testing.T) *splunkGateway { + //ctx := context.TODO() + sad := &splunkmodel.SplunkCredentials{ + Address: "splunk-cm-cluster-master-service", + Port: 8089, + ServicesNamespace: "", + User: "admin", + App: "", + CredentialsName: "admin: abcdefghijklmnopqrstuvwxyz", + TrustedCAFile: "", + ClientCertificateFile: "", + ClientPrivateKeyFile: "", + DisableCertificateVerification: true, + } + publisher := func(ctx context.Context, eventType, reason, message string) {} + // TODO fixme how to test the gateway call directly + //sm := NewGatewayFactory(ctx, &sad, publisher) + sm := &splunkGateway{ + credentials: sad, + client: resty.New(), + publisher: publisher, + log: slog, + debugLog: slog, + } + //splunkURL := fmt.Sprintf("https://%s:%d/%s", sad.Address, sad.Port, 
sad.ServicesNamespace) + splunkURL := fmt.Sprintf("https://%s:%d", sad.Address, sad.Port) + sm.client.SetBaseURL(splunkURL) + sm.client.SetHeader("Content-Type", "application/json") + sm.client.SetHeader("Accept", "application/json") + sm.client.SetTimeout(time.Duration(60 * time.Minute)) + sm.client.SetDebug(true) + return sm +} + +func TestGetClusterManagerHealth(t *testing.T) { + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + ctx := context.TODO() + sm := setCreds(t) + httpmock.ActivateNonDefault(sm.client.GetClient()) + content, err := ioutil.ReadFile("../fixture/cluster_manager_health.json") + if err != nil { + t.Errorf("fixture: error in get cluster manager health %v", err) + } + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + url := clustermodel.GetClusterManagerHealthUrl + httpmock.RegisterResponder("GET", url, responder) + + _, err = sm.GetClusterManagerHealth(ctx) + if err != nil { + t.Errorf("fixture: error in get cluster manager health %v", err) + } +} + +func TestGetClusterManagerInfo(t *testing.T) { + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + ctx := context.TODO() + sm := setCreds(t) + httpmock.ActivateNonDefault(sm.client.GetClient()) + content, err := ioutil.ReadFile("../fixture/cluster_manager_info.json") + if err != nil { + t.Errorf("fixture: error in get cluster manager info %v", err) + } + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + url := clustermodel.GetClusterManagerInfoUrl + httpmock.RegisterResponder("GET", url, responder) + + _, err = sm.GetClusterManagerInfo(ctx) + if err != nil { + t.Errorf("fixture: error in get cluster manager info %v", err) + } +} + +func TestGetClusterManagerPeers(t *testing.T) { + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + ctx := context.TODO() + sm := setCreds(t) + httpmock.ActivateNonDefault(sm.client.GetClient()) + content, err := ioutil.ReadFile("../fixture/cluster_manager_peers.json") + if err != nil { + t.Errorf("fixture: error in get cluster manager peers %v", err) + } + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + url := clustermodel.GetClusterManagerPeersUrl + httpmock.RegisterResponder("GET", url, responder) + + peersptr, err := sm.GetClusterManagerPeers(ctx) + if err != nil { + t.Errorf("fixture: error in get cluster manager searchheads %v", err) + } + if peersptr == nil { + t.Errorf("fixture: error in get cluster manager searchheads peers list is empty") + } +} diff --git a/pkg/provisioner/splunk/implementation/splunk.go b/pkg/provisioner/splunk/implementation/splunk.go index 822d06a1b..184babf20 100644 --- a/pkg/provisioner/splunk/implementation/splunk.go +++ b/pkg/provisioner/splunk/implementation/splunk.go @@ -6,11 +6,10 @@ import ( "strings" "github.com/go-logr/logr" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" - splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -49,16 +48,6 @@ var callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner return healthList, err } -// var callGetClusterManagerSearchHeadStatus = func(ctx context.Context, p *splunkProvisioner) 
(*[]managermodel.SearchHeadContent, error) { -// sclist, err := p.gateway.GetClusterManagerSearchHeadStatus(ctx) -// if err != nil { -// return nil, err -// } else if sclist == nil { -// return nil, fmt.Errorf("search head list is empty") -// } -// return sclist, err -// } - var callGetClusterManagerPeersStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) { peerlist, err := p.gateway.GetClusterManagerPeers(ctx) if err != nil { @@ -80,7 +69,7 @@ var callGetClusterManagerSitesStatus = func(ctx context.Context, p *splunkProvis } // SetClusterManagerStatus Access cluster node configuration details. -func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, cr splcommon.MetaObject) error { +func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) error { peerlistptr, err := callGetClusterManagerPeersStatus(ctx, p) if err != nil { @@ -100,7 +89,7 @@ func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, cr splc } // set condition to existing conditions list - // meta.SetStatusCondition(conditions, condition) + meta.SetStatusCondition(conditions, condition) } } @@ -128,7 +117,7 @@ func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, cr splc Reason: site, Status: multiSiteStatus, } - // meta.SetStatusCondition(conditions, condition) + meta.SetStatusCondition(conditions, condition) } // business logic starts here @@ -140,14 +129,18 @@ func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, cr splc hllist := *healthList // prepare fields for conditions for _, health := range hllist { + condition := metav1.Condition{ + Type: "Health", + Message: "all the peers of indexer cluster status", + Reason: "PeersStatus", + } if health.AllPeersAreUp == "1" { - continue + condition.Status = metav1.ConditionTrue } else { - cr.Status.Phase = enterpriseApi.PhaseWarning + condition.Status = metav1.ConditionFalse } - // set condition to existing conditions list - + meta.SetStatusCondition(conditions, condition) } } diff --git a/pkg/provisioner/splunk/implementation/splunk_test.go b/pkg/provisioner/splunk/implementation/splunk_test.go new file mode 100644 index 000000000..397ba5d9d --- /dev/null +++ b/pkg/provisioner/splunk/implementation/splunk_test.go @@ -0,0 +1,58 @@ +package impl + +import ( + "context" + "testing" + + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" + splunkgatewayimpl "github.com/splunk/splunk-operator/pkg/gateway/splunk/services/implementation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +//var log = logz.New().WithName("provisioner").WithName("fixture") + +func setCreds(t *testing.T) *splunkProvisioner { + ctx := context.TODO() + sad := &splunkmodel.SplunkCredentials{ + Address: "splunk-cm-cluster-master-service", + Port: 8089, + ServicesNamespace: "", + Namespace: "default", + User: "admin", + App: "", + CredentialsName: "admin: abcdefghijklmnopqrstuvwxyz", + TrustedCAFile: "", + ClientCertificateFile: "", + ClientPrivateKeyFile: "", + DisableCertificateVerification: true, + } + publisher := func(ctx context.Context, eventType, reason, message string) {} + gatewayFactory := splunkgatewayimpl.NewGatewayFactory() + gateway, err := gatewayFactory.NewGateway(ctx, sad, publisher) + if err != nil { + return nil + } + // TODO fixme how to test the provisioner call directly + //sm := 
NewProvisionerFactory(ctx, &sad, publisher) + sm := &splunkProvisioner{ + credentials: sad, + publisher: publisher, + gateway: gateway, + } + return sm +} + +func TestSetClusterManagerStatus(t *testing.T) { + callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerHealthContent, error) { + healthData := []managermodel.ClusterManagerHealthContent{} + return &healthData, nil + } + provisioner := setCreds(t) + ctx := context.TODO() + conditions := &[]metav1.Condition{} + err := provisioner.SetClusterManagerStatus(ctx, conditions) + if err != nil { + t.Errorf("fixture: error in set cluster manager %v", err) + } +} diff --git a/pkg/provisioner/splunk/provisioner.go b/pkg/provisioner/splunk/provisioner.go index 3835da614..ff7bcbda1 100644 --- a/pkg/provisioner/splunk/provisioner.go +++ b/pkg/provisioner/splunk/provisioner.go @@ -5,7 +5,7 @@ import ( splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" - splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // EventPublisher is a function type for publishing events associated @@ -22,5 +22,5 @@ type Factory interface { type Provisioner interface { // SetClusterManagerStatus set cluster manager status - SetClusterManagerStatus(ctx context.Context, cr splcommon.MetaObject) error + SetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) error } diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 26dcdece3..c5bcfc727 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -294,7 +294,7 @@ func SetClusterManagerStatus(ctx context.Context, client splcommon.ControllerCli if err != nil { return errors.Wrap(err, "failed to create gateway") } - err = prov.SetClusterManagerStatus(ctx, cr) + err = prov.SetClusterManagerStatus(ctx, &cr.Status.Conditions) if err != nil { return errors.Wrap(err, "failed to update cluster manager health status") } From b2d7bc18921ce9aff5669af28fa037439d6ed736 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 13 Jul 2023 15:36:09 -0700 Subject: [PATCH 54/85] Return if CM list is empty --- pkg/splunk/enterprise/clustermanager.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 167ef3f52..e25c13ea7 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -540,6 +540,10 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller return err } + if len(objectList.Items) == 0 { + return nil + } + // check if instance has the required LicenseManagerRef for _, cm := range objectList.Items { if cm.Spec.LicenseManagerRef.Name == cr.GetName() { From 0d178a1e4719713d27ef827fdd54f8c0daaba5f4 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 13 Jul 2023 15:49:51 -0700 Subject: [PATCH 55/85] removed superfluous nil err check --- pkg/splunk/enterprise/util.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 2876565a5..a42fe6e9b 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2308,9 +2308,5 @@ func changeAnnotations(ctx context.Context, c splcommon.ControllerClient, image cr.SetAnnotations(annotations) err := c.Update(ctx, cr) - if err != nil { 
- return err - } - - return nil + return err } From 77f9a749fa3989f55f17c6217354f8ca83bf1631 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 13 Jul 2023 20:47:26 -0700 Subject: [PATCH 56/85] Removed branch from workflow --- .github/workflows/helm-test-workflow.yml | 1 - .github/workflows/int-test-workflow.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index d2e9b7aff..e68dc44d7 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -2,7 +2,6 @@ name: Helm Test WorkFlow on: push: branches: - - CSPL-2094-LM-upgrade-strategy - develop - main jobs: diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 25a85105a..3dd4eed22 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -2,7 +2,6 @@ name: Integration Test WorkFlow on: push: branches: - - CSPL-2094-LM-upgrade-strategy - develop - main - feature** From 130c778cae132d3d4c0c2eaa543890eb870f36b3 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 14 Jul 2023 10:48:16 -0700 Subject: [PATCH 57/85] Added branch to workflow --- .github/workflows/helm-test-workflow.yml | 1 + .github/workflows/int-test-workflow.yml | 1 + pkg/splunk/enterprise/clustermanager.go | 2 +- pkg/splunk/enterprise/monitoringconsole.go | 3 +++ 4 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index e68dc44d7..0b7ed9275 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -2,6 +2,7 @@ name: Helm Test WorkFlow on: push: branches: + - cspl-2343 - develop - main jobs: diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 3dd4eed22..d132bafdb 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -2,6 +2,7 @@ name: Integration Test WorkFlow on: push: branches: + - cspl-2343 - develop - main - feature** diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 5a9e11fb3..dba7955b7 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -233,6 +233,7 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig) result = *finalResult + // trigger MonitoringConsole reconcile by changing the splunk/image-tag annotation err = changeMonitoringConsoleAnnotations(ctx, client, cr) if err != nil { return result, err @@ -544,7 +545,6 @@ func changeClusterManagerAnnotations(ctx context.Context, c splcommon.Controller } return err } - if len(objectList.Items) == 0 { return nil } diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 0d0d9beef..0c12c7ab9 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -455,6 +455,9 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } return err } + if len(objectList.Items) == 0 { + return nil + } // check if instance has the required ClusterManagerRef for _, mc := range objectList.Items { From 93e5ca0cde44a5dc8713711431d0956093d04d1a Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 14 Jul 2023 16:16:10 -0700 Subject: [PATCH 
58/85] Fixed test cases --- .../services/fixture/cluster_config.json | 129 ++++ .../fixture/cluster_manager_health.json | 61 ++ .../fixture/cluster_manager_info.json | 104 +++ .../fixture/cluster_manager_peers.json | 649 ++++++++++++++++++ .../splunk/services/fixture/fixture.go | 7 +- .../splunk/implementation/splunk.go | 1 + .../splunk/implementation/splunk_test.go | 11 +- 7 files changed, 955 insertions(+), 7 deletions(-) create mode 100644 pkg/gateway/splunk/services/fixture/cluster_config.json create mode 100644 pkg/gateway/splunk/services/fixture/cluster_manager_health.json create mode 100644 pkg/gateway/splunk/services/fixture/cluster_manager_info.json create mode 100644 pkg/gateway/splunk/services/fixture/cluster_manager_peers.json diff --git a/pkg/gateway/splunk/services/fixture/cluster_config.json b/pkg/gateway/splunk/services/fixture/cluster_config.json new file mode 100644 index 000000000..16183e806 --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_config.json @@ -0,0 +1,129 @@ +{ + "links":{ + "_reload":"/services/cluster/config/_reload", + "_acl":"/services/cluster/config/_acl" + }, + "origin":"https://localhost:8089/services/cluster/config", + "updated":"2022-07-18T23:50:26+00:00", + "generator":{ + "build":"6818ac46f2ec", + "version":"9.0.0" + }, + "entry":[ + { + "name":"config", + "id":"https://localhost:8089/services/cluster/config/config", + "updated":"1970-01-01T00:00:00+00:00", + "links":{ + "alternate":"/services/cluster/config/config", + "list":"/services/cluster/config/config", + "_reload":"/services/cluster/config/config/_reload", + "edit":"/services/cluster/config/config", + "disable":"/services/cluster/config/config/disable" + }, + "author":"system", + "acl":{ + "app":"", + "can_list":true, + "can_write":true, + "modifiable":false, + "owner":"system", + "perms":{ + "read":[ + "admin", + "splunk-system-role" + ], + "write":[ + "admin", + "splunk-system-role" + ] + }, + "removable":false, + "sharing":"system" + }, + "content":{ + "access_logging_for_heartbeats":true, + "ack_factor":0, + "allowed_hbmiss_count":"3", + "buckets_per_addpeer":1000, + "bucketsize_upload_preference":"largest", + "cluster_label":"idxc_label", + "cm_com_timeout":10, + "cm_heartbeat_period":1, + "cm_max_hbmiss_count":3, + "cxn_timeout":60, + "decommission_node_force_timeout":300, + "decommission_search_jobs_wait_secs":180, + "disabled":false, + "eai:acl":null, + "executor_workers":10, + "forwarderdata_rcv_port":9997, + "forwarderdata_use_ssl":false, + "frozen_notifications_per_batch":10, + "guid":"7D3E85AB-B17A-47A6-B5E9-405FB889AD25", + "heartbeat_period":1, + "heartbeat_timeout":60, + "manager_switchover_idx_ping":true, + "manager_switchover_mode":"disabled", + "manager_switchover_quiet_period":60, + "manager_uri":"https://splunk-cm-cluster-master-service:8089", + "manual_detention":"off", + "master_uri":"https://splunk-cm-cluster-master-service:8089", + "max_auto_service_interval":1, + "max_delayed_updates_time_ms ":1000000, + "max_fixup_time_ms":0, + "max_peer_build_load":5, + "max_peer_rep_load":5, + "max_peer_sum_rep_load":5, + "max_peers_to_download_bundle":0, + "max_primary_backups_per_service":10, + "max_replication_errors":3, + "mode":"slave", + "notify_buckets_period":10, + "notify_scan_min_period":10, + "notify_scan_period":10, + "percent_peers_to_reload":100, + "percent_peers_to_restart":10, + "ping_flag":true, + "precompress_cluster_bundle":true, + "quiet_period":60, + "rcv_timeout":60, + "register_forwarder_address":"", + "register_replication_address":"", + 
"register_search_address":"", + "remote_storage_upload_timeout":60, + "rep_cxn_timeout":60, + "rep_max_rcv_timeout":180, + "rep_max_send_timeout":180, + "rep_rcv_timeout":60, + "rep_send_timeout":60, + "replication_factor":3, + "replication_port":9887, + "replication_use_ssl":false, + "report_remote_storage_bucket_upload_to_targets":"false", + "reporting_delay_period":10, + "restart_timeout":60, + "search_factor":2, + "search_files_retry_timeout":600, + "searchable_rolling_peer_state_delay_interval":60, + "secret":"********", + "send_timeout":60, + "service_execution_threshold_ms":1500, + "service_interval":1, + "site":"site1", + "streaming_replication_wait_secs":60, + "summary_update_batch_size":10, + "upload_rectifier_timeout_secs":2, + "warm_bucket_replication_pre_upload":"false" + } + } + ], + "paging":{ + "total":1, + "perPage":30, + "offset":0 + }, + "messages":[ + + ] + } \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/cluster_manager_health.json b/pkg/gateway/splunk/services/fixture/cluster_manager_health.json new file mode 100644 index 000000000..fe7478216 --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_manager_health.json @@ -0,0 +1,61 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/health", + "updated": "2022-07-18T23:54:03+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/health/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/health/master", + "list": "/services/cluster/manager/health/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "all_data_is_searchable": "1", + "all_peers_are_up": "1", + "cm_version_is_compatible": "1", + "eai:acl": null, + "multisite": "1", + "no_fixup_tasks_in_progress": "1", + "pre_flight_check": "1", + "ready_for_searchable_rolling_restart": "1", + "replication_factor_met": "1", + "search_factor_met": "1", + "site_replication_factor_met": "1", + "site_search_factor_met": "1", + "splunk_version_peer_count": "{ 9.0.0: 6 }" + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/cluster_manager_info.json b/pkg/gateway/splunk/services/fixture/cluster_manager_info.json new file mode 100644 index 000000000..af9c7199b --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_manager_info.json @@ -0,0 +1,104 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/info", + "updated": "2022-07-18T23:54:50+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/info/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/info/master", + "list": "/services/cluster/manager/info/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + 
"splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "apply_bundle_status": { + "invalid_bundle": { + "bundle_path": "", + "bundle_validation_errors_on_master": [], + "checksum": "", + "timestamp": 0 + }, + "reload_bundle_issued": false, + "status": "None" + }, + "available_sites": "[site1, site2]", + "backup_and_restore_primaries": false, + "controlled_rolling_restart_flag": false, + "eai:acl": null, + "forwarder_site_failover": "", + "indexing_ready_flag": true, + "initialized_flag": true, + "label": "splunk-cm-cluster-master-0", + "last_check_restart_bundle_result": false, + "last_dry_run_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "last_validated_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "is_valid_bundle": true, + "timestamp": 1657658326 + }, + "latest_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "maintenance_mode": false, + "multisite": true, + "previous_active_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "primaries_backup_status": "No on-going (or) completed primaries backup yet. Check back again in few minutes if you expect a backup.", + "quiet_period_flag": false, + "rolling_restart_flag": false, + "rolling_restart_or_upgrade": false, + "service_ready_flag": true, + "site_replication_factor": "{ origin:1, total:2 }", + "site_search_factor": "{ origin:1, total:2 }", + "start_time": 1657658831, + "summary_replication": "false" + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/cluster_manager_peers.json b/pkg/gateway/splunk/services/fixture/cluster_manager_peers.json new file mode 100644 index 000000000..a40ae7605 --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_manager_peers.json @@ -0,0 +1,649 @@ +{ + "links": { + "create": "/services/cluster/manager/peers/_new" + }, + "origin": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers", + "updated": "2022-07-21T07:55:59+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "27165CF0-FFDA-403C-B2FD-F258EA1794DA", + "id": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers/27165CF0-FFDA-403C-B2FD-F258EA1794DA", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/peers/27165CF0-FFDA-403C-B2FD-F258EA1794DA", + "list": "/services/cluster/manager/peers/27165CF0-FFDA-403C-B2FD-F258EA1794DA", + "edit": "/services/cluster/manager/peers/27165CF0-FFDA-403C-B2FD-F258EA1794DA" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle_id": 
"7351975980A20311463444E66492BDD5", + "apply_bundle_status": { + "invalid_bundle": { + "bundle_validation_errors": [], + "invalid_bundle_id": "" + }, + "reasons_for_restart": [], + "restart_required_for_apply_bundle": false, + "status": "None" + }, + "base_generation_id": 13, + "batched_replication_count": 0, + "bucket_count": 49, + "bucket_count_by_index": { + "_audit": 15, + "_internal": 30, + "_telemetry": 4 + }, + "buckets_rf_by_origin_site": { + "default": 3, + "site1": 29, + "site2": 17 + }, + "buckets_sf_by_origin_site": { + "default": 3, + "site1": 28, + "site2": 17 + }, + "eai:acl": null, + "fixup_set": [], + "heartbeat_started": true, + "host_port_pair": "192.168.27.74:8089", + "indexing_disk_space": 15719145472, + "is_searchable": true, + "is_valid_bundle": true, + "label": "splunk-example-site2-indexer-2", + "last_dry_run_bundle": "", + "last_heartbeat": 1658390158, + "last_validated_bundle": "7351975980A20311463444E66492BDD5", + "latest_bundle_id": "7351975980A20311463444E66492BDD5", + "merging_mode": false, + "peer_registered_summaries": true, + "pending_job_count": 0, + "primary_count": 17, + "primary_count_remote": 194, + "register_search_address": "192.168.27.74:8089", + "replication_count": 0, + "replication_port": 9887, + "replication_use_ssl": false, + "restart_required_for_applying_dry_run_bundle": false, + "search_state_counter": { + "PendingSearchable": 0, + "PendingUnsearchable": 0, + "Searchable": 48, + "SearchablePendingMask": 0, + "Unknown": 0, + "Unsearchable": 1 + }, + "site": "site2", + "splunk_version": "9.0.0", + "status": "Up", + "status_counter": { + "Complete": 36, + "NonStreamingTarget": 0, + "PendingDiscard": 0, + "PendingTruncate": 0, + "StreamingError": 0, + "StreamingSource": 4, + "StreamingTarget": 9, + "Unset": 0 + }, + "summary_replication_count": 0, + "transient_job_count": 0 + } + }, + { + "name": "4E2D2D32-9317-4E00-A531-52622CF1F22D", + "id": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers/4E2D2D32-9317-4E00-A531-52622CF1F22D", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/peers/4E2D2D32-9317-4E00-A531-52622CF1F22D", + "list": "/services/cluster/manager/peers/4E2D2D32-9317-4E00-A531-52622CF1F22D", + "edit": "/services/cluster/manager/peers/4E2D2D32-9317-4E00-A531-52622CF1F22D" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle_id": "7351975980A20311463444E66492BDD5", + "apply_bundle_status": { + "invalid_bundle": { + "bundle_validation_errors": [], + "invalid_bundle_id": "" + }, + "reasons_for_restart": [], + "restart_required_for_apply_bundle": false, + "status": "None" + }, + "base_generation_id": 11, + "batched_replication_count": 0, + "bucket_count": 43, + "bucket_count_by_index": { + "_audit": 14, + "_internal": 26, + "_telemetry": 3 + }, + "buckets_rf_by_origin_site": { + "default": 3, + "site1": 21, + "site2": 19 + }, + "buckets_sf_by_origin_site": { + "default": 3, + "site1": 21, + "site2": 19 + }, + "eai:acl": null, + "fixup_set": [], + "heartbeat_started": true, + "host_port_pair": "192.168.11.34:8089", + "indexing_disk_space": 15719145472, + "is_searchable": true, + "is_valid_bundle": true, + "label": "splunk-example-site1-indexer-1", + "last_dry_run_bundle": "", + 
"last_heartbeat": 1658390158, + "last_validated_bundle": "7351975980A20311463444E66492BDD5", + "latest_bundle_id": "7351975980A20311463444E66492BDD5", + "merging_mode": false, + "peer_registered_summaries": true, + "pending_job_count": 0, + "primary_count": 37, + "primary_count_remote": 195, + "register_search_address": "192.168.11.34:8089", + "replication_count": 0, + "replication_port": 9887, + "replication_use_ssl": false, + "restart_required_for_applying_dry_run_bundle": false, + "search_state_counter": { + "PendingSearchable": 0, + "PendingUnsearchable": 0, + "Searchable": 43, + "SearchablePendingMask": 0, + "Unknown": 0, + "Unsearchable": 0 + }, + "site": "site1", + "splunk_version": "9.0.0", + "status": "Up", + "status_counter": { + "Complete": 30, + "NonStreamingTarget": 0, + "PendingDiscard": 0, + "PendingTruncate": 0, + "StreamingError": 0, + "StreamingSource": 6, + "StreamingTarget": 7, + "Unset": 0 + }, + "summary_replication_count": 0, + "transient_job_count": 0 + } + }, + { + "name": "59E6334A-BA79-43F0-B360-41C079AC75C1", + "id": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers/59E6334A-BA79-43F0-B360-41C079AC75C1", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/peers/59E6334A-BA79-43F0-B360-41C079AC75C1", + "list": "/services/cluster/manager/peers/59E6334A-BA79-43F0-B360-41C079AC75C1", + "edit": "/services/cluster/manager/peers/59E6334A-BA79-43F0-B360-41C079AC75C1" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle_id": "7351975980A20311463444E66492BDD5", + "apply_bundle_status": { + "invalid_bundle": { + "bundle_validation_errors": [], + "invalid_bundle_id": "" + }, + "reasons_for_restart": [], + "restart_required_for_apply_bundle": false, + "status": "None" + }, + "base_generation_id": 14, + "batched_replication_count": 0, + "bucket_count": 27, + "bucket_count_by_index": { + "_audit": 9, + "_internal": 17, + "_telemetry": 1 + }, + "buckets_rf_by_origin_site": { + "default": 3, + "site1": 10, + "site2": 14 + }, + "buckets_sf_by_origin_site": { + "default": 3, + "site1": 9, + "site2": 14 + }, + "eai:acl": null, + "fixup_set": [], + "heartbeat_started": true, + "host_port_pair": "192.168.91.15:8089", + "indexing_disk_space": 15719145472, + "is_searchable": true, + "is_valid_bundle": true, + "label": "splunk-example-site2-indexer-1", + "last_dry_run_bundle": "", + "last_heartbeat": 1658390158, + "last_validated_bundle": "7351975980A20311463444E66492BDD5", + "latest_bundle_id": "7351975980A20311463444E66492BDD5", + "merging_mode": false, + "peer_registered_summaries": true, + "pending_job_count": 0, + "primary_count": 12, + "primary_count_remote": 194, + "register_search_address": "192.168.91.15:8089", + "replication_count": 0, + "replication_port": 9887, + "replication_use_ssl": false, + "restart_required_for_applying_dry_run_bundle": false, + "search_state_counter": { + "PendingSearchable": 0, + "PendingUnsearchable": 0, + "Searchable": 26, + "SearchablePendingMask": 0, + "Unknown": 0, + "Unsearchable": 1 + }, + "site": "site2", + "splunk_version": "9.0.0", + "status": "Up", + "status_counter": { + "Complete": 22, + "NonStreamingTarget": 0, + "PendingDiscard": 0, + "PendingTruncate": 0, + "StreamingError": 0, 
+ "StreamingSource": 4, + "StreamingTarget": 1, + "Unset": 0 + }, + "summary_replication_count": 0, + "transient_job_count": 0 + } + }, + { + "name": "7306C50E-C8FD-45EF-A360-E0A03518BE2C", + "id": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers/7306C50E-C8FD-45EF-A360-E0A03518BE2C", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/peers/7306C50E-C8FD-45EF-A360-E0A03518BE2C", + "list": "/services/cluster/manager/peers/7306C50E-C8FD-45EF-A360-E0A03518BE2C", + "edit": "/services/cluster/manager/peers/7306C50E-C8FD-45EF-A360-E0A03518BE2C" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle_id": "7351975980A20311463444E66492BDD5", + "apply_bundle_status": { + "invalid_bundle": { + "bundle_validation_errors": [], + "invalid_bundle_id": "" + }, + "reasons_for_restart": [], + "restart_required_for_apply_bundle": false, + "status": "None" + }, + "base_generation_id": 12, + "batched_replication_count": 0, + "bucket_count": 46, + "bucket_count_by_index": { + "_audit": 12, + "_internal": 32, + "_telemetry": 2 + }, + "buckets_rf_by_origin_site": { + "default": 3, + "site1": 21, + "site2": 22 + }, + "buckets_sf_by_origin_site": { + "default": 3, + "site1": 19, + "site2": 19 + }, + "eai:acl": null, + "fixup_set": [], + "heartbeat_started": true, + "host_port_pair": "192.168.69.142:8089", + "indexing_disk_space": 15719145472, + "is_searchable": true, + "is_valid_bundle": true, + "label": "splunk-example-site1-indexer-0", + "last_dry_run_bundle": "", + "last_heartbeat": 1658390158, + "last_validated_bundle": "7351975980A20311463444E66492BDD5", + "latest_bundle_id": "7351975980A20311463444E66492BDD5", + "merging_mode": false, + "peer_registered_summaries": true, + "pending_job_count": 0, + "primary_count": 37, + "primary_count_remote": 194, + "register_search_address": "192.168.69.142:8089", + "replication_count": 0, + "replication_port": 9887, + "replication_use_ssl": false, + "restart_required_for_applying_dry_run_bundle": false, + "search_state_counter": { + "PendingSearchable": 0, + "PendingUnsearchable": 0, + "Searchable": 41, + "SearchablePendingMask": 0, + "Unknown": 0, + "Unsearchable": 5 + }, + "site": "site1", + "splunk_version": "9.0.0", + "status": "Up", + "status_counter": { + "Complete": 37, + "NonStreamingTarget": 0, + "PendingDiscard": 0, + "PendingTruncate": 0, + "StreamingError": 0, + "StreamingSource": 5, + "StreamingTarget": 4, + "Unset": 0 + }, + "summary_replication_count": 0, + "transient_job_count": 0 + } + }, + { + "name": "CA22DE5F-72B1-4324-8844-4BA4765E9CBC", + "id": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers/CA22DE5F-72B1-4324-8844-4BA4765E9CBC", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/peers/CA22DE5F-72B1-4324-8844-4BA4765E9CBC", + "list": "/services/cluster/manager/peers/CA22DE5F-72B1-4324-8844-4BA4765E9CBC", + "edit": "/services/cluster/manager/peers/CA22DE5F-72B1-4324-8844-4BA4765E9CBC" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + 
"splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle_id": "7351975980A20311463444E66492BDD5", + "apply_bundle_status": { + "invalid_bundle": { + "bundle_validation_errors": [], + "invalid_bundle_id": "" + }, + "reasons_for_restart": [], + "restart_required_for_apply_bundle": false, + "status": "None" + }, + "base_generation_id": 15, + "batched_replication_count": 0, + "bucket_count": 33, + "bucket_count_by_index": { + "_audit": 11, + "_internal": 20, + "_telemetry": 2 + }, + "buckets_rf_by_origin_site": { + "default": 3, + "site1": 14, + "site2": 16 + }, + "buckets_sf_by_origin_site": { + "default": 3, + "site1": 12, + "site2": 15 + }, + "eai:acl": null, + "fixup_set": [], + "heartbeat_started": true, + "host_port_pair": "192.168.61.159:8089", + "indexing_disk_space": 15719145472, + "is_searchable": true, + "is_valid_bundle": true, + "label": "splunk-example-site2-indexer-0", + "last_dry_run_bundle": "", + "last_heartbeat": 1658390158, + "last_validated_bundle": "7351975980A20311463444E66492BDD5", + "latest_bundle_id": "7351975980A20311463444E66492BDD5", + "merging_mode": false, + "peer_registered_summaries": true, + "pending_job_count": 0, + "primary_count": 13, + "primary_count_remote": 193, + "register_search_address": "192.168.61.159:8089", + "replication_count": 0, + "replication_port": 9887, + "replication_use_ssl": false, + "restart_required_for_applying_dry_run_bundle": false, + "search_state_counter": { + "PendingSearchable": 0, + "PendingUnsearchable": 0, + "Searchable": 30, + "SearchablePendingMask": 0, + "Unknown": 0, + "Unsearchable": 3 + }, + "site": "site2", + "splunk_version": "9.0.0", + "status": "Up", + "status_counter": { + "Complete": 23, + "NonStreamingTarget": 0, + "PendingDiscard": 0, + "PendingTruncate": 0, + "StreamingError": 0, + "StreamingSource": 4, + "StreamingTarget": 6, + "Unset": 0 + }, + "summary_replication_count": 0, + "transient_job_count": 0 + } + }, + { + "name": "FCE8B198-DD45-4D79-A2B8-8663836713FC", + "id": "https://splunk-cm-cluster-master-service.default:8089/services/cluster/manager/peers/FCE8B198-DD45-4D79-A2B8-8663836713FC", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/peers/FCE8B198-DD45-4D79-A2B8-8663836713FC", + "list": "/services/cluster/manager/peers/FCE8B198-DD45-4D79-A2B8-8663836713FC", + "edit": "/services/cluster/manager/peers/FCE8B198-DD45-4D79-A2B8-8663836713FC" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle_id": "7351975980A20311463444E66492BDD5", + "apply_bundle_status": { + "invalid_bundle": { + "bundle_validation_errors": [], + "invalid_bundle_id": "" + }, + "reasons_for_restart": [], + "restart_required_for_apply_bundle": false, + "status": "None" + }, + "base_generation_id": 25, + "batched_replication_count": 0, + "bucket_count": 30, + "bucket_count_by_index": { + "_audit": 10, + "_internal": 18, + "_telemetry": 2 + }, + "buckets_rf_by_origin_site": { + "default": 3, + "site1": 19, + "site2": 8 + }, + "buckets_sf_by_origin_site": { + "default": 3, + "site1": 18, + "site2": 5 + }, + "eai:acl": null, + "fixup_set": [], + "heartbeat_started": true, + "host_port_pair": "192.168.53.162:8089", + "indexing_disk_space": 15719145472, + 
"is_searchable": true, + "is_valid_bundle": true, + "label": "splunk-example-site1-indexer-2", + "last_dry_run_bundle": "", + "last_heartbeat": 1658390158, + "last_validated_bundle": "7351975980A20311463444E66492BDD5", + "latest_bundle_id": "7351975980A20311463444E66492BDD5", + "merging_mode": false, + "peer_registered_summaries": true, + "pending_job_count": 0, + "primary_count": 23, + "primary_count_remote": 194, + "register_search_address": "192.168.53.162:8089", + "replication_count": 0, + "replication_port": 9887, + "replication_use_ssl": false, + "restart_required_for_applying_dry_run_bundle": false, + "search_state_counter": { + "PendingSearchable": 0, + "PendingUnsearchable": 0, + "Searchable": 26, + "SearchablePendingMask": 0, + "Unknown": 0, + "Unsearchable": 4 + }, + "site": "site1", + "splunk_version": "9.0.0", + "status": "Up", + "status_counter": { + "Complete": 24, + "NonStreamingTarget": 0, + "PendingDiscard": 0, + "PendingTruncate": 0, + "StreamingError": 0, + "StreamingSource": 5, + "StreamingTarget": 1, + "Unset": 0 + }, + "summary_replication_count": 0, + "transient_job_count": 0 + } + } + ], + "paging": { + "total": 6, + "perPage": 30, + "offset": 0 + }, + "messages": [] + } \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/fixture.go b/pkg/gateway/splunk/services/fixture/fixture.go index cbb1987ab..b8225be2c 100644 --- a/pkg/gateway/splunk/services/fixture/fixture.go +++ b/pkg/gateway/splunk/services/fixture/fixture.go @@ -2,6 +2,7 @@ package fixture import ( "context" + //"encoding/json" "io/ioutil" "net/http" @@ -59,7 +60,7 @@ func (f *Fixture) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredent func (p *fixtureGateway) GetClusterManagerInfo(ctx context.Context) (*[]managermodel.ClusterManagerInfoContent, error) { // Read entire file content, giving us little control but // making it very simple. No need to close the file. - content, err := ioutil.ReadFile("cluster_config.json") + content, err := ioutil.ReadFile("../../../gateway/splunk/services/fixture/cluster_config.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -103,7 +104,7 @@ func (p *fixtureGateway) GetClusterManagerInfo(ctx context.Context) (*[]managerm func (p *fixtureGateway) GetClusterManagerPeers(ctx context.Context) (*[]managermodel.ClusterManagerPeerContent, error) { // Read entire file content, giving us little control but // making it very simple. No need to close the file. - content, err := ioutil.ReadFile("cluster_config.json") + content, err := ioutil.ReadFile("../../../gateway/splunk/services/fixture/cluster_config.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -151,7 +152,7 @@ func (p *fixtureGateway) GetClusterManagerPeers(ctx context.Context) (*[]manager func (p *fixtureGateway) GetClusterManagerHealth(ctx context.Context) (*[]managermodel.ClusterManagerHealthContent, error) { // Read entire file content, giving us little control but // making it very simple. No need to close the file. 
- content, err := ioutil.ReadFile("cluster_config.json") + content, err := ioutil.ReadFile("../../../gateway/splunk/services/fixture/cluster_config.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err diff --git a/pkg/provisioner/splunk/implementation/splunk.go b/pkg/provisioner/splunk/implementation/splunk.go index 184babf20..f1c2dd76b 100644 --- a/pkg/provisioner/splunk/implementation/splunk.go +++ b/pkg/provisioner/splunk/implementation/splunk.go @@ -51,6 +51,7 @@ var callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner var callGetClusterManagerPeersStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) { peerlist, err := p.gateway.GetClusterManagerPeers(ctx) if err != nil { + fmt.Println("Hi3") return nil, err } else if peerlist == nil { return nil, fmt.Errorf("peer list is empty") diff --git a/pkg/provisioner/splunk/implementation/splunk_test.go b/pkg/provisioner/splunk/implementation/splunk_test.go index 397ba5d9d..7fad69395 100644 --- a/pkg/provisioner/splunk/implementation/splunk_test.go +++ b/pkg/provisioner/splunk/implementation/splunk_test.go @@ -6,7 +6,7 @@ import ( splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" - splunkgatewayimpl "github.com/splunk/splunk-operator/pkg/gateway/splunk/services/implementation" + fixturegatewayimpl "github.com/splunk/splunk-operator/pkg/gateway/splunk/services/fixture" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -28,11 +28,12 @@ func setCreds(t *testing.T) *splunkProvisioner { DisableCertificateVerification: true, } publisher := func(ctx context.Context, eventType, reason, message string) {} - gatewayFactory := splunkgatewayimpl.NewGatewayFactory() - gateway, err := gatewayFactory.NewGateway(ctx, sad, publisher) + fixtureFactory := fixturegatewayimpl.Fixture{} + gateway, err := fixtureFactory.NewGateway(ctx, sad, publisher) if err != nil { return nil } + // TODO fixme how to test the provisioner call directly //sm := NewProvisionerFactory(ctx, &sad, publisher) sm := &splunkProvisioner{ @@ -49,8 +50,10 @@ func TestSetClusterManagerStatus(t *testing.T) { return &healthData, nil } provisioner := setCreds(t) - ctx := context.TODO() conditions := &[]metav1.Condition{} + + ctx := context.TODO() + err := provisioner.SetClusterManagerStatus(ctx, conditions) if err != nil { t.Errorf("fixture: error in set cluster manager %v", err) From 6b33a49786b588588bda215531bd2e689514655e Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Fri, 14 Jul 2023 16:49:47 -0700 Subject: [PATCH 59/85] Added provisioner to test cases, added branch to workflow --- .github/workflows/helm-test-workflow.yml | 1 + .github/workflows/int-test-workflow.yml | 1 + controllers/clustermanager_controller_test.go | 9 +- pkg/splunk/enterprise/clustermanager_test.go | 43 ++-- pkg/splunk/enterprise/configuration_test.go | 3 +- pkg/splunk/enterprise/indexercluster_test.go | 5 +- pkg/splunk/enterprise/licensemanager_test.go | 5 +- pkg/splunk/enterprise/licensemaster_test.go | 5 +- .../enterprise/monitoringconsole_test.go | 219 +----------------- pkg/splunk/enterprise/util_test.go | 3 +- 10 files changed, 45 insertions(+), 249 deletions(-) diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index e68dc44d7..869aeca0e 100644 --- a/.github/workflows/helm-test-workflow.yml +++ 
b/.github/workflows/helm-test-workflow.yml @@ -2,6 +2,7 @@ name: Helm Test WorkFlow on: push: branches: + - verify - develop - main jobs: diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 3dd4eed22..f136598d6 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -2,6 +2,7 @@ name: Integration Test WorkFlow on: push: branches: + - verify - develop - main - feature** diff --git a/controllers/clustermanager_controller_test.go b/controllers/clustermanager_controller_test.go index d24c181c7..a8c356e4e 100644 --- a/controllers/clustermanager_controller_test.go +++ b/controllers/clustermanager_controller_test.go @@ -5,6 +5,7 @@ import ( "fmt" enterpriseApi "github.com/splunk/splunk-operator/api/v4" + provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" "time" @@ -35,7 +36,7 @@ var _ = Describe("ClusterManager Controller", func() { It("Get ClusterManager custom resource should failed", func() { namespace := "ns-splunk-cm-1" - ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) { + ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -51,7 +52,7 @@ var _ = Describe("ClusterManager Controller", func() { It("Create ClusterManager custom resource with annotations should pause", func() { namespace := "ns-splunk-cm-2" - ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) { + ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -71,7 +72,7 @@ var _ = Describe("ClusterManager Controller", func() { Context("ClusterManager Management", func() { It("Create ClusterManager custom resource should succeeded", func() { namespace := "ns-splunk-cm-3" - ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) { + ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -84,7 +85,7 @@ var _ = Describe("ClusterManager Controller", func() { It("Cover Unused methods", func() { namespace := "ns-splunk-cm-4" - ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) { + ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 63314c870..05831e9c2 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ 
-38,6 +38,7 @@ import ( runtime "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" @@ -117,7 +118,7 @@ func TestApplyClusterManager(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) + _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager), splunkimpl.NewProvisionerFactory(false)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyClusterManager", ¤t, revised, createCalls, updateCalls, reconcile, true) @@ -127,7 +128,7 @@ func TestApplyClusterManager(t *testing.T) { revised.ObjectMeta.DeletionTimestamp = ¤tTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) + _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager), splunkimpl.NewProvisionerFactory(false)) return true, err } splunkDeletionTester(t, revised, deleteFunc) @@ -138,7 +139,7 @@ func TestApplyClusterManager(t *testing.T) { } c := spltest.NewMockClient() _ = errors.New(splcommon.Rerr) - _, err := ApplyClusterManager(ctx, c, ¤t) + _, err := ApplyClusterManager(ctx, c, ¤t, splunkimpl.NewProvisionerFactory(false)) if err == nil { t.Errorf("Expected error") } @@ -204,7 +205,7 @@ func TestApplyClusterManager(t *testing.T) { }, } - _, err = ApplyClusterManager(ctx, c, ¤t) + _, err = ApplyClusterManager(ctx, c, ¤t, splunkimpl.NewProvisionerFactory(false)) if err == nil { t.Errorf("Expected error") } @@ -220,7 +221,7 @@ func TestApplyClusterManager(t *testing.T) { current.Spec.SmartStore.VolList[0].SecretRef = "s3-secret" current.Status.SmartStore.VolList[0].SecretRef = "s3-secret" current.Status.ResourceRevMap["s3-secret"] = "v2" - _, err = ApplyClusterManager(ctx, c, ¤t) + _, err = ApplyClusterManager(ctx, c, ¤t, splunkimpl.NewProvisionerFactory(false)) if err == nil { t.Errorf("Expected error") } @@ -234,7 +235,7 @@ func TestApplyClusterManager(t *testing.T) { c.Create(ctx, &cmap) current.Spec.SmartStore.VolList[0].SecretRef = "" current.Spec.SmartStore.Defaults.IndexAndGlobalCommonSpec.VolName = "msos_s2s3_vol" - _, err = ApplyClusterManager(ctx, c, ¤t) + _, err = ApplyClusterManager(ctx, c, ¤t, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("Don't expected error here") } @@ -290,7 +291,7 @@ func TestApplyClusterManager(t *testing.T) { }, }, } - _, err = ApplyClusterManager(ctx, c, ¤t) + _, err = ApplyClusterManager(ctx, c, ¤t, splunkimpl.NewProvisionerFactory(false)) if err == nil { t.Errorf("Expected error") } @@ -307,7 +308,7 @@ func TestApplyClusterManager(t *testing.T) { } rerr := errors.New(splcommon.Rerr) c.InduceErrorKind[splcommon.MockClientInduceErrorGet] = rerr - _, err = ApplyClusterManager(ctx, c, ¤t) + _, err = ApplyClusterManager(ctx, c, ¤t, splunkimpl.NewProvisionerFactory(false)) if err == nil { t.Errorf("Expected error") } @@ -578,7 +579,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { } // Without S3 keys, ApplyClusterManager should fail - _, err := 
ApplyClusterManager(ctx, client, ¤t) + _, err := ApplyClusterManager(ctx, client, ¤t, splunkimpl.NewProvisionerFactory(false)) if err == nil { t.Errorf("ApplyClusterManager should fail without S3 secrets configured") } @@ -607,7 +608,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyClusterManager(context.Background(), c, cr.(*enterpriseApi.ClusterManager)) + _, err := ApplyClusterManager(context.Background(), c, cr.(*enterpriseApi.ClusterManager), splunkimpl.NewProvisionerFactory(false)) return err } @@ -634,12 +635,12 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyClusterManagerWithSmartstore-0", ¤t, revised, createCalls, updateCalls, reconcile, true, secret, &smartstoreConfigMap, ss, pod) current.Status.BundlePushTracker.NeedToPushManagerApps = true - if _, err = ApplyClusterManager(context.Background(), client, ¤t); err != nil { + if _, err = ApplyClusterManager(context.Background(), client, ¤t, splunkimpl.NewProvisionerFactory(false)); err != nil { t.Errorf("ApplyClusterManager() should not have returned error") } current.Spec.CommonSplunkSpec.EtcVolumeStorageConfig.StorageCapacity = "-abcd" - if _, err := ApplyClusterManager(context.Background(), client, ¤t); err == nil { + if _, err := ApplyClusterManager(context.Background(), client, ¤t, splunkimpl.NewProvisionerFactory(false)); err == nil { t.Errorf("ApplyClusterManager() should have returned error") } @@ -649,7 +650,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { ss.Spec.Replicas = &replicas ss.Spec.Template.Spec.Containers[0].Image = "splunk/splunk" client.AddObject(ss) - if result, err := ApplyClusterManager(context.Background(), client, ¤t); err == nil && !result.Requeue { + if result, err := ApplyClusterManager(context.Background(), client, ¤t, splunkimpl.NewProvisionerFactory(false)); err == nil && !result.Requeue { t.Errorf("ApplyClusterManager() should have returned error or result.requeue should have been false") } @@ -659,7 +660,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { client.AddObjects(objects) current.Spec.CommonSplunkSpec.Mock = false - if _, err := ApplyClusterManager(context.Background(), client, ¤t); err == nil { + if _, err := ApplyClusterManager(context.Background(), client, ¤t, splunkimpl.NewProvisionerFactory(false)); err == nil { t.Errorf("ApplyClusterManager() should have returned error") } } @@ -861,7 +862,7 @@ func TestAppFrameworkApplyClusterManagerShouldNotFail(t *testing.T) { t.Errorf(err.Error()) } - _, err = ApplyClusterManager(context.Background(), client, &cm) + _, err = ApplyClusterManager(context.Background(), client, &cm, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("ApplyClusterManager should not have returned error here.") } @@ -956,7 +957,7 @@ func TestApplyCLusterManagerDeletion(t *testing.T) { t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume) } - _, err = ApplyClusterManager(ctx, c, &cm) + _, err = ApplyClusterManager(ctx, c, &cm, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("ApplyClusterManager should not have returned error here.") } @@ -1444,7 +1445,7 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) { } err = client.Create(ctx, &cm) - _, err = ApplyClusterManager(ctx, client, &cm) + _, err = ApplyClusterManager(ctx, client, &cm, 
splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } @@ -1529,7 +1530,7 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { debug.PrintStack() } client.Create(ctx, cm) - _, err = ApplyClusterManager(ctx, client, cm) + _, err = ApplyClusterManager(ctx, client, cm, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } @@ -1670,7 +1671,7 @@ func TestClusterManagerWitReadyState(t *testing.T) { // simulate create clustermanager instance before reconcilation c.Create(ctx, clustermanager) - _, err := ApplyClusterManager(ctx, c, clustermanager) + _, err := ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("Unexpected error while running reconciliation for clustermanager with app framework %v", err) debug.PrintStack() @@ -1706,7 +1707,7 @@ func TestClusterManagerWitReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager) + _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() @@ -1824,7 +1825,7 @@ func TestClusterManagerWitReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager) + _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() diff --git a/pkg/splunk/enterprise/configuration_test.go b/pkg/splunk/enterprise/configuration_test.go index f762c19db..5c7426c60 100644 --- a/pkg/splunk/enterprise/configuration_test.go +++ b/pkg/splunk/enterprise/configuration_test.go @@ -26,6 +26,7 @@ import ( enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -232,7 +233,7 @@ func TestSmartstoreApplyClusterManagerFailsOnInvalidSmartStoreConfig(t *testing. 
var client splcommon.ControllerClient - _, err := ApplyClusterManager(context.Background(), client, &cr) + _, err := ApplyClusterManager(context.Background(), client, &cr, splunkimpl.NewProvisionerFactory(false)) if err == nil { t.Errorf("ApplyClusterManager should fail on invalid smartstore config") } diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index cbeb0a1e6..be984e202 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -42,6 +42,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/go-logr/logr" + splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -1602,7 +1603,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager) + _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() @@ -1681,7 +1682,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager) + _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index 2979fcd1b..4e07f158d 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -37,6 +37,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" @@ -914,7 +915,7 @@ func TestLicenseManagerWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager) + _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() @@ -988,7 +989,7 @@ func TestLicenseManagerWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager) + _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() diff --git a/pkg/splunk/enterprise/licensemaster_test.go b/pkg/splunk/enterprise/licensemaster_test.go index 0fcab3a5c..b2ddb3da6 100644 --- a/pkg/splunk/enterprise/licensemaster_test.go +++ b/pkg/splunk/enterprise/licensemaster_test.go @@ -37,6 +37,7 @@ import ( "github.com/pkg/errors" enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splunkimpl 
"github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" @@ -924,7 +925,7 @@ func TestLicenseMasterWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager) + _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() @@ -1003,7 +1004,7 @@ func TestLicenseMasterWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager) + _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go index 73439e345..9a64d63cf 100644 --- a/pkg/splunk/enterprise/monitoringconsole_test.go +++ b/pkg/splunk/enterprise/monitoringconsole_test.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -1103,220 +1104,6 @@ func TestGetMonitoringConsoleList(t *testing.T) { } } -func TestUpgradeScenarioMonitoringConsole(t *testing.T) { - - ctx := context.TODO() - - builder := fake.NewClientBuilder() - client := builder.Build() - utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) - - // Create License Manager - cm := enterpriseApi.ClusterManager{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: enterpriseApi.ClusterManagerSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", - Image: "splunk/splunk:latest", - }, - Volumes: []corev1.Volume{}, - }, - }, - } - - err := client.Create(ctx, &cm) - _, err = ApplyClusterManager(ctx, client, &cm) - if err != nil { - t.Errorf("applyClusterManager should not have returned error; err=%v", err) - } - cm.Status.Phase = enterpriseApi.PhaseReady - err = client.Status().Update(ctx, &cm) - if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() - } - - // get StatefulSet labels - - namespacedName := types.NamespacedName{ - Namespace: cm.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkClusterManager, cm.GetName()), - } - cmstatefulSet := &appsv1.StatefulSet{} - err = client.Get(ctx, namespacedName, cmstatefulSet) - if err != nil { - t.Errorf("Unexpected get statefulset %v", err) - } - labels := cmstatefulSet.Spec.Template.ObjectMeta.Labels - - // create LM pod - cmstpod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-cluster-manager-0", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - } - cmstpod.ObjectMeta.Labels = labels - // simulate create pod - err = 
client.Create(ctx, cmstpod) - if err != nil { - t.Errorf("Unexpected create pod failed %v", err) - debug.PrintStack() - } - - // update pod - cmstpod.Status.Phase = corev1.PodRunning - cmstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Image: "splunk/splunk:latest", - Name: "splunk", - Ready: true, - }, - } - err = client.Status().Update(ctx, cmstpod) - if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() - } - - // Create Cluster Manager - mc := enterpriseApi.MonitoringConsole{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: enterpriseApi.MonitoringConsoleSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", - Image: "splunk/splunk:latest", - }, - Volumes: []corev1.Volume{}, - LicenseManagerRef: corev1.ObjectReference{ - Name: "test", - }, - }, - }, - } - replicas := int32(1) - labels = map[string]string{ - "app": "test", - "tier": "splunk", - } - mcstatefulset := &appsv1.StatefulSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-monitoring-console", - Namespace: "test", - }, - Spec: appsv1.StatefulSetSpec{ - ServiceName: "splunk-test-monitoring-console-headless", - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - }, - Replicas: &replicas, - }, - } - mcstatefulset.Spec.Selector = &metav1.LabelSelector{ - MatchLabels: labels, - } - - err = client.Create(ctx, &mc) - err = client.Create(ctx, mcstatefulset) - _, err = ApplyMonitoringConsole(ctx, client, &mc) - if err != nil { - t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err) - } - - // Create CM pod - mcstpod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "splunk-test-monitoring-console-0", - Namespace: "test", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "splunk", - Image: "splunk/splunk:latest", - Env: []corev1.EnvVar{ - { - Name: "test", - Value: "test", - }, - }, - }, - }, - }, - } - mcstpod.ObjectMeta.Labels = labels - // simulate create pod - err = client.Create(ctx, mcstpod) - if err != nil { - t.Errorf("Unexpected create pod failed %v", err) - debug.PrintStack() - } - - // update CM pod - mcstpod.Status.Phase = corev1.PodRunning - mcstpod.Status.ContainerStatuses = []corev1.ContainerStatus{ - { - Image: "splunk/splunk:latest", - Name: "splunk", - Ready: true, - }, - } - err = client.Status().Update(ctx, mcstpod) - if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() - } - - mc.Spec.Image = "splunk2" - cmstpod.Status.ContainerStatuses[0].Image = "splunk2" - err = client.Status().Update(ctx, cmstpod) - check, err := upgradeScenarioMonitoringConsole(ctx, client, &mc) - - if err != nil { - t.Errorf("Unexpected upgradeScenario error %v", err) - } - - if !check { - t.Errorf("upgradeScenario: MC should be ready for upgrade") - } - -} - func TestGetMonitoringConsoleCurrentImage(t *testing.T) { ctx := context.TODO() @@ -1399,7 +1186,7 @@ func TestGetMonitoringConsoleCurrentImage(t *testing.T) { debug.PrintStack() } - image, err := getMonitoringConsoleCurrentImage(ctx, client, ¤t) + image, err := getCurrentImage(ctx, client, ¤t, SplunkMonitoringConsole) if err != nil { t.Errorf("Unexpected geMonitoringConsoleCurrentImage error %v", err) @@ -1491,7 +1278,7 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) { // 
Create the instances client.Create(ctx, cm) client.Create(ctx, cmstatefulset) - _, err := ApplyClusterManager(ctx, client, cm) + _, err := ApplyClusterManager(ctx, client, cm, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index 64587db8a..302ce15fe 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -39,6 +39,7 @@ import ( enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" @@ -3172,7 +3173,7 @@ func TestGetCurrentImage(t *testing.T) { utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) err := client.Create(ctx, ¤t) - _, err = ApplyClusterManager(ctx, client, ¤t) + _, err = ApplyClusterManager(ctx, client, ¤t, splunkimpl.NewProvisionerFactory(false)) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } From 077f13080dec46b6fefabc2b5f78569a3f71011a Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 17 Jul 2023 09:06:49 -0700 Subject: [PATCH 60/85] Fixed comment --- pkg/splunk/enterprise/monitoringconsole.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 0c12c7ab9..5ba8ff25b 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -391,7 +391,7 @@ func isMonitoringConsoleReadyForUpgrade(ctx context.Context, c splcommon.Control namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: clusterManagerRef.Name} clusterManager := &enterpriseApi.ClusterManager{} - // get the cluster manager referred in cluster manager + // get the cluster manager referred in monitoring console err = c.Get(ctx, namespacedName, clusterManager) if err != nil { eventPublisher.Warning(ctx, "isMonitoringConsoleReadyForUpgrade", fmt.Sprintf("Could not find the Cluster Manager. 
Reason %v", err)) From 52914ddcc72fac2833d2eb3df43718e6441d31b3 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 17 Jul 2023 09:20:44 -0700 Subject: [PATCH 61/85] Fixed unit test --- pkg/splunk/enterprise/clustermanager_test.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 63314c870..01be64670 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -531,9 +531,14 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { runtime.InNamespace("test"), runtime.MatchingLabels(labels), } + listOpts1 := []runtime.ListOption{ + runtime.InNamespace("test"), + } listmockCall := []spltest.MockFuncCall{ - {ListOpts: listOpts}} - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[7], funcCalls[10], funcCalls[12]}, "List": {listmockCall[0], listmockCall[0]}, "Update": {funcCalls[0], funcCalls[3], funcCalls[13]}} + {ListOpts: listOpts}, + {ListOpts: listOpts1}, + } + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[7], funcCalls[10], funcCalls[12]}, "List": {listmockCall[0], listmockCall[0], listmockCall[1]}, "Update": {funcCalls[0], funcCalls[3], funcCalls[13]}} updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[8]}, "List": {listmockCall[0]}} current := enterpriseApi.ClusterManager{ From 2091b8b98de3de9c27c63836909c50be1d5469d3 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 17 Jul 2023 15:12:24 -0700 Subject: [PATCH 62/85] Added sites, status and server endpoints --- pkg/gateway/splunk/introspection/gateway.go | 137 +++++ .../model/services/cluster/config_types.go | 146 ++++++ .../cluster/manager/searchhead_types.go | 55 ++ .../services/cluster/manager/sites_types.go | 105 ++++ .../services/cluster/manager/status_types.go | 99 ++++ .../model/services/cluster/url_types.go | 2 + .../model/services/common/attributes_types.go | 61 +++ .../server/health/deployment_types.go | 57 ++ .../services/server/health/details_types.go | 488 ++++++++++++++++++ .../model/services/server/health/url_types.go | 7 + .../splunk/model/services/server/url_types.go | 7 + .../fixture/cluster_manager_searchhead.json | 55 ++ .../fixture/cluster_manager_sites.json | 109 ++++ .../splunk/services/fixture/fixture.go | 89 ++++ pkg/gateway/splunk/services/gateway.go | 10 + .../implementation/cluster_manager_impl.go | 70 +++ .../services/implementation/server_impl.go | 158 ++++++ .../splunk/implementation/splunk.go | 10 + 18 files changed, 1665 insertions(+) create mode 100644 pkg/gateway/splunk/introspection/gateway.go create mode 100644 pkg/gateway/splunk/model/services/cluster/config_types.go create mode 100644 pkg/gateway/splunk/model/services/cluster/manager/searchhead_types.go create mode 100644 pkg/gateway/splunk/model/services/cluster/manager/sites_types.go create mode 100644 pkg/gateway/splunk/model/services/cluster/manager/status_types.go create mode 100644 pkg/gateway/splunk/model/services/common/attributes_types.go create mode 100644 pkg/gateway/splunk/model/services/server/health/deployment_types.go create mode 100644 pkg/gateway/splunk/model/services/server/health/details_types.go create mode 100644 pkg/gateway/splunk/model/services/server/health/url_types.go create mode 100644 pkg/gateway/splunk/model/services/server/url_types.go create mode 100644 
pkg/gateway/splunk/services/fixture/cluster_manager_searchhead.json create mode 100644 pkg/gateway/splunk/services/fixture/cluster_manager_sites.json create mode 100644 pkg/gateway/splunk/services/implementation/server_impl.go diff --git a/pkg/gateway/splunk/introspection/gateway.go b/pkg/gateway/splunk/introspection/gateway.go new file mode 100644 index 000000000..37d723d56 --- /dev/null +++ b/pkg/gateway/splunk/introspection/gateway.go @@ -0,0 +1,137 @@ +package introspection + +import ( + "context" + + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" +) + +// EventPublisher is a function type for publishing events associated +// with gateway functions. +type EventPublisher func(reason, message string) + +// Factory is the interface for creating new Gateway objects. +type Factory interface { + NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher EventPublisher) (Gateway, error) +} + +// Gateway holds the state information for talking to +// splunk gateway backend. +type Gateway interface { + + // Heading: Introspect API list + + // Get information about the volume (logical drives) in use by the Splunk deployment. + // endpoint: /services/data/index-volumes + GetIndexVolumes() error + + // List the recognized indexes on the server. + // endpoint: /services/data/indexes + GetIndexes() error + + // List bucket attributes for all indexes. + // endpoint: /services/data/indexes-extended + GetIndexAllBucketInformation() error + + // Get disk usage information about all summaries in an indexer. + // endpoint: /services/data/summaries + GetDataSummaries() error + + // Shows the overall health of a distributed deployment. The health of the deployment can be red, yellow, or green. The overall health of the deployment is based on the health of all features reporting to it. + // Authentication and Authorization: + // Requires the admin role or list_health capability. + // endpoint: /services/server/health/deployment + GetServerDeploymentHealth() error + + // Shows the overall health of splunkd. The health of splunkd can be red, yellow, or green. The health of splunkd is based on the health of all features reporting to it. + // Authentication and Authorization: + // Requires the admin role or list_health capability. + // Get health status of distributed deployment features. + // endpoint: https://:/services/server/health/deployment/details + GetServerDeploymentHealthDetails() error + + // Shows the overall health of splunkd. The health of splunkd can be red, yellow, or green. The health of splunkd is based on the health of all features reporting to it. + // /services/server/health/splunkd + GetSplunkdHealth() error + + // Shows the overall health of the splunkd health status tree, as well as each feature node and its respective color. For unhealthy nodes (non-green), the output includes reasons, indicators, thresholds, messages, and so on. + // Authentication and Authorization: + // Requires the admin role or list_health capability. + // /services/server/health/splunkd/details + GetSplunkdHealthDetails() error + + // Shows the overall health of splunkd. The health of splunkd can be red, yellow, or green. The health of splunkd is based on the health of all features reporting to it. + // Authentication and Authorization + // Requires the admin role or list_health capability. 
+ // Get the health status of splunkd + // endpoint: https://:/services/server/health/splunkd + GetServerHealthConfig() error + + // Access information about the currently running Splunk instance. + // Note: This endpoint provides information on the currently running Splunk instance. Some values returned + // in the GET response reflect server status information. However, this endpoint is meant to provide + // information on the currently running instance, not the machine where the instance is running. + // Server status values returned by this endpoint should be considered deprecated and might not continue + // to be accessible from this endpoint. Use server/sysinfo to access server status instead. + // endpoint: https://:/services/server/info + GetServerInfo() error + + // Access system introspection artifacts. + // endpoint: https://:/services/server/introspection + GetServerIntrospection() error + + // List server/status child resources. + // endpoint: https://:/services/server/status + GetServerStatus() error + + // Access search job information. + // endpoint: https://:/services/server/status/dispatch-artifacts + GetServerDispatchArtifactsStatus() error + + // Access information about the private BTree database. + // GET + // Access private BTree database information. + // endpoint: https://:/services/server/status/fishbucket + GetServerFishBucketStatus() error + + // Check for system file irregularities. + // endpoint: https://:/services/server/status/installed-file-integrity + GetServerInstalledFileIntegrityStatus() error + + // Access search concurrency metrics for a standalone Splunk Enterprise instance. + // Get search concurrency limits for a standalone Splunk Enterprise instance. + // endpoint: https://:/services/server/status/limits/search-concurrency + GetServerSearchConcurrencyLimitsStatus() error + + // Access disk utilization information for filesystems that have Splunk objects, such as indexes, volumes, and logs. A filesystem can span multiple physical disk partitions. + // Get disk utilization information. + // endpoint: https://:/services/server/status/partitions-space + GetServerPartitionSpaceStatus() error + + // Get current resource (CPU, RAM, VM, I/O, file handle) utilization for entire host, and per Splunk-related processes. + // endpoint: https://:/services/server/status/resource-usage + GetServerResourceUsageStatus() error + + // Access host-level dynamic CPU utilization and paging information. + // endpoint: https://:/services/server/status/resource-usage/hostwide + GetServerHostwideResourceUsageState() error + + // Access the most recent disk I/O statistics for each disk. This endpoint is currently supported for Linux, Windows, and Solaris. By default this endpoint is updated every 60s seconds. + // endpoint: https://:/services/server/status/resource-usage/iostats + GetServerIostatResourceUsageStatus() error + + // Access operating system resource utilization information. + // endpoint: https://:/services/server/status/resource-usage/splunk-processes + GetSplunkProcessesResourceUsageStatus() error + + // Exposes relevant information about the resources and OS settings of the machine where Splunk Enterprise is running. + // Usage details + // This endpoint provides status information for the server where the current Splunk instance is running. + // The GET request response includes Kernel Transparent Huge Pages (THP) and ulimit status. + // Note: Some properties returned by this endpoint are also returned by server/info. 
However, + // the server/info endpoint is meant to provide information on the currently running Splunk instance and not + // the machine where the instance is running. Server status values returned by server/info should be considered + // deprecated and might not continue to be accessible from this endpoint. Use the server/sysinfo endpoint for + // server information instead. + GetServerSysInfo() error +} diff --git a/pkg/gateway/splunk/model/services/cluster/config_types.go b/pkg/gateway/splunk/model/services/cluster/config_types.go new file mode 100644 index 000000000..816f25a3b --- /dev/null +++ b/pkg/gateway/splunk/model/services/cluster/config_types.go @@ -0,0 +1,146 @@ +package cluster + +import "time" + +// Description: Access cluster node configuration details. +// Rest End point API : services/cluster/config +type ClusterConfigContent struct { + AccessLoggingForHeartbeats bool `json:"access_logging_for_heartbeats"` + AssignPrimariesToAllSites string `json:"assign_primaries_to_all_sites"` + AutoRebalancePrimaries bool `json:"auto_rebalance_primaries"` + BucketsToSummarize string `json:"buckets_to_summarize"` + ClusterLabel string `json:"cluster_label"` + CmComTimeout int `json:"cm_com_timeout"` + CmHeartbeatPeriod int `json:"cm_heartbeat_period"` + CmMaxHbmissCount int `json:"cm_max_hbmiss_count"` + CxnTimeout int `json:"cxn_timeout"` + DecommissionForceFinishIdleTime int `json:"decommission_force_finish_idle_time"` + DecommissionForceTimeout int `json:"decommission_force_timeout"` + Disabled bool `json:"disabled"` + EaiAcl interface{} `json:"eai:acl"` + EnableParallelAddPeer string `json:"enable_parallel_add_peer"` + EnablePrimaryFixupDuringMaintenance string `json:"enable_primary_fixup_during_maintenance"` + ExecutorWorkers int `json:"executor_workers"` + ForwarderSiteFailover string `json:"forwarder_site_failover"` + ForwarderdataRcvPort int `json:"forwarderdata_rcv_port"` + ForwarderdataUseSsl bool `json:"forwarderdata_use_ssl"` + FreezeDuringMaintenance string `json:"freeze_during_maintenance"` + FrozenNotificationsPerBatch int `json:"frozen_notifications_per_batch"` + GenerationPollInterval int `json:"generation_poll_interval"` + GUID string `json:"guid"` + HeartbeatPeriod int64 `json:"heartbeat_period"` + HeartbeatTimeout int `json:"heartbeat_timeout"` + LogBucketDuringAddpeer string `json:"log_bucket_during_addpeer"` + ManagerSwitchoverIdxPing bool `json:"manager_switchover_idx_ping"` + ManagerSwitchoverMode string `json:"manager_switchover_mode"` + ManagerSwitchoverQuietPeriod int `json:"manager_switchover_quiet_period"` + ManagerURI string `json:"manager_uri"` + MasterURI string `json:"master_uri"` + MaxAutoServiceInterval int `json:"max_auto_service_interval"` + MaxConcurrentPeersJoining int `json:"max_concurrent_peers_joining"` + MaxDelayedUpdatesTimeMs int `json:"max_delayed_updates_time_ms "` + MaxFixupTimeMs int `json:"max_fixup_time_ms"` + MaxPeerBuildLoad int `json:"max_peer_build_load"` + MaxPeerRepLoad int `json:"max_peer_rep_load"` + MaxPeerSumRepLoad int `json:"max_peer_sum_rep_load"` + MaxPeersToDownloadBundle int `json:"max_peers_to_download_bundle"` + MaxPrimaryBackupsPerService int `json:"max_primary_backups_per_service"` + MaxRemoveSummaryS2PerService int `json:"max_remove_summary_s2_per_service"` + Mode string `json:"mode"` + Multisite string `json:"multisite"` + NotifyBucketsPeriod int `json:"notify_buckets_period"` + NotifyScanMinPeriod int `json:"notify_scan_min_period"` + NotifyScanPeriod int `json:"notify_scan_period"` + PercentPeersToReload int 
`json:"percent_peers_to_reload"` + PercentPeersToRestart int `json:"percent_peers_to_restart"` + PingFlag bool `json:"ping_flag"` + PrecompressClusterBundle bool `json:"precompress_cluster_bundle"` + QuietPeriod int `json:"quiet_period"` + RcvTimeout int `json:"rcv_timeout"` + RebalancePrimariesExecutionLimitMs int `json:"rebalance_primaries_execution_limit_ms"` + RebalanceThreshold float64 `json:"rebalance_threshold"` + RegisterForwarderAddress string `json:"register_forwarder_address"` + RegisterReplicationAddress string `json:"register_replication_address"` + RegisterSearchAddress string `json:"register_search_address"` + RemoteStorageRetentionPeriod int `json:"remote_storage_retention_period"` + RemoteStorageUploadTimeout int `json:"remote_storage_upload_timeout"` + RepCxnTimeout int `json:"rep_cxn_timeout"` + RepMaxRcvTimeout int `json:"rep_max_rcv_timeout"` + RepMaxSendTimeout int `json:"rep_max_send_timeout"` + RepRcvTimeout int `json:"rep_rcv_timeout"` + RepSendTimeout int `json:"rep_send_timeout"` + ReplicationFactor int `json:"replication_factor"` + ReplicationPort interface{} `json:"replication_port"` + ReplicationUseSsl bool `json:"replication_use_ssl"` + ReportingDelayPeriod int `json:"reporting_delay_period"` + RestartInactivityTimeout int `json:"restart_inactivity_timeout"` + RestartTimeout int `json:"restart_timeout"` + RollingRestart string `json:"rolling_restart"` + RollingRestartCondition string `json:"rolling_restart_condition"` + SearchFactor int `json:"search_factor"` + SearchFilesRetryTimeout int `json:"search_files_retry_timeout"` + SearchableRebalance string `json:"searchable_rebalance"` + SearchableRollingPeerStateDelayInterval int `json:"searchable_rolling_peer_state_delay_interval"` + Secret string `json:"secret"` + SendTimeout int `json:"send_timeout"` + ServiceExecutionThresholdMs int `json:"service_execution_threshold_ms"` + ServiceInterval int `json:"service_interval"` + ServiceJobsMsec int `json:"service_jobs_msec"` + Site string `json:"site"` + SiteBySite bool `json:"site_by_site"` + SiteReplicationFactor string `json:"site_replication_factor"` + SiteSearchFactor string `json:"site_search_factor"` + StreamingReplicationWaitSecs int `json:"streaming_replication_wait_secs"` + SummaryReplication string `json:"summary_replication"` + SummaryUpdateBatchSize int `json:"summary_update_batch_size"` + TargetWaitTime int `json:"target_wait_time"` + UseBatchDiscard string `json:"use_batch_discard"` + UseBatchMaskChanges string `json:"use_batch_mask_changes"` + UseBatchRemoteRepChanges string `json:"use_batch_remote_rep_changes"` +} + +type ClusterConfigHeader struct { + Links struct { + Reload string `json:"_reload,omitempty"` + Acl string `json:"_acl,omitempty"` + } `json:"links"` + Origin string `json:"origin,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Generator struct { + Build string `json:"build,omitempty"` + Version string `json:"version,omitempty"` + } `json:"generator"` + Entry []struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Links struct { + Alternate string `json:"alternate,omitempty"` + List string `json:"list,omitempty"` + Reload string `json:"_reload,omitempty"` + Edit string `json:"edit,omitempty"` + Disable string `json:"disable,omitempty"` + } `json:"links,omitempty"` + Author string `json:"author,omitempty"` + Acl struct { + App string `json:"app,omitempty"` + CanList bool `json:"can_list,omitempty"` + CanWrite bool `json:"can_write,omitempty"` + 
Modifiable bool `json:"modifiable,omitempty"` + Owner string `json:"owner,omitempty"` + Perms struct { + Read []string `json:"read,omitempty"` + Write []string `json:"write,omitempty"` + } `json:"perms,omitempty"` + Removable bool `json:"removable,omitempty"` + Sharing string `json:"sharing,omitempty"` + } `json:"acl,omitempty"` + Content ClusterConfigContent `json:"content,omitempty"` + } `json:"entry,omitempty"` + Paging struct { + Total int `json:"total,omitempty"` + PerPage int `json:"perPage,omitempty"` + Offset int `json:"offset,omitempty"` + } `json:"paging,omitempty"` + Messages []interface{} `json:"messages,omitempty"` +} diff --git a/pkg/gateway/splunk/model/services/cluster/manager/searchhead_types.go b/pkg/gateway/splunk/model/services/cluster/manager/searchhead_types.go new file mode 100644 index 000000000..982efa570 --- /dev/null +++ b/pkg/gateway/splunk/model/services/cluster/manager/searchhead_types.go @@ -0,0 +1,55 @@ +package manager + +import "time" + +// https://splunk-cm-cluster-master-service:8089/services/cluster/manager/searchheads?count=0&output_mode=json + +type SearchHeadContent struct { + EaiAcl interface{} `json:"eai:acl"` + HostPortPair string `json:"host_port_pair"` + Label string `json:"label"` + Site string `json:"site"` + Status string `json:"status"` +} + +// ClusterMasterSearchHeadsHeader +type ClusterMasterSearchHeadHeader struct { + Links struct { + } `json:"links"` + Origin string `json:"origin"` + Updated time.Time `json:"updated"` + Generator struct { + Build string `json:"build"` + Version string `json:"version"` + } `json:"generator"` + Entry []struct { + Name string `json:"name"` + ID string `json:"id"` + Updated time.Time `json:"updated"` + Links struct { + Alternate string `json:"alternate"` + List string `json:"list"` + } `json:"links"` + Author string `json:"author"` + Acl struct { + App string `json:"app"` + CanList bool `json:"can_list"` + CanWrite bool `json:"can_write"` + Modifiable bool `json:"modifiable"` + Owner string `json:"owner"` + Perms struct { + Read []string `json:"read"` + Write []string `json:"write"` + } `json:"perms"` + Removable bool `json:"removable"` + Sharing string `json:"sharing"` + } `json:"acl"` + Content SearchHeadContent `json:"content"` + } `json:"entry"` + Paging struct { + Total int `json:"total"` + PerPage int `json:"perPage"` + Offset int `json:"offset"` + } `json:"paging"` + Messages []interface{} `json:"messages"` +} diff --git a/pkg/gateway/splunk/model/services/cluster/manager/sites_types.go b/pkg/gateway/splunk/model/services/cluster/manager/sites_types.go new file mode 100644 index 000000000..f23677a95 --- /dev/null +++ b/pkg/gateway/splunk/model/services/cluster/manager/sites_types.go @@ -0,0 +1,105 @@ +package manager + +import "time" + +// Description: Access cluster site information. 
+// Rest End Point: services/cluster/manager/sites +type ClusterManagerSiteContent struct { + ActiveBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` + } `json:"active_bundle,omitempty"` + ApplyBundleStatus struct { + InvalidBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + BundleValidationErrorsOnMaster []interface{} `json:"bundle_validation_errors_on_master,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` + } `json:"invalid_bundle,omitempty"` + ReloadBundleIssued bool `json:"reload_bundle_issued,omitempty"` + Status string `json:"status,omitempty"` + } `json:"apply_bundle_status,omitempty"` + AvailableSites string `json:"available_sites,omitempty"` + BackupAndRestorePrimaries bool `json:"backup_and_restore_primaries,omitempty"` + ControlledRollingRestartFlag bool `json:"controlled_rolling_restart_flag,omitempty"` + EaiAcl interface{} `json:"eai:acl,omitempty"` + ForwarderSiteFailover string `json:"forwarder_site_failover,omitempty"` + IndexingReadyFlag bool `json:"indexing_ready_flag,omitempty"` + InitializedFlag bool `json:"initialized_flag,omitempty"` + Label string `json:"label,omitempty"` + LastCheckRestartBundleResult bool `json:"last_check_restart_bundle_result,omitempty"` + LastDryRunBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` + } `json:"last_dry_run_bundle,omitempty"` + LastValidatedBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + IsValidBundle bool `json:"is_valid_bundle,omitempty"` + Timestamp int `json:"timestamp,omitempty"` + } `json:"last_validated_bundle,omitempty"` + LatestBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` + } `json:"latest_bundle,omitempty"` + MaintenanceMode bool `json:"maintenance_mode,omitempty"` + Multisite bool `json:"multisite,omitempty"` + PreviousActiveBundle struct { + BundlePath string `json:"bundle_path,omitempty"` + Checksum string `json:"checksum,omitempty"` + Timestamp int `json:"timestamp,omitempty"` + } `json:"previous_active_bundle,omitempty"` + PrimariesBackupStatus string `json:"primaries_backup_status,omitempty"` + QuietPeriodFlag bool `json:"quiet_period_flag,omitempty"` + RollingRestartFlag bool `json:"rolling_restart_flag,omitempty"` + RollingRestartOrUpgrade bool `json:"rolling_restart_or_upgrade,omitempty"` + ServiceReadyFlag bool `json:"service_ready_flag,omitempty"` + SiteReplicationFactor string `json:"site_replication_factor,omitempty"` + SiteSearchFactor string `json:"site_search_factor,omitempty"` + StartTime int `json:"start_time,omitempty"` + SummaryReplication string `json:"summary_replication,omitempty"` +} + +type ClusterManagerSiteHeader struct { + Links struct { + } `json:"links,omitempty"` + Origin string `json:"origin,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Generator struct { + Build string `json:"build,omitempty"` + Version string `json:"version,omitempty"` + } `json:"generator,omitempty"` + Entry []struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Links struct { + Alternate string `json:"alternate,omitempty"` + List string `json:"list,omitempty"` + } 
`json:"links,omitempty"` + Author string `json:"author,omitempty"` + Acl struct { + App string `json:"app,omitempty"` + CanList bool `json:"can_list,omitempty"` + CanWrite bool `json:"can_write,omitempty"` + Modifiable bool `json:"modifiable,omitempty"` + Owner string `json:"owner,omitempty"` + Perms struct { + Read []string `json:"read,omitempty"` + Write []string `json:"write,omitempty"` + } `json:"perms,omitempty"` + Removable bool `json:"removable,omitempty"` + Sharing string `json:"sharing,omitempty"` + } `json:"acl,omitempty"` + Content ClusterManagerSiteContent `json:"content,omitempty"` + } `json:"entry,omitempty"` + Paging struct { + Total int `json:"total,omitempty"` + PerPage int `json:"perPage,omitempty"` + Offset int `json:"offset,omitempty"` + } `json:"paging,omitempty"` + Messages []interface{} `json:"messages,omitempty"` +} diff --git a/pkg/gateway/splunk/model/services/cluster/manager/status_types.go b/pkg/gateway/splunk/model/services/cluster/manager/status_types.go new file mode 100644 index 000000000..19f9d09d4 --- /dev/null +++ b/pkg/gateway/splunk/model/services/cluster/manager/status_types.go @@ -0,0 +1,99 @@ +package manager + +import "time" + +// Description: Endpoint to get the status of a rolling restart. +// Rest End Point: services/cluster/manager/status +type ClusterManagerStatusContent struct { + AvailableSites string `json:"available_sites"` + DecommissionForceTimeout string `json:"decommission_force_timeout"` + EaiAcl interface{} `json:"eai:acl"` + HaMode string `json:"ha_mode"` + MaintenanceMode bool `json:"maintenance_mode"` + Messages string `json:"messages"` + Multisite bool `json:"multisite"` + Peers struct { + One88C23DDD6414BA2B651C042F809A0B3 struct { + Label string `json:"label"` + Site string `json:"site"` + Status string `json:"status"` + } `json:"188C23DD-D641-4BA2-B651-C042F809A0B3"` + OneFBC4C960AD04C0084684DDA988FB808 struct { + Label string `json:"label"` + Site string `json:"site"` + Status string `json:"status"` + } `json:"1FBC4C96-0AD0-4C00-8468-4DDA988FB808"` + ThreeA617349B0774E0FB76A41C300B00326 struct { + Label string `json:"label"` + Site string `json:"site"` + Status string `json:"status"` + } `json:"3A617349-B077-4E0F-B76A-41C300B00326"` + SevenD3E85ABB17A47A6B5E9405FB889AD25 struct { + Label string `json:"label"` + Site string `json:"site"` + Status string `json:"status"` + } `json:"7D3E85AB-B17A-47A6-B5E9-405FB889AD25"` + CB87DA8D38FF42D8B7EC076C97D77E18 struct { + Label string `json:"label"` + Site string `json:"site"` + Status string `json:"status"` + } `json:"CB87DA8D-38FF-42D8-B7EC-076C97D77E18"` + F881BA5FE1814C09BB3396131460678E struct { + Label string `json:"label"` + Site string `json:"site"` + Status string `json:"status"` + } `json:"F881BA5F-E181-4C09-BB33-96131460678E"` + } `json:"peers"` + RestartInactivityTimeout string `json:"restart_inactivity_timeout"` + RestartProgress struct { + Done []interface{} `json:"done"` + Failed []interface{} `json:"failed"` + InProgress []interface{} `json:"in_progress"` + ToBeRestarted []interface{} `json:"to_be_restarted"` + } `json:"restart_progress"` + RollingRestartFlag bool `json:"rolling_restart_flag"` + RollingRestartOrUpgrade bool `json:"rolling_restart_or_upgrade"` + SearchableRolling bool `json:"searchable_rolling"` + ServiceReadyFlag bool `json:"service_ready_flag"` +} + +type ClusterManagerStatusHeader struct { + Links struct { + } `json:"links"` + Origin string `json:"origin"` + Updated time.Time `json:"updated"` + Generator struct { + Build string `json:"build"` + Version 
string `json:"version"` + } `json:"generator"` + Entry []struct { + Name string `json:"name"` + ID string `json:"id"` + Updated time.Time `json:"updated"` + Links struct { + Alternate string `json:"alternate"` + List string `json:"list"` + } `json:"links"` + Author string `json:"author"` + Acl struct { + App string `json:"app"` + CanList bool `json:"can_list"` + CanWrite bool `json:"can_write"` + Modifiable bool `json:"modifiable"` + Owner string `json:"owner"` + Perms struct { + Read []string `json:"read"` + Write []string `json:"write"` + } `json:"perms"` + Removable bool `json:"removable"` + Sharing string `json:"sharing"` + } `json:"acl"` + Content ClusterManagerStatusContent `json:"content"` + } `json:"entry"` + Paging struct { + Total int `json:"total"` + PerPage int `json:"perPage"` + Offset int `json:"offset"` + } `json:"paging"` + Messages []interface{} `json:"messages"` +} diff --git a/pkg/gateway/splunk/model/services/cluster/url_types.go b/pkg/gateway/splunk/model/services/cluster/url_types.go index 24081e9c2..59281f6ac 100644 --- a/pkg/gateway/splunk/model/services/cluster/url_types.go +++ b/pkg/gateway/splunk/model/services/cluster/url_types.go @@ -28,4 +28,6 @@ const ( GetLicenseManagerLocalPeers = "/services/licenser/localslave" GetSearchHeadCaptainInfoUrl = "/services/shcluster/captain/info" + + GetClusterManagerStatusUrl = "/services/cluster/manager/status" ) diff --git a/pkg/gateway/splunk/model/services/common/attributes_types.go b/pkg/gateway/splunk/model/services/common/attributes_types.go new file mode 100644 index 000000000..fff1fe196 --- /dev/null +++ b/pkg/gateway/splunk/model/services/common/attributes_types.go @@ -0,0 +1,61 @@ +package common + +import "time" + +type Perms struct { + Read []string `json:"read,omitempty"` + Write []string `json:"write,omitempty"` +} + +type ACL struct { + App string `json:"app,omitempty"` + CanList bool `json:"can_list,omitempty"` + CanWrite bool `json:"can_write,omitempty"` + Modifiable bool `json:"modifiable,omitempty"` + Owner string `json:"owner,omitempty"` + Perms Perms `json:"perms,omitempty"` + Removable bool `json:"removable,omitempty"` + Sharing string `json:"sharing,omitempty"` +} + +type HeaderLinks struct { + Create string `json:"create,omitempty"` + Reload string `json:"_reload,omitempty"` + ACL string `json:"_acl,omitempty"` +} + +type Generator struct { + Build string `json:"build,omitempty"` + Version string `json:"version,omitempty"` +} + +type EntryLinks struct { + Alternate string `json:"alternate,omitempty"` + List string `json:"list,omitempty"` +} + +type Entry struct { + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Links EntryLinks `json:"links,omitempty"` + Author string `json:"author,omitempty"` + ACL ACL `json:"acl,omitempty"` + Content interface{} `json:"content,omitempty"` + Content0 interface{} `json:"content0,omitempty"` +} + +type Paging struct { + Total int `json:"total"` + PerPage int `json:"perPage"` + Offset int `json:"offset"` +} +type Header struct { + Links HeaderLinks `json:"links,omitempty"` + Origin string `json:"origin,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Generator Generator `json:"generator,omitempty"` + Entry []Entry `json:"acl,omitempty"` + Paging Paging `json:"paging,omitempty"` + Messages []interface{} `json:"messages,omitempty"` +} diff --git a/pkg/gateway/splunk/model/services/server/health/deployment_types.go b/pkg/gateway/splunk/model/services/server/health/deployment_types.go new 
file mode 100644 index 000000000..5aac69625 --- /dev/null +++ b/pkg/gateway/splunk/model/services/server/health/deployment_types.go @@ -0,0 +1,57 @@ +package health + +import "time" + +// Description: Endpoint to get the status of a rolling restart. +// Rest End Point: services/cluster/manager/status + +type DeploymentHeader struct { + Links struct { + } `json:"links"` + Origin string `json:"origin"` + Updated time.Time `json:"updated"` + Generator struct { + Build string `json:"build"` + Version string `json:"version"` + } `json:"generator"` + Entry []struct { + Name string `json:"name"` + ID string `json:"id"` + Updated time.Time `json:"updated"` + Links struct { + Alternate string `json:"alternate"` + List string `json:"list"` + Details string `json:"details"` + } `json:"links"` + Author string `json:"author"` + ACL struct { + App string `json:"app"` + CanList bool `json:"can_list"` + CanWrite bool `json:"can_write"` + Modifiable bool `json:"modifiable"` + Owner string `json:"owner"` + Perms struct { + Read []string `json:"read"` + Write []interface{} `json:"write"` + } `json:"perms"` + Removable bool `json:"removable"` + Sharing string `json:"sharing"` + } `json:"acl"` + Fields struct { + Required []interface{} `json:"required"` + Optional []interface{} `json:"optional"` + Wildcard []interface{} `json:"wildcard"` + } `json:"fields"` + Content struct { + Disabled bool `json:"disabled"` + EaiACL interface{} `json:"eai:acl"` + Health string `json:"health"` + } `json:"content"` + } `json:"entry"` + Paging struct { + Total int `json:"total"` + PerPage int `json:"perPage"` + Offset int `json:"offset"` + } `json:"paging"` + Messages []interface{} `json:"messages"` +} diff --git a/pkg/gateway/splunk/model/services/server/health/details_types.go b/pkg/gateway/splunk/model/services/server/health/details_types.go new file mode 100644 index 000000000..dc419c978 --- /dev/null +++ b/pkg/gateway/splunk/model/services/server/health/details_types.go @@ -0,0 +1,488 @@ +package health + +import ( + "time" + + "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/common" +) + +// Description: Endpoint to get the status of a rolling restart. 
+// Rest End Point: services/cluster/manager/status +type DataForwarding struct { + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + Splunk2SplunkForwarding struct { + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + Tcpoutautolb0 struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + S2SConnections struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"s2s_connections,omitempty"` + } `json:"tcpoutautolb-0,omitempty"` + } `json:"splunk-2-splunk_forwarding,omitempty"` +} + +type FileMonitorInput struct { + ForwarderIngestionLatency struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + IngestionLatencyIndexerHealth struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"ingestion_latency_indexer_health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"forwarder_ingestion_latency,omitempty"` + Health string `json:"health,omitempty"` + IngestionLatency struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + IngestionLatencyGapMultiplier struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"ingestion_latency_gap_multiplier,omitempty"` + IngestionLatencyLagSec struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"ingestion_latency_lag_sec,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"ingestion_latency,omitempty"` + LargeAndArchiveFileReader0 struct { + DataOutRate struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"data_out_rate,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"large_and_archive_file_reader-0,omitempty"` + LargeAndArchiveFileReader1 struct { + DataOutRate struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"data_out_rate,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"large_and_archive_file_reader-1,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + RealTimeReader0 struct { + DataOutRate struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string 
`json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"data_out_rate,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"real-time_reader-0,omitempty"` + RealTimeReader1 struct { + DataOutRate struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"data_out_rate,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"real-time_reader-1,omitempty"` +} + +type IndexProcessor struct { + Buckets struct { + BucketsCreatedLast60M struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"buckets_created_last_60m,omitempty"` + CountBucketRenameFailureLast10Mins struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"count_bucket_rename_failure_last_10mins,omitempty"` + DisplayName string `json:"display_name,omitempty"` + GiganticBucketSize struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"gigantic_bucket_size,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + PercentSmallBucketsCreatedLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"percent_small_buckets_created_last_24h,omitempty"` + } `json:"buckets,omitempty"` + DiskSpace struct { + DiskSpaceRemainingMultipleMinfreespace struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"disk_space_remaining_multiple_minfreespace,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + MaxVolumeSizeInvalid struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"max_volume_size_invalid,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"disk_space,omitempty"` + Health string `json:"health,omitempty"` + IndexOptimization struct { + ConcurrentOptimizeProcessesPercent struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"concurrent_optimize_processes_percent,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + } `json:"index_optimization,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + 
+type ClusterBundles struct { + ClusterBundles struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"cluster_bundles,omitempty"` + CountClassicBundleTimeoutLast10Mins struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"count_classic_bundle_timeout_last_10mins,omitempty"` + CountFullBundleUntarLast10Mins struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"count_full_bundle_untar_last_10mins,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type DataDurability struct { + ClusterReplicationFactor struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"cluster_replication_factor,omitempty"` + ClusterSearchFactor struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"cluster_search_factor,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type DataSearchable struct { + DataSearchable struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"data_searchable,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type Indexers struct { + CmServiceIntervalInvalid struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"cm_service_interval_invalid,omitempty"` + Detention struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"detention,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + MissingPeers struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"missing_peers,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type IndexingReady struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + IndexingReady struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"indexing_ready,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type 
ManagerConnectivity struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + MasterConnectivity struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"master_connectivity,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type PeerState struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + SlaveState struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"slave_state,omitempty"` +} + +type PeerVersion struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + SlaveVersion struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"slave_version,omitempty"` +} + +type ReplicationFailures struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + ReplicationFailures struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"replication_failures,omitempty"` +} + +type IndexerClustering struct { + ClusterBundles ClusterBundles `json:"cluster_bundles,omitempty"` + DataDurability DataDurability `json:"data_durability,omitempty"` + DataSearchable DataSearchable `json:"data_searchable,omitempty"` + Health string `json:"health,omitempty"` + Indexers Indexers `json:"indexers,omitempty"` + IndexingReady IndexingReady `json:"indexing_ready,omitempty"` + ManagerConnectivity ManagerConnectivity `json:"manager_connectivity,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + PeerState PeerState `json:"peer_state,omitempty"` + PeerVersion PeerVersion `json:"peer_version,omitempty"` + ReplicationFailures ReplicationFailures `json:"replication_failures,omitempty"` +} + +type ResourceUsage struct { + Health string `json:"health,omitempty"` + Iowait struct { + AvgCPUMaxPercLast3M struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"avg_cpu__max_perc_last_3m,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + SingleCPUMaxPercLast3M struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"single_cpu__max_perc_last_3m,omitempty"` + SumTop3CPUPercsMaxLast3M struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } 
`json:"sum_top3_cpu_percs__max_last_3m,omitempty"` + } `json:"iowait,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` +} + +type SearchScheduler struct { + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + SchedulerSuppression struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + SuppressionListOversized struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"suppression_list_oversized,omitempty"` + } `json:"scheduler_suppression,omitempty"` + SearchLag struct { + CountExtremelyLaggedSearchesLastHour struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"count_extremely_lagged_searches_last_hour,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + PercentSearchesLaggedHighPriorityLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"percent_searches_lagged_high_priority_last_24h,omitempty"` + PercentSearchesLaggedNonHighPriorityLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"percent_searches_lagged_non_high_priority_last_24h,omitempty"` + } `json:"search_lag,omitempty"` + SearchesDelayed struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + PercentSearchesDelayedHighPriorityLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"percent_searches_delayed_high_priority_last_24h,omitempty"` + PercentSearchesDelayedNonHighPriorityLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"percent_searches_delayed_non_high_priority_last_24h,omitempty"` + } `json:"searches_delayed,omitempty"` + SearchesSkippedInTheLast24Hours struct { + DisplayName string `json:"display_name,omitempty"` + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + PercentSearchesSkippedHighPriorityLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } `json:"percent_searches_skipped_high_priority_last_24h,omitempty"` + PercentSearchesSkippedNonHighPriorityLast24H struct { + Description string `json:"description,omitempty"` + Health string `json:"health,omitempty"` + Name string `json:"name,omitempty"` + Path string `json:"path,omitempty"` + } 
`json:"percent_searches_skipped_non_high_priority_last_24h,omitempty"` + } `json:"searches_skipped_in_the_last_24_hours,omitempty"` +} + +type Splunkd struct { + DataForwarding DataForwarding `json:"data_forwarding,omitempty"` + FileMonitorInput FileMonitorInput `json:"file_monitor_input,omitempty"` + Health string `json:"health,omitempty"` + IndexProcessor IndexProcessor `json:"index_processor,omitempty"` + IndexerClustering IndexerClustering `json:"indexer_clustering,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + ResourceUsage ResourceUsage `json:"resource_usage,omitempty"` + SearchScheduler SearchScheduler `json:"search_scheduler,omitempty"` +} + +type Features struct { + Health string `json:"health,omitempty"` + NumRed int `json:"num_red,omitempty"` + NumYellow int `json:"num_yellow,omitempty"` + Splunkd Splunkd `json:"splunkd,omitempty"` +} + +type DeploymentDetail struct { + Disabled bool `json:"disabled,omitempty"` + EaiACL interface{} `json:"eai:acl,omitempty"` + Features Features `json:"features,omitempty"` + Health string `json:"health,omitempty"` +} + +type DeploymentDetailHeader struct { + Links common.EntryLinks `json:"links,omitempty"` + Origin string `json:"origin,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Generator common.Generator `json:"generator,omitempty"` + Entry []common.Entry `json:"entry,omitempty"` + Paging common.Paging `json:"paging,omitempty"` + Messages []interface{} `json:"messages,omitempty"` +} diff --git a/pkg/gateway/splunk/model/services/server/health/url_types.go b/pkg/gateway/splunk/model/services/server/health/url_types.go new file mode 100644 index 000000000..c116c8d05 --- /dev/null +++ b/pkg/gateway/splunk/model/services/server/health/url_types.go @@ -0,0 +1,7 @@ +package health + +const ( + DeploymentDetailsUrl = "server/health/deployment/details" + + SplunkdHealthDetailsUrl = "server/health/splunkd/details" +) diff --git a/pkg/gateway/splunk/model/services/server/url_types.go b/pkg/gateway/splunk/model/services/server/url_types.go new file mode 100644 index 000000000..9d9911b62 --- /dev/null +++ b/pkg/gateway/splunk/model/services/server/url_types.go @@ -0,0 +1,7 @@ +package server + +const ( + InfoUrl = "server/info" + + StatusUrl = "server/status" +) diff --git a/pkg/gateway/splunk/services/fixture/cluster_manager_searchhead.json b/pkg/gateway/splunk/services/fixture/cluster_manager_searchhead.json new file mode 100644 index 000000000..b143380db --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_manager_searchhead.json @@ -0,0 +1,55 @@ +{ + "links":{ + + }, + "origin":"https://splunk-cm-cluster-master-service:8089/services/cluster/manager/searchheads", + "updated":"2022-07-21T06:51:50+00:00", + "generator":{ + "build":"6818ac46f2ec", + "version":"9.0.0" + }, + "entry":[ + { + "name":"3A702D19-3AEF-4D93-8E9D-0022C2C50CF8", + "id":"https://splunk-cm-cluster-master-service:8089/services/cluster/manager/searchheads/3A702D19-3AEF-4D93-8E9D-0022C2C50CF8", + "updated":"1970-01-01T00:00:00+00:00", + "links":{ + "alternate":"/services/cluster/manager/searchheads/3A702D19-3AEF-4D93-8E9D-0022C2C50CF8", + "list":"/services/cluster/manager/searchheads/3A702D19-3AEF-4D93-8E9D-0022C2C50CF8" + }, + "author":"system", + "acl":{ + "app":"", + "can_list":true, + "can_write":true, + "modifiable":false, + "owner":"system", + "perms":{ + "read":[ + "admin", + "splunk-system-role" + ], + "write":[ + "admin", + "splunk-system-role" + ] + }, + "removable":false, + "sharing":"system" + }, 
+ "content":{ + "eai:acl":null, + "host_port_pair":"splunk-cm-cluster-master-0:8089", + "label":"splunk-cm-cluster-master-0", + "site":"site1", + "status":"Connected" + } + } + ], + "paging":{ + "total":1, + "perPage":10000000, + "offset":0 + }, + "messages":[ ] + } \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/cluster_manager_sites.json b/pkg/gateway/splunk/services/fixture/cluster_manager_sites.json new file mode 100644 index 000000000..33ded1751 --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_manager_sites.json @@ -0,0 +1,109 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/sites", + "updated": "2022-07-18T23:56:42+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "site1", + "id": "https://localhost:8089/services/cluster/manager/sites/site1", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/sites/site1", + "list": "/services/cluster/manager/sites/site1" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "peers": { + "7D3E85AB-B17A-47A6-B5E9-405FB889AD25": { + "host_port_pair": "192.168.47.247:8089", + "server_name": "splunk-example-site1-indexer-0" + }, + "CB87DA8D-38FF-42D8-B7EC-076C97D77E18": { + "host_port_pair": "192.168.82.138:8089", + "server_name": "splunk-example-site1-indexer-2" + }, + "F881BA5F-E181-4C09-BB33-96131460678E": { + "host_port_pair": "192.168.11.34:8089", + "server_name": "splunk-example-site1-indexer-1" + } + } + } + }, + { + "name": "site2", + "id": "https://localhost:8089/services/cluster/manager/sites/site2", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/sites/site2", + "list": "/services/cluster/manager/sites/site2" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "peers": { + "188C23DD-D641-4BA2-B651-C042F809A0B3": { + "host_port_pair": "192.168.61.169:8089", + "server_name": "splunk-example-site2-indexer-0" + }, + "1FBC4C96-0AD0-4C00-8468-4DDA988FB808": { + "host_port_pair": "192.168.79.147:8089", + "server_name": "splunk-example-site2-indexer-2" + }, + "3A617349-B077-4E0F-B76A-41C300B00326": { + "host_port_pair": "192.168.10.218:8089", + "server_name": "splunk-example-site2-indexer-1" + } + } + } + } + ], + "paging": { + "total": 2, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/fixture.go b/pkg/gateway/splunk/services/fixture/fixture.go index b8225be2c..edfdfa224 100644 --- a/pkg/gateway/splunk/services/fixture/fixture.go +++ b/pkg/gateway/splunk/services/fixture/fixture.go @@ -191,3 +191,92 @@ func (p *fixtureGateway) GetClusterManagerHealth(ctx context.Context) (*[]manage } return &contentList, nil } + +// GetClusterManagerSites Access cluster site information. +// list List available cluster sites. 
+// endpoint: https://:/services/cluster/manager/sites +func (p *fixtureGateway) GetClusterManagerSites(ctx context.Context) (*[]managermodel.ClusterManagerSiteContent, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + content, err := ioutil.ReadFile("cluster_manager_sites.json") + if err != nil { + log.Error(err, "fixture: error in get cluster manager sites") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := clustermodel.GetClusterManagerSitesUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // fetch the config header into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &managermodel.ClusterManagerSiteHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager sites failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []managermodel.ClusterManagerSiteContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, nil +} + +// GetClusterManagerStatus Endpoint to get the status of a rolling restart. +// endpoint: https://:/services/cluster/manager/status +func (p *fixtureGateway) GetClusterManagerStatus(ctx context.Context) (*[]managermodel.ClusterManagerStatusContent, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + content, err := ioutil.ReadFile("cluster_manager_status.json") + if err != nil { + log.Error(err, "fixture: error in get cluster manager status") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := clustermodel.GetClusterManagerStatusUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // fetch the config header into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &managermodel.ClusterManagerStatusHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}).
+ Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager status failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []managermodel.ClusterManagerStatusContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, nil +} diff --git a/pkg/gateway/splunk/services/gateway.go b/pkg/gateway/splunk/services/gateway.go index 6e15a549a..3dcb00b63 100644 --- a/pkg/gateway/splunk/services/gateway.go +++ b/pkg/gateway/splunk/services/gateway.go @@ -34,4 +34,14 @@ type Gateway interface { // Access cluster manager peers. // endpoint: https://:/services/cluster/manager/peers GetClusterManagerPeers(ctx context.Context) (*[]managermodel.ClusterManagerPeerContent, error) + + // Access cluster site information. + // list List available cluster sites. + // endpoint: https://:/services/cluster/manager/sites + GetClusterManagerSites(ctx context.Context) (*[]managermodel.ClusterManagerSiteContent, error) + + // GetClusterManagerStatus Endpoint to get the status of a rolling restart. + // GET the status of a rolling restart. + // endpoint: https://:/services/cluster/manager/status + GetClusterManagerStatus(ctx context.Context) (*[]managermodel.ClusterManagerStatusContent, error) } diff --git a/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go b/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go index 5defd0869..2d6cb00e9 100644 --- a/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go +++ b/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go @@ -133,3 +133,73 @@ func (p *splunkGateway) GetClusterManagerHealth(context context.Context) (*[]man } return &contentList, err } + +// Access cluster site information. +// list List available cluster sites. +// endpoint: https://:/services/cluster/manager/sites +func (p *splunkGateway) GetClusterManagerSites(context context.Context) (*[]managermodel.ClusterManagerSiteContent, error) { + url := clustermodel.GetClusterManagerSitesUrl + + // fetch the config header into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &managermodel.ClusterManagerSiteHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(url) + if err != nil { + p.log.Error(err, "get cluster manager sites failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []managermodel.ClusterManagerSiteContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, err +} + +// GetClusterManagerStatus Endpoint to get the status of a rolling restart. +// GET the status of a rolling restart.
+// endpoint: https://:/services/cluster/manager/status +func (p *splunkGateway) GetClusterManagerStatus(context context.Context) (*[]managermodel.ClusterManagerStatusContent, error) { + url := clustermodel.GetClusterManagerStatusUrl + + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &managermodel.ClusterManagerStatusHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(url) + if err != nil { + p.log.Error(err, "get cluster manager status failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []managermodel.ClusterManagerStatusContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, err +} diff --git a/pkg/gateway/splunk/services/implementation/server_impl.go b/pkg/gateway/splunk/services/implementation/server_impl.go new file mode 100644 index 000000000..5facf4840 --- /dev/null +++ b/pkg/gateway/splunk/services/implementation/server_impl.go @@ -0,0 +1,158 @@ +package impl + +import ( + "context" + "net/http" + + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" + servermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/server" + healthmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/server/health" +) + +// Shows the overall health of splunkd. The health of splunkd can be red, yellow, or green. The health of splunkd is based on the health of all features reporting to it. +// Authentication and Authorization: +// +// Requires the admin role or list_health capability. +// +// Get health status of distributed deployment features. +// endpoint: https://:/services/server/health/deployment/details +func (p *splunkGateway) GetServerDeploymentHealthDetails(context context.Context) (*[]healthmodel.DeploymentContent, error) { + url := healthmodel.DeploymentDetailsUrl + + // fetch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &healthmodel.DeploymentHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + Get(url) + if err != nil { + p.log.Error(err, "get deployment details failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []managermodel.DeploymentContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, err +} + +// Shows the overall health of the splunkd health status tree, as well as each feature node and its respective color. For unhealthy nodes (non-green), the output includes reasons, indicators, thresholds, messages, and so on. +// Authentication and Authorization: +// Requires the admin role or list_health capability. 
+// /services/server/health/splunkd/details + +func (p *splunkGateway) GetSplunkdHealthDetails(context context.Context) (*[]healthmodel.DeploymentContent, error) { + url := healthmodel.SplunkdHealthDetailsUrl + + // fetch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &healthmodel.DeploymentHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + Get(url) + if err != nil { + p.log.Error(err, "get splunkd health details failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []managermodel.DeploymentContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, err +} + +// Access information about the currently running Splunk instance. +// Note: This endpoint provides information on the currently running Splunk instance. Some values returned +// in the GET response reflect server status information. However, this endpoint is meant to provide +// information on the currently running instance, not the machine where the instance is running. +// Server status values returned by this endpoint should be considered deprecated and might not continue +// to be accessible from this endpoint. Use server/sysinfo to access server status instead. +// endpoint: https://:/services/server/info + +func (p *splunkGateway) GetServerInfo(context context.Context) (*[]servermodel.DeploymentContent, error) { + url := servermodel.InfoUrl + + // fetch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &healthmodel.DeploymentHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + Get(url) + if err != nil { + p.log.Error(err, "get splunkd health details failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []managermodel.DeploymentContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, err +} + +// List server/status child resources. +// endpoint: https://:/services/server/status + +func (p *splunkGateway) GetServerStatus(context context.Context) (*[]servermodel.DeploymentContent, error) { + url := servermodel.StatusUrl + + // fetch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &healthmodel.DeploymentHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). 
+ Get(url) + if err != nil { + p.log.Error(err, "get splunkd health details failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []managermodel.DeploymentContent{} + for _, entry := range envelop.Entry { + contentList = append(contentList, entry.Content) + } + return &contentList, err +} diff --git a/pkg/provisioner/splunk/implementation/splunk.go b/pkg/provisioner/splunk/implementation/splunk.go index f1c2dd76b..75a18f9aa 100644 --- a/pkg/provisioner/splunk/implementation/splunk.go +++ b/pkg/provisioner/splunk/implementation/splunk.go @@ -48,6 +48,16 @@ var callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner return healthList, err } +var callGetClusterManagerStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerStatusContent, error) { + statuslist, err := p.gateway.GetClusterManagerStatus(ctx) + if err != nil { + return nil, err + } else if statuslist == nil { + return nil, fmt.Errorf("status list is empty") + } + return statuslist, err +} + var callGetClusterManagerPeersStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) { peerlist, err := p.gateway.GetClusterManagerPeers(ctx) if err != nil { From bc5354fdff6bf0e24f6e58d440f6b8389b53752b Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 17 Jul 2023 15:41:36 -0700 Subject: [PATCH 63/85] updated deployment types --- .../services/server/health/deployment_types.go | 13 +++++++------ .../splunk/services/implementation/server_impl.go | 13 ++++++------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/gateway/splunk/model/services/server/health/deployment_types.go b/pkg/gateway/splunk/model/services/server/health/deployment_types.go index 5aac69625..477610374 100644 --- a/pkg/gateway/splunk/model/services/server/health/deployment_types.go +++ b/pkg/gateway/splunk/model/services/server/health/deployment_types.go @@ -3,7 +3,12 @@ package health import "time" // Description: Endpoint to get the status of a rolling restart. 
-// Rest End Point: services/cluster/manager/status +// Rest End Point: services/server/health/deployment/details + +type DeploymentContent struct { + Health string `json:"health,omitempty"` + Reason string `json:"reason,omitempty"` +} type DeploymentHeader struct { Links struct { @@ -42,11 +47,7 @@ type DeploymentHeader struct { Optional []interface{} `json:"optional"` Wildcard []interface{} `json:"wildcard"` } `json:"fields"` - Content struct { - Disabled bool `json:"disabled"` - EaiACL interface{} `json:"eai:acl"` - Health string `json:"health"` - } `json:"content"` + Content DeploymentContent `json:"content,omitempty"` } `json:"entry"` Paging struct { Total int `json:"total"` diff --git a/pkg/gateway/splunk/services/implementation/server_impl.go b/pkg/gateway/splunk/services/implementation/server_impl.go index 5facf4840..7f9d004eb 100644 --- a/pkg/gateway/splunk/services/implementation/server_impl.go +++ b/pkg/gateway/splunk/services/implementation/server_impl.go @@ -5,7 +5,6 @@ import ( "net/http" splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" - managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" servermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/server" healthmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/server/health" ) @@ -41,7 +40,7 @@ func (p *splunkGateway) GetServerDeploymentHealthDetails(context context.Context return nil, splunkError } - contentList := []managermodel.DeploymentContent{} + contentList := []healthmodel.DeploymentContent{} for _, entry := range envelop.Entry { contentList = append(contentList, entry.Content) } @@ -77,7 +76,7 @@ func (p *splunkGateway) GetSplunkdHealthDetails(context context.Context) (*[]hea return nil, splunkError } - contentList := []managermodel.DeploymentContent{} + contentList := []healthmodel.DeploymentContent{} for _, entry := range envelop.Entry { contentList = append(contentList, entry.Content) } @@ -92,7 +91,7 @@ func (p *splunkGateway) GetSplunkdHealthDetails(context context.Context) (*[]hea // to be accessible from this endpoint. Use server/sysinfo to access server status instead. // endpoint: https://:/services/server/info -func (p *splunkGateway) GetServerInfo(context context.Context) (*[]servermodel.DeploymentContent, error) { +func (p *splunkGateway) GetServerInfo(context context.Context) (*[]healthmodel.DeploymentContent, error) { url := servermodel.InfoUrl // fetch the configheader into struct @@ -116,7 +115,7 @@ func (p *splunkGateway) GetServerInfo(context context.Context) (*[]servermodel.D return nil, splunkError } - contentList := []managermodel.DeploymentContent{} + contentList := []healthmodel.DeploymentContent{} for _, entry := range envelop.Entry { contentList = append(contentList, entry.Content) } @@ -126,7 +125,7 @@ func (p *splunkGateway) GetServerInfo(context context.Context) (*[]servermodel.D // List server/status child resources. 
// endpoint: https://:/services/server/status -func (p *splunkGateway) GetServerStatus(context context.Context) (*[]servermodel.DeploymentContent, error) { +func (p *splunkGateway) GetServerStatus(context context.Context) (*[]healthmodel.DeploymentContent, error) { url := servermodel.StatusUrl // fetch the configheader into struct @@ -150,7 +149,7 @@ func (p *splunkGateway) GetServerStatus(context context.Context) (*[]servermodel return nil, splunkError } - contentList := []managermodel.DeploymentContent{} + contentList := []healthmodel.DeploymentContent{} for _, entry := range envelop.Entry { contentList = append(contentList, entry.Content) } From b7e34c4fb9ed285fc45e2dc6e44857c677c5ae12 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Mon, 17 Jul 2023 16:28:57 -0700 Subject: [PATCH 64/85] Fixed unit tests --- pkg/provisioner/splunk/implementation/splunk.go | 1 - pkg/splunk/enterprise/clustermanager_test.go | 1 + pkg/splunk/enterprise/monitoringconsole_test.go | 2 -- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/pkg/provisioner/splunk/implementation/splunk.go b/pkg/provisioner/splunk/implementation/splunk.go index 75a18f9aa..784ffefd2 100644 --- a/pkg/provisioner/splunk/implementation/splunk.go +++ b/pkg/provisioner/splunk/implementation/splunk.go @@ -61,7 +61,6 @@ var callGetClusterManagerStatus = func(ctx context.Context, p *splunkProvisioner var callGetClusterManagerPeersStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) { peerlist, err := p.gateway.GetClusterManagerPeers(ctx) if err != nil { - fmt.Println("Hi3") return nil, err } else if peerlist == nil { return nil, fmt.Errorf("peer list is empty") diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 203d44f79..f6049109a 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -500,6 +500,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.Pod-test-splunk-stack1-cluster-manager-0"}, {MetaName: "*v1.StatefulSet-test-splunk-test-monitoring-console"}, + {MetaName: "*v1.Secret-test-splunk-test-secret"}, {MetaName: "*v4.ClusterManager-test-stack1"}, {MetaName: "*v4.ClusterManager-test-stack1"}, } diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go index dc3d08397..b4b4b93bc 100644 --- a/pkg/splunk/enterprise/monitoringconsole_test.go +++ b/pkg/splunk/enterprise/monitoringconsole_test.go @@ -72,7 +72,6 @@ func TestApplyMonitoringConsole(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"}, - {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-monitoring-console"}, {MetaName: "*v4.MonitoringConsole-test-stack1"}, {MetaName: "*v4.MonitoringConsole-test-stack1"}, @@ -90,7 +89,6 @@ func TestApplyMonitoringConsole(t *testing.T) { {MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-monitoring-console"}, - {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-monitoring-console"}, {MetaName: 
"*v1.StatefulSet-test-splunk-stack1-monitoring-console"}, {MetaName: "*v4.MonitoringConsole-test-stack1"}, From dd117936a6528b5c596a153d3fe62a930d5805c4 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Thu, 20 Jul 2023 12:34:48 -0700 Subject: [PATCH 65/85] added few provisioner changes with manager code --- controllers/clustermanager_controller.go | 25 ++++- go.mod | 5 +- go.sum | 11 +- .../splunk/implementation/splunk.go | 16 ++- .../splunk/implementation/splunk_test.go | 68 +++++++++--- pkg/provisioner/splunk/model/types.go | 15 +++ pkg/provisioner/splunk/provisioner.go | 6 +- pkg/splunk/enterprise/clustermanager.go | 81 ++++---------- pkg/splunk/enterprise/clustermanager_test.go | 82 ++++++++++---- pkg/splunk/enterprise/configuration_test.go | 4 +- pkg/splunk/enterprise/factory.go | 105 ++++++++++++++++++ pkg/splunk/enterprise/indexercluster_test.go | 6 +- pkg/splunk/enterprise/licensemanager_test.go | 6 +- pkg/splunk/enterprise/licensemaster_test.go | 6 +- pkg/splunk/enterprise/monitoringconsole.go | 64 ----------- .../enterprise/monitoringconsole_test.go | 94 +--------------- pkg/splunk/enterprise/types.go | 22 ++++ pkg/splunk/enterprise/util_test.go | 4 +- pkg/splunk/manager.go | 16 ++- pkg/splunk/model/types.go | 26 +++++ 20 files changed, 377 insertions(+), 285 deletions(-) create mode 100644 pkg/provisioner/splunk/model/types.go create mode 100644 pkg/splunk/enterprise/factory.go create mode 100644 pkg/splunk/model/types.go diff --git a/controllers/clustermanager_controller.go b/controllers/clustermanager_controller.go index b3b18989f..701ecba5e 100644 --- a/controllers/clustermanager_controller.go +++ b/controllers/clustermanager_controller.go @@ -18,14 +18,15 @@ package controllers import ( "context" - "time" - + "github.com/jinzhu/copier" enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "time" "github.com/pkg/errors" common "github.com/splunk/splunk-operator/controllers/common" provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + managermodel "github.com/splunk/splunk-operator/pkg/splunk/model" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -115,7 +116,25 @@ func (r *ClusterManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque // ApplyClusterManager adding to handle unit test case var ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { - return enterprise.ApplyClusterManager(ctx, client, instance, provisionerFactory) + // match the provisioner.EventPublisher interface + publishEvent := func(ctx context.Context, eventType, reason, message string) { + instance.NewEvent(eventType, reason, message) + } + info := &managermodel.ReconcileInfo{ + TypeMeta: instance.TypeMeta, + CommonSpec: instance.Spec.CommonSplunkSpec, + Client: client, + Log: log.FromContext(ctx), + Namespace: instance.GetNamespace(), + Name: instance.GetName(), + } + copier.Copy(info.MetaObject, instance.ObjectMeta) + mg := enterprise.NewManagerFactory(false) + manager, err := mg.NewManager(ctx, info, publishEvent) + if err != nil { + instance.NewEvent("Warning", "ApplyClusterManager", err.Error()) + } + return manager.ApplyClusterManager(ctx, client, instance) } // SetupWithManager sets up the controller with the Manager. 
diff --git a/go.mod b/go.mod index 9388d95cb..c18eaa6ce 100644 --- a/go.mod +++ b/go.mod @@ -6,8 +6,9 @@ require ( github.com/aws/aws-sdk-go v1.42.16 github.com/go-logr/logr v1.2.4 github.com/google/go-cmp v0.5.9 + github.com/jinzhu/copier v0.3.5 github.com/minio/minio-go/v7 v7.0.16 - github.com/onsi/ginkgo/v2 v2.10.0 + github.com/onsi/ginkgo/v2 v2.11.0 github.com/onsi/gomega v1.27.8 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 @@ -72,7 +73,7 @@ require ( golang.org/x/crypto v0.1.0 // indirect golang.org/x/net v0.10.0 // indirect golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect - golang.org/x/sys v0.8.0 // indirect + golang.org/x/sys v0.9.0 // indirect golang.org/x/term v0.5.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect diff --git a/go.sum b/go.sum index 26a0d7d74..508085e36 100644 --- a/go.sum +++ b/go.sum @@ -191,6 +191,8 @@ github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= +github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -232,6 +234,7 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/minio-go/v7 v7.0.16 h1:GspaSBS8lOuEUCAqMe0W3UxSoyOA4b4F8PTspRVI+k4= @@ -255,8 +258,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onsi/ginkgo/v2 v2.10.0 h1:sfUl4qgLdvkChZrWCYndY2EAu9BRIw1YphNAzy1VNWs= -github.com/onsi/ginkgo/v2 v2.10.0/go.mod h1:UDQOh5wbQUlMnkLfVaIUMtQ1Vus92oM+P2JX1aulgcE= +github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -428,8 
+431,8 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= diff --git a/pkg/provisioner/splunk/implementation/splunk.go b/pkg/provisioner/splunk/implementation/splunk.go index 784ffefd2..d13599207 100644 --- a/pkg/provisioner/splunk/implementation/splunk.go +++ b/pkg/provisioner/splunk/implementation/splunk.go @@ -9,6 +9,7 @@ import ( splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" + provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -79,11 +80,11 @@ var callGetClusterManagerSitesStatus = func(ctx context.Context, p *splunkProvis } // SetClusterManagerStatus Access cluster node configuration details. -func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) error { +func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) { peerlistptr, err := callGetClusterManagerPeersStatus(ctx, p) if err != nil { - return err + return result, err } else { peerlist := *peerlistptr for _, peer := range peerlist { @@ -105,7 +106,7 @@ func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, conditi cminfolistptr, err := callGetClusterManagerInfo(ctx, p) if err != nil { - return err + return result, err } cminfolist := *cminfolistptr if cminfolist[0].Multisite { @@ -134,7 +135,7 @@ func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, conditi //healthList, err := callGetClusterManagerHealth(ctx, p) healthList, err := callGetClusterManagerHealth(ctx, p) if err != nil { - return err + return result, err } else { hllist := *healthList // prepare fields for conditions @@ -153,6 +154,11 @@ func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, conditi meta.SetStatusCondition(conditions, condition) } } + result.Dirty = true + return result, err +} - return nil +// CheckClusterManagerHealth +func (p *splunkProvisioner) CheckClusterManagerHealth(ctx context.Context) (result provmodel.Result, err error) { + return result, nil } diff --git a/pkg/provisioner/splunk/implementation/splunk_test.go b/pkg/provisioner/splunk/implementation/splunk_test.go index 7fad69395..b4fdbf94d 100644 --- a/pkg/provisioner/splunk/implementation/splunk_test.go +++ b/pkg/provisioner/splunk/implementation/splunk_test.go @@ -6,13 +6,13 @@ import ( splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" managermodel 
"github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" - fixturegatewayimpl "github.com/splunk/splunk-operator/pkg/gateway/splunk/services/fixture" + provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) //var log = logz.New().WithName("provisioner").WithName("fixture") -func setCreds(t *testing.T) *splunkProvisioner { +func setCreds(t *testing.T) provisioner.Provisioner { ctx := context.TODO() sad := &splunkmodel.SplunkCredentials{ Address: "splunk-cm-cluster-master-service", @@ -28,20 +28,12 @@ func setCreds(t *testing.T) *splunkProvisioner { DisableCertificateVerification: true, } publisher := func(ctx context.Context, eventType, reason, message string) {} - fixtureFactory := fixturegatewayimpl.Fixture{} - gateway, err := fixtureFactory.NewGateway(ctx, sad, publisher) + sp := NewProvisionerFactory(true) + provisioner, err := sp.NewProvisioner(ctx, sad, publisher) if err != nil { return nil } - - // TODO fixme how to test the provisioner call directly - //sm := NewProvisionerFactory(ctx, &sad, publisher) - sm := &splunkProvisioner{ - credentials: sad, - publisher: publisher, - gateway: gateway, - } - return sm + return provisioner } func TestSetClusterManagerStatus(t *testing.T) { @@ -54,7 +46,55 @@ func TestSetClusterManagerStatus(t *testing.T) { ctx := context.TODO() - err := provisioner.SetClusterManagerStatus(ctx, conditions) + _, err := provisioner.SetClusterManagerStatus(ctx, conditions) + if err != nil { + t.Errorf("fixture: error in set cluster manager %v", err) + } +} + +func TestSetClusterManagerMultiSiteStatus(t *testing.T) { + callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerHealthContent, error) { + healthData := []managermodel.ClusterManagerHealthContent{ + { + AllPeersAreUp: "1", + }, + { + AllPeersAreUp: "0", + }, + } + return &healthData, nil + } + + callGetClusterManagerInfo = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerInfoContent, error) { + cminfo := &[]managermodel.ClusterManagerInfoContent{ + { + Multisite: true, + }, + } + return cminfo, nil + } + + callGetClusterManagerPeersStatus = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerPeerContent, error) { + peerlist := &[]managermodel.ClusterManagerPeerContent{ + { + Site: "1", + Label: "site1", + Status: "Up", + }, + { + Site: "2", + Label: "site1", + Status: "down", + }, + } + return peerlist, nil + } + provisioner := setCreds(t) + conditions := &[]metav1.Condition{} + + ctx := context.TODO() + + _, err := provisioner.SetClusterManagerStatus(ctx, conditions) if err != nil { t.Errorf("fixture: error in set cluster manager %v", err) } diff --git a/pkg/provisioner/splunk/model/types.go b/pkg/provisioner/splunk/model/types.go new file mode 100644 index 000000000..504948b22 --- /dev/null +++ b/pkg/provisioner/splunk/model/types.go @@ -0,0 +1,15 @@ +package model + +import "time" + +// Result holds the response from a call in the Provsioner API. +type Result struct { + // Dirty indicates whether the splunk object needs to be saved. + Dirty bool + // RequeueAfter indicates how long to wait before making the same + // Provisioner call again. The request should only be requeued if + // Dirty is also true. + RequeueAfter time.Duration + // Any error message produced by the provisioner. 
+ ErrorMessage string +} diff --git a/pkg/provisioner/splunk/provisioner.go b/pkg/provisioner/splunk/provisioner.go index ff7bcbda1..d3cb9141f 100644 --- a/pkg/provisioner/splunk/provisioner.go +++ b/pkg/provisioner/splunk/provisioner.go @@ -5,6 +5,7 @@ import ( splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" + provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -22,5 +23,8 @@ type Factory interface { type Provisioner interface { // SetClusterManagerStatus set cluster manager status - SetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) error + SetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) + + // CheckClusterManagerHealth + CheckClusterManagerHealth(ctx context.Context) (result provmodel.Result, err error) } diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 7757e03a8..60f55f03d 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -21,13 +21,12 @@ import ( "reflect" "time" - "github.com/pkg/errors" enterpriseApi "github.com/splunk/splunk-operator/api/v4" "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/go-logr/logr" - splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" @@ -36,14 +35,27 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) +type splunkManager struct { + // a logger configured for this host + log logr.Logger + // a debug logger configured for this host + debugLog logr.Logger + // an event publisher for recording significant events + publisher gateway.EventPublisher + // credentials + // gateway factory + provisioner provisioner.Provisioner + // client + client splcommon.ControllerClient +} + // ApplyClusterManager reconciles the state of a Splunk Enterprise cluster manager. -func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { +func (p *splunkManager) ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) { // unless modified, reconcile for this object will be requeued after 5 seconds result := reconcile.Result{ @@ -108,7 +120,7 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, // 1. Initialize the S3Clients based on providers // 2. Check the status of apps on remote storage. 
if len(cr.Spec.AppFrameworkConfig.AppSources) != 0 { - err := initAndCheckAppInfoStatus(ctx, client, cr, &cr.Spec.AppFrameworkConfig, &cr.Status.AppContext) + err := initAndCheckAppInfoStatus(ctx, p.client, cr, &cr.Spec.AppFrameworkConfig, &cr.Status.AppContext) if err != nil { eventPublisher.Warning(ctx, "initAndCheckAppInfoStatus", fmt.Sprintf("init and check app info status failed %s", err.Error())) cr.Status.AppContext.IsDeploymentInProgress = false @@ -117,7 +129,7 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, } // create or update general config resources - namespaceScopedSecret, err := ApplySplunkConfig(ctx, client, cr, cr.Spec.CommonSplunkSpec, SplunkIndexer) + namespaceScopedSecret, err := ApplySplunkConfig(ctx, p.client, cr, cr.Spec.CommonSplunkSpec, SplunkIndexer) if err != nil { scopedLog.Error(err, "create or update general config failed", "error", err.Error()) eventPublisher.Warning(ctx, "ApplySplunkConfig", fmt.Sprintf("create or update general config failed with error %s", err.Error())) @@ -128,7 +140,7 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, if cr.ObjectMeta.DeletionTimestamp != nil { if cr.Spec.MonitoringConsoleRef.Name != "" { extraEnv, _ := VerifyCMisMultisite(ctx, cr, namespaceScopedSecret) - _, err = ApplyMonitoringConsoleEnvConfigMap(ctx, client, cr.GetNamespace(), cr.GetName(), cr.Spec.MonitoringConsoleRef.Name, extraEnv, false) + _, err = ApplyMonitoringConsoleEnvConfigMap(ctx, p.client, cr.GetNamespace(), cr.GetName(), cr.Spec.MonitoringConsoleRef.Name, extraEnv, false) if err != nil { return result, err } @@ -235,16 +247,6 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig) result = *finalResult - err = SetClusterManagerStatus(ctx, client, cr, provisionerFactory) - if err != nil { - scopedLog.Error(err, "error while setting cluster health") - } - - // trigger MonitoringConsole reconcile by changing the splunk/image-tag annotation - err = changeMonitoringConsoleAnnotations(ctx, client, cr) - if err != nil { - return result, err - } } // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. @@ -255,51 +257,6 @@ func ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, return result, nil } -// SetClusterManagerStatus -func SetClusterManagerStatus(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) error { - eventPublisher, _ := newK8EventPublisher(client, cr) - - defaultSecretObjName := splcommon.GetNamespaceScopedSecretName(cr.GetNamespace()) - defaultSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), defaultSecretObjName) - if err != nil { - eventPublisher.Warning(ctx, "PushManagerAppsBundle", fmt.Sprintf("Could not access default secret object to fetch admin password. Reason %v", err)) - return fmt.Errorf("Could not access default secret object to fetch admin password. 
Reason %v", err) - } - - //Get the admin password from the secret object - adminPwd, foundSecret := defaultSecret.Data["password"] - if !foundSecret { - eventPublisher.Warning(ctx, "PushManagerAppsBundle", fmt.Sprintf("Could not find admin password ")) - return fmt.Errorf("Could not find admin password ") - } - - service := getSplunkService(ctx, cr, &cr.Spec.CommonSplunkSpec, SplunkClusterManager, false) - - sad := &splunkmodel.SplunkCredentials{ - Address: service.Name, - Port: 8089, - ServicesNamespace: "-", - User: "admin", - App: "-", - CredentialsName: string(adminPwd[:]), - TrustedCAFile: "", - ClientCertificateFile: "", - ClientPrivateKeyFile: "", - DisableCertificateVerification: true, - Namespace: cr.Namespace, - } - prov, err := provisionerFactory.NewProvisioner(ctx, sad, eventPublisher.publishEvent) - if err != nil { - return errors.Wrap(err, "failed to create gateway") - } - err = prov.SetClusterManagerStatus(ctx, &cr.Status.Conditions) - if err != nil { - return errors.Wrap(err, "failed to update cluster manager health status") - } - - return nil -} - // clusterManagerPodManager is used to manage the cluster manager pod type clusterManagerPodManager struct { log logr.Logger diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index f6049109a..6cc06aad9 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -27,6 +27,7 @@ import ( "testing" "time" + "github.com/jinzhu/copier" enterpriseApi "github.com/splunk/splunk-operator/api/v4" appsv1 "k8s.io/api/apps/v1" @@ -37,15 +38,40 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" runtime "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" - splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" + manager "github.com/splunk/splunk-operator/pkg/splunk" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" + managermodel "github.com/splunk/splunk-operator/pkg/splunk/model" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" + //splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" ) +func setCreds(t *testing.T, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) manager.SplunkManager { + ctx := context.TODO() + clusterManager := enterpriseApi.ClusterManager{} + clusterManager.Name = "test" + info := &managermodel.ReconcileInfo{ + TypeMeta: clusterManager.TypeMeta, + CommonSpec: cr.Spec.CommonSplunkSpec, + Client: c, + Log: log.Log, + Namespace: "default", + Name: "clusterManager", + } + copier.Copy(info.MetaObject, cr.ObjectMeta) + publisher := func(ctx context.Context, eventType, reason, message string) {} + mg := NewManagerFactory(true) + manager, err := mg.NewManager(ctx, info, publisher) + if err != nil { + return nil + } + return manager +} + func TestApplyClusterManager(t *testing.T) { // redefining cpmakeTar to return nil always @@ -118,7 +144,8 @@ func TestApplyClusterManager(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager), splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, c, 
cr.(*enterpriseApi.ClusterManager)) + _, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyClusterManager", ¤t, revised, createCalls, updateCalls, reconcile, true) @@ -128,7 +155,8 @@ func TestApplyClusterManager(t *testing.T) { revised.ObjectMeta.DeletionTimestamp = ¤tTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - _, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager), splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, c, ¤t) + _, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) return true, err } splunkDeletionTester(t, revised, deleteFunc) @@ -139,7 +167,9 @@ func TestApplyClusterManager(t *testing.T) { } c := spltest.NewMockClient() _ = errors.New(splcommon.Rerr) - _, err := ApplyClusterManager(ctx, c, ¤t, splunkimpl.NewProvisionerFactory(false)) + + manager := setCreds(t, c, ¤t) + _, err := manager.ApplyClusterManager(ctx, c, ¤t) if err == nil { t.Errorf("Expected error") } @@ -205,7 +235,7 @@ func TestApplyClusterManager(t *testing.T) { }, } - _, err = ApplyClusterManager(ctx, c, ¤t, splunkimpl.NewProvisionerFactory(false)) + _, err = manager.ApplyClusterManager(ctx, c, ¤t) if err == nil { t.Errorf("Expected error") } @@ -221,7 +251,7 @@ func TestApplyClusterManager(t *testing.T) { current.Spec.SmartStore.VolList[0].SecretRef = "s3-secret" current.Status.SmartStore.VolList[0].SecretRef = "s3-secret" current.Status.ResourceRevMap["s3-secret"] = "v2" - _, err = ApplyClusterManager(ctx, c, ¤t, splunkimpl.NewProvisionerFactory(false)) + _, err = manager.ApplyClusterManager(ctx, c, ¤t) if err == nil { t.Errorf("Expected error") } @@ -235,7 +265,8 @@ func TestApplyClusterManager(t *testing.T) { c.Create(ctx, &cmap) current.Spec.SmartStore.VolList[0].SecretRef = "" current.Spec.SmartStore.Defaults.IndexAndGlobalCommonSpec.VolName = "msos_s2s3_vol" - _, err = ApplyClusterManager(ctx, c, ¤t, splunkimpl.NewProvisionerFactory(false)) + manager = setCreds(t, c, ¤t) + _, err = manager.ApplyClusterManager(ctx, c, ¤t) if err != nil { t.Errorf("Don't expected error here") } @@ -291,7 +322,7 @@ func TestApplyClusterManager(t *testing.T) { }, }, } - _, err = ApplyClusterManager(ctx, c, ¤t, splunkimpl.NewProvisionerFactory(false)) + _, err = manager.ApplyClusterManager(ctx, c, ¤t) if err == nil { t.Errorf("Expected error") } @@ -308,7 +339,7 @@ func TestApplyClusterManager(t *testing.T) { } rerr := errors.New(splcommon.Rerr) c.InduceErrorKind[splcommon.MockClientInduceErrorGet] = rerr - _, err = ApplyClusterManager(ctx, c, ¤t, splunkimpl.NewProvisionerFactory(false)) + _, err = manager.ApplyClusterManager(ctx, c, ¤t) if err == nil { t.Errorf("Expected error") } @@ -585,7 +616,8 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { } // Without S3 keys, ApplyClusterManager should fail - _, err := ApplyClusterManager(ctx, client, ¤t, splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, client, ¤t) + _, err := manager.ApplyClusterManager(ctx, client, ¤t) if err == nil { t.Errorf("ApplyClusterManager should fail without S3 secrets configured") } @@ -614,7 +646,8 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := 
ApplyClusterManager(context.Background(), c, cr.(*enterpriseApi.ClusterManager), splunkimpl.NewProvisionerFactory(false)) + //manager := setCreds(t, c, ¤t) + _, err := manager.ApplyClusterManager(ctx, client, ¤t) return err } @@ -641,12 +674,12 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyClusterManagerWithSmartstore-0", ¤t, revised, createCalls, updateCalls, reconcile, true, secret, &smartstoreConfigMap, ss, pod) current.Status.BundlePushTracker.NeedToPushManagerApps = true - if _, err = ApplyClusterManager(context.Background(), client, ¤t, splunkimpl.NewProvisionerFactory(false)); err != nil { + if _, err = manager.ApplyClusterManager(ctx, client, ¤t); err != nil { t.Errorf("ApplyClusterManager() should not have returned error") } current.Spec.CommonSplunkSpec.EtcVolumeStorageConfig.StorageCapacity = "-abcd" - if _, err := ApplyClusterManager(context.Background(), client, ¤t, splunkimpl.NewProvisionerFactory(false)); err == nil { + if _, err = manager.ApplyClusterManager(ctx, client, ¤t); err == nil { t.Errorf("ApplyClusterManager() should have returned error") } @@ -656,7 +689,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { ss.Spec.Replicas = &replicas ss.Spec.Template.Spec.Containers[0].Image = "splunk/splunk" client.AddObject(ss) - if result, err := ApplyClusterManager(context.Background(), client, ¤t, splunkimpl.NewProvisionerFactory(false)); err == nil && !result.Requeue { + if result, err := manager.ApplyClusterManager(ctx, client, ¤t); err == nil && !result.Requeue { t.Errorf("ApplyClusterManager() should have returned error or result.requeue should have been false") } @@ -666,7 +699,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { client.AddObjects(objects) current.Spec.CommonSplunkSpec.Mock = false - if _, err := ApplyClusterManager(context.Background(), client, ¤t, splunkimpl.NewProvisionerFactory(false)); err == nil { + if _, err = manager.ApplyClusterManager(ctx, client, ¤t); err == nil { t.Errorf("ApplyClusterManager() should have returned error") } } @@ -868,7 +901,8 @@ func TestAppFrameworkApplyClusterManagerShouldNotFail(t *testing.T) { t.Errorf(err.Error()) } - _, err = ApplyClusterManager(context.Background(), client, &cm, splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, client, &cm) + _, err = manager.ApplyClusterManager(ctx, client, &cm) if err != nil { t.Errorf("ApplyClusterManager should not have returned error here.") } @@ -963,7 +997,8 @@ func TestApplyCLusterManagerDeletion(t *testing.T) { t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume) } - _, err = ApplyClusterManager(ctx, c, &cm, splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, c, &cm) + _, err = manager.ApplyClusterManager(ctx, c, &cm) if err != nil { t.Errorf("ApplyClusterManager should not have returned error here.") } @@ -1451,7 +1486,8 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) { } err = client.Create(ctx, &cm) - _, err = ApplyClusterManager(ctx, client, &cm, splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, client, &cm) + _, err = manager.ApplyClusterManager(ctx, client, &cm) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } @@ -1536,7 +1572,8 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { debug.PrintStack() } client.Create(ctx, cm) - _, err = ApplyClusterManager(ctx, client, cm, splunkimpl.NewProvisionerFactory(false)) + manager := 
setCreds(t, client, cm) + _, err = manager.ApplyClusterManager(ctx, client, cm) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } @@ -1677,7 +1714,8 @@ func TestClusterManagerWitReadyState(t *testing.T) { // simulate create clustermanager instance before reconcilation c.Create(ctx, clustermanager) - _, err := ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, c, clustermanager) + _, err := manager.ApplyClusterManager(ctx, c, clustermanager) if err != nil { t.Errorf("Unexpected error while running reconciliation for clustermanager with app framework %v", err) debug.PrintStack() @@ -1713,7 +1751,7 @@ func TestClusterManagerWitReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) + _, err = manager.ApplyClusterManager(ctx, c, clustermanager) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() @@ -1831,7 +1869,7 @@ func TestClusterManagerWitReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) + _, err = manager.ApplyClusterManager(ctx, c, clustermanager) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() diff --git a/pkg/splunk/enterprise/configuration_test.go b/pkg/splunk/enterprise/configuration_test.go index 5c7426c60..32e5f0d4a 100644 --- a/pkg/splunk/enterprise/configuration_test.go +++ b/pkg/splunk/enterprise/configuration_test.go @@ -26,7 +26,6 @@ import ( enterpriseApi "github.com/splunk/splunk-operator/api/v4" - splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -233,7 +232,8 @@ func TestSmartstoreApplyClusterManagerFailsOnInvalidSmartStoreConfig(t *testing. 
var client splcommon.ControllerClient - _, err := ApplyClusterManager(context.Background(), client, &cr, splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, client, &cr) + _, err := manager.ApplyClusterManager(context.Background(), client, &cr) if err == nil { t.Errorf("ApplyClusterManager should fail on invalid smartstore config") } diff --git a/pkg/splunk/enterprise/factory.go b/pkg/splunk/enterprise/factory.go new file mode 100644 index 000000000..441ecf3b3 --- /dev/null +++ b/pkg/splunk/enterprise/factory.go @@ -0,0 +1,105 @@ +package enterprise + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + + //model "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" + provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" + splunkprovisionerimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" + manager "github.com/splunk/splunk-operator/pkg/splunk" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + types "github.com/splunk/splunk-operator/pkg/splunk/model" + splutil "github.com/splunk/splunk-operator/pkg/splunk/util" + //cmmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/cluster-manager/model" + + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type splunkManagerFactory struct { + log logr.Logger + // Gateway Factory + provisionerFactory provisioner.Factory + runInTestMode bool +} + +// NewManagerFactory new manager factory to create manager interface +func NewManagerFactory(runInTestMode bool) manager.Factory { + factory := splunkManagerFactory{} + factory.runInTestMode = runInTestMode + + err := factory.init(runInTestMode) + if err != nil { + return nil // FIXME we have to throw some kind of exception or error here + } + return factory +} + +func (f *splunkManagerFactory) init(runInTestMode bool) error { + f.provisionerFactory = splunkprovisionerimpl.NewProvisionerFactory(runInTestMode) + return nil +} + +func (f splunkManagerFactory) splunkManager(ctx context.Context, info *types.ReconcileInfo, publisher gateway.EventPublisher) (*splunkManager, error) { + provisionerLogger := log.FromContext(ctx) + reqLogger := log.FromContext(ctx) + f.log = reqLogger.WithName("splunkProvisioner") + + sad := &splunkmodel.SplunkCredentials{} + if !f.runInTestMode { + defaultSecretObjName := splcommon.GetNamespaceScopedSecretName(info.Namespace) + defaultSecret, err := splutil.GetSecretByName(ctx, info.Client, info.Namespace, info.Name, defaultSecretObjName) + if err != nil { + publisher(ctx, "Warning", "splunkManager", fmt.Sprintf("Could not access default secret object to fetch admin password. Reason %v", err)) + return nil, fmt.Errorf("could not access default secret object to fetch admin password. 
Reason %v", err) + } + + //Get the admin password from the secret object + adminPwd, foundSecret := defaultSecret.Data["password"] + if !foundSecret { + publisher(ctx, "Warning", "splunkManager", fmt.Sprintf("Could not find admin password ")) + return nil, fmt.Errorf("could not find admin password ") + } + + service := getSplunkService(ctx, info.MetaObject, &info.CommonSpec, GetInstantTypeFromKind(info.TypeMeta.Kind), false) + + sad = &splunkmodel.SplunkCredentials{ + Address: service.Name, + Port: 8089, + ServicesNamespace: "-", + User: "admin", + App: "-", + CredentialsName: string(adminPwd[:]), + TrustedCAFile: "", + ClientCertificateFile: "", + ClientPrivateKeyFile: "", + DisableCertificateVerification: true, + Namespace: info.Namespace, + } + } + provisionerLogger.Info("new splunk manager created to access rest endpoint") + provisioner, err := f.provisionerFactory.NewProvisioner(ctx, sad, publisher) + if err != nil { + return nil, err + } + + newProvisioner := &splunkManager{ + log: f.log, + debugLog: f.log, + publisher: publisher, + provisioner: provisioner, + client: info.Client, + } + + return newProvisioner, nil +} + +// NewProvisioner returns a new Splunk Provisioner using global +// configuration for finding the Splunk services. +func (f splunkManagerFactory) NewManager(ctx context.Context, info *types.ReconcileInfo, publisher gateway.EventPublisher) (manager.SplunkManager, error) { + return f.splunkManager(ctx, info, publisher) +} diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index be984e202..9e6c4490b 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -42,7 +42,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/go-logr/logr" - splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -1603,7 +1602,8 @@ func TestIndexerClusterWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, c, clustermanager) + _, err = manager.ApplyClusterManager(ctx, c, clustermanager) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() @@ -1682,7 +1682,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) + _, err = manager.ApplyClusterManager(ctx, c, clustermanager) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index 4e07f158d..1c331cd43 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -37,7 +37,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl 
"github.com/splunk/splunk-operator/pkg/splunk/controller" @@ -915,7 +914,8 @@ func TestLicenseManagerWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, c, clustermanager) + _, err = manager.ApplyClusterManager(ctx, c, clustermanager) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() @@ -989,7 +989,7 @@ func TestLicenseManagerWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) + _, err = manager.ApplyClusterManager(ctx, c, clustermanager) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() diff --git a/pkg/splunk/enterprise/licensemaster_test.go b/pkg/splunk/enterprise/licensemaster_test.go index b2ddb3da6..9daedfca4 100644 --- a/pkg/splunk/enterprise/licensemaster_test.go +++ b/pkg/splunk/enterprise/licensemaster_test.go @@ -37,7 +37,6 @@ import ( "github.com/pkg/errors" enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" - splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" @@ -925,7 +924,8 @@ func TestLicenseMasterWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, c, clustermanager) + _, err = manager.ApplyClusterManager(ctx, c, clustermanager) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() @@ -1004,7 +1004,7 @@ func TestLicenseMasterWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyClusterManager(ctx, c, clustermanager, splunkimpl.NewProvisionerFactory(false)) + _, err = manager.ApplyClusterManager(ctx, c, clustermanager) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 5ba8ff25b..240219f19 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -137,12 +137,6 @@ func ApplyMonitoringConsole(ctx context.Context, client splcommon.ControllerClie return result, err } - // check if the Monitoring Console is ready for version upgrade, if required - continueReconcile, err := isMonitoringConsoleReadyForUpgrade(ctx, client, cr) - if err != nil || !continueReconcile { - return result, err - } - mgr := splctrl.DefaultStatefulSetPodManager{} phase, err := mgr.Update(ctx, client, statefulSet, 1) if err != nil { @@ -363,64 +357,6 @@ func DeleteURLsConfigMap(revised *corev1.ConfigMap, crName string, newURLs []cor } } -// isMonitoringConsoleReadyForUpgrade checks if MonitoringConsole can be upgraded if a version upgrade is in-progress -// No-operation otherwise; returns bool, err accordingly -func isMonitoringConsoleReadyForUpgrade(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.MonitoringConsole) (bool, error) { - 
reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("isMonitoringConsoleReadyForUpgrade").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) - eventPublisher, _ := newK8EventPublisher(c, cr) - - // check if a LicenseManager is attached to the instance - clusterManagerRef := cr.Spec.ClusterManagerRef - if clusterManagerRef.Name == "" { - return true, nil - } - - namespacedName := types.NamespacedName{ - Namespace: cr.GetNamespace(), - Name: GetSplunkStatefulsetName(SplunkMonitoringConsole, cr.GetName()), - } - - // check if the stateful set is created at this instance - statefulSet := &appsv1.StatefulSet{} - err := c.Get(ctx, namespacedName, statefulSet) - if err != nil && k8serrors.IsNotFound(err) { - return true, nil - } - - namespacedName = types.NamespacedName{Namespace: cr.GetNamespace(), Name: clusterManagerRef.Name} - clusterManager := &enterpriseApi.ClusterManager{} - - // get the cluster manager referred in monitoring console - err = c.Get(ctx, namespacedName, clusterManager) - if err != nil { - eventPublisher.Warning(ctx, "isMonitoringConsoleReadyForUpgrade", fmt.Sprintf("Could not find the Cluster Manager. Reason %v", err)) - scopedLog.Error(err, "Unable to get clusterManager") - return true, err - } - - cmImage, err := getCurrentImage(ctx, c, cr, SplunkClusterManager) - if err != nil { - eventPublisher.Warning(ctx, "isMonitoringConsoleReadyForUpgrade", fmt.Sprintf("Could not get the Cluster Manager Image. Reason %v", err)) - scopedLog.Error(err, "Unable to get clusterManager current image") - return false, err - } - - mcImage, err := getCurrentImage(ctx, c, cr, SplunkMonitoringConsole) - if err != nil { - eventPublisher.Warning(ctx, "isMonitoringConsolerReadyForUpgrade", fmt.Sprintf("Could not get the Monitoring Console Image. Reason %v", err)) - scopedLog.Error(err, "Unable to get monitoring console current image") - return false, err - } - - // check if an image upgrade is happening and whether the ClusterManager is ready for the upgrade - if (cr.Spec.Image != mcImage) && (clusterManager.Status.Phase != enterpriseApi.PhaseReady || cmImage != cr.Spec.Image) { - return false, nil - } - - return true, nil -} - // changeMonitoringConsoleAnnotations updates the splunk/image-tag field of the MonitoringConsole annotations to trigger the reconcile loop // on update, and returns error if something is wrong. 
func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) error { diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go index b4b4b93bc..b68cce9d4 100644 --- a/pkg/splunk/enterprise/monitoringconsole_test.go +++ b/pkg/splunk/enterprise/monitoringconsole_test.go @@ -34,7 +34,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -1102,96 +1101,6 @@ func TestGetMonitoringConsoleList(t *testing.T) { } } -func TestIsMonitoringConsoleReadyForUpgrade(t *testing.T) { - ctx := context.TODO() - - builder := fake.NewClientBuilder() - client := builder.Build() - utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) - - // Create Cluster Manager - cm := enterpriseApi.ClusterManager{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: enterpriseApi.ClusterManagerSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", - Image: "splunk/splunk:latest", - }, - Volumes: []corev1.Volume{}, - MonitoringConsoleRef: corev1.ObjectReference{ - Name: "test", - }, - }, - }, - } - - err := client.Create(ctx, &cm) - _, err = ApplyClusterManager(ctx, client, &cm, splunkimpl.NewProvisionerFactory(false)) - if err != nil { - t.Errorf("applyClusterManager should not have returned error; err=%v", err) - } - cm.Status.Phase = enterpriseApi.PhaseReady - err = client.Status().Update(ctx, &cm) - if err != nil { - t.Errorf("Unexpected status update %v", err) - debug.PrintStack() - } - - // Create Monitoring Console - mc := enterpriseApi.MonitoringConsole{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: enterpriseApi.MonitoringConsoleSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", - Image: "splunk/splunk:latest", - }, - Volumes: []corev1.Volume{}, - ClusterManagerRef: corev1.ObjectReference{ - Name: "test", - }, - }, - }, - } - - err = client.Create(ctx, &mc) - _, err = ApplyMonitoringConsole(ctx, client, &mc) - if err != nil { - t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err) - } - - mc.Spec.Image = "splunk2" - cm.Spec.Image = "splunk2" - _, err = ApplyClusterManager(ctx, client, &cm, splunkimpl.NewProvisionerFactory(false)) - - monitoringConsole := &enterpriseApi.MonitoringConsole{} - namespacedName := types.NamespacedName{ - Name: cm.Name, - Namespace: cm.Namespace, - } - err = client.Get(ctx, namespacedName, monitoringConsole) - if err != nil { - t.Errorf("isMonitoringConsoleReadyForUpgrade should not have returned error=%v", err) - } - - check, err := isMonitoringConsoleReadyForUpgrade(ctx, client, monitoringConsole) - - if err != nil { - t.Errorf("Unexpected upgradeScenario error %v", err) - } - - if !check { - t.Errorf("isMonitoringConsoleReadyForUpgrade: MC should be ready for upgrade") - } -} - func TestChangeMonitoringConsoleAnnotations(t *testing.T) { ctx := context.TODO() @@ -1236,7 +1145,8 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) { // Create the instances client.Create(ctx, cm) - _, err := ApplyClusterManager(ctx, 
client, cm, splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, client, cm) + _, err := manager.ApplyClusterManager(ctx, client, cm) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index 557272168..ee6511159 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -76,6 +76,28 @@ const ( TmpAppDownloadDir string = "/tmp/appframework/" ) +func GetInstantTypeFromKind(kind string) InstanceType { + switch kind { + case "ClusterManager": + return SplunkClusterManager + case "ClusterMaster": + return SplunkClusterManager + case "LicenseMaster": + return SplunkLicenseManager + case "LicenseManager": + return SplunkLicenseManager + case "IndexerCluster": + return SplunkIndexer + case "MonitoringConsole": + return SplunkMonitoringConsole + case "SearchHeadCluster": + return SplunkSearchHead + case "Standalone": + return SplunkStandalone + } + return SplunkClusterManager +} + type commonResourceTracker struct { // mutex to serialize the access to commonResourceTracker mutex sync.Mutex diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index 302ce15fe..f9a0d1a91 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -39,7 +39,6 @@ import ( enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" - splunkimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" @@ -3173,7 +3172,8 @@ func TestGetCurrentImage(t *testing.T) { utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) err := client.Create(ctx, ¤t) - _, err = ApplyClusterManager(ctx, client, ¤t, splunkimpl.NewProvisionerFactory(false)) + manager := setCreds(t, client, ¤t) + _, err = manager.ApplyClusterManager(ctx, client, ¤t) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } diff --git a/pkg/splunk/manager.go b/pkg/splunk/manager.go index 07e7ba442..aa2efa4e1 100644 --- a/pkg/splunk/manager.go +++ b/pkg/splunk/manager.go @@ -5,13 +5,23 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" - enterpriseApi "github.com/splunk/splunk-operator/api/v3" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + types "github.com/splunk/splunk-operator/pkg/splunk/model" ) -type Factory func(client splcommon.ControllerClient, cr *enterpriseApi.ClusterMaster, gatewayFactory gateway.Factory) (SplunkManager, error) +type Factory interface { + NewManager(ctx context.Context, info *types.ReconcileInfo, publisher gateway.EventPublisher) (SplunkManager, error) +} type SplunkManager interface { - ApplyClusterManager(ctx context.Context) (reconcile.Result, error) + ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) + //ApplyClusterMaster(ctx context.Context, cr *enterpriseApiV3.ClusterMaster) (reconcile.Result, error) + //ApplyIndexerClusterManager(ctx context.Context, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error) + //ApplyMonitoringConsole(ctx context.Context, cr 
*enterpriseApi.MonitoringConsole) (reconcile.Result, error) + //ApplySearchHeadCluster(ctx context.Context, cr *enterpriseApi.SearchHeadCluster) (reconcile.Result, error) + //ApplyStandalone(ctx context.Context, cr *enterpriseApi.Standalone) (reconcile.Result, error) + //ApplyLicenseManager(ctx context.Context, cr *enterpriseApi.LicenseManager) (reconcile.Result, error) + //ApplyLicenseMaster(ctx context.Context, cr *enterpriseApiV3.LicenseMaster) (reconcile.Result, error) } diff --git a/pkg/splunk/model/types.go b/pkg/splunk/model/types.go new file mode 100644 index 000000000..0bc4bf1cc --- /dev/null +++ b/pkg/splunk/model/types.go @@ -0,0 +1,26 @@ +package model + +import ( + "github.com/go-logr/logr" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" +) + +// Instead of passing a zillion arguments to the action of a phase, +// hold them in a context +type ReconcileInfo struct { + TypeMeta metav1.TypeMeta + MetaObject splcommon.MetaObject + CommonSpec enterpriseApi.CommonSplunkSpec + Client splcommon.ControllerClient + Log logr.Logger + Namespace string + Name string + Request ctrl.Request + Events []corev1.Event + ErrorMessage string + PostSaveCallbacks []func() +} From bfca12a7a69b77de7b0419158cd2f4984b135c69 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Thu, 20 Jul 2023 12:38:22 -0700 Subject: [PATCH 66/85] changed test cases for controller --- controllers/clustermanager_controller.go | 4 ++-- controllers/clustermanager_controller_test.go | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/controllers/clustermanager_controller.go b/controllers/clustermanager_controller.go index 701ecba5e..10587d526 100644 --- a/controllers/clustermanager_controller.go +++ b/controllers/clustermanager_controller.go @@ -106,7 +106,7 @@ func (r *ClusterManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - result, err := ApplyClusterManager(ctx, r.Client, instance, r.ProvisionerFactory) + result, err := ApplyClusterManager(ctx, r.Client, instance) if result.Requeue && result.RequeueAfter != 0 { reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) } @@ -115,7 +115,7 @@ func (r *ClusterManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque } // ApplyClusterManager adding to handle unit test case -var ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { +var ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) { // match the provisioner.EventPublisher interface publishEvent := func(ctx context.Context, eventType, reason, message string) { instance.NewEvent(eventType, reason, message) diff --git a/controllers/clustermanager_controller_test.go b/controllers/clustermanager_controller_test.go index a8c356e4e..d24c181c7 100644 --- a/controllers/clustermanager_controller_test.go +++ b/controllers/clustermanager_controller_test.go @@ -5,7 +5,6 @@ import ( "fmt" enterpriseApi "github.com/splunk/splunk-operator/api/v4" - provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" "time" @@ -36,7 +35,7 @@ var _ = 
Describe("ClusterManager Controller", func() { It("Get ClusterManager custom resource should failed", func() { namespace := "ns-splunk-cm-1" - ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { + ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -52,7 +51,7 @@ var _ = Describe("ClusterManager Controller", func() { It("Create ClusterManager custom resource with annotations should pause", func() { namespace := "ns-splunk-cm-2" - ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { + ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -72,7 +71,7 @@ var _ = Describe("ClusterManager Controller", func() { Context("ClusterManager Management", func() { It("Create ClusterManager custom resource should succeeded", func() { namespace := "ns-splunk-cm-3" - ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { + ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -85,7 +84,7 @@ var _ = Describe("ClusterManager Controller", func() { It("Cover Unused methods", func() { namespace := "ns-splunk-cm-4" - ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager, provisionerFactory provisioner.Factory) (reconcile.Result, error) { + ApplyClusterManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.ClusterManager) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} From 070fbeeac2344df7a22381ff8620dadbc101d6ee Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Thu, 20 Jul 2023 13:29:09 -0700 Subject: [PATCH 67/85] fixed test case --- pkg/splunk/enterprise/clustermanager_test.go | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 6cc06aad9..47f69996c 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -531,7 +531,6 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-manager"}, {MetaName: "*v1.Pod-test-splunk-stack1-cluster-manager-0"}, {MetaName: "*v1.StatefulSet-test-splunk-test-monitoring-console"}, - {MetaName: "*v1.Secret-test-splunk-test-secret"}, {MetaName: "*v4.ClusterManager-test-stack1"}, {MetaName: "*v4.ClusterManager-test-stack1"}, } @@ -564,14 +563,9 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { runtime.InNamespace("test"), runtime.MatchingLabels(labels), } - listOpts1 := 
[]runtime.ListOption{ - runtime.InNamespace("test"), - } listmockCall := []spltest.MockFuncCall{ - {ListOpts: listOpts}, - {ListOpts: listOpts1}, - } - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[7], funcCalls[10], funcCalls[12]}, "List": {listmockCall[0], listmockCall[0], listmockCall[1]}, "Update": {funcCalls[0], funcCalls[3], funcCalls[13]}} + {ListOpts: listOpts}} + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[7], funcCalls[10], funcCalls[12]}, "List": {listmockCall[0], listmockCall[0]}, "Update": {funcCalls[0], funcCalls[3], funcCalls[13]}} updateCalls := map[string][]spltest.MockFuncCall{"Get": updateFuncCalls, "Update": {funcCalls[8]}, "List": {listmockCall[0]}} current := enterpriseApi.ClusterManager{ @@ -647,7 +641,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { //manager := setCreds(t, c, ¤t) - _, err := manager.ApplyClusterManager(ctx, client, ¤t) + _, err := manager.ApplyClusterManager(ctx, c, ¤t) return err } From 619e21f0987eb3f1004deb1d5c0c2dfd796dd018 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Thu, 20 Jul 2023 15:31:53 -0700 Subject: [PATCH 68/85] remove mc upgrade code --- pkg/splunk/enterprise/monitoringconsole.go | 3 - .../enterprise/monitoringconsole_test.go | 81 ------------------- 2 files changed, 84 deletions(-) diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 240219f19..9b9b1f534 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -391,9 +391,6 @@ func changeMonitoringConsoleAnnotations(ctx context.Context, client splcommon.Co } return err } - if len(objectList.Items) == 0 { - return nil - } // check if instance has the required ClusterManagerRef for _, mc := range objectList.Items { diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go index b68cce9d4..72efd15a7 100644 --- a/pkg/splunk/enterprise/monitoringconsole_test.go +++ b/pkg/splunk/enterprise/monitoringconsole_test.go @@ -1100,84 +1100,3 @@ func TestGetMonitoringConsoleList(t *testing.T) { t.Errorf("Got wrong number of IndexerCluster objects. 
Expected=%d, Got=%d", 1, numOfObjects) } } - -func TestChangeMonitoringConsoleAnnotations(t *testing.T) { - ctx := context.TODO() - - builder := fake.NewClientBuilder() - client := builder.Build() - utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme)) - - // define CM and MC - cm := &enterpriseApi.ClusterManager{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: enterpriseApi.ClusterManagerSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", - }, - Volumes: []corev1.Volume{}, - }, - }, - } - - mc := &enterpriseApi.MonitoringConsole{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "test", - }, - Spec: enterpriseApi.MonitoringConsoleSpec{ - CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Spec: enterpriseApi.Spec{ - ImagePullPolicy: "Always", - }, - Volumes: []corev1.Volume{}, - ClusterManagerRef: corev1.ObjectReference{ - Name: "test", - }, - }, - }, - } - cm.Spec.Image = "splunk/splunk:latest" - - // Create the instances - client.Create(ctx, cm) - manager := setCreds(t, client, cm) - _, err := manager.ApplyClusterManager(ctx, client, cm) - if err != nil { - t.Errorf("applyClusterManager should not have returned error; err=%v", err) - } - cm.Status.Phase = enterpriseApi.PhaseReady - err = client.Status().Update(ctx, cm) - if err != nil { - t.Errorf("Unexpected update pod %v", err) - debug.PrintStack() - } - client.Create(ctx, mc) - _, err = ApplyMonitoringConsole(ctx, client, mc) - if err != nil { - t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err) - } - - err = changeMonitoringConsoleAnnotations(ctx, client, cm) - if err != nil { - t.Errorf("changeMonitoringConsoleAnnotations should not have returned error=%v", err) - } - monitoringConsole := &enterpriseApi.MonitoringConsole{} - namespacedName := types.NamespacedName{ - Name: cm.Name, - Namespace: cm.Namespace, - } - err = client.Get(ctx, namespacedName, monitoringConsole) - if err != nil { - t.Errorf("changeMonitoringConsoleAnnotations should not have returned error=%v", err) - } - - annotations := monitoringConsole.GetAnnotations() - if annotations["splunk/image-tag"] != cm.Spec.Image { - t.Errorf("changeMonitoringConsoleAnnotations should have set the checkUpdateImage annotation field to the current image") - } -} From 5d5851a78434254bf70c222992682df10ad1da30 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Thu, 20 Jul 2023 15:34:46 -0700 Subject: [PATCH 69/85] fixed cm test (only one) --- pkg/splunk/enterprise/clustermanager_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 47f69996c..6279aac6b 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -640,8 +640,8 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - //manager := setCreds(t, c, ¤t) - _, err := manager.ApplyClusterManager(ctx, c, ¤t) + //manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager)) + _, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) return err } From 28694aea110f0d822b073f4672bffc1bd7cd6f18 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Fri, 21 Jul 
2023 11:56:06 -0700 Subject: [PATCH 70/85] fixed cm test --- pkg/splunk/enterprise/clustermanager.go | 7 +++---- pkg/splunk/enterprise/clustermanager_test.go | 2 ++ 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 60f55f03d..cd04daa36 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -56,7 +56,6 @@ type splunkManager struct { // ApplyClusterManager reconciles the state of a Splunk Enterprise cluster manager. func (p *splunkManager) ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) { - // unless modified, reconcile for this object will be requeued after 5 seconds result := reconcile.Result{ Requeue: true, @@ -120,7 +119,7 @@ func (p *splunkManager) ApplyClusterManager(ctx context.Context, client splcommo // 1. Initialize the S3Clients based on providers // 2. Check the status of apps on remote storage. if len(cr.Spec.AppFrameworkConfig.AppSources) != 0 { - err := initAndCheckAppInfoStatus(ctx, p.client, cr, &cr.Spec.AppFrameworkConfig, &cr.Status.AppContext) + err := initAndCheckAppInfoStatus(ctx, client, cr, &cr.Spec.AppFrameworkConfig, &cr.Status.AppContext) if err != nil { eventPublisher.Warning(ctx, "initAndCheckAppInfoStatus", fmt.Sprintf("init and check app info status failed %s", err.Error())) cr.Status.AppContext.IsDeploymentInProgress = false @@ -129,7 +128,7 @@ func (p *splunkManager) ApplyClusterManager(ctx context.Context, client splcommo } // create or update general config resources - namespaceScopedSecret, err := ApplySplunkConfig(ctx, p.client, cr, cr.Spec.CommonSplunkSpec, SplunkIndexer) + namespaceScopedSecret, err := ApplySplunkConfig(ctx, client, cr, cr.Spec.CommonSplunkSpec, SplunkIndexer) if err != nil { scopedLog.Error(err, "create or update general config failed", "error", err.Error()) eventPublisher.Warning(ctx, "ApplySplunkConfig", fmt.Sprintf("create or update general config failed with error %s", err.Error())) @@ -140,7 +139,7 @@ func (p *splunkManager) ApplyClusterManager(ctx context.Context, client splcommo if cr.ObjectMeta.DeletionTimestamp != nil { if cr.Spec.MonitoringConsoleRef.Name != "" { extraEnv, _ := VerifyCMisMultisite(ctx, cr, namespaceScopedSecret) - _, err = ApplyMonitoringConsoleEnvConfigMap(ctx, p.client, cr.GetNamespace(), cr.GetName(), cr.Spec.MonitoringConsoleRef.Name, extraEnv, false) + _, err = ApplyMonitoringConsoleEnvConfigMap(ctx, client, cr.GetNamespace(), cr.GetName(), cr.Spec.MonitoringConsoleRef.Name, extraEnv, false) if err != nil { return result, err } diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 6279aac6b..e2e00b5c0 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -146,6 +146,7 @@ func TestApplyClusterManager(t *testing.T) { reconcile := func(c *spltest.MockClient, cr interface{}) error { manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager)) _, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) + //_, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyClusterManager", ¤t, revised, createCalls, updateCalls, reconcile, true) @@ -157,6 +158,7 @@ func TestApplyClusterManager(t *testing.T) { deleteFunc := func(cr splcommon.MetaObject, c 
splcommon.ControllerClient) (bool, error) { manager := setCreds(t, c, ¤t) _, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) + //_, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) return true, err } splunkDeletionTester(t, revised, deleteFunc) From c5bd73766cbf904130a290b30c72b3ccb24ad94e Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Mon, 24 Jul 2023 12:52:02 -0700 Subject: [PATCH 71/85] adding conditions and types to all the fields --- api/v4/clustermanager_types.go | 3 + api/v4/indexercluster_types.go | 6 ++ api/v4/licensemanager_types.go | 6 ++ api/v4/monitoringconsole_types.go | 6 ++ api/v4/searchheadcluster_types.go | 6 ++ api/v4/standalone_types.go | 6 ++ api/v4/zz_generated.deepcopy.go | 59 ++++++++++++-- ...enterprise.splunk.com_clustermanagers.yaml | 75 ++++++++++++++++- .../enterprise.splunk.com_clustermasters.yaml | 2 +- ...enterprise.splunk.com_indexerclusters.yaml | 80 ++++++++++++++++++- ...enterprise.splunk.com_licensemanagers.yaml | 75 ++++++++++++++++- .../enterprise.splunk.com_licensemasters.yaml | 2 +- ...erprise.splunk.com_monitoringconsoles.yaml | 75 ++++++++++++++++- ...erprise.splunk.com_searchheadclusters.yaml | 75 ++++++++++++++++- .../enterprise.splunk.com_standalones.yaml | 75 ++++++++++++++++- controllers/clustermanager_controller.go | 2 +- pkg/splunk/enterprise/clustermanager.go | 10 +++ pkg/splunk/enterprise/clustermanager_test.go | 8 +- pkg/splunk/enterprise/factory.go | 2 +- pkg/splunk/model/types.go | 3 +- 20 files changed, 553 insertions(+), 23 deletions(-) diff --git a/api/v4/clustermanager_types.go b/api/v4/clustermanager_types.go index dd9b5a53d..a3e5d1587 100644 --- a/api/v4/clustermanager_types.go +++ b/api/v4/clustermanager_types.go @@ -70,6 +70,9 @@ type ClusterManagerStatus struct { // Conditions represent the latest available observations of an object's state Conditions []metav1.Condition `json:"conditions"` + + // ErrorMessage shows current error if there are any + ErrorMessage string `json:"errorMessage"` } // BundlePushInfo Indicates if bundle push required diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 60c39f46f..77e9048e8 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -61,6 +61,12 @@ type IndexerClusterMemberStatus struct { // Flag indicating if this peer belongs to the current committed generation and is searchable. 
Searchable bool `json:"is_searchable"` + + // Conditions represent the latest available observations of an object's state + Conditions []metav1.Condition `json:"conditions"` + + // ErrorMessage shows current error if there are any + ErrorMessage string `json:"errorMessage"` } // IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster diff --git a/api/v4/licensemanager_types.go b/api/v4/licensemanager_types.go index 29a0afa9a..cce30c5e5 100644 --- a/api/v4/licensemanager_types.go +++ b/api/v4/licensemanager_types.go @@ -52,6 +52,12 @@ type LicenseManagerStatus struct { // Telemetry App installation flag TelAppInstalled bool `json:"telAppInstalled"` + + // Conditions represent the latest available observations of an object's state + Conditions []metav1.Condition `json:"conditions"` + + // ErrorMessage shows current error if there are any + ErrorMessage string `json:"errorMessage"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/monitoringconsole_types.go b/api/v4/monitoringconsole_types.go index 7a4a9ad4a..05adb4475 100644 --- a/api/v4/monitoringconsole_types.go +++ b/api/v4/monitoringconsole_types.go @@ -58,6 +58,12 @@ type MonitoringConsoleStatus struct { // App Framework status AppContext AppDeploymentContext `json:"appContext,omitempty"` + + // Conditions represent the latest available observations of an object's state + Conditions []metav1.Condition `json:"conditions"` + + // ErrorMessage shows current error if there are any + ErrorMessage string `json:"errorMessage"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/searchheadcluster_types.go b/api/v4/searchheadcluster_types.go index db263cf92..310fa5122 100644 --- a/api/v4/searchheadcluster_types.go +++ b/api/v4/searchheadcluster_types.go @@ -44,6 +44,12 @@ type SearchHeadClusterSpec struct { // Splunk Enterprise App repository. Specifies remote App location and scope for Splunk App management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` + + // Conditions represent the latest available observations of an object's state + Conditions []metav1.Condition `json:"conditions"` + + // ErrorMessage shows current error if there are any + ErrorMessage string `json:"errorMessage"` } // SearchHeadClusterMemberStatus is used to track the status of each search head cluster member diff --git a/api/v4/standalone_types.go b/api/v4/standalone_types.go index 44bedc5fa..d67a84e74 100644 --- a/api/v4/standalone_types.go +++ b/api/v4/standalone_types.go @@ -46,6 +46,12 @@ type StandaloneSpec struct { // Splunk Enterprise App repository. Specifies remote App location and scope for Splunk App management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` + + // Conditions represent the latest available observations of an object's state + Conditions []metav1.Condition `json:"conditions"` + + // ErrorMessage shows current error if there are any + ErrorMessage string `json:"errorMessage"` } // StandaloneStatus defines the observed state of a Splunk Enterprise standalone instances. diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index b8240b5a6..357313909 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -22,7 +22,8 @@ limitations under the License. 
package v4 import ( - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -287,6 +288,13 @@ func (in *ClusterManagerStatus) DeepCopyInto(out *ClusterManagerStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterManagerStatus. @@ -307,7 +315,7 @@ func (in *CommonSplunkSpec) DeepCopyInto(out *CommonSplunkSpec) { out.VarVolumeStorageConfig = in.VarVolumeStorageConfig if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) + *out = make([]corev1.Volume, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -319,7 +327,7 @@ func (in *CommonSplunkSpec) DeepCopyInto(out *CommonSplunkSpec) { out.MonitoringConsoleRef = in.MonitoringConsoleRef if in.ExtraEnv != nil { in, out := &in.ExtraEnv, &out.ExtraEnv - *out = make([]v1.EnvVar, len(*in)) + *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -341,7 +349,7 @@ func (in *CommonSplunkSpec) DeepCopyInto(out *CommonSplunkSpec) { } if in.ImagePullSecrets != nil { in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]v1.LocalObjectReference, len(*in)) + *out = make([]corev1.LocalObjectReference, len(*in)) copy(*out, *in) } } @@ -496,6 +504,13 @@ func (in *IndexerClusterList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IndexerClusterMemberStatus) DeepCopyInto(out *IndexerClusterMemberStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterMemberStatus. @@ -542,7 +557,9 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { if in.Peers != nil { in, out := &in.Peers, &out.Peers *out = make([]IndexerClusterMemberStatus, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } } @@ -636,6 +653,13 @@ func (in *LicenseManagerSpec) DeepCopy() *LicenseManagerSpec { func (in *LicenseManagerStatus) DeepCopyInto(out *LicenseManagerStatus) { *out = *in in.AppContext.DeepCopyInto(&out.AppContext) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerStatus. @@ -736,6 +760,13 @@ func (in *MonitoringConsoleStatus) DeepCopyInto(out *MonitoringConsoleStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleStatus. 
@@ -873,6 +904,13 @@ func (in *SearchHeadClusterSpec) DeepCopyInto(out *SearchHeadClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterSpec. @@ -956,7 +994,7 @@ func (in *Spec) DeepCopyInto(out *Spec) { in.Affinity.DeepCopyInto(&out.Affinity) if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -965,7 +1003,7 @@ func (in *Spec) DeepCopyInto(out *Spec) { in.ServiceTemplate.DeepCopyInto(&out.ServiceTemplate) if in.TopologySpreadConstraints != nil { in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) + *out = make([]corev1.TopologySpreadConstraint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1047,6 +1085,13 @@ func (in *StandaloneSpec) DeepCopyInto(out *StandaloneSpec) { in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.SmartStore.DeepCopyInto(&out.SmartStore) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandaloneSpec. diff --git a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml index 7d3f80b3a..2f5aefdd3 100644 --- a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml +++ b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: clustermanagers.enterprise.splunk.com spec: @@ -4235,6 +4235,79 @@ spec: needToPushMasterApps: type: boolean type: object + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + errorMessage: + description: ErrorMessage shows current error if there are any + type: string phase: description: current phase of the cluster manager enum: diff --git a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml index 05097a4fc..657276f57 100644 --- a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml +++ b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: clustermasters.enterprise.splunk.com spec: diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 45bb80792..533537e5e 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: indexerclusters.enterprise.splunk.com spec: @@ -7531,6 +7531,84 @@ spec: all indexes. format: int64 type: integer + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of + the current state of this API Resource. --- This struct + is intended for direct use as an array at the field path + .status.conditions. 
For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. + // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should + be when the underlying condition changed. If that is + not known, then using the time when the API field changed + is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, + if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the + current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier + indicating the reason for the condition's last transition. + Producers of specific condition types may define expected + values and meanings for this field, and whether the + values are considered a guaranteed API. The value should + be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across + resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability + to deconflict is important. 
The regex it matches is + (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + errorMessage: + description: ErrorMessage shows current error if there are any + type: string guid: description: Unique identifier or GUID for the peer type: string diff --git a/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml b/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml index 1fedf29e1..6ec984cb0 100644 --- a/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml +++ b/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: licensemanagers.enterprise.splunk.com spec: @@ -4098,6 +4098,79 @@ spec: description: App Framework version info for future use type: integer type: object + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + errorMessage: + description: ErrorMessage shows current error if there are any + type: string phase: description: current phase of the license manager enum: diff --git a/config/crd/bases/enterprise.splunk.com_licensemasters.yaml b/config/crd/bases/enterprise.splunk.com_licensemasters.yaml index 130bfc599..4468b2b8b 100644 --- a/config/crd/bases/enterprise.splunk.com_licensemasters.yaml +++ b/config/crd/bases/enterprise.splunk.com_licensemasters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: licensemasters.enterprise.splunk.com spec: diff --git a/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml b/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml index 8330ef252..c207ae0d7 100644 --- a/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml +++ b/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: monitoringconsoles.enterprise.splunk.com spec: @@ -8236,6 +8236,79 @@ spec: needToPushMasterApps: type: boolean type: object + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + errorMessage: + description: ErrorMessage shows current error if there are any + type: string phase: description: current phase of the monitoring console enum: diff --git a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml index 9f0990ae6..df0eddd4d 100644 --- a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: searchheadclusters.enterprise.splunk.com spec: @@ -5330,6 +5330,76 @@ spec: type: string type: object x-kubernetes-map-type: atomic + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. 
For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array defaults: description: Inline map of default.yml overrides used to initialize the environment @@ -5344,6 +5414,9 @@ spec: be installed on the CM, standalone, search head deployer or license manager instance. type: string + errorMessage: + description: ErrorMessage shows current error if there are any + type: string etcVolumeStorageConfig: description: Storage configuration for /opt/splunk/etc volume properties: diff --git a/config/crd/bases/enterprise.splunk.com_standalones.yaml b/config/crd/bases/enterprise.splunk.com_standalones.yaml index 88a5a1956..e43344242 100644 --- a/config/crd/bases/enterprise.splunk.com_standalones.yaml +++ b/config/crd/bases/enterprise.splunk.com_standalones.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.3 + controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null name: standalones.enterprise.splunk.com spec: @@ -5482,6 +5482,76 @@ spec: type: string type: object x-kubernetes-map-type: atomic + conditions: + description: Conditions represent the latest available observations + of an object's state + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. 
If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array defaults: description: Inline map of default.yml overrides used to initialize the environment @@ -5496,6 +5566,9 @@ spec: be installed on the CM, standalone, search head deployer or license manager instance. 
type: string + errorMessage: + description: ErrorMessage shows current error if there are any + type: string etcVolumeStorageConfig: description: Storage configuration for /opt/splunk/etc volume properties: diff --git a/controllers/clustermanager_controller.go b/controllers/clustermanager_controller.go index 10587d526..50ed8ad93 100644 --- a/controllers/clustermanager_controller.go +++ b/controllers/clustermanager_controller.go @@ -121,7 +121,7 @@ var ApplyClusterManager = func(ctx context.Context, client client.Client, instan instance.NewEvent(eventType, reason, message) } info := &managermodel.ReconcileInfo{ - TypeMeta: instance.TypeMeta, + Kind: instance.Kind, CommonSpec: instance.Spec.CommonSplunkSpec, Client: client, Log: log.FromContext(ctx), diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index cd04daa36..a1045d2f1 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -28,6 +28,7 @@ import ( "github.com/go-logr/logr" gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" + provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" @@ -247,6 +248,15 @@ func (p *splunkManager) ApplyClusterManager(ctx context.Context, client splcommo finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig) result = *finalResult } + + // Verification of splunk instance update CR status + // We are using Conditions to update status information + provResult := provmodel.Result{} + provResult, err = p.provisioner.SetClusterManagerStatus(ctx, &cr.Status.Conditions) + if err != nil { + cr.Status.ErrorMessage = provResult.ErrorMessage + } + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. 
if !result.Requeue { diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index e2e00b5c0..85ca6f0eb 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -55,12 +55,12 @@ func setCreds(t *testing.T, c splcommon.ControllerClient, cr *enterpriseApi.Clus clusterManager := enterpriseApi.ClusterManager{} clusterManager.Name = "test" info := &managermodel.ReconcileInfo{ - TypeMeta: clusterManager.TypeMeta, + Kind: cr.Kind, CommonSpec: cr.Spec.CommonSplunkSpec, Client: c, Log: log.Log, - Namespace: "default", - Name: "clusterManager", + Namespace: cr.Namespace, + Name: cr.Name, } copier.Copy(info.MetaObject, cr.ObjectMeta) publisher := func(ctx context.Context, eventType, reason, message string) {} @@ -642,7 +642,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - //manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager)) + manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager)) _, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) return err } diff --git a/pkg/splunk/enterprise/factory.go b/pkg/splunk/enterprise/factory.go index 441ecf3b3..744ffe27e 100644 --- a/pkg/splunk/enterprise/factory.go +++ b/pkg/splunk/enterprise/factory.go @@ -65,7 +65,7 @@ func (f splunkManagerFactory) splunkManager(ctx context.Context, info *types.Rec return nil, fmt.Errorf("could not find admin password ") } - service := getSplunkService(ctx, info.MetaObject, &info.CommonSpec, GetInstantTypeFromKind(info.TypeMeta.Kind), false) + service := getSplunkService(ctx, info.MetaObject, &info.CommonSpec, GetInstantTypeFromKind(info.Kind), false) sad = &splunkmodel.SplunkCredentials{ Address: service.Name, diff --git a/pkg/splunk/model/types.go b/pkg/splunk/model/types.go index 0bc4bf1cc..0b4e7909e 100644 --- a/pkg/splunk/model/types.go +++ b/pkg/splunk/model/types.go @@ -5,14 +5,13 @@ import ( enterpriseApi "github.com/splunk/splunk-operator/api/v4" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" ) // Instead of passing a zillion arguments to the action of a phase, // hold them in a context type ReconcileInfo struct { - TypeMeta metav1.TypeMeta + Kind string MetaObject splcommon.MetaObject CommonSpec enterpriseApi.CommonSplunkSpec Client splcommon.ControllerClient From e6928115bd8e77023da9951a37e3bfc5778eb260 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Mon, 24 Jul 2023 12:58:49 -0700 Subject: [PATCH 72/85] added resty for http client Signed-off-by: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> --- go.mod | 8 ++++---- go.sum | 6 ++++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index c18eaa6ce..82b42825c 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/jinzhu/copier v0.3.5 github.com/minio/minio-go/v7 v7.0.16 github.com/onsi/ginkgo/v2 v2.11.0 - github.com/onsi/gomega v1.27.8 + github.com/onsi/gomega v1.27.10 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/wk8/go-ordered-map/v2 v2.1.7 @@ -71,11 +71,11 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.1.0 // indirect - 
golang.org/x/net v0.10.0 // indirect + golang.org/x/net v0.12.0 // indirect golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect - golang.org/x/sys v0.9.0 // indirect + golang.org/x/sys v0.10.0 // indirect golang.org/x/term v0.5.0 // indirect - golang.org/x/text v0.9.0 // indirect + golang.org/x/text v0.11.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.9.3 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect diff --git a/go.sum b/go.sum index 508085e36..1519f619e 100644 --- a/go.sum +++ b/go.sum @@ -262,6 +262,8 @@ github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -433,6 +435,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -446,6 +450,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From adb2b160cc04c2e3642553766a5fe12410cf28b5 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Tue, 25 Jul 2023 23:24:34 -0700 Subject: [PATCH 73/85] fixed test case Signed-off-by: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> --- .../splunk/services/fixture/fixture.go | 71 +++++++++++++++++-- 1 file changed, 66 insertions(+), 5 deletions(-) diff --git a/pkg/gateway/splunk/services/fixture/fixture.go b/pkg/gateway/splunk/services/fixture/fixture.go index edfdfa224..760386ee1 100644 --- a/pkg/gateway/splunk/services/fixture/fixture.go +++ b/pkg/gateway/splunk/services/fixture/fixture.go @@ -2,6 +2,10 @@ package fixture import ( 
"context" + "fmt" + "os" + + "path/filepath" //"encoding/json" "io/ioutil" @@ -39,6 +43,37 @@ type fixtureGateway struct { state *Fixture } +func findFixturePath() (string, error) { + ext := ".env" + wd, err := os.Getwd() + if err != nil { + return "", err + } + for { + dir, err := os.Open(wd) + if err != nil { + fmt.Println("Error opening directory:", err) + return "", err + } + defer dir.Close() + + files, err := dir.Readdir(-1) + if err != nil { + fmt.Println("Error reading directory:", err) + return "", err + } + + for _, file := range files { + if file.Name() == ext { + wd, err = filepath.Abs(wd) + wd += "/pkg/gateway/splunk/services/fixture/" + return wd, err + } + } + wd += "/.." + } +} + // Fixture contains persistent state for a particular splunk instance type Fixture struct { } @@ -60,7 +95,12 @@ func (f *Fixture) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredent func (p *fixtureGateway) GetClusterManagerInfo(ctx context.Context) (*[]managermodel.ClusterManagerInfoContent, error) { // Read entire file content, giving us little control but // making it very simple. No need to close the file. - content, err := ioutil.ReadFile("../../../gateway/splunk/services/fixture/cluster_config.json") + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := ioutil.ReadFile(relativePath + "/cluster_config.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -102,9 +142,14 @@ func (p *fixtureGateway) GetClusterManagerInfo(ctx context.Context) (*[]managerm // GetClusterManagerPeersAccess cluster manager peers. // endpoint: https://:/services/cluster/manager/peers func (p *fixtureGateway) GetClusterManagerPeers(ctx context.Context) (*[]managermodel.ClusterManagerPeerContent, error) { + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } // Read entire file content, giving us little control but // making it very simple. No need to close the file. - content, err := ioutil.ReadFile("../../../gateway/splunk/services/fixture/cluster_config.json") + content, err := ioutil.ReadFile(relativePath + "cluster_config.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -150,9 +195,15 @@ func (p *fixtureGateway) GetClusterManagerPeers(ctx context.Context) (*[]manager // // endpoint: https://:/services/cluster/manager/health func (p *fixtureGateway) GetClusterManagerHealth(ctx context.Context) (*[]managermodel.ClusterManagerHealthContent, error) { + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + // Read entire file content, giving us little control but // making it very simple. No need to close the file. - content, err := ioutil.ReadFile("../../../gateway/splunk/services/fixture/cluster_config.json") + content, err := ioutil.ReadFile(relativePath + "cluster_config.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -196,9 +247,14 @@ func (p *fixtureGateway) GetClusterManagerHealth(ctx context.Context) (*[]manage // list List available cluster sites. 
// endpoint: https://:/services/cluster/manager/sites func (p *fixtureGateway) GetClusterManagerSites(ctx context.Context) (*[]managermodel.ClusterManagerSiteContent, error) { + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } // Read entire file content, giving us little control but // making it very simple. No need to close the file. - content, err := ioutil.ReadFile("cluster_config.json") + content, err := ioutil.ReadFile(relativePath + "/cluster_config.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -240,9 +296,14 @@ func (p *fixtureGateway) GetClusterManagerSites(ctx context.Context) (*[]manager // GetClusterManagerSearchHeadStatus Endpoint to get searchheads connected to cluster manager. // endpoint: https://:/services/cluster/manager/status func (p *fixtureGateway) GetClusterManagerStatus(ctx context.Context) (*[]managermodel.ClusterManagerStatusContent, error) { + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } // Read entire file content, giving us little control but // making it very simple. No need to close the file. - content, err := ioutil.ReadFile("cluster_manager_status.json") + content, err := ioutil.ReadFile(relativePath + "/cluster_manager_status.json") if err != nil { log.Error(err, "fixture: error in get cluster manager search heads") return nil, err From 64c601f8b2927d9f97d6b98210d840479a61b9ed Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Wed, 26 Jul 2023 08:47:47 -0700 Subject: [PATCH 74/85] fixed meta object argument Signed-off-by: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> --- controllers/clustermanager_controller.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/controllers/clustermanager_controller.go b/controllers/clustermanager_controller.go index 50ed8ad93..aea3dce5c 100644 --- a/controllers/clustermanager_controller.go +++ b/controllers/clustermanager_controller.go @@ -18,7 +18,7 @@ package controllers import ( "context" - "github.com/jinzhu/copier" + //"github.com/jinzhu/copier" enterpriseApi "github.com/splunk/splunk-operator/api/v4" "time" @@ -127,8 +127,9 @@ var ApplyClusterManager = func(ctx context.Context, client client.Client, instan Log: log.FromContext(ctx), Namespace: instance.GetNamespace(), Name: instance.GetName(), + MetaObject: instance, } - copier.Copy(info.MetaObject, instance.ObjectMeta) + //copier.Copy(info.MetaObject, instance.ObjectMeta) mg := enterprise.NewManagerFactory(false) manager, err := mg.NewManager(ctx, info, publishEvent) if err != nil { From 37b1f6ef328d7a1a348fd02dc47a399981dca684 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Wed, 26 Jul 2023 08:48:10 -0700 Subject: [PATCH 75/85] fixed meta object argument Signed-off-by: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> --- controllers/clustermanager_controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/clustermanager_controller.go b/controllers/clustermanager_controller.go index aea3dce5c..859474d3a 100644 --- a/controllers/clustermanager_controller.go +++ b/controllers/clustermanager_controller.go @@ -127,7 +127,7 @@ var ApplyClusterManager = func(ctx context.Context, client client.Client, instan Log: log.FromContext(ctx), Namespace: instance.GetNamespace(), Name: 
instance.GetName(), - MetaObject: instance, + MetaObject: instance, } //copier.Copy(info.MetaObject, instance.ObjectMeta) mg := enterprise.NewManagerFactory(false) From b6620b6802a9e6518a5a1ff399ad047ef85be615 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 27 Jul 2023 12:52:09 -0700 Subject: [PATCH 76/85] Added maintenance mode --- .../model/services/cluster/url_types.go | 2 + .../services/fixture/cluster_maintenance.json | 48 +++++++++++++++ .../splunk/services/fixture/fixture.go | 47 +++++++++++++++ pkg/gateway/splunk/services/gateway.go | 5 ++ .../implementation/cluster_manager_impl.go | 30 ++++++++++ .../services/implementation/splunk_test.go | 22 +++++++ .../splunk/implementation/splunk.go | 4 ++ pkg/provisioner/splunk/provisioner.go | 2 + pkg/splunk/enterprise/indexercluster.go | 13 +++- pkg/splunk/enterprise/indexercluster_test.go | 59 +++++++++++++++---- pkg/splunk/manager.go | 2 +- 11 files changed, 221 insertions(+), 13 deletions(-) create mode 100644 pkg/gateway/splunk/services/fixture/cluster_maintenance.json diff --git a/pkg/gateway/splunk/model/services/cluster/url_types.go b/pkg/gateway/splunk/model/services/cluster/url_types.go index 59281f6ac..f7e1fe966 100644 --- a/pkg/gateway/splunk/model/services/cluster/url_types.go +++ b/pkg/gateway/splunk/model/services/cluster/url_types.go @@ -30,4 +30,6 @@ const ( GetSearchHeadCaptainInfoUrl = "/services/shcluster/captain/info" GetClusterManagerStatusUrl = "/services/cluster/manager/status" + + SetClusterInMaintenanceMode = "/services/cluster/manager/control/default/maintenance" ) diff --git a/pkg/gateway/splunk/services/fixture/cluster_maintenance.json b/pkg/gateway/splunk/services/fixture/cluster_maintenance.json new file mode 100644 index 000000000..020c88b2f --- /dev/null +++ b/pkg/gateway/splunk/services/fixture/cluster_maintenance.json @@ -0,0 +1,48 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/control/default/maintenance", + "updated": "2022-07-18T23:54:03+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/control/default/maintenance/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/control/default/maintenance/master", + "list": "/services/cluster/manager/control/default/maintenance/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/services/fixture/fixture.go b/pkg/gateway/splunk/services/fixture/fixture.go index 760386ee1..1ce569dee 100644 --- a/pkg/gateway/splunk/services/fixture/fixture.go +++ b/pkg/gateway/splunk/services/fixture/fixture.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "strconv" "path/filepath" @@ -341,3 +342,49 @@ func (p *fixtureGateway) GetClusterManagerStatus(ctx context.Context) (*[]manage } return &contentList, nil } + +// SetClusterInMaintainanceMode Endpoint to set cluster in maintenance mode. +// Post the status of a rolling restart. 
+// endpoint: https://:/services/cluster/manager/control/default/maintenance +func (p *fixtureGateway) SetClusterInMaintenanceMode(context context.Context, mode bool) error { + + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return err + } + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + content, err := ioutil.ReadFile(relativePath + "/cluster_maintenance.json") + if err != nil { + log.Error(err, "fixture: error in post cluster maintenance") + return err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := clustermodel.SetClusterInMaintenanceMode + httpmock.RegisterResponder("POST", fakeUrl, responder) + + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + resp, err := p.client.R(). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "mode": strconv.FormatBool(mode)}). + Post(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager status failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return splunkError + } + + return err +} diff --git a/pkg/gateway/splunk/services/gateway.go b/pkg/gateway/splunk/services/gateway.go index 3dcb00b63..692f38539 100644 --- a/pkg/gateway/splunk/services/gateway.go +++ b/pkg/gateway/splunk/services/gateway.go @@ -44,4 +44,9 @@ type Gateway interface { // GET the status of a rolling restart. // endpoint: https://:/services/cluster/manager/status GetClusterManagerStatus(ctx context.Context) (*[]managermodel.ClusterManagerStatusContent, error) + + // SetClusterInMaintainanceMode Endpoint to set cluster in maintenance mode. + // Post the status of a rolling restart. + // endpoint: https://:/services/cluster/manager/control/default/maintenance + SetClusterInMaintenanceMode(context context.Context, mode bool) error } diff --git a/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go b/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go index 2d6cb00e9..3da8474e7 100644 --- a/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go +++ b/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go @@ -3,6 +3,7 @@ package impl import ( "context" "net/http" + "strconv" "github.com/go-logr/logr" "github.com/go-resty/resty/v2" @@ -203,3 +204,32 @@ func (p *splunkGateway) GetClusterManagerStatus(context context.Context) (*[]man } return &contentList, err } + +// SetClusterInMaintainanceMode Endpoint to set cluster in maintenance mode. +// Post the status of a rolling restart. +// endpoint: https://:/services/cluster/manager/control/default/maintenance +func (p *splunkGateway) SetClusterInMaintenanceMode(context context.Context, mode bool) error { + url := clustermodel.SetClusterInMaintenanceMode + + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + resp, err := p.client.R(). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "mode": strconv.FormatBool(mode)}). 
+		Post(url)
+	if err != nil {
+		p.log.Error(err, "set cluster maintenance mode failed")
+	}
+	if resp.StatusCode() != http.StatusOK {
+		p.log.Info("response failure set to", "result", err)
+	}
+	if resp.StatusCode() > 400 {
+		if len(splunkError.Messages) > 0 {
+			p.log.Info("response failure set to", "result", splunkError.Messages[0].Text)
+		}
+		return splunkError
+	}
+
+	return err
+}
diff --git a/pkg/gateway/splunk/services/implementation/splunk_test.go b/pkg/gateway/splunk/services/implementation/splunk_test.go
index 008be0729..076e3b27c 100644
--- a/pkg/gateway/splunk/services/implementation/splunk_test.go
+++ b/pkg/gateway/splunk/services/implementation/splunk_test.go
@@ -122,3 +122,25 @@ func TestGetClusterManagerPeers(t *testing.T) {
 		t.Errorf("fixture: error in get cluster manager searchheads peers list is empty")
 	}
 }
+
+func TestSetClusterInMaintenanceMode(t *testing.T) {
+	httpmock.Activate()
+	defer httpmock.DeactivateAndReset()
+
+	ctx := context.TODO()
+	sm := setCreds(t)
+	httpmock.ActivateNonDefault(sm.client.GetClient())
+	content, err := ioutil.ReadFile("../fixture/cluster_maintenance.json")
+	if err != nil {
+		t.Errorf("fixture: error reading cluster maintenance fixture %v", err)
+	}
+	fixtureData := string(content)
+	responder := httpmock.NewStringResponder(200, fixtureData)
+	url := clustermodel.SetClusterInMaintenanceMode
+	httpmock.RegisterResponder("POST", url, responder)
+
+	err = sm.SetClusterInMaintenanceMode(ctx, true)
+	if err != nil {
+		t.Errorf("fixture: error in set cluster maintenance mode %v", err)
+	}
+}
diff --git a/pkg/provisioner/splunk/implementation/splunk.go b/pkg/provisioner/splunk/implementation/splunk.go
index d13599207..c2dae334d 100644
--- a/pkg/provisioner/splunk/implementation/splunk.go
+++ b/pkg/provisioner/splunk/implementation/splunk.go
@@ -162,3 +162,7 @@ func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, conditi
 func (p *splunkProvisioner) CheckClusterManagerHealth(ctx context.Context) (result provmodel.Result, err error) {
 	return result, nil
 }
+
+func (p *splunkProvisioner) SetClusterInMaintenanceMode(ctx context.Context, mode bool) error {
+	return p.gateway.SetClusterInMaintenanceMode(ctx, mode)
+}
diff --git a/pkg/provisioner/splunk/provisioner.go b/pkg/provisioner/splunk/provisioner.go
index d3cb9141f..d8d28103f 100644
--- a/pkg/provisioner/splunk/provisioner.go
+++ b/pkg/provisioner/splunk/provisioner.go
@@ -27,4 +27,6 @@ type Provisioner interface {
 
 	// CheckClusterManagerHealth
 	CheckClusterManagerHealth(ctx context.Context) (result provmodel.Result, err error)
+
+	SetClusterInMaintenanceMode(ctx context.Context, mode bool) error
 }
diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go
index 8ad327b38..b5ba8e997 100644
--- a/pkg/splunk/enterprise/indexercluster.go
+++ b/pkg/splunk/enterprise/indexercluster.go
@@ -45,7 +45,7 @@ import (
 type NewSplunkClientFunc func(managementURI, username, password string) *splclient.SplunkClient
 
 // ApplyIndexerClusterManager reconciles the state of a Splunk Enterprise indexer cluster.
-func ApplyIndexerClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error) { +func (p *splunkManager) ApplyIndexerClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error) { // unless modified, reconcile for this object will be requeued after 5 seconds result := reconcile.Result{ @@ -202,6 +202,11 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller return result, err } } else { + err = p.provisioner.SetClusterInMaintenanceMode(ctx, true) + if err != nil { + eventPublisher.Warning(ctx, "SetClusterInMaintenanceMode", fmt.Sprintf("Unable to enable cluster manager maintenance mode %s", err.Error())) + return result, err + } // Delete the statefulset and recreate new one err = client.Delete(ctx, statefulSet) if err != nil { @@ -279,6 +284,12 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller result.Requeue = true return result, err } + + err = p.provisioner.SetClusterInMaintenanceMode(ctx, false) + if err != nil { + eventPublisher.Warning(ctx, "SetClusterInMaintenanceMode", fmt.Sprintf("Unable to disable cluster manager maintenance mode %s", err.Error())) + return result, err + } } // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 9e6c4490b..d8d0ef47d 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -27,6 +27,7 @@ import ( "testing" "time" + "github.com/jinzhu/copier" "github.com/pkg/errors" enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -40,10 +41,13 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" "github.com/go-logr/logr" + manager "github.com/splunk/splunk-operator/pkg/splunk" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + managermodel "github.com/splunk/splunk-operator/pkg/splunk/model" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -67,6 +71,28 @@ func init() { } } +func setCredsIdx(t *testing.T, c splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) manager.SplunkManager { + ctx := context.TODO() + clusterManager := enterpriseApi.ClusterManager{} + clusterManager.Name = "test" + info := &managermodel.ReconcileInfo{ + Kind: cr.Kind, + CommonSpec: cr.Spec.CommonSplunkSpec, + Client: c, + Log: log.Log, + Namespace: cr.Namespace, + Name: cr.Name, + } + copier.Copy(info.MetaObject, cr.ObjectMeta) + publisher := func(ctx context.Context, eventType, reason, message string) {} + mg := NewManagerFactory(true) + manager, err := mg.NewManager(ctx, info, publisher) + if err != nil { + return nil + } + return manager +} + func TestApplyIndexerClusterOld(t *testing.T) { c := spltest.NewMockClient() ctx := context.TODO() @@ -220,7 +246,8 @@ func TestApplyIndexerCluster(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = 
"splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyIndexerClusterManager(context.Background(), c, cr.(*enterpriseApi.IndexerCluster)) + manager := setCredsIdx(t, c, cr.(*enterpriseApi.IndexerCluster)) + _, err := manager.ApplyIndexerClusterManager(context.Background(), c, cr.(*enterpriseApi.IndexerCluster)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyIndexerClusterManager", ¤t, revised, createCalls, updateCalls, reconcile, true) @@ -230,7 +257,8 @@ func TestApplyIndexerCluster(t *testing.T) { revised.ObjectMeta.DeletionTimestamp = ¤tTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - _, err := ApplyIndexerClusterManager(context.Background(), c, cr.(*enterpriseApi.IndexerCluster)) + manager := setCredsIdx(t, c, cr.(*enterpriseApi.IndexerCluster)) + _, err := manager.ApplyIndexerClusterManager(context.Background(), c, cr.(*enterpriseApi.IndexerCluster)) return true, err } splunkDeletionTester(t, revised, deleteFunc) @@ -240,7 +268,8 @@ func TestApplyIndexerCluster(t *testing.T) { c := spltest.NewMockClient() rerr := errors.New(splcommon.Rerr) c.InduceErrorKind[splcommon.MockClientInduceErrorGet] = rerr - _, err := ApplyIndexerClusterManager(ctx, c, ¤t) + manager := setCredsIdx(t, c, ¤t) + _, err := manager.ApplyIndexerClusterManager(ctx, c, ¤t) if err == nil { t.Errorf("Expected error") } @@ -260,7 +289,8 @@ func TestApplyIndexerCluster(t *testing.T) { Name: "manager1", Namespace: "test", } - _, err = ApplyIndexerClusterManager(ctx, c, ¤t) + manager = setCredsIdx(t, c, ¤t) + _, err = manager.ApplyIndexerClusterManager(ctx, c, ¤t) if err != nil { t.Errorf("Expected error") } @@ -273,7 +303,8 @@ func TestApplyIndexerCluster(t *testing.T) { newc.Create(ctx, nsSec) newc.Create(ctx, &cManager) newc.InduceErrorKind[splcommon.MockClientInduceErrorCreate] = rerr - _, err = ApplyIndexerClusterManager(ctx, newc, ¤t) + manager = setCredsIdx(t, c, ¤t) + _, err = manager.ApplyIndexerClusterManager(ctx, newc, ¤t) if err == nil { t.Errorf("Expected error") } @@ -1283,19 +1314,22 @@ func TestInvalidIndexerClusterSpec(t *testing.T) { cm.Status.Phase = enterpriseApi.PhaseReady // Empty ClusterManagerRef should return an error cr.Spec.ClusterManagerRef.Name = "" - if _, err := ApplyIndexerClusterManager(context.Background(), c, &cr); err == nil { + manager := setCredsIdx(t, c, &cr) + if _, err := manager.ApplyIndexerClusterManager(context.Background(), c, &cr); err == nil { t.Errorf("ApplyIndxerCluster() should have returned error") } cr.Spec.ClusterManagerRef.Name = "manager1" // verifyRFPeers should return err here - if _, err := ApplyIndexerClusterManager(context.Background(), c, &cr); err == nil { + manager = setCredsIdx(t, c, &cr) + if _, err := manager.ApplyIndexerClusterManager(context.Background(), c, &cr); err == nil { t.Errorf("ApplyIndxerCluster() should have returned error") } cm.Status.Phase = enterpriseApi.PhaseError cr.Spec.CommonSplunkSpec.EtcVolumeStorageConfig.StorageCapacity = "-abcd" - if _, err := ApplyIndexerClusterManager(context.Background(), c, &cr); err == nil { + manager = setCredsIdx(t, c, &cr) + if _, err := manager.ApplyIndexerClusterManager(context.Background(), c, &cr); err == nil { t.Errorf("ApplyIndxerCluster() should have returned error") } } @@ -1756,7 +1790,8 @@ func TestIndexerClusterWithReadyState(t *testing.T) { // simulate create clustermanager instance before reconcilation 
c.Create(ctx, indexercluster) - _, err = ApplyIndexerClusterManager(ctx, c, indexercluster) + manager = setCredsIdx(t, c, indexercluster) + _, err = manager.ApplyIndexerClusterManager(ctx, c, indexercluster) if err != nil { t.Errorf("Unexpected error while running reconciliation for indexer cluster %v", err) debug.PrintStack() @@ -1793,7 +1828,8 @@ func TestIndexerClusterWithReadyState(t *testing.T) { } // call reconciliation - _, err = ApplyIndexerClusterManager(ctx, c, indexercluster) + manager = setCredsIdx(t, c, indexercluster) + _, err = manager.ApplyIndexerClusterManager(ctx, c, indexercluster) if err != nil { t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err) debug.PrintStack() @@ -1870,7 +1906,8 @@ func TestIndexerClusterWithReadyState(t *testing.T) { indexercluster.Status.IndexingReady = true indexercluster.Status.ServiceReady = true // call reconciliation - _, err = ApplyIndexerClusterManager(ctx, c, indexercluster) + manager = setCredsIdx(t, c, indexercluster) + _, err = manager.ApplyIndexerClusterManager(ctx, c, indexercluster) if err != nil { t.Errorf("Unexpected error while running reconciliation for indexer cluster with app framework %v", err) debug.PrintStack() diff --git a/pkg/splunk/manager.go b/pkg/splunk/manager.go index aa2efa4e1..207192e50 100644 --- a/pkg/splunk/manager.go +++ b/pkg/splunk/manager.go @@ -18,7 +18,7 @@ type Factory interface { type SplunkManager interface { ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) //ApplyClusterMaster(ctx context.Context, cr *enterpriseApiV3.ClusterMaster) (reconcile.Result, error) - //ApplyIndexerClusterManager(ctx context.Context, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error) + ApplyIndexerClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error) //ApplyMonitoringConsole(ctx context.Context, cr *enterpriseApi.MonitoringConsole) (reconcile.Result, error) //ApplySearchHeadCluster(ctx context.Context, cr *enterpriseApi.SearchHeadCluster) (reconcile.Result, error) //ApplyStandalone(ctx context.Context, cr *enterpriseApi.Standalone) (reconcile.Result, error) From 59ad0aae56af3d5f8408e13acb637ec65cf370d6 Mon Sep 17 00:00:00 2001 From: Tanya Garg Date: Thu, 27 Jul 2023 12:58:17 -0700 Subject: [PATCH 77/85] Fixed controller code --- controllers/indexercluster_controller.go | 28 +++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/controllers/indexercluster_controller.go b/controllers/indexercluster_controller.go index ab583b0c0..80f0fd3be 100644 --- a/controllers/indexercluster_controller.go +++ b/controllers/indexercluster_controller.go @@ -18,13 +18,16 @@ package controllers import ( "context" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" "time" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/pkg/errors" enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" common "github.com/splunk/splunk-operator/controllers/common" + provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + managermodel "github.com/splunk/splunk-operator/pkg/splunk/model" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -42,7 +45,8 @@ import ( // IndexerClusterReconciler reconciles a IndexerCluster object type 
IndexerClusterReconciler struct { client.Client - Scheme *runtime.Scheme + Scheme *runtime.Scheme + ProvisionerFactory provisioner.Factory } //+kubebuilder:rbac:groups=enterprise.splunk.com,resources=indexerclusters,verbs=get;list;watch;create;update;patch;delete @@ -113,8 +117,26 @@ func (r *IndexerClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reque // ApplyIndexerCluster adding to handle unit test case var ApplyIndexerCluster = func(ctx context.Context, client client.Client, instance *enterpriseApi.IndexerCluster) (reconcile.Result, error) { // IdxCluster can be supported by two CRD types for CM + publishEvent := func(ctx context.Context, eventType, reason, message string) { + instance.NewEvent(eventType, reason, message) + } if len(instance.Spec.ClusterManagerRef.Name) > 0 { - return enterprise.ApplyIndexerClusterManager(ctx, client, instance) + info := &managermodel.ReconcileInfo{ + Kind: instance.Kind, + CommonSpec: instance.Spec.CommonSplunkSpec, + Client: client, + Log: log.FromContext(ctx), + Namespace: instance.GetNamespace(), + Name: instance.GetName(), + MetaObject: instance, + } + //copier.Copy(info.MetaObject, instance.ObjectMeta) + mg := enterprise.NewManagerFactory(false) + manager, err := mg.NewManager(ctx, info, publishEvent) + if err != nil { + instance.NewEvent("Warning", "ApplyIndexerCluster", err.Error()) + } + return manager.ApplyIndexerClusterManager(ctx, client, instance) } return enterprise.ApplyIndexerCluster(ctx, client, instance) } From 5f96a5c548e5c8ec4ed482dbcca30d02214f3c7c Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Tue, 1 Aug 2023 12:15:25 -0700 Subject: [PATCH 78/85] changes to support lm Signed-off-by: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> --- api/v3/clustermaster_types.go | 3 + api/v4/clustermanager_types.go | 16 +- api/v4/indexercluster_types.go | 2 +- api/v4/licensemanager_types.go | 2 +- api/v4/monitoringconsole_types.go | 10 +- api/v4/searchheadcluster_types.go | 8 +- api/v4/standalone_types.go | 10 +- ...enterprise.splunk.com_clustermanagers.yaml | 3 + .../enterprise.splunk.com_clustermasters.yaml | 3 + .../license-manager/fixture/license.json | 104 ++++ .../fixture/license_fixture.go | 470 ++++++++++++++++++ .../fixture/license_group.json | 104 ++++ .../fixture/license_local_peer.json | 104 ++++ .../fixture/license_message.json | 104 ++++ .../fixture/license_peers.json | 104 ++++ .../fixture/license_pools.json | 104 ++++ .../fixture/license_stack.json | 104 ++++ .../fixture/license_usage.json | 104 ++++ pkg/gateway/splunk/license-manager/gateway.go | 55 ++ .../license-manager/implementation/factory.go | 86 ++++ .../implementation/license_impl.go | 291 +++++++++++ .../implementation/license_test.go | 77 +++ .../model/services/cluster/url_types.go | 2 +- .../services/license/license_header_types.go | 46 ++ .../model/services/license/license_types.go | 53 ++ .../model/services/license/url_types.go | 12 + .../splunk/services/fixture/fixture.go | 24 +- pkg/gateway/splunk/services/gateway.go | 10 +- .../implementation/cluster_manager_impl.go | 23 +- .../splunk/services/implementation/factory.go | 5 +- .../services/implementation/server_impl.go | 4 + .../services/implementation/splunk_test.go | 2 +- .../splunk/implementation/factory.go | 31 +- .../splunk/implementation/license.go | 49 ++ .../splunk/implementation/splunk.go | 18 +- .../splunk/implementation/splunk_test.go | 6 +- pkg/provisioner/splunk/provisioner.go | 16 +- 
pkg/splunk/enterprise/clustermanager.go | 60 ++- pkg/splunk/enterprise/clustermanager_test.go | 24 +- pkg/splunk/enterprise/events.go | 7 +- pkg/splunk/enterprise/factory.go | 7 +- pkg/splunk/enterprise/indexercluster.go | 45 +- pkg/splunk/enterprise/licensemanager.go | 11 +- pkg/splunk/enterprise/licensemanager_test.go | 9 +- pkg/splunk/manager.go | 6 +- pkg/splunk/model/types.go | 6 + 46 files changed, 2246 insertions(+), 98 deletions(-) create mode 100644 pkg/gateway/splunk/license-manager/fixture/license.json create mode 100644 pkg/gateway/splunk/license-manager/fixture/license_fixture.go create mode 100644 pkg/gateway/splunk/license-manager/fixture/license_group.json create mode 100644 pkg/gateway/splunk/license-manager/fixture/license_local_peer.json create mode 100644 pkg/gateway/splunk/license-manager/fixture/license_message.json create mode 100644 pkg/gateway/splunk/license-manager/fixture/license_peers.json create mode 100644 pkg/gateway/splunk/license-manager/fixture/license_pools.json create mode 100644 pkg/gateway/splunk/license-manager/fixture/license_stack.json create mode 100644 pkg/gateway/splunk/license-manager/fixture/license_usage.json create mode 100644 pkg/gateway/splunk/license-manager/gateway.go create mode 100644 pkg/gateway/splunk/license-manager/implementation/factory.go create mode 100644 pkg/gateway/splunk/license-manager/implementation/license_impl.go create mode 100644 pkg/gateway/splunk/license-manager/implementation/license_test.go create mode 100644 pkg/gateway/splunk/model/services/license/license_header_types.go create mode 100644 pkg/gateway/splunk/model/services/license/license_types.go create mode 100644 pkg/gateway/splunk/model/services/license/url_types.go create mode 100644 pkg/provisioner/splunk/implementation/license.go diff --git a/api/v3/clustermaster_types.go b/api/v3/clustermaster_types.go index 6d027378b..51bae0f6a 100644 --- a/api/v3/clustermaster_types.go +++ b/api/v3/clustermaster_types.go @@ -68,6 +68,9 @@ type ClusterMasterStatus struct { // Telemetry App installation flag TelAppInstalled bool `json:"telAppInstalled"` + + // Indicates if the cluster is in maintenance mode. + MaintenanceMode bool `json:"maintenance_mode"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/clustermanager_types.go b/api/v4/clustermanager_types.go index a3e5d1587..006b5c3e3 100644 --- a/api/v4/clustermanager_types.go +++ b/api/v4/clustermanager_types.go @@ -32,6 +32,9 @@ const ( // ClusterManagerPausedAnnotation is the annotation that pauses the reconciliation (triggers // an immediate requeue) ClusterManagerPausedAnnotation = "clustermanager.enterprise.splunk.com/paused" + // ClusterManagerPausedAnnotation is the annotation that pauses the reconciliation (triggers + // an immediate requeue) + ClusterManagerMaintenanceAnnotation = "clustermanager.enterprise.splunk.com/maintenance" ) // ClusterManagerSpec defines the desired state of ClusterManager @@ -68,6 +71,9 @@ type ClusterManagerStatus struct { // Telemetry App installation flag TelAppInstalled bool `json:"telAppInstalled"` + // Indicates if the cluster is in maintenance mode. + MaintenanceMode bool `json:"maintenanceMode"` + // Conditions represent the latest available observations of an object's state Conditions []metav1.Condition `json:"conditions"` @@ -122,13 +128,13 @@ func (cmstr *ClusterManager) NewEvent(eventType, reason, message string) corev1. 
return corev1.Event{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: reason + "-",
-			Namespace:    cmstr.ObjectMeta.Namespace,
+			Namespace:    cmstr.Namespace,
 		},
 		InvolvedObject: corev1.ObjectReference{
-			Kind:       "Clustermanager",
-			Namespace:  cmstr.Namespace,
-			Name:       cmstr.Name,
-			UID:        cmstr.UID,
+			Kind:       "ClusterManager",
+			Namespace:  cmstr.GetNamespace(),
+			Name:       cmstr.GetName(),
+			UID:        cmstr.GetUID(),
 			APIVersion: GroupVersion.String(),
 		},
 		Reason:  reason,
diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go
index 77e9048e8..c9145e86c 100644
--- a/api/v4/indexercluster_types.go
+++ b/api/v4/indexercluster_types.go
@@ -158,7 +158,7 @@ func (icstr *IndexerCluster) NewEvent(eventType, reason, message string) corev1.
 	return corev1.Event{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: reason + "-",
-			Namespace:    icstr.ObjectMeta.Namespace,
+			Namespace:    icstr.Namespace,
 		},
 		InvolvedObject: corev1.ObjectReference{
 			Kind:       "IndexerCluster",
diff --git a/api/v4/licensemanager_types.go b/api/v4/licensemanager_types.go
index cce30c5e5..fbd7ea1da 100644
--- a/api/v4/licensemanager_types.go
+++ b/api/v4/licensemanager_types.go
@@ -97,7 +97,7 @@ func (lmstr *LicenseManager) NewEvent(eventType, reason, message string) corev1.
 	return corev1.Event{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: reason + "-",
-			Namespace:    lmstr.ObjectMeta.Namespace,
+			Namespace:    lmstr.Namespace,
 		},
 		InvolvedObject: corev1.ObjectReference{
 			Kind:       "LicenseManager",
diff --git a/api/v4/monitoringconsole_types.go b/api/v4/monitoringconsole_types.go
index 05adb4475..fe3bc8c2d 100644
--- a/api/v4/monitoringconsole_types.go
+++ b/api/v4/monitoringconsole_types.go
@@ -105,13 +105,13 @@ func (mcnsl *MonitoringConsole) NewEvent(eventType, reason, message string) core
 	return corev1.Event{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: reason + "-",
-			Namespace:    mcnsl.ObjectMeta.Namespace,
+			Namespace:    mcnsl.Namespace,
 		},
 		InvolvedObject: corev1.ObjectReference{
-			Kind:       "MonitoringConsole",
-			Namespace:  mcnsl.Namespace,
-			Name:       mcnsl.Name,
-			UID:        mcnsl.UID,
+			Kind:       "MonitoringConsole",
+			Namespace:  mcnsl.GetNamespace(),
+			Name:       mcnsl.GetName(),
+			UID:        mcnsl.GetUID(),
 			APIVersion: GroupVersion.String(),
 		},
 		Reason:  reason,
diff --git a/api/v4/searchheadcluster_types.go b/api/v4/searchheadcluster_types.go
index 310fa5122..1df55411c 100644
--- a/api/v4/searchheadcluster_types.go
+++ b/api/v4/searchheadcluster_types.go
@@ -167,13 +167,13 @@ func (shcstr *SearchHeadCluster) NewEvent(eventType, reason, message string) cor
 	return corev1.Event{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: reason + "-",
-			Namespace:    shcstr.ObjectMeta.Namespace,
+			Namespace:    shcstr.GetNamespace(),
 		},
 		InvolvedObject: corev1.ObjectReference{
 			Kind:       "SearchHeadCluster",
-			Namespace:  shcstr.Namespace,
-			Name:       shcstr.Name,
-			UID:        shcstr.UID,
+			Namespace:  shcstr.GetNamespace(),
+			Name:       shcstr.GetName(),
+			UID:        shcstr.GetUID(),
 			APIVersion: GroupVersion.String(),
 		},
 		Reason:  reason,
diff --git a/api/v4/standalone_types.go b/api/v4/standalone_types.go
index d67a84e74..f213bcb11 100644
--- a/api/v4/standalone_types.go
+++ b/api/v4/standalone_types.go
@@ -121,13 +121,13 @@ func (standln *Standalone) NewEvent(eventType, reason, message string) corev1.Ev
 	return corev1.Event{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: reason + "-",
-			Namespace:    standln.ObjectMeta.Namespace,
+			Namespace:    standln.Namespace,
 		},
 		InvolvedObject: corev1.ObjectReference{
-			Kind:       "Standalone",
-			Namespace:  standln.Namespace,
-			Name:       standln.Name,
-			UID:        standln.UID,
+			Kind:       "Standalone",
Namespace: standln.GetNamespace(), + Name: standln.GetName(), + UID: standln.GetUID(), APIVersion: GroupVersion.String(), }, Reason: reason, diff --git a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml index 2f5aefdd3..23a588606 100644 --- a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml +++ b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml @@ -4308,6 +4308,9 @@ spec: errorMessage: description: ErrorMessage shows current error if there are any type: string + maintenanceMode: + description: Indicates if the cluster is in maintenance mode. + type: boolean phase: description: current phase of the cluster manager enum: diff --git a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml index 657276f57..65d045548 100644 --- a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml +++ b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml @@ -4235,6 +4235,9 @@ spec: needToPushMasterApps: type: boolean type: object + maintenance_mode: + description: Indicates if the cluster is in maintenance mode. + type: boolean phase: description: current phase of the cluster manager enum: diff --git a/pkg/gateway/splunk/license-manager/fixture/license.json b/pkg/gateway/splunk/license-manager/fixture/license.json new file mode 100644 index 000000000..41a8d026b --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license.json @@ -0,0 +1,104 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/info", + "updated": "2022-07-18T23:54:50+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/info/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/info/master", + "list": "/services/cluster/manager/info/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "apply_bundle_status": { + "invalid_bundle": { + "bundle_path": "", + "bundle_validation_errors_on_master": [], + "checksum": "", + "timestamp": 0 + }, + "reload_bundle_issued": false, + "status": "None" + }, + "available_sites": "[site1, site2]", + "backup_and_restore_primaries": false, + "controlled_rolling_restart_flag": false, + "eai:acl": null, + "forwarder_site_failover": "", + "indexing_ready_flag": true, + "initialized_flag": true, + "label": "splunk-cm-cluster-master-0", + "last_check_restart_bundle_result": false, + "last_dry_run_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "last_validated_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "is_valid_bundle": true, + "timestamp": 1657658326 + }, + "latest_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", 
+ "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "maintenance_mode": false, + "multisite": true, + "previous_active_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "primaries_backup_status": "No on-going (or) completed primaries backup yet. Check back again in few minutes if you expect a backup.", + "quiet_period_flag": false, + "rolling_restart_flag": false, + "rolling_restart_or_upgrade": false, + "service_ready_flag": true, + "site_replication_factor": "{ origin:1, total:2 }", + "site_search_factor": "{ origin:1, total:2 }", + "start_time": 1657658831, + "summary_replication": "false" + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_fixture.go b/pkg/gateway/splunk/license-manager/fixture/license_fixture.go new file mode 100644 index 000000000..8f3b20649 --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_fixture.go @@ -0,0 +1,470 @@ +package fixture + +import ( + "context" + "fmt" + "os" + + "path/filepath" + + //"encoding/json" + "io/ioutil" + "net/http" + + "github.com/go-logr/logr" + "github.com/go-resty/resty/v2" + "github.com/jarcoal/httpmock" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license" + + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager" + model "github.com/splunk/splunk-operator/pkg/splunk/model" + logz "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var log = logz.New().WithName("gateway").WithName("fixture") + +// fixtureGateway implements the gateway.fixtureGateway interface +// and uses splunk to manage the host. +type fixtureGateway struct { + // client for talking to splunk + client *resty.Client + // the splunk credentials + credentials splunkmodel.SplunkCredentials + // a logger configured for this host + log logr.Logger + // an event publisher for recording significant events + publisher model.EventPublisher + // state of the splunk + state *Fixture +} + +func findFixturePath() (string, error) { + ext := ".env" + wd, err := os.Getwd() + if err != nil { + return "", err + } + for { + dir, err := os.Open(wd) + if err != nil { + fmt.Println("Error opening directory:", err) + return "", err + } + defer dir.Close() + + files, err := dir.Readdir(-1) + if err != nil { + fmt.Println("Error reading directory:", err) + return "", err + } + + for _, file := range files { + if file.Name() == ext { + wd, err = filepath.Abs(wd) + wd += "/pkg/gateway/splunk/license-manager/fixture/" + return wd, err + } + } + wd += "/.." + } +} + +// Fixture contains persistent state for a particular splunk instance +type Fixture struct { +} + +// NewGateway returns a new Fixture Gateway +func (f *Fixture) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (gateway.Gateway, error) { + p := &fixtureGateway{ + log: log.WithValues("splunk", sad.Address), + publisher: publisher, + state: f, + client: resty.New(), + } + return p, nil +} + +func (p *fixtureGateway) GetLicenseGroup(ctx context.Context) (*[]licensemodel.LicenseGroup, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. 
+ relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := ioutil.ReadFile(relativePath + "/license_group.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicenseGroupUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseGroup{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicenseGroup) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *fixtureGateway) GetLicense(ctx context.Context) (*[]licensemodel.License, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := ioutil.ReadFile(relativePath + "/license.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicenseUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.License{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.License) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *fixtureGateway) GetLicenseLocalPeer(ctx context.Context) (*[]licensemodel.LicenseLocalPeer, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. 
+ relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := ioutil.ReadFile(relativePath + "/license_local_peer.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicenseLocalPeersUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseLocalPeer{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicenseLocalPeer) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *fixtureGateway) GetLicenseMessage(ctx context.Context) (*[]licensemodel.LicenseMessage, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := ioutil.ReadFile(relativePath + "/license_message.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicenseMessagesUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseMessage{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicenseMessage) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *fixtureGateway) GetLicensePools(ctx context.Context) (*[]licensemodel.LicensePool, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. 
+ relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := ioutil.ReadFile(relativePath + "/license_pools.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicensePoolsUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicensePool{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicensePool) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *fixtureGateway) GetLicensePeers(context context.Context) (*[]licensemodel.LicensePeer, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := ioutil.ReadFile(relativePath + "/license_peers.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicensePeersUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicensePeer{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicensePeer) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *fixtureGateway) GetLicenseUsage(ctx context.Context) (*[]licensemodel.LicenseUsage, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. 
+ relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := ioutil.ReadFile(relativePath + "/license_usage.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicenseUsageUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseUsage{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicenseUsage) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *fixtureGateway) GetLicenseStacks(ctx context.Context) (*[]licensemodel.LicenseStack, error) { + // Read entire file content, giving us little control but + // making it very simple. No need to close the file. + relativePath, err := findFixturePath() + if err != nil { + log.Error(err, "fixture: unable to find path") + return nil, err + } + content, err := ioutil.ReadFile(relativePath + "/license_stack.json") + if err != nil { + log.Error(err, "fixture: error in get cluster config") + return nil, err + } + httpmock.ActivateNonDefault(p.client.GetClient()) + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + fakeUrl := licensemodel.GetLicenseStacksUrl + httpmock.RegisterResponder("GET", fakeUrl, responder) + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). 
+ Get(fakeUrl) + if err != nil { + p.log.Error(err, "get cluster manager buckets failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseStack{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicenseStack) + contentList = append(contentList, content) + } + return &contentList, nil +} diff --git a/pkg/gateway/splunk/license-manager/fixture/license_group.json b/pkg/gateway/splunk/license-manager/fixture/license_group.json new file mode 100644 index 000000000..41a8d026b --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_group.json @@ -0,0 +1,104 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/info", + "updated": "2022-07-18T23:54:50+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/info/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/info/master", + "list": "/services/cluster/manager/info/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "apply_bundle_status": { + "invalid_bundle": { + "bundle_path": "", + "bundle_validation_errors_on_master": [], + "checksum": "", + "timestamp": 0 + }, + "reload_bundle_issued": false, + "status": "None" + }, + "available_sites": "[site1, site2]", + "backup_and_restore_primaries": false, + "controlled_rolling_restart_flag": false, + "eai:acl": null, + "forwarder_site_failover": "", + "indexing_ready_flag": true, + "initialized_flag": true, + "label": "splunk-cm-cluster-master-0", + "last_check_restart_bundle_result": false, + "last_dry_run_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "last_validated_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "is_valid_bundle": true, + "timestamp": 1657658326 + }, + "latest_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "maintenance_mode": false, + "multisite": true, + "previous_active_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "primaries_backup_status": "No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.", + "quiet_period_flag": false, + "rolling_restart_flag": false, + "rolling_restart_or_upgrade": false, + "service_ready_flag": true, + "site_replication_factor": "{ origin:1, total:2 }", + "site_search_factor": "{ origin:1, total:2 }", + "start_time": 1657658831, + "summary_replication": "false" + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_local_peer.json b/pkg/gateway/splunk/license-manager/fixture/license_local_peer.json new file mode 100644 index 000000000..41a8d026b --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_local_peer.json @@ -0,0 +1,104 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/info", + "updated": "2022-07-18T23:54:50+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/info/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/info/master", + "list": "/services/cluster/manager/info/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "apply_bundle_status": { + "invalid_bundle": { + "bundle_path": "", + "bundle_validation_errors_on_master": [], + "checksum": "", + "timestamp": 0 + }, + "reload_bundle_issued": false, + "status": "None" + }, + "available_sites": "[site1, site2]", + "backup_and_restore_primaries": false, + "controlled_rolling_restart_flag": false, + "eai:acl": null, + "forwarder_site_failover": "", + "indexing_ready_flag": true, + "initialized_flag": true, + "label": "splunk-cm-cluster-master-0", + "last_check_restart_bundle_result": false, + "last_dry_run_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "last_validated_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "is_valid_bundle": true, + "timestamp": 1657658326 + }, + "latest_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "maintenance_mode": false, + "multisite": true, + "previous_active_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "primaries_backup_status": "No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.", + "quiet_period_flag": false, + "rolling_restart_flag": false, + "rolling_restart_or_upgrade": false, + "service_ready_flag": true, + "site_replication_factor": "{ origin:1, total:2 }", + "site_search_factor": "{ origin:1, total:2 }", + "start_time": 1657658831, + "summary_replication": "false" + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_message.json b/pkg/gateway/splunk/license-manager/fixture/license_message.json new file mode 100644 index 000000000..41a8d026b --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_message.json @@ -0,0 +1,104 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/info", + "updated": "2022-07-18T23:54:50+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/info/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/info/master", + "list": "/services/cluster/manager/info/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "apply_bundle_status": { + "invalid_bundle": { + "bundle_path": "", + "bundle_validation_errors_on_master": [], + "checksum": "", + "timestamp": 0 + }, + "reload_bundle_issued": false, + "status": "None" + }, + "available_sites": "[site1, site2]", + "backup_and_restore_primaries": false, + "controlled_rolling_restart_flag": false, + "eai:acl": null, + "forwarder_site_failover": "", + "indexing_ready_flag": true, + "initialized_flag": true, + "label": "splunk-cm-cluster-master-0", + "last_check_restart_bundle_result": false, + "last_dry_run_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "last_validated_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "is_valid_bundle": true, + "timestamp": 1657658326 + }, + "latest_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "maintenance_mode": false, + "multisite": true, + "previous_active_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "primaries_backup_status": "No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.", + "quiet_period_flag": false, + "rolling_restart_flag": false, + "rolling_restart_or_upgrade": false, + "service_ready_flag": true, + "site_replication_factor": "{ origin:1, total:2 }", + "site_search_factor": "{ origin:1, total:2 }", + "start_time": 1657658831, + "summary_replication": "false" + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_peers.json b/pkg/gateway/splunk/license-manager/fixture/license_peers.json new file mode 100644 index 000000000..41a8d026b --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_peers.json @@ -0,0 +1,104 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/info", + "updated": "2022-07-18T23:54:50+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/info/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/info/master", + "list": "/services/cluster/manager/info/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "apply_bundle_status": { + "invalid_bundle": { + "bundle_path": "", + "bundle_validation_errors_on_master": [], + "checksum": "", + "timestamp": 0 + }, + "reload_bundle_issued": false, + "status": "None" + }, + "available_sites": "[site1, site2]", + "backup_and_restore_primaries": false, + "controlled_rolling_restart_flag": false, + "eai:acl": null, + "forwarder_site_failover": "", + "indexing_ready_flag": true, + "initialized_flag": true, + "label": "splunk-cm-cluster-master-0", + "last_check_restart_bundle_result": false, + "last_dry_run_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "last_validated_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "is_valid_bundle": true, + "timestamp": 1657658326 + }, + "latest_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "maintenance_mode": false, + "multisite": true, + "previous_active_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "primaries_backup_status": "No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.", + "quiet_period_flag": false, + "rolling_restart_flag": false, + "rolling_restart_or_upgrade": false, + "service_ready_flag": true, + "site_replication_factor": "{ origin:1, total:2 }", + "site_search_factor": "{ origin:1, total:2 }", + "start_time": 1657658831, + "summary_replication": "false" + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_pools.json b/pkg/gateway/splunk/license-manager/fixture/license_pools.json new file mode 100644 index 000000000..41a8d026b --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_pools.json @@ -0,0 +1,104 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/info", + "updated": "2022-07-18T23:54:50+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/info/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/info/master", + "list": "/services/cluster/manager/info/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "apply_bundle_status": { + "invalid_bundle": { + "bundle_path": "", + "bundle_validation_errors_on_master": [], + "checksum": "", + "timestamp": 0 + }, + "reload_bundle_issued": false, + "status": "None" + }, + "available_sites": "[site1, site2]", + "backup_and_restore_primaries": false, + "controlled_rolling_restart_flag": false, + "eai:acl": null, + "forwarder_site_failover": "", + "indexing_ready_flag": true, + "initialized_flag": true, + "label": "splunk-cm-cluster-master-0", + "last_check_restart_bundle_result": false, + "last_dry_run_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "last_validated_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "is_valid_bundle": true, + "timestamp": 1657658326 + }, + "latest_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "maintenance_mode": false, + "multisite": true, + "previous_active_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "primaries_backup_status": "No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.", + "quiet_period_flag": false, + "rolling_restart_flag": false, + "rolling_restart_or_upgrade": false, + "service_ready_flag": true, + "site_replication_factor": "{ origin:1, total:2 }", + "site_search_factor": "{ origin:1, total:2 }", + "start_time": 1657658831, + "summary_replication": "false" + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_stack.json b/pkg/gateway/splunk/license-manager/fixture/license_stack.json new file mode 100644 index 000000000..41a8d026b --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_stack.json @@ -0,0 +1,104 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/info", + "updated": "2022-07-18T23:54:50+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/info/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/info/master", + "list": "/services/cluster/manager/info/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "apply_bundle_status": { + "invalid_bundle": { + "bundle_path": "", + "bundle_validation_errors_on_master": [], + "checksum": "", + "timestamp": 0 + }, + "reload_bundle_issued": false, + "status": "None" + }, + "available_sites": "[site1, site2]", + "backup_and_restore_primaries": false, + "controlled_rolling_restart_flag": false, + "eai:acl": null, + "forwarder_site_failover": "", + "indexing_ready_flag": true, + "initialized_flag": true, + "label": "splunk-cm-cluster-master-0", + "last_check_restart_bundle_result": false, + "last_dry_run_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "last_validated_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "is_valid_bundle": true, + "timestamp": 1657658326 + }, + "latest_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "maintenance_mode": false, + "multisite": true, + "previous_active_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "primaries_backup_status": "No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.", + "quiet_period_flag": false, + "rolling_restart_flag": false, + "rolling_restart_or_upgrade": false, + "service_ready_flag": true, + "site_replication_factor": "{ origin:1, total:2 }", + "site_search_factor": "{ origin:1, total:2 }", + "start_time": 1657658831, + "summary_replication": "false" + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_usage.json b/pkg/gateway/splunk/license-manager/fixture/license_usage.json new file mode 100644 index 000000000..41a8d026b --- /dev/null +++ b/pkg/gateway/splunk/license-manager/fixture/license_usage.json @@ -0,0 +1,104 @@ +{ + "links": {}, + "origin": "https://localhost:8089/services/cluster/manager/info", + "updated": "2022-07-18T23:54:50+00:00", + "generator": { + "build": "6818ac46f2ec", + "version": "9.0.0" + }, + "entry": [ + { + "name": "master", + "id": "https://localhost:8089/services/cluster/manager/info/master", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/cluster/manager/info/master", + "list": "/services/cluster/manager/info/master" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "apply_bundle_status": { + "invalid_bundle": { + "bundle_path": "", + "bundle_validation_errors_on_master": [], + "checksum": "", + "timestamp": 0 + }, + "reload_bundle_issued": false, + "status": "None" + }, + "available_sites": "[site1, site2]", + "backup_and_restore_primaries": false, + "controlled_rolling_restart_flag": false, + "eai:acl": null, + "forwarder_site_failover": "", + "indexing_ready_flag": true, + "initialized_flag": true, + "label": "splunk-cm-cluster-master-0", + "last_check_restart_bundle_result": false, + "last_dry_run_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "last_validated_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "is_valid_bundle": true, + "timestamp": 1657658326 + }, + "latest_bundle": { + "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", + "checksum": "7351975980A20311463444E66492BDD5", + "timestamp": 1657658326 + }, + "maintenance_mode": false, + "multisite": true, + "previous_active_bundle": { + "bundle_path": "", + "checksum": "", + "timestamp": 0 + }, + "primaries_backup_status": "No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.", + "quiet_period_flag": false, + "rolling_restart_flag": false, + "rolling_restart_or_upgrade": false, + "service_ready_flag": true, + "site_replication_factor": "{ origin:1, total:2 }", + "site_search_factor": "{ origin:1, total:2 }", + "start_time": 1657658831, + "summary_replication": "false" + } + } + ], + "paging": { + "total": 1, + "perPage": 30, + "offset": 0 + }, + "messages": [] +} \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/gateway.go b/pkg/gateway/splunk/license-manager/gateway.go new file mode 100644 index 000000000..713cb813f --- /dev/null +++ b/pkg/gateway/splunk/license-manager/gateway.go @@ -0,0 +1,55 @@ +package licensemanager + +import ( + "context" + + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license" + model "github.com/splunk/splunk-operator/pkg/splunk/model" +) + +// Factory is the interface for creating new Gateway objects. +type Factory interface { + NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (Gateway, error) +} + +// Gateway holds the state information for talking to +// splunk gateway backend. +type Gateway interface { + + // GetLicenseGroup Performs health checks to determine the cluster health and search impact, prior to a rolling upgrade of the indexer cluster. + // Authentication and Authorization: + // Requires the admin role or list_indexer_cluster capability. + // endpoint: https://:/services/cluster/manager/health + GetLicenseGroup(ctx context.Context) (*[]licensemodel.LicenseGroup, error) + + // GetLicense Access information about cluster manager node. + // get List cluster manager node details. + // endpoint: https://:/services/cluster/manager/info + GetLicense(ctx context.Context) (*[]licensemodel.License, error) + + // GetLicenseLocalPeerAccess cluster manager peers. + // endpoint: https://:/services/cluster/manager/peers + GetLicenseLocalPeer(ctx context.Context) (*[]licensemodel.LicenseLocalPeer, error) + + // GetLicenseMessage Access cluster site information. + // list List available cluster sites. + // endpoint: https://:/services/cluster/manager/sites + GetLicenseMessage(ctx context.Context) (*[]licensemodel.LicenseMessage, error) + + // GetLicensePools Endpoint to get the status of a rolling restart. + // GET the status of a rolling restart. + // endpoint: https://:/services/cluster/manager/status + GetLicensePools(ctx context.Context) (*[]licensemodel.LicensePool, error) + + // GetLicensePeers Endpoint to set cluster in maintenance mode. + // Post the status of a rolling restart. 
+ // endpoint: https://:/services/cluster/manager/control/default/maintenance + GetLicensePeers(context context.Context) (*[]licensemodel.LicensePeer, error) + + // GetLicenseUsage check if cluster is in maintenance mode + GetLicenseUsage(ctx context.Context) (*[]licensemodel.LicenseUsage, error) + + // GetLicenseStacks check if cluster is in maintenance mode + GetLicenseStacks(ctx context.Context) (*[]licensemodel.LicenseStack, error) +} diff --git a/pkg/gateway/splunk/license-manager/implementation/factory.go b/pkg/gateway/splunk/license-manager/implementation/factory.go new file mode 100644 index 000000000..5242baa39 --- /dev/null +++ b/pkg/gateway/splunk/license-manager/implementation/factory.go @@ -0,0 +1,86 @@ +package impl + +import ( + "context" + "crypto/tls" + "fmt" + + "github.com/go-logr/logr" + "github.com/go-resty/resty/v2" + gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + "time" + + model "github.com/splunk/splunk-operator/pkg/splunk/model" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type splunkGatewayFactory struct { + log logr.Logger + //credentials to log on to splunk + credentials *splunkmodel.SplunkCredentials + // client for talking to splunk + client *resty.Client +} + +// NewGatewayFactory new gateway factory to create gateway interface +func NewGatewayFactory() gateway.Factory { + factory := splunkGatewayFactory{} + err := factory.init() + if err != nil { + return nil // FIXME we have to throw some kind of exception or error here + } + return factory +} + +func (f *splunkGatewayFactory) init() error { + return nil +} + +func (f splunkGatewayFactory) splunkGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (*splunkGateway, error) { + gatewayLogger := log.FromContext(ctx) + reqLogger := log.FromContext(ctx) + f.log = reqLogger.WithName("splunkGateway") + + f.client = resty.New() + // Enable debug mode + f.client.SetDebug(true) + // or One can disable security check (https) + f.client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: sad.DisableCertificateVerification}) + // Set client timeout as per your need + f.client.SetTimeout(1 * time.Minute) + namespace := "default" + if len(sad.Namespace) > 0 { + namespace = sad.Namespace + } + //splunkURL := fmt.Sprintf("https://%s:%d/%s", sad.Address, sad.Port, sad.ServicesNamespace) + splunkURL := fmt.Sprintf("https://%s.%s:%d", sad.Address, namespace, sad.Port) + f.client.SetBaseURL(splunkURL) + f.client.SetBasicAuth("admin", sad.CredentialsName) + f.client.SetHeader("Content-Type", "application/json") + f.client.SetHeader("Accept", "application/json") + f.credentials = sad + + gatewayLogger.Info("new splunk manager created to access rest endpoint") + newGateway := &splunkGateway{ + credentials: f.credentials, + client: f.client, + log: f.log, + debugLog: f.log, + publisher: publisher, + } + f.log.Info("splunk settings", + "endpoint", f.credentials.Address, + "CACertFile", f.credentials.TrustedCAFile, + "ClientCertFile", f.credentials.ClientCertificateFile, + "ClientPrivKeyFile", f.credentials.ClientPrivateKeyFile, + "TLSInsecure", f.credentials.DisableCertificateVerification, + ) + return newGateway, nil +} + +// NewGateway returns a new Splunk Gateway using global +// configuration for finding the Splunk services. 
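+//
+// A minimal usage sketch (illustrative only; ctx, publisher, and the
+// credential values are assumptions supplied by the caller, not part of
+// this factory):
+//
+//	sad := &splunkmodel.SplunkCredentials{Address: "splunk-lm-license-manager-service", Port: 8089, DisableCertificateVerification: true}
+//	gw, err := NewGatewayFactory().NewGateway(ctx, sad, publisher)
+//	if err != nil {
+//		return err
+//	}
+//	licenses, err := gw.GetLicense(ctx)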
+func (f splunkGatewayFactory) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (gateway.Gateway, error) { + return f.splunkGateway(ctx, sad, publisher) +} diff --git a/pkg/gateway/splunk/license-manager/implementation/license_impl.go b/pkg/gateway/splunk/license-manager/implementation/license_impl.go new file mode 100644 index 000000000..cadcd281f --- /dev/null +++ b/pkg/gateway/splunk/license-manager/implementation/license_impl.go @@ -0,0 +1,291 @@ +package impl + +import ( + "context" + "net/http" + + "github.com/go-logr/logr" + "github.com/go-resty/resty/v2" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license" + model "github.com/splunk/splunk-operator/pkg/splunk/model" +) + +// splunkGateway implements the gateway.Gateway interface +// and uses gateway to manage the host. +type splunkGateway struct { + // a logger configured for this host + log logr.Logger + // a debug logger configured for this host + debugLog logr.Logger + // an event publisher for recording significant events + publisher model.EventPublisher + // client for talking to splunk + client *resty.Client + // credentials + credentials *splunkmodel.SplunkCredentials +} + +func (p *splunkGateway) GetLicenseGroup(ctx context.Context) (*[]licensemodel.LicenseGroup, error) { + url := licensemodel.GetLicenseGroupUrl + + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(url) + if err != nil { + p.log.Error(err, "get cluster manager peers failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseGroup{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicenseGroup) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *splunkGateway) GetLicense(ctx context.Context) (*[]licensemodel.License, error) { + url := licensemodel.GetLicenseUrl + + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). 
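+		// output_mode=json asks the Splunk REST API to return JSON instead of
+		// the default XML, and count=0 lifts the default entry cap so the full
+		// list comes back in a single page.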
+ Get(url) + if err != nil { + p.log.Error(err, "get cluster manager peers failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.License{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.License) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *splunkGateway) GetLicenseLocalPeer(ctx context.Context) (*[]licensemodel.LicenseLocalPeer, error) { + url := licensemodel.GetLicenseLocalPeersUrl + + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(url) + if err != nil { + p.log.Error(err, "get cluster manager peers failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseLocalPeer{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicenseLocalPeer) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *splunkGateway) GetLicenseMessage(ctx context.Context) (*[]licensemodel.LicenseMessage, error) { + url := licensemodel.GetLicenseMessagesUrl + + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(url) + if err != nil { + p.log.Error(err, "get cluster manager peers failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseMessage{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicenseMessage) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *splunkGateway) GetLicensePools(ctx context.Context) (*[]licensemodel.LicensePool, error) { + url := licensemodel.GetLicensePoolsUrl + + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). 
+ Get(url) + if err != nil { + p.log.Error(err, "get cluster manager peers failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicensePool{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicensePool) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *splunkGateway) GetLicensePeers(context context.Context) (*[]licensemodel.LicensePeer, error) { + url := licensemodel.GetLicenseGroupUrl + + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(url) + if err != nil { + p.log.Error(err, "get cluster manager peers failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicensePeer{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicensePeer) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *splunkGateway) GetLicenseUsage(ctx context.Context) (*[]licensemodel.LicenseUsage, error) { + url := licensemodel.GetLicenseUsageUrl + + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). + Get(url) + if err != nil { + p.log.Error(err, "get cluster manager peers failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseUsage{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicenseUsage) + contentList = append(contentList, content) + } + return &contentList, nil +} + +func (p *splunkGateway) GetLicenseStacks(ctx context.Context) (*[]licensemodel.LicenseStack, error) { + url := licensemodel.GetLicenseStacksUrl + + // featch the configheader into struct + splunkError := &splunkmodel.SplunkError{} + envelop := &licensemodel.LicenseHeader{} + resp, err := p.client.R(). + SetResult(envelop). + SetError(&splunkError). + ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). 
+ Get(url) + if err != nil { + p.log.Error(err, "get cluster manager peers failed") + } + if resp.StatusCode() != http.StatusOK { + p.log.Info("response failure set to", "result", err) + } + if resp.StatusCode() > 400 { + if len(splunkError.Messages) > 0 { + p.log.Info("response failure set to", "result", splunkError.Messages[0].Text) + } + return nil, splunkError + } + + contentList := []licensemodel.LicenseStack{} + for _, entry := range envelop.Entry { + content := entry.Content.(licensemodel.LicenseStack) + contentList = append(contentList, content) + } + return &contentList, nil +} diff --git a/pkg/gateway/splunk/license-manager/implementation/license_test.go b/pkg/gateway/splunk/license-manager/implementation/license_test.go new file mode 100644 index 000000000..477d91e75 --- /dev/null +++ b/pkg/gateway/splunk/license-manager/implementation/license_test.go @@ -0,0 +1,77 @@ +package impl + +import ( + "context" + "fmt" + "time" + + "github.com/go-resty/resty/v2" + "github.com/jarcoal/httpmock" + splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + clustermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster" + + //managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" + //peermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/peer" + "io/ioutil" + "testing" + + logz "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var slog = logz.New().WithName("gateway").WithName("fixture") + +func setCreds(t *testing.T) *splunkGateway { + //ctx := context.TODO() + sad := &splunkmodel.SplunkCredentials{ + Address: "splunk-cm-cluster-master-service", + Port: 8089, + ServicesNamespace: "", + User: "admin", + App: "", + CredentialsName: "admin: abcdefghijklmnopqrstuvwxyz", + TrustedCAFile: "", + ClientCertificateFile: "", + ClientPrivateKeyFile: "", + DisableCertificateVerification: true, + } + publisher := func(ctx context.Context, eventType, reason, message string) {} + // TODO fixme how to test the gateway call directly + //sm := NewGatewayFactory(ctx, &sad, publisher) + sm := &splunkGateway{ + credentials: sad, + client: resty.New(), + publisher: publisher, + log: slog, + debugLog: slog, + } + //splunkURL := fmt.Sprintf("https://%s:%d/%s", sad.Address, sad.Port, sad.ServicesNamespace) + splunkURL := fmt.Sprintf("https://%s:%d", sad.Address, sad.Port) + sm.client.SetBaseURL(splunkURL) + sm.client.SetHeader("Content-Type", "application/json") + sm.client.SetHeader("Accept", "application/json") + sm.client.SetTimeout(time.Duration(60 * time.Minute)) + sm.client.SetDebug(true) + return sm +} + +func GetLicenseGroup(t *testing.T) { + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + ctx := context.TODO() + sm := setCreds(t) + httpmock.ActivateNonDefault(sm.client.GetClient()) + content, err := ioutil.ReadFile("../fixture/license_group.json") + if err != nil { + t.Errorf("fixture: error in get cluster manager health %v", err) + } + fixtureData := string(content) + responder := httpmock.NewStringResponder(200, fixtureData) + url := clustermodel.GetClusterManagerHealthUrl + httpmock.RegisterResponder("GET", url, responder) + + _, err = sm.GetLicenseGroup(ctx) + if err != nil { + t.Errorf("fixture: error in get cluster manager health %v", err) + } +} diff --git a/pkg/gateway/splunk/model/services/cluster/url_types.go b/pkg/gateway/splunk/model/services/cluster/url_types.go index f7e1fe966..29165c013 100644 --- a/pkg/gateway/splunk/model/services/cluster/url_types.go +++ 
b/pkg/gateway/splunk/model/services/cluster/url_types.go @@ -31,5 +31,5 @@ const ( GetClusterManagerStatusUrl = "/services/cluster/manager/status" - SetClusterInMaintenanceMode = "/services/cluster/manager/control/default/maintenance" + SetClusterInMaintenanceModeUrl = "/services/cluster/manager/control/default/maintenance" ) diff --git a/pkg/gateway/splunk/model/services/license/license_header_types.go b/pkg/gateway/splunk/model/services/license/license_header_types.go new file mode 100644 index 000000000..9f4de1624 --- /dev/null +++ b/pkg/gateway/splunk/model/services/license/license_header_types.go @@ -0,0 +1,46 @@ +package license + +import ( + "time" +) + +type LicenseHeader struct { + Links struct { + } `json:"links"` + Origin string `json:"origin"` + Updated time.Time `json:"updated"` + Generator struct { + Build string `json:"build"` + Version string `json:"version"` + } `json:"generator"` + Entry []struct { + Name string `json:"name"` + ID string `json:"id"` + Updated time.Time `json:"updated"` + Links struct { + Alternate string `json:"alternate"` + List string `json:"list"` + } `json:"links"` + Author string `json:"author"` + Acl struct { + App string `json:"app"` + CanList bool `json:"can_list"` + CanWrite bool `json:"can_write"` + Modifiable bool `json:"modifiable"` + Owner string `json:"owner"` + Perms struct { + Read []string `json:"read"` + Write []string `json:"write"` + } `json:"perms"` + Removable bool `json:"removable"` + Sharing string `json:"sharing"` + } `json:"acl"` + Content interface{} `json:"content"` + } `json:"entry"` + Paging struct { + Total int `json:"total"` + PerPage int `json:"perPage"` + Offset int `json:"offset"` + } `json:"paging"` + Messages []interface{} `json:"messages"` +} diff --git a/pkg/gateway/splunk/model/services/license/license_types.go b/pkg/gateway/splunk/model/services/license/license_types.go new file mode 100644 index 000000000..05c0c650f --- /dev/null +++ b/pkg/gateway/splunk/model/services/license/license_types.go @@ -0,0 +1,53 @@ +package license + +// https://:/services/licenser/groups +// Provides access to the configuration of licenser groups. +// A licenser group contains one or more licenser stacks that can operate concurrently. +// Only one licenser group is active at any given time. +type LicenseGroup struct { +} + +// https://:/services/licenser/licenses +// Provides access to the licenses for this Splunk Enterprise instance. +// A license enables various features for a Splunk instance, including but not limited +// to indexing quota, auth, search, forwarding. +type License struct { +} + +// https://:/services/licenser/localpeer +// Get license state information for the Splunk instance. +type LicenseLocalPeer struct { +} + +// https://:/services/licenser/messages +// Access licenser messages. +// Messages may range from helpful warnings about being close to violations, licenses +// expiring or more severe alerts regarding overages and exceeding license warning window. +type LicenseMessage struct { +} + +// https://:/services/licenser/pools +// Access the licenser pools configuration. +// A pool logically partitions the daily volume entitlements of a stack. You can use a +// license pool to divide license privileges amongst multiple peers. +type LicensePool struct { +} + +// https://:/services/licenser/peers +// Access license peer instances. +type LicensePeer struct { +} + +// https://:/services/licenser/stacks +// Provides access to the license stack configuration. 
+// A license stack is comprised of one or more licenses of the same "type". +// The daily indexing quota of a license stack is additive, so a stack represents +// the aggregate entitlement for a collection of licenses. +type LicenseStack struct { +} + +// LicenseUsage +// https://:/services/licenser/usage +// Get current license usage stats from the last minute. +type LicenseUsage struct { +} diff --git a/pkg/gateway/splunk/model/services/license/url_types.go b/pkg/gateway/splunk/model/services/license/url_types.go new file mode 100644 index 000000000..43e4b61ce --- /dev/null +++ b/pkg/gateway/splunk/model/services/license/url_types.go @@ -0,0 +1,12 @@ +package license + +const ( + GetLicenseGroupUrl = "/services/licenser/groups" + GetLicenseUrl = "/services/licenser/licenses" + GetLicenseLocalPeersUrl = "/services/licenser/localpeer" + GetLicenseMessagesUrl = "/services/licenser/messages" + GetLicensePoolsUrl = "/services/licenser/pools" + GetLicensePeersUrl = "/services/licenser/peers" + GetLicenseStacksUrl = "/services/licenser/stacks" + GetLicenseUsageUrl = "/services/licenser/usage" +) diff --git a/pkg/gateway/splunk/services/fixture/fixture.go b/pkg/gateway/splunk/services/fixture/fixture.go index 1ce569dee..36d2c7086 100644 --- a/pkg/gateway/splunk/services/fixture/fixture.go +++ b/pkg/gateway/splunk/services/fixture/fixture.go @@ -18,7 +18,7 @@ import ( splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" clustermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster" managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" - + model "github.com/splunk/splunk-operator/pkg/splunk/model" // peermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/peer" // searchheadmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/searchhead" // commonmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/common" @@ -39,7 +39,7 @@ type fixtureGateway struct { // a logger configured for this host log logr.Logger // an event publisher for recording significant events - publisher gateway.EventPublisher + publisher model.EventPublisher // state of the splunk state *Fixture } @@ -80,7 +80,7 @@ type Fixture struct { } // NewGateway returns a new Fixture Gateway -func (f *Fixture) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher gateway.EventPublisher) (gateway.Gateway, error) { +func (f *Fixture) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (gateway.Gateway, error) { p := &fixtureGateway{ log: log.WithValues("splunk", sad.Address), publisher: publisher, @@ -363,7 +363,7 @@ func (p *fixtureGateway) SetClusterInMaintenanceMode(context context.Context, mo httpmock.ActivateNonDefault(p.client.GetClient()) fixtureData := string(content) responder := httpmock.NewStringResponder(200, fixtureData) - fakeUrl := clustermodel.SetClusterInMaintenanceMode + fakeUrl := clustermodel.SetClusterInMaintenanceModeUrl httpmock.RegisterResponder("POST", fakeUrl, responder) // featch the configheader into struct @@ -374,7 +374,7 @@ func (p *fixtureGateway) SetClusterInMaintenanceMode(context context.Context, mo SetQueryParams(map[string]string{"output_mode": "json", "mode": strconv.FormatBool(mode)}). 
Post(fakeUrl) if err != nil { - p.log.Error(err, "get cluster manager status failed") + p.log.Error(err, "set cluster manager in maintenance mode failed") } if resp.StatusCode() != http.StatusOK { p.log.Info("response failure set to", "result", err) @@ -388,3 +388,17 @@ func (p *fixtureGateway) SetClusterInMaintenanceMode(context context.Context, mo return err } + +// IsClusterInMaintenanceMode Endpoint check if cluster in maintenance mode. +// endpoint: https://:/services/cluster/manager/control/default/maintenance +func (p *fixtureGateway) IsClusterInMaintenanceMode(ctx context.Context) (result bool, err error) { + clusterInfoList, err := p.GetClusterManagerInfo(ctx) + if err != nil { + return false, err + } + if clusterInfoList != nil && len(*clusterInfoList) > 0 { + content := *clusterInfoList + return content[0].MaintenanceMode, nil + } + return false, nil +} diff --git a/pkg/gateway/splunk/services/gateway.go b/pkg/gateway/splunk/services/gateway.go index 692f38539..1e19304e3 100644 --- a/pkg/gateway/splunk/services/gateway.go +++ b/pkg/gateway/splunk/services/gateway.go @@ -5,15 +5,12 @@ import ( splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" + model "github.com/splunk/splunk-operator/pkg/splunk/model" ) -// EventPublisher is a function type for publishing events associated -// with gateway functions. -type EventPublisher func(ctx context.Context, eventType, reason, message string) - // Factory is the interface for creating new Gateway objects. type Factory interface { - NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher EventPublisher) (Gateway, error) + NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (Gateway, error) } // Gateway holds the state information for talking to @@ -49,4 +46,7 @@ type Gateway interface { // Post the status of a rolling restart. // endpoint: https://:/services/cluster/manager/control/default/maintenance SetClusterInMaintenanceMode(context context.Context, mode bool) error + + // IsClusterInMaintenanceMode check if cluster is in maintenance mode + IsClusterInMaintenanceMode(ctx context.Context) (bool, error) } diff --git a/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go b/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go index 3da8474e7..58f10f5b0 100644 --- a/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go +++ b/pkg/gateway/splunk/services/implementation/cluster_manager_impl.go @@ -10,7 +10,7 @@ import ( splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" clustermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster" managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" - gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" + model "github.com/splunk/splunk-operator/pkg/splunk/model" ) // splunkGateway implements the gateway.Gateway interface @@ -21,7 +21,7 @@ type splunkGateway struct { // a debug logger configured for this host debugLog logr.Logger // an event publisher for recording significant events - publisher gateway.EventPublisher + publisher model.EventPublisher // client for talking to splunk client *resty.Client // credentials @@ -41,6 +41,7 @@ func (p *splunkGateway) GetClusterManagerInfo(context context.Context) (*[]manag SetResult(envelop). SetError(&splunkError). 
ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). Get(url) if err != nil { p.log.Error(err, "get cluster manager info failed") @@ -208,8 +209,8 @@ func (p *splunkGateway) GetClusterManagerStatus(context context.Context) (*[]man // SetClusterInMaintainanceMode Endpoint to set cluster in maintenance mode. // Post the status of a rolling restart. // endpoint: https://:/services/cluster/manager/control/default/maintenance -func (p *splunkGateway) SetClusterInMaintenanceMode(context context.Context, mode bool) error { - url := clustermodel.SetClusterInMaintenanceMode +func (p *splunkGateway) SetClusterInMaintenanceMode(ctx context.Context, mode bool) error { + url := clustermodel.SetClusterInMaintenanceModeUrl // featch the configheader into struct splunkError := &splunkmodel.SplunkError{} @@ -233,3 +234,17 @@ func (p *splunkGateway) SetClusterInMaintenanceMode(context context.Context, mod return err } + +// IsClusterInMaintenanceMode Endpoint check if cluster in maintenance mode. +// endpoint: https://:/services/cluster/manager/control/default/maintenance +func (p *splunkGateway) IsClusterInMaintenanceMode(ctx context.Context) (result bool, err error) { + clusterInfoList, err := p.GetClusterManagerInfo(ctx) + if err != nil { + return false, err + } + if clusterInfoList != nil && len(*clusterInfoList) > 0 { + content := *clusterInfoList + return content[0].MaintenanceMode, nil + } + return false, nil +} diff --git a/pkg/gateway/splunk/services/implementation/factory.go b/pkg/gateway/splunk/services/implementation/factory.go index 3cb48e6f6..930d33322 100644 --- a/pkg/gateway/splunk/services/implementation/factory.go +++ b/pkg/gateway/splunk/services/implementation/factory.go @@ -14,6 +14,7 @@ import ( //cmmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/cluster-manager/model" "time" + model "github.com/splunk/splunk-operator/pkg/splunk/model" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -39,7 +40,7 @@ func (f *splunkGatewayFactory) init() error { return nil } -func (f splunkGatewayFactory) splunkGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher gateway.EventPublisher) (*splunkGateway, error) { +func (f splunkGatewayFactory) splunkGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (*splunkGateway, error) { gatewayLogger := log.FromContext(ctx) reqLogger := log.FromContext(ctx) f.log = reqLogger.WithName("splunkGateway") @@ -83,6 +84,6 @@ func (f splunkGatewayFactory) splunkGateway(ctx context.Context, sad *splunkmode // NewGateway returns a new Splunk Gateway using global // configuration for finding the Splunk services. -func (f splunkGatewayFactory) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher gateway.EventPublisher) (gateway.Gateway, error) { +func (f splunkGatewayFactory) NewGateway(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (gateway.Gateway, error) { return f.splunkGateway(ctx, sad, publisher) } diff --git a/pkg/gateway/splunk/services/implementation/server_impl.go b/pkg/gateway/splunk/services/implementation/server_impl.go index 7f9d004eb..cc3b0c100 100644 --- a/pkg/gateway/splunk/services/implementation/server_impl.go +++ b/pkg/gateway/splunk/services/implementation/server_impl.go @@ -26,6 +26,7 @@ func (p *splunkGateway) GetServerDeploymentHealthDetails(context context.Context SetResult(envelop). SetError(&splunkError). ForceContentType("application/json"). 
+ SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). Get(url) if err != nil { p.log.Error(err, "get deployment details failed") @@ -62,6 +63,7 @@ func (p *splunkGateway) GetSplunkdHealthDetails(context context.Context) (*[]hea SetResult(envelop). SetError(&splunkError). ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). Get(url) if err != nil { p.log.Error(err, "get splunkd health details failed") @@ -101,6 +103,7 @@ func (p *splunkGateway) GetServerInfo(context context.Context) (*[]healthmodel.D SetResult(envelop). SetError(&splunkError). ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). Get(url) if err != nil { p.log.Error(err, "get splunkd health details failed") @@ -135,6 +138,7 @@ func (p *splunkGateway) GetServerStatus(context context.Context) (*[]healthmodel SetResult(envelop). SetError(&splunkError). ForceContentType("application/json"). + SetQueryParams(map[string]string{"output_mode": "json", "count": "0"}). Get(url) if err != nil { p.log.Error(err, "get splunkd health details failed") diff --git a/pkg/gateway/splunk/services/implementation/splunk_test.go b/pkg/gateway/splunk/services/implementation/splunk_test.go index 076e3b27c..03e1429d3 100644 --- a/pkg/gateway/splunk/services/implementation/splunk_test.go +++ b/pkg/gateway/splunk/services/implementation/splunk_test.go @@ -136,7 +136,7 @@ func TestSetClusterInMaintenanceeMode(t *testing.T) { } fixtureData := string(content) responder := httpmock.NewStringResponder(200, fixtureData) - url := clustermodel.SetClusterInMaintenanceMode + url := clustermodel.SetClusterInMaintenanceModeUrl httpmock.RegisterResponder("POST", url, responder) err = sm.SetClusterInMaintenanceMode(ctx, true) diff --git a/pkg/provisioner/splunk/implementation/factory.go b/pkg/provisioner/splunk/implementation/factory.go index 21cb14d42..24c532be1 100644 --- a/pkg/provisioner/splunk/implementation/factory.go +++ b/pkg/provisioner/splunk/implementation/factory.go @@ -6,6 +6,9 @@ import ( "github.com/go-logr/logr" //model "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" + licensegateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager" + licensefixture "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager/fixture" + splunklicensegatewayimpl "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager/implementation" splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" "github.com/splunk/splunk-operator/pkg/gateway/splunk/services/fixture" @@ -13,7 +16,7 @@ import ( provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" //cmmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/cluster-manager/model" - + model "github.com/splunk/splunk-operator/pkg/splunk/model" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -23,6 +26,8 @@ type splunkProvisionerFactory struct { credentials *splunkmodel.SplunkCredentials // Gateway Factory gatewayFactory gateway.Factory + // splunk license factory + licenseFactory licensegateway.Factory } // NewProvisionerFactory new provisioner factory to create provisioner interface @@ -42,10 +47,15 @@ func (f *splunkProvisionerFactory) init(runInTestMode bool) error { } else { f.gatewayFactory = splunkgatewayimpl.NewGatewayFactory() } + if runInTestMode { + f.licenseFactory = &licensefixture.Fixture{} + } else { + 
f.licenseFactory = splunklicensegatewayimpl.NewGatewayFactory() + } return nil } -func (f splunkProvisionerFactory) splunkProvisioner(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher gateway.EventPublisher) (*splunkProvisioner, error) { +func (f splunkProvisionerFactory) splunkProvisioner(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (*splunkProvisioner, error) { provisionerLogger := log.FromContext(ctx) reqLogger := log.FromContext(ctx) f.log = reqLogger.WithName("splunkProvisioner") @@ -57,12 +67,17 @@ func (f splunkProvisionerFactory) splunkProvisioner(ctx context.Context, sad *sp if err != nil { return nil, err } + licensegateway, err := f.licenseFactory.NewGateway(ctx, sad, publisher) + if err != nil { + return nil, err + } newProvisioner := &splunkProvisioner{ - credentials: f.credentials, - log: f.log, - debugLog: f.log, - publisher: publisher, - gateway: gateway, + credentials: f.credentials, + log: f.log, + debugLog: f.log, + publisher: publisher, + gateway: gateway, + licensegateway: licensegateway, } f.log.Info("splunk settings", @@ -77,6 +92,6 @@ func (f splunkProvisionerFactory) splunkProvisioner(ctx context.Context, sad *sp // NewProvisioner returns a new Splunk Provisioner using global // configuration for finding the Splunk services. -func (f splunkProvisionerFactory) NewProvisioner(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher gateway.EventPublisher) (provisioner.Provisioner, error) { +func (f splunkProvisionerFactory) NewProvisioner(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (provisioner.Provisioner, error) { return f.splunkProvisioner(ctx, sad, publisher) } diff --git a/pkg/provisioner/splunk/implementation/license.go b/pkg/provisioner/splunk/implementation/license.go new file mode 100644 index 000000000..b41972579 --- /dev/null +++ b/pkg/provisioner/splunk/implementation/license.go @@ -0,0 +1,49 @@ +package impl + +import ( + "context" + "fmt" + + licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license" + provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" + //"k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var callLicenseLocalPeer = func(ctx context.Context, p *splunkProvisioner) (*[]licensemodel.LicenseLocalPeer, error) { + lminfo, err := p.licensegateway.GetLicenseLocalPeer(ctx) + if err != nil { + return nil, err + } else if lminfo == nil { + return nil, fmt.Errorf("license local peer data is empty") + } + return lminfo, err +} + +// GetLicenseLocalPeer gets license state information for the local Splunk instance.
+func (p *splunkProvisioner) GetLicenseLocalPeer(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) { + _, err = callLicenseLocalPeer(ctx, p) + //peerlistptr, err := callLicenseLocalPeer(ctx, p) + if err != nil { + return result, err + } + /* else { + peerlist := *peerlistptr + for _, peer := range peerlist { + condition := metav1.Condition{ + Type: peer.Label, + Message: fmt.Sprintf("%s in site %s is %s ", peer.Label, peer.Site, peer.Status), + Reason: peer.Site, + } + if peer.Status == "Up" { + condition.Status = metav1.ConditionTrue + } else { + condition.Status = metav1.ConditionFalse + + } + // set condition to existing conditions list + meta.SetStatusCondition(conditions, condition) + } + }*/ + return result, err +} diff --git a/pkg/provisioner/splunk/implementation/splunk.go b/pkg/provisioner/splunk/implementation/splunk.go index c2dae334d..d8fcbd829 100644 --- a/pkg/provisioner/splunk/implementation/splunk.go +++ b/pkg/provisioner/splunk/implementation/splunk.go @@ -6,10 +6,12 @@ import ( "strings" "github.com/go-logr/logr" + licensegateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/license-manager" splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" + model "github.com/splunk/splunk-operator/pkg/splunk/model" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -22,11 +24,13 @@ type splunkProvisioner struct { // a debug logger configured for this host debugLog logr.Logger // an event publisher for recording significant events - publisher gateway.EventPublisher + publisher model.EventPublisher // credentials credentials *splunkmodel.SplunkCredentials // gateway factory gateway gateway.Gateway + // splunk license factory + licensegateway licensegateway.Gateway } var callGetClusterManagerInfo = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerInfoContent, error) { @@ -79,8 +83,8 @@ var callGetClusterManagerSitesStatus = func(ctx context.Context, p *splunkProvis return peerlist, err } -// SetClusterManagerStatus Access cluster node configuration details. -func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) { +// GetClusterManagerStatus Access cluster node configuration details. 
+func (p *splunkProvisioner) GetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) { peerlistptr, err := callGetClusterManagerPeersStatus(ctx, p) if err != nil { @@ -89,8 +93,8 @@ func (p *splunkProvisioner) SetClusterManagerStatus(ctx context.Context, conditi peerlist := *peerlistptr for _, peer := range peerlist { condition := metav1.Condition{ - Type: "Peers", - Message: fmt.Sprintf("%s with %s is %s ", peer.Site, peer.Label, peer.Status), + Type: peer.Label, + Message: fmt.Sprintf("%s in site %s is %s ", peer.Label, peer.Site, peer.Status), Reason: peer.Site, } if peer.Status == "Up" { @@ -166,3 +170,7 @@ func (p *splunkProvisioner) CheckClusterManagerHealth(ctx context.Context) (resu func (p *splunkProvisioner) SetClusterInMaintenanceMode(ctx context.Context, mode bool) error { return p.gateway.SetClusterInMaintenanceMode(ctx, mode) } + +func (p *splunkProvisioner) IsClusterInMaintenanceMode(ctx context.Context) (bool, error) { + return p.gateway.IsClusterInMaintenanceMode(ctx) +} diff --git a/pkg/provisioner/splunk/implementation/splunk_test.go b/pkg/provisioner/splunk/implementation/splunk_test.go index b4fdbf94d..e6a7ef6b5 100644 --- a/pkg/provisioner/splunk/implementation/splunk_test.go +++ b/pkg/provisioner/splunk/implementation/splunk_test.go @@ -36,7 +36,7 @@ func setCreds(t *testing.T) provisioner.Provisioner { return provisioner } -func TestSetClusterManagerStatus(t *testing.T) { +func TestGetClusterManagerStatus(t *testing.T) { callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner) (*[]managermodel.ClusterManagerHealthContent, error) { healthData := []managermodel.ClusterManagerHealthContent{} return &healthData, nil @@ -46,7 +46,7 @@ func TestSetClusterManagerStatus(t *testing.T) { ctx := context.TODO() - _, err := provisioner.SetClusterManagerStatus(ctx, conditions) + _, err := provisioner.GetClusterManagerStatus(ctx, conditions) if err != nil { t.Errorf("fixture: error in set cluster manager %v", err) } @@ -94,7 +94,7 @@ func TestSetClusterManagerMultiSiteStatus(t *testing.T) { ctx := context.TODO() - _, err := provisioner.SetClusterManagerStatus(ctx, conditions) + _, err := provisioner.GetClusterManagerStatus(ctx, conditions) if err != nil { t.Errorf("fixture: error in set cluster manager %v", err) } diff --git a/pkg/provisioner/splunk/provisioner.go b/pkg/provisioner/splunk/provisioner.go index d8d28103f..8884500df 100644 --- a/pkg/provisioner/splunk/provisioner.go +++ b/pkg/provisioner/splunk/provisioner.go @@ -4,29 +4,29 @@ import ( "context" splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" - gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" + model "github.com/splunk/splunk-operator/pkg/splunk/model" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EventPublisher is a function type for publishing events associated -// with gateway functions. -type EventPublisher func(ctx context.Context, eventType, reason, message string) - // Factory is the interface for creating new Provisioner objects. type Factory interface { - NewProvisioner(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher gateway.EventPublisher) (Provisioner, error) + NewProvisioner(ctx context.Context, sad *splunkmodel.SplunkCredentials, publisher model.EventPublisher) (Provisioner, error) } // Provisioner holds the state information for talking to // splunk provisioner backend. 
type Provisioner interface { - // SetClusterManagerStatus set cluster manager status - SetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) + // GetClusterManagerStatus set cluster manager status + GetClusterManagerStatus(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) // CheckClusterManagerHealth CheckClusterManagerHealth(ctx context.Context) (result provmodel.Result, err error) + //SetClusterInMaintenanceMode SetClusterInMaintenanceMode(ctx context.Context, mode bool) error + + // IsClusterInMaintenanceMode + IsClusterInMaintenanceMode(ctx context.Context) (bool, error) } diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index a1045d2f1..8cb18c5b6 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -26,12 +26,12 @@ import ( rclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/go-logr/logr" - gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" + model "github.com/splunk/splunk-operator/pkg/splunk/model" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -47,7 +47,7 @@ type splunkManager struct { // a debug logger configured for this host debugLog logr.Logger // an event publisher for recording significant events - publisher gateway.EventPublisher + publisher model.EventPublisher // credentials // gateway factory provisioner provisioner.Provisioner @@ -247,14 +247,16 @@ func (p *splunkManager) ApplyClusterManager(ctx context.Context, client splcommo finalResult := handleAppFrameworkActivity(ctx, client, cr, &cr.Status.AppContext, &cr.Spec.AppFrameworkConfig) result = *finalResult - } - // Verification of splunk instance update CR status - // We are using Conditions to update status information - provResult := provmodel.Result{} - provResult, err = p.provisioner.SetClusterManagerStatus(ctx, &cr.Status.Conditions) - if err != nil { - cr.Status.ErrorMessage = provResult.ErrorMessage + p.ReconcileClusterManagerMaintenanceMode(ctx, client, cr) + + // Verification of splunk instance update CR status + // We are using Conditions to update status information + provResult := provmodel.Result{} + provResult, err = p.provisioner.GetClusterManagerStatus(ctx, &cr.Status.Conditions) + if err != nil { + cr.Status.ErrorMessage = provResult.ErrorMessage + } } // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. 
@@ -266,6 +268,46 @@ func (p *splunkManager) ApplyClusterManager(ctx context.Context, client splcommo return result, nil } +func (p *splunkManager) ReconcileClusterManagerMaintenanceMode(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) { + var result reconcile.Result + var err error + var response bool + response, err = p.provisioner.IsClusterInMaintenanceMode(ctx) + if err != nil { + cr.Status.ErrorMessage = err.Error() + return result, err + } + + // Check if user asking to move Cluster to maintenance mode + cr.Status.MaintenanceMode = response + annotations := cr.GetAnnotations() + if annotations != nil { + if _, ok := annotations[enterpriseApi.ClusterManagerMaintenanceAnnotation]; ok { + if response { + // if cluster is already in maintenance mode return + return result, nil + } + // place cluster manager in maintenance mode + err = p.provisioner.SetClusterInMaintenanceMode(ctx, true) + if err != nil { + cr.Status.ErrorMessage = err.Error() + return result, err + } + cr.Status.MaintenanceMode = true + } else if response { + // if cluster manager is in maintenance mode and annotations is not set then + // unset maintenance mode + err = p.provisioner.SetClusterInMaintenanceMode(ctx, false) + if err != nil { + cr.Status.ErrorMessage = err.Error() + return result, err + } + cr.Status.MaintenanceMode = false + } + } + return result, err +} + // clusterManagerPodManager is used to manage the cluster manager pod type clusterManagerPodManager struct { log logr.Logger diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 85ca6f0eb..197c774fc 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -50,19 +50,19 @@ import ( //splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" ) -func setCreds(t *testing.T, c splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) manager.SplunkManager { +func setCreds(t *testing.T, c splcommon.ControllerClient, cr splcommon.MetaObject, spec enterpriseApi.CommonSplunkSpec) manager.SplunkManager { ctx := context.TODO() clusterManager := enterpriseApi.ClusterManager{} clusterManager.Name = "test" info := &managermodel.ReconcileInfo{ - Kind: cr.Kind, - CommonSpec: cr.Spec.CommonSplunkSpec, + Kind: cr.GroupVersionKind().Kind, + CommonSpec: spec, Client: c, Log: log.Log, - Namespace: cr.Namespace, - Name: cr.Name, + Namespace: cr.GetNamespace(), + Name: cr.GetName(), } - copier.Copy(info.MetaObject, cr.ObjectMeta) + copier.Copy(info.MetaObject, cr) publisher := func(ctx context.Context, eventType, reason, message string) {} mg := NewManagerFactory(true) manager, err := mg.NewManager(ctx, info, publisher) @@ -156,7 +156,7 @@ func TestApplyClusterManager(t *testing.T) { revised.ObjectMeta.DeletionTimestamp = &currentTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - manager := setCreds(t, c, &current) + manager := setCreds(t, c, &current, current.Spec.CommonSplunkSpec) _, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) //_, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) return true, err } @@ -170,7 +170,7 @@ func TestApplyClusterManager(t *testing.T) { c := spltest.NewMockClient() _ = errors.New(splcommon.Rerr) - manager := setCreds(t, c, &current) + manager := setCreds(t, c, &current, current.Spec.CommonSplunkSpec) _, err
:= manager.ApplyClusterManager(ctx, c, &current) if err == nil { t.Errorf("Expected error") @@ -612,7 +612,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { } // Without S3 keys, ApplyClusterManager should fail - manager := setCreds(t, client, &current) + manager := setCreds(t, client, &current, current.Spec.CommonSplunkSpec) _, err := manager.ApplyClusterManager(ctx, client, &current) if err == nil { t.Errorf("ApplyClusterManager should fail without S3 secrets configured") @@ -642,7 +642,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager)) + manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager), cr.(*enterpriseApi.ClusterManager).Spec.CommonSplunkSpec) _, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) return err } @@ -897,7 +897,7 @@ func TestAppFrameworkApplyClusterManagerShouldNotFail(t *testing.T) { t.Errorf(err.Error()) } - manager := setCreds(t, client, &cm) + manager := setCreds(t, client, &cm, cm.Spec.CommonSplunkSpec) _, err = manager.ApplyClusterManager(ctx, client, &cm) if err != nil { t.Errorf("ApplyClusterManager should not have returned error here.") @@ -993,7 +993,7 @@ func TestApplyCLusterManagerDeletion(t *testing.T) { t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume) } - manager := setCreds(t, c, &cm) + manager := setCreds(t, c, &cm, cm.Spec.CommonSplunkSpec) _, err = manager.ApplyClusterManager(ctx, c, &cm) if err != nil { t.Errorf("ApplyClusterManager should not have returned error here.") diff --git a/pkg/splunk/enterprise/events.go b/pkg/splunk/enterprise/events.go index f05917f18..222de4e3c 100644 --- a/pkg/splunk/enterprise/events.go +++ b/pkg/splunk/enterprise/events.go @@ -55,12 +55,17 @@ func (k *K8EventPublisher) publishEvent(ctx context.Context, eventType, reason, // based on the custom resource instance type find name, type and create new event switch v := k.instance.(type) { case *enterpriseApi.Standalone: - case *enterpriseApiV3.LicenseMaster: + event = v.NewEvent(eventType, reason, message) case *enterpriseApi.LicenseManager: + event = v.NewEvent(eventType, reason, message) case *enterpriseApi.IndexerCluster: + event = v.NewEvent(eventType, reason, message) case *enterpriseApi.ClusterManager: + event = v.NewEvent(eventType, reason, message) case *enterpriseApiV3.ClusterMaster: + event = v.NewEvent(eventType, reason, message) case *enterpriseApi.MonitoringConsole: + event = v.NewEvent(eventType, reason, message) case *enterpriseApi.SearchHeadCluster: event = v.NewEvent(eventType, reason, message) default: diff --git a/pkg/splunk/enterprise/factory.go b/pkg/splunk/enterprise/factory.go index 744ffe27e..780fe2146 100644 --- a/pkg/splunk/enterprise/factory.go +++ b/pkg/splunk/enterprise/factory.go @@ -8,7 +8,6 @@ import ( //model "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" - gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" splunkprovisionerimpl "github.com/splunk/splunk-operator/pkg/provisioner/splunk/implementation" manager "github.com/splunk/splunk-operator/pkg/splunk" @@ -16,7 +15,7 @@ import ( types "github.com/splunk/splunk-operator/pkg/splunk/model" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" //cmmodel
"github.com/splunk/splunk-operator/pkg/provisioner/splunk/cluster-manager/model" - + model "github.com/splunk/splunk-operator/pkg/splunk/model" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -44,7 +43,7 @@ func (f *splunkManagerFactory) init(runInTestMode bool) error { return nil } -func (f splunkManagerFactory) splunkManager(ctx context.Context, info *types.ReconcileInfo, publisher gateway.EventPublisher) (*splunkManager, error) { +func (f splunkManagerFactory) splunkManager(ctx context.Context, info *types.ReconcileInfo, publisher model.EventPublisher) (*splunkManager, error) { provisionerLogger := log.FromContext(ctx) reqLogger := log.FromContext(ctx) f.log = reqLogger.WithName("splunkProvisioner") @@ -100,6 +99,6 @@ func (f splunkManagerFactory) splunkManager(ctx context.Context, info *types.Rec // NewProvisioner returns a new Splunk Provisioner using global // configuration for finding the Splunk services. -func (f splunkManagerFactory) NewManager(ctx context.Context, info *types.ReconcileInfo, publisher gateway.EventPublisher) (manager.SplunkManager, error) { +func (f splunkManagerFactory) NewManager(ctx context.Context, info *types.ReconcileInfo, publisher model.EventPublisher) (manager.SplunkManager, error) { return f.splunkManager(ctx, info, publisher) } diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index b5ba8e997..27af9af64 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -187,6 +187,8 @@ func (p *splunkManager) ApplyIndexerClusterManager(ctx context.Context, client s // get the pod image name if v.Spec.Containers[0].Image != cr.Spec.Image { // image do not match that means its image upgrade + eventPublisher.Normal(ctx, "version_upgrade", fmt.Sprintf("image change v.spec.Containers[0].Image=%s cr.Spec.Image=%s", v.Spec.Containers[0].Image, cr.Spec.Image)) + scopedLog.Info("image change enabled", "v.spec.Containers[0].Image", v.Spec.Containers[0].Image, "cr.Spec.Image", cr.Spec.Image) versionUpgrade = true break } @@ -202,7 +204,8 @@ func (p *splunkManager) ApplyIndexerClusterManager(ctx context.Context, client s return result, err } } else { - err = p.provisioner.SetClusterInMaintenanceMode(ctx, true) + + err = p.SetClusterInMaintenanceMode(ctx, client, cr, true) if err != nil { eventPublisher.Warning(ctx, "SetClusterInMaintenanceMode", fmt.Sprintf("Unable to enable cluster manager maintenance mode %s", err.Error())) return result, err @@ -285,7 +288,7 @@ func (p *splunkManager) ApplyIndexerClusterManager(ctx context.Context, client s return result, err } - err = p.provisioner.SetClusterInMaintenanceMode(ctx, false) + err = p.SetClusterInMaintenanceMode(ctx, client, cr, false) if err != nil { eventPublisher.Warning(ctx, "SetClusterInMaintenanceMode", fmt.Sprintf("Unable to disable cluster manager maintenance mode %s", err.Error())) return result, err @@ -299,6 +302,44 @@ func (p *splunkManager) ApplyIndexerClusterManager(ctx context.Context, client s return result, nil } +func (p *splunkManager) SetClusterInMaintenanceMode(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster, value bool) error { + + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("changeClusterManagerAnnotations").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + eventPublisher, _ := newK8EventPublisher(client, cr) + + clusterManagerInstance := &enterpriseApi.ClusterManager{} + if len(cr.Spec.ClusterManagerRef.Name) == 0 { + return 
fmt.Errorf("cluster manager not found") + } + + namespacedName := types.NamespacedName{ + Namespace: cr.GetNamespace(), + Name: cr.Spec.ClusterManagerRef.Name, + } + err := client.Get(ctx, namespacedName, clusterManagerInstance) + if err != nil { + return err + } + + annotations := cr.GetAnnotations() + if value { + annotations[enterpriseApi.ClusterManagerMaintenanceAnnotation] = "" + scopedLog.Info("set cluster manager in maintenance mode") + eventPublisher.Normal(ctx, "ClusterManager", "set cluster manager in maintenance mode") + } else { + delete(annotations, enterpriseApi.ClusterManagerMaintenanceAnnotation) + scopedLog.Info("unset cluster manager in maintenance mode") + eventPublisher.Normal(ctx, "ClusterManager", "unset cluster manager in maintenance mode") + } + clusterManagerInstance.Annotations = annotations + err = client.Update(ctx, cr) + if err != nil { + return err + } + return nil +} + // ApplyIndexerCluster reconciles the state of a Splunk Enterprise indexer cluster for Older CM CRDs. func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error) { diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index ad572de10..a5b144808 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -22,6 +22,7 @@ import ( "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" + provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" appsv1 "k8s.io/api/apps/v1" @@ -36,7 +37,7 @@ import ( ) // ApplyLicenseManager reconciles the state for the Splunk Enterprise license manager. -func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (reconcile.Result, error) { +func (p *splunkManager) ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (reconcile.Result, error) { // unless modified, reconcile for this object will be requeued after 5 seconds result := reconcile.Result{ @@ -178,6 +179,14 @@ func ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, if err != nil { return result, err } + + // Verification of splunk instance update CR status + // We are using Conditions to update status information + provResult := provmodel.Result{} + provResult, err = p.provisioner.GetClusterManagerStatus(ctx, &cr.Status.Conditions) + if err != nil { + cr.Status.ErrorMessage = provResult.ErrorMessage + } } // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. 
diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index 1c331cd43..befe2f5c5 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -88,7 +88,8 @@ func TestApplyLicenseManager(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyLicenseManager(context.Background(), c, cr.(*enterpriseApi.LicenseManager)) + manager := setCreds(t, c, cr.(*enterpriseApi.LicenseManager), cr.(*enterpriseApi.LicenseManager).Spec.CommonSplunkSpec) + _, err := manager.ApplyLicenseManager(context.Background(), c, cr.(*enterpriseApi.LicenseManager)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyLicenseManager", &current, revised, createCalls, updateCalls, reconcile, true) @@ -98,7 +99,8 @@ func TestApplyLicenseManager(t *testing.T) { revised.ObjectMeta.DeletionTimestamp = &currentTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - _, err := ApplyLicenseManager(context.Background(), c, cr.(*enterpriseApi.LicenseManager)) + manager := setCreds(t, c, cr.(*enterpriseApi.LicenseManager), cr.(*enterpriseApi.LicenseManager).Spec.CommonSplunkSpec) + _, err := manager.ApplyLicenseManager(context.Background(), c, cr.(*enterpriseApi.LicenseManager)) return true, err } splunkDeletionTester(t, revised, deleteFunc) @@ -107,7 +109,8 @@ func TestApplyLicenseManager(t *testing.T) { c := spltest.NewMockClient() ctx := context.TODO() current.Spec.LivenessInitialDelaySeconds = -1 - _, err := ApplyLicenseManager(ctx, c, &current) + manager := setCreds(t, c, &current, current.Spec.CommonSplunkSpec) + _, err := manager.ApplyLicenseManager(ctx, c, &current) if err == nil { t.Errorf("Expected error") } diff --git a/pkg/splunk/manager.go b/pkg/splunk/manager.go index 207192e50..264670774 100644 --- a/pkg/splunk/manager.go +++ b/pkg/splunk/manager.go @@ -6,22 +6,22 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" enterpriseApi "github.com/splunk/splunk-operator/api/v4" - gateway "github.com/splunk/splunk-operator/pkg/gateway/splunk/services" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + model "github.com/splunk/splunk-operator/pkg/splunk/model" types "github.com/splunk/splunk-operator/pkg/splunk/model" ) type Factory interface { - NewManager(ctx context.Context, info *types.ReconcileInfo, publisher gateway.EventPublisher) (SplunkManager, error) + NewManager(ctx context.Context, info *types.ReconcileInfo, publisher model.EventPublisher) (SplunkManager, error) } type SplunkManager interface { ApplyClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.ClusterManager) (reconcile.Result, error) //ApplyClusterMaster(ctx context.Context, cr *enterpriseApiV3.ClusterMaster) (reconcile.Result, error) ApplyIndexerClusterManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error) + ApplyLicenseManager(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.LicenseManager) (reconcile.Result, error) //ApplyMonitoringConsole(ctx context.Context, cr *enterpriseApi.MonitoringConsole) (reconcile.Result, error) //ApplySearchHeadCluster(ctx context.Context, cr *enterpriseApi.SearchHeadCluster) (reconcile.Result, error) //ApplyStandalone(ctx context.Context, cr *enterpriseApi.Standalone) (reconcile.Result, error) - //ApplyLicenseManager(ctx context.Context, cr *enterpriseApi.LicenseManager)
(reconcile.Result, error) //ApplyLicenseMaster(ctx context.Context, cr *enterpriseApiV3.LicenseMaster) (reconcile.Result, error) } diff --git a/pkg/splunk/model/types.go b/pkg/splunk/model/types.go index 0b4e7909e..db5c735fe 100644 --- a/pkg/splunk/model/types.go +++ b/pkg/splunk/model/types.go @@ -1,6 +1,8 @@ package model import ( + "context" + "github.com/go-logr/logr" enterpriseApi "github.com/splunk/splunk-operator/api/v4" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" @@ -8,6 +10,10 @@ import ( ctrl "sigs.k8s.io/controller-runtime" ) +// EventPublisher is a function type for publishing events associated +// with gateway functions. +type EventPublisher func(ctx context.Context, eventType, reason, message string) + // Instead of passing a zillion arguments to the action of a phase, // hold them in a context type ReconcileInfo struct { From 1855c51b10ed41e21f698e2b54bcefc95e78bb54 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Tue, 1 Aug 2023 17:05:58 -0700 Subject: [PATCH 79/85] working license manager verification code Signed-off-by: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> --- controllers/licensemanager_controller.go | 22 +- .../license-manager/fixture/license.json | 372 +++++++++++++----- .../fixture/license_fixture.go | 72 +++- .../fixture/license_group.json | 298 +++++++++----- .../fixture/license_local_peer.json | 201 +++++----- .../fixture/license_message.json | 116 +----- .../fixture/license_peers.json | 165 +++----- .../fixture/license_pools.json | 256 +++++++----- .../fixture/license_stack.json | 237 ++++++----- .../fixture/license_usage.json | 148 +++---- .../implementation/license_impl.go | 71 +++- .../model/services/license/license_types.go | 103 +++++ .../splunk/implementation/license.go | 40 +- .../splunk/implementation/license_test.go | 49 +++ pkg/provisioner/splunk/provisioner.go | 5 + pkg/splunk/enterprise/clustermanager_test.go | 28 +- pkg/splunk/enterprise/configuration_test.go | 2 +- pkg/splunk/enterprise/indexercluster_test.go | 2 +- pkg/splunk/enterprise/licensemanager.go | 10 +- pkg/splunk/enterprise/licensemanager_test.go | 27 +- pkg/splunk/enterprise/licensemaster_test.go | 2 +- pkg/splunk/enterprise/util_test.go | 2 +- 22 files changed, 1377 insertions(+), 851 deletions(-) create mode 100644 pkg/provisioner/splunk/implementation/license_test.go diff --git a/controllers/licensemanager_controller.go b/controllers/licensemanager_controller.go index 20af31fdd..3c77ad6b0 100644 --- a/controllers/licensemanager_controller.go +++ b/controllers/licensemanager_controller.go @@ -24,6 +24,7 @@ import ( "github.com/pkg/errors" common "github.com/splunk/splunk-operator/controllers/common" enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + managermodel "github.com/splunk/splunk-operator/pkg/splunk/model" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -111,7 +112,26 @@ func (r *LicenseManagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque // ApplyLicenseManager adding to handle unit test case var ApplyLicenseManager = func(ctx context.Context, client client.Client, instance *enterpriseApi.LicenseManager) (reconcile.Result, error) { - return enterprise.ApplyLicenseManager(ctx, client, instance) + // match the provisioner.EventPublisher interface + publishEvent := func(ctx context.Context, eventType, reason, message string) { + instance.NewEvent(eventType, reason, message) + } + info := 
&managermodel.ReconcileInfo{ + Kind: instance.Kind, + CommonSpec: instance.Spec.CommonSplunkSpec, + Client: client, + Log: log.FromContext(ctx), + Namespace: instance.GetNamespace(), + Name: instance.GetName(), + MetaObject: instance, + } + //copier.Copy(info.MetaObject, instance.ObjectMeta) + mg := enterprise.NewManagerFactory(false) + manager, err := mg.NewManager(ctx, info, publishEvent) + if err != nil { + instance.NewEvent("Warning", "ApplyClusterManager", err.Error()) + } + return manager.ApplyLicenseManager(ctx, client, instance) } // SetupWithManager sets up the controller with the Manager. diff --git a/pkg/gateway/splunk/license-manager/fixture/license.json b/pkg/gateway/splunk/license-manager/fixture/license.json index 41a8d026b..9c907c0db 100644 --- a/pkg/gateway/splunk/license-manager/fixture/license.json +++ b/pkg/gateway/splunk/license-manager/fixture/license.json @@ -1,104 +1,286 @@ { - "links": {}, - "origin": "https://localhost:8089/services/cluster/manager/info", - "updated": "2022-07-18T23:54:50+00:00", - "generator": { - "build": "6818ac46f2ec", - "version": "9.0.0" - }, - "entry": [ - { - "name": "master", - "id": "https://localhost:8089/services/cluster/manager/info/master", + "links": { + "create": "/services/licenser/licenses/_new" + }, + "origin": "https://splunk-lm-license-manager-service.test:8089/services/licenser/licenses", + "updated": "2023-08-01T23:14:56+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987", + "id": "https://splunk-lm-license-manager-service.test:8089/services/licenser/licenses/455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987", "updated": "1970-01-01T00:00:00+00:00", "links": { - "alternate": "/services/cluster/manager/info/master", - "list": "/services/cluster/manager/info/master" + "alternate": "/services/licenser/licenses/455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987", + "list": "/services/licenser/licenses/455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987", + "edit": "/services/licenser/licenses/455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987" }, "author": "system", "acl": { - "app": "", - "can_list": true, - "can_write": true, - "modifiable": false, - "owner": "system", - "perms": { - "read": [ - "admin", - "splunk-system-role" - ], - "write": [ - "admin", - "splunk-system-role" - ] - }, - "removable": false, - "sharing": "system" + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" }, "content": { - "active_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "apply_bundle_status": { - "invalid_bundle": { - "bundle_path": "", - "bundle_validation_errors_on_master": [], - "checksum": "", - "timestamp": 0 - }, - "reload_bundle_issued": false, - "status": "None" - }, - "available_sites": "[site1, site2]", - "backup_and_restore_primaries": false, - "controlled_rolling_restart_flag": false, - "eai:acl": null, - "forwarder_site_failover": "", - "indexing_ready_flag": true, - "initialized_flag": true, - "label": "splunk-cm-cluster-master-0", - "last_check_restart_bundle_result": false, - 
"last_dry_run_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "last_validated_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "is_valid_bundle": true, - "timestamp": 1657658326 - }, - "latest_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "maintenance_mode": false, - "multisite": true, - "previous_active_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "primaries_backup_status": "No on-going (or) completed primaries backup yet. Check back again in few minutes if you expect a backup.", - "quiet_period_flag": false, - "rolling_restart_flag": false, - "rolling_restart_or_upgrade": false, - "service_ready_flag": true, - "site_replication_factor": "{ origin:1, total:2 }", - "site_search_factor": "{ origin:1, total:2 }", - "start_time": 1657658831, - "summary_replication": "false" + "add_ons": null, + "allowedRoles": [], + "assignableRoles": [], + "creation_time": 1688108400, + "disabled_features": [], + "eai:acl": null, + "expiration_time": 1704095999, + "features": [ + "Acceleration", + "AdvancedSearchCommands", + "AdvancedXML", + "Alerting", + "AllowDuplicateKeys", + "ArchiveToHdfs", + "Auth", + "CanBeRemoteMaster", + "ConditionalLicensingEnforcement", + "CustomRoles", + "DeployClient", + "DeployServer", + "DistSearch", + "FwdData", + "GuestPass", + "KVStore", + "LDAPAuth", + "LocalSearch", + "MultifactorAuth", + "MultisiteClustering", + "NontableLookups", + "RcvData", + "RcvSearch", + "RollingWindowAlerts", + "SAMLAuth", + "ScheduledAlerts", + "ScheduledReports", + "ScheduledSearch", + "ScriptedAuth", + "SearchheadPooling", + "SigningProcessor", + "SplunkWeb", + "SyslogOutputProcessor", + "UnisiteClustering" + ], + "group_id": "Enterprise", + "guid": "2A327594-08E4-48BA-A001-3EB4C1475910", + "is_unlimited": false, + "label": "Splunk Internal License DO NOT DISTRIBUTE", + "license_hash": "455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987", + "max_retention_size": 0, + "max_stack_quota": 18446744073709552000, + "max_users": 0, + "max_violations": 5, + "notes": "", + "quota": 53687091200, + "relative_expiration_interval": 0, + "relative_expiration_start": 0, + "sourcetypes": [], + "stack_id": "enterprise", + "status": "VALID", + "subgroup_id": "Production", + "type": "enterprise", + "window_period": 30 } - } - ], - "paging": { - "total": 1, - "perPage": 30, - "offset": 0 - }, - "messages": [] -} \ No newline at end of file + }, + { + "name": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD", + "id": "https://splunk-lm-license-manager-service.test:8089/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD", + "list": "/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD", + "edit": "/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + 
"write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "add_ons": null, + "allowedRoles": [], + "assignableRoles": [], + "creation_time": 1277017200, + "disabled_features": [ + "Acceleration", + "AdvancedSearchCommands", + "AdvancedXML", + "ArchiveToHdfs", + "ConditionalLicensingEnforcement", + "CustomRoles", + "GuestPass", + "KVStore", + "LDAPAuth", + "MultifactorAuth", + "MultisiteClustering", + "NontableLookups", + "RollingWindowAlerts", + "SAMLAuth", + "ScheduledAlerts", + "ScheduledReports", + "ScriptedAuth", + "SearchheadPooling", + "UnisiteClustering" + ], + "eai:acl": null, + "expiration_time": 2147483647, + "features": [ + "Auth", + "DeployClient", + "FwdData", + "RcvData", + "SigningProcessor", + "SplunkWeb", + "SyslogOutputProcessor" + ], + "group_id": "Forwarder", + "guid": "11111111-1111-1111-1111-111111111111", + "is_unlimited": false, + "label": "Splunk Forwarder", + "license_hash": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD", + "max_retention_size": 0, + "max_stack_quota": 18446744073709552000, + "max_users": 4294967295, + "max_violations": 5, + "notes": "", + "quota": 1048576, + "relative_expiration_interval": 0, + "relative_expiration_start": 0, + "sourcetypes": [], + "stack_id": "forwarder", + "status": "VALID", + "subgroup_id": "Production", + "type": "forwarder", + "window_period": 30 + } + }, + { + "name": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "id": "https://splunk-lm-license-manager-service.test:8089/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "list": "/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "edit": "/services/licenser/licenses/FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "add_ons": null, + "allowedRoles": [], + "assignableRoles": [], + "creation_time": 1277017200, + "disabled_features": [ + "Acceleration", + "AdvancedSearchCommands", + "AdvancedXML", + "ArchiveToHdfs", + "ConditionalLicensingEnforcement", + "CustomRoles", + "GuestPass", + "LDAPAuth", + "MultifactorAuth", + "MultisiteClustering", + "NontableLookups", + "RollingWindowAlerts", + "SAMLAuth", + "ScheduledAlerts", + "ScheduledReports", + "ScriptedAuth", + "SearchheadPooling", + "UnisiteClustering" + ], + "eai:acl": null, + "expiration_time": 2147483647, + "features": [ + "FwdData", + "KVStore", + "LocalSearch", + "RcvData", + "ScheduledSearch", + "SigningProcessor", + "SplunkWeb" + ], + "group_id": "Free", + "guid": "00000000-0000-0000-0000-000000000000", + "is_unlimited": false, + "label": "Splunk Free", + "license_hash": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "max_retention_size": 0, + "max_stack_quota": 18446744073709552000, + "max_users": 4294967295, + "max_violations": 3, + "notes": "", + "quota": 524288000, + "relative_expiration_interval": 0, + "relative_expiration_start": 0, + "sourcetypes": [], + "stack_id": "free", + "status": "VALID", + 
"subgroup_id": "Production", + "type": "free", + "window_period": 30 + } + } + ], + "paging": { + "total": 3, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] + } diff --git a/pkg/gateway/splunk/license-manager/fixture/license_fixture.go b/pkg/gateway/splunk/license-manager/fixture/license_fixture.go index 8f3b20649..d2ff5bd3b 100644 --- a/pkg/gateway/splunk/license-manager/fixture/license_fixture.go +++ b/pkg/gateway/splunk/license-manager/fixture/license_fixture.go @@ -2,6 +2,7 @@ package fixture import ( "context" + "encoding/json" "fmt" "os" @@ -175,7 +176,16 @@ func (p *fixtureGateway) GetLicense(ctx context.Context) (*[]licensemodel.Licens contentList := []licensemodel.License{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.License) + var content licensemodel.License + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + //content = entry.Content.(licensemodel.License) + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil @@ -223,7 +233,15 @@ func (p *fixtureGateway) GetLicenseLocalPeer(ctx context.Context) (*[]licensemod contentList := []licensemodel.LicenseLocalPeer{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.LicenseLocalPeer) + var content licensemodel.LicenseLocalPeer + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil @@ -271,7 +289,15 @@ func (p *fixtureGateway) GetLicenseMessage(ctx context.Context) (*[]licensemodel contentList := []licensemodel.LicenseMessage{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.LicenseMessage) + var content licensemodel.LicenseMessage + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil @@ -319,7 +345,15 @@ func (p *fixtureGateway) GetLicensePools(ctx context.Context) (*[]licensemodel.L contentList := []licensemodel.LicensePool{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.LicensePool) + var content licensemodel.LicensePool + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil @@ -367,7 +401,15 @@ func (p *fixtureGateway) GetLicensePeers(context context.Context) (*[]licensemod contentList := []licensemodel.LicensePeer{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.LicensePeer) + var content licensemodel.LicensePeer + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil @@ -415,7 +457,15 @@ func (p *fixtureGateway) GetLicenseUsage(ctx context.Context) (*[]licensemodel.L contentList := []licensemodel.LicenseUsage{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.LicenseUsage) + var content licensemodel.LicenseUsage + s, 
err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil @@ -463,7 +513,15 @@ func (p *fixtureGateway) GetLicenseStacks(ctx context.Context) (*[]licensemodel. contentList := []licensemodel.LicenseStack{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.LicenseStack) + var content licensemodel.LicenseStack + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil diff --git a/pkg/gateway/splunk/license-manager/fixture/license_group.json b/pkg/gateway/splunk/license-manager/fixture/license_group.json index 41a8d026b..b341fcf81 100644 --- a/pkg/gateway/splunk/license-manager/fixture/license_group.json +++ b/pkg/gateway/splunk/license-manager/fixture/license_group.json @@ -1,104 +1,198 @@ { - "links": {}, - "origin": "https://localhost:8089/services/cluster/manager/info", - "updated": "2022-07-18T23:54:50+00:00", - "generator": { - "build": "6818ac46f2ec", - "version": "9.0.0" - }, - "entry": [ - { - "name": "master", - "id": "https://localhost:8089/services/cluster/manager/info/master", - "updated": "1970-01-01T00:00:00+00:00", - "links": { - "alternate": "/services/cluster/manager/info/master", - "list": "/services/cluster/manager/info/master" - }, - "author": "system", - "acl": { - "app": "", - "can_list": true, - "can_write": true, - "modifiable": false, - "owner": "system", - "perms": { - "read": [ - "admin", - "splunk-system-role" - ], - "write": [ - "admin", - "splunk-system-role" - ] - }, - "removable": false, - "sharing": "system" - }, - "content": { - "active_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "apply_bundle_status": { - "invalid_bundle": { - "bundle_path": "", - "bundle_validation_errors_on_master": [], - "checksum": "", - "timestamp": 0 - }, - "reload_bundle_issued": false, - "status": "None" - }, - "available_sites": "[site1, site2]", - "backup_and_restore_primaries": false, - "controlled_rolling_restart_flag": false, - "eai:acl": null, - "forwarder_site_failover": "", - "indexing_ready_flag": true, - "initialized_flag": true, - "label": "splunk-cm-cluster-master-0", - "last_check_restart_bundle_result": false, - "last_dry_run_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "last_validated_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "is_valid_bundle": true, - "timestamp": 1657658326 - }, - "latest_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "maintenance_mode": false, - "multisite": true, - "previous_active_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "primaries_backup_status": "No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.", - "quiet_period_flag": false, - "rolling_restart_flag": false, - "rolling_restart_or_upgrade": false, - "service_ready_flag": true, - "site_replication_factor": "{ origin:1, total:2 }", - "site_search_factor": "{ origin:1, total:2 }", - "start_time": 1657658831, - "summary_replication": "false" - } - } - ], - "paging": { - "total": 1, - "perPage": 30, - "offset": 0 - }, - "messages": [] + "links": {}, + "origin": "https://localhost:8089/services/licenser/groups", + "updated": "2023-08-01T21:47:15+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "Enterprise", + "id": "https://localhost:8089/services/licenser/groups/Enterprise", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/groups/Enterprise", + "list": "/services/licenser/groups/Enterprise", + "edit": "/services/licenser/groups/Enterprise" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "is_active": true, + "stack_ids": [ + "enterprise" + ] + } + }, + { + "name": "Forwarder", + "id": "https://localhost:8089/services/licenser/groups/Forwarder", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/groups/Forwarder", + "list": "/services/licenser/groups/Forwarder", + "edit": "/services/licenser/groups/Forwarder" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "is_active": false, + "stack_ids": [ + "forwarder" + ] + } + }, + { + "name": "Free", + "id": "https://localhost:8089/services/licenser/groups/Free", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/groups/Free", + "list": "/services/licenser/groups/Free", + "edit": "/services/licenser/groups/Free" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "is_active": false, + "stack_ids": [ + "free" + ] + } + }, + { + "name": "Lite", + "id": "https://localhost:8089/services/licenser/groups/Lite", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/groups/Lite", + "list": "/services/licenser/groups/Lite", + "edit": "/services/licenser/groups/Lite" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "is_active": false, + "stack_ids": [] + } + }, + { + "name": "Lite_Free", + "id": "https://localhost:8089/services/licenser/groups/Lite_Free", + "updated": "1970-01-01T00:00:00+00:00", + 
"links": { + "alternate": "/services/licenser/groups/Lite_Free", + "list": "/services/licenser/groups/Lite_Free", + "edit": "/services/licenser/groups/Lite_Free" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "is_active": false, + "stack_ids": [] + } + } + ], + "paging": { + "total": 5, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] } \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_local_peer.json b/pkg/gateway/splunk/license-manager/fixture/license_local_peer.json index 41a8d026b..bffde8278 100644 --- a/pkg/gateway/splunk/license-manager/fixture/license_local_peer.json +++ b/pkg/gateway/splunk/license-manager/fixture/license_local_peer.json @@ -1,104 +1,115 @@ { - "links": {}, - "origin": "https://localhost:8089/services/cluster/manager/info", - "updated": "2022-07-18T23:54:50+00:00", - "generator": { - "build": "6818ac46f2ec", - "version": "9.0.0" - }, - "entry": [ - { - "name": "master", - "id": "https://localhost:8089/services/cluster/manager/info/master", + "links": {}, + "origin": "https://splunk-lm-license-manager-service.test:8089/services/licenser/localpeer", + "updated": "2023-08-01T21:36:59+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "license", + "id": "https://splunk-lm-license-manager-service.test:8089/services/licenser/localpeer/license", "updated": "1970-01-01T00:00:00+00:00", "links": { - "alternate": "/services/cluster/manager/info/master", - "list": "/services/cluster/manager/info/master" + "alternate": "/services/licenser/localpeer/license", + "list": "/services/licenser/localpeer/license", + "edit": "/services/licenser/localpeer/license" }, "author": "system", "acl": { - "app": "", - "can_list": true, - "can_write": true, - "modifiable": false, - "owner": "system", - "perms": { - "read": [ - "admin", - "splunk-system-role" - ], - "write": [ - "admin", - "splunk-system-role" - ] - }, - "removable": false, - "sharing": "system" + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" }, "content": { - "active_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "apply_bundle_status": { - "invalid_bundle": { - "bundle_path": "", - "bundle_validation_errors_on_master": [], - "checksum": "", - "timestamp": 0 - }, - "reload_bundle_issued": false, - "status": "None" - }, - "available_sites": "[site1, site2]", - "backup_and_restore_primaries": false, - "controlled_rolling_restart_flag": false, - "eai:acl": null, - "forwarder_site_failover": "", - "indexing_ready_flag": true, - "initialized_flag": true, - "label": "splunk-cm-cluster-master-0", - "last_check_restart_bundle_result": false, - "last_dry_run_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "last_validated_bundle": { - "bundle_path": 
"/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "is_valid_bundle": true, - "timestamp": 1657658326 - }, - "latest_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "maintenance_mode": false, - "multisite": true, - "previous_active_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "primaries_backup_status": "No on-going (or) completed primaries backup yet. Check back again in few minutes if you expect a backup.", - "quiet_period_flag": false, - "rolling_restart_flag": false, - "rolling_restart_or_upgrade": false, - "service_ready_flag": true, - "site_replication_factor": "{ origin:1, total:2 }", - "site_search_factor": "{ origin:1, total:2 }", - "start_time": 1657658831, - "summary_replication": "false" + "add_ons": null, + "connection_timeout": 30, + "eai:acl": null, + "features": { + "AWSMarketplace": "DISABLED_DUE_TO_LICENSE", + "Acceleration": "ENABLED", + "AdvancedSearchCommands": "ENABLED", + "AdvancedXML": "ENABLED", + "Alerting": "ENABLED", + "AllowDuplicateKeys": "ENABLED", + "ArchiveToHdfs": "ENABLED", + "Auth": "ENABLED", + "CanBeRemoteMaster": "ENABLED", + "ConditionalLicensingEnforcement": "ENABLED", + "CustomRoles": "ENABLED", + "DeployClient": "ENABLED", + "DeployServer": "ENABLED", + "DisableQuotaEnforcement": "DISABLED_DUE_TO_LICENSE", + "DistSearch": "ENABLED", + "FwdData": "ENABLED", + "GuestPass": "ENABLED", + "HideQuotaWarnings": "DISABLED_DUE_TO_LICENSE", + "KVStore": "ENABLED", + "LDAPAuth": "ENABLED", + "LocalSearch": "ENABLED", + "MultifactorAuth": "ENABLED", + "MultisiteClustering": "ENABLED", + "NontableLookups": "ENABLED", + "RcvData": "ENABLED", + "RcvSearch": "ENABLED", + "ResetWarnings": "DISABLED_DUE_TO_LICENSE", + "RollingWindowAlerts": "ENABLED", + "SAMLAuth": "ENABLED", + "ScheduledAlerts": "ENABLED", + "ScheduledReports": "ENABLED", + "ScheduledSearch": "ENABLED", + "ScriptedAuth": "ENABLED", + "SearchheadPooling": "ENABLED", + "SigningProcessor": "ENABLED", + "SplunkWeb": "ENABLED", + "SubgroupId": "DISABLED_DUE_TO_LICENSE", + "SyslogOutputProcessor": "ENABLED", + "UnisiteClustering": "ENABLED" + }, + "guid": [ + "2A327594-08E4-48BA-A001-3EB4C1475910" + ], + "last_manager_contact_attempt_time": 1690925795, + "last_manager_contact_success_time": 1690925795, + "last_master_contact_attempt_time": 1690925795, + "last_master_contact_success_time": 1690925795, + "last_trackerdb_service_time": 0, + "license_keys": [ + "455A1EC0719F74DB8B13CD2FDA81D930A55EF21366AC9A3D096E2545B6D5D987" + ], + "manager_guid": "A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "manager_uri": "self", + "master_guid": "A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "master_uri": "self", + "peer_id": "A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "peer_label": "splunk-lm-license-manager-0", + "receive_timeout": 30, + "send_timeout": 30, + "slave_id": "A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "slave_label": "splunk-lm-license-manager-0", + "squash_threshold": 2000 } - } - ], - "paging": { - "total": 1, - "perPage": 30, - "offset": 0 - }, - "messages": [] -} \ No newline at end of file + } + ], + "paging": { + "total": 1, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] + } \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_message.json 
b/pkg/gateway/splunk/license-manager/fixture/license_message.json index 41a8d026b..6763913fa 100644 --- a/pkg/gateway/splunk/license-manager/fixture/license_message.json +++ b/pkg/gateway/splunk/license-manager/fixture/license_message.json @@ -1,104 +1,16 @@ { - "links": {}, - "origin": "https://localhost:8089/services/cluster/manager/info", - "updated": "2022-07-18T23:54:50+00:00", - "generator": { - "build": "6818ac46f2ec", - "version": "9.0.0" - }, - "entry": [ - { - "name": "master", - "id": "https://localhost:8089/services/cluster/manager/info/master", - "updated": "1970-01-01T00:00:00+00:00", - "links": { - "alternate": "/services/cluster/manager/info/master", - "list": "/services/cluster/manager/info/master" - }, - "author": "system", - "acl": { - "app": "", - "can_list": true, - "can_write": true, - "modifiable": false, - "owner": "system", - "perms": { - "read": [ - "admin", - "splunk-system-role" - ], - "write": [ - "admin", - "splunk-system-role" - ] - }, - "removable": false, - "sharing": "system" - }, - "content": { - "active_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "apply_bundle_status": { - "invalid_bundle": { - "bundle_path": "", - "bundle_validation_errors_on_master": [], - "checksum": "", - "timestamp": 0 - }, - "reload_bundle_issued": false, - "status": "None" - }, - "available_sites": "[site1, site2]", - "backup_and_restore_primaries": false, - "controlled_rolling_restart_flag": false, - "eai:acl": null, - "forwarder_site_failover": "", - "indexing_ready_flag": true, - "initialized_flag": true, - "label": "splunk-cm-cluster-master-0", - "last_check_restart_bundle_result": false, - "last_dry_run_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "last_validated_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "is_valid_bundle": true, - "timestamp": 1657658326 - }, - "latest_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "maintenance_mode": false, - "multisite": true, - "previous_active_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "primaries_backup_status": "No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.", - "quiet_period_flag": false, - "rolling_restart_flag": false, - "rolling_restart_or_upgrade": false, - "service_ready_flag": true, - "site_replication_factor": "{ origin:1, total:2 }", - "site_search_factor": "{ origin:1, total:2 }", - "start_time": 1657658831, - "summary_replication": "false" - } - } - ], - "paging": { - "total": 1, - "perPage": 30, - "offset": 0 - }, - "messages": [] + "links": {}, + "origin": "https://localhost:8089/services/licenser/messages", + "updated": "2023-08-01T21:48:06+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [], + "paging": { + "total": 0, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] } \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_peers.json b/pkg/gateway/splunk/license-manager/fixture/license_peers.json index 41a8d026b..a9c736894 100644 --- a/pkg/gateway/splunk/license-manager/fixture/license_peers.json +++ b/pkg/gateway/splunk/license-manager/fixture/license_peers.json @@ -1,104 +1,65 @@ { - "links": {}, - "origin": "https://localhost:8089/services/cluster/manager/info", - "updated": "2022-07-18T23:54:50+00:00", - "generator": { - "build": "6818ac46f2ec", - "version": "9.0.0" - }, - "entry": [ - { - "name": "master", - "id": "https://localhost:8089/services/cluster/manager/info/master", - "updated": "1970-01-01T00:00:00+00:00", - "links": { - "alternate": "/services/cluster/manager/info/master", - "list": "/services/cluster/manager/info/master" - }, - "author": "system", - "acl": { - "app": "", - "can_list": true, - "can_write": true, - "modifiable": false, - "owner": "system", - "perms": { - "read": [ - "admin", - "splunk-system-role" - ], - "write": [ - "admin", - "splunk-system-role" - ] - }, - "removable": false, - "sharing": "system" - }, - "content": { - "active_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "apply_bundle_status": { - "invalid_bundle": { - "bundle_path": "", - "bundle_validation_errors_on_master": [], - "checksum": "", - "timestamp": 0 - }, - "reload_bundle_issued": false, - "status": "None" - }, - "available_sites": "[site1, site2]", - "backup_and_restore_primaries": false, - "controlled_rolling_restart_flag": false, - "eai:acl": null, - "forwarder_site_failover": "", - "indexing_ready_flag": true, - "initialized_flag": true, - "label": "splunk-cm-cluster-master-0", - "last_check_restart_bundle_result": false, - "last_dry_run_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "last_validated_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "is_valid_bundle": true, - "timestamp": 1657658326 - }, - "latest_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "maintenance_mode": false, - "multisite": true, - "previous_active_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "primaries_backup_status": "No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.", - "quiet_period_flag": false, - "rolling_restart_flag": false, - "rolling_restart_or_upgrade": false, - "service_ready_flag": true, - "site_replication_factor": "{ origin:1, total:2 }", - "site_search_factor": "{ origin:1, total:2 }", - "start_time": 1657658831, - "summary_replication": "false" - } - } - ], - "paging": { - "total": 1, - "perPage": 30, - "offset": 0 - }, - "messages": [] + "links": {}, + "origin": "https://localhost:8089/services/licenser/peers", + "updated": "2023-08-01T21:48:40+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "id": "https://localhost:8089/servicesNS/nobody/system/licenser/peers/A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/servicesNS/nobody/system/licenser/peers/A7E343C4-26D4-47A8-86F4-56B3CAD86721", + "list": "/servicesNS/nobody/system/licenser/peers/A7E343C4-26D4-47A8-86F4-56B3CAD86721" + }, + "author": "nobody", + "acl": { + "app": "system", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "nobody", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "active_pool_ids": [ + "auto_generated_pool_enterprise" + ], + "eai:acl": null, + "label": "splunk-lm-license-manager-0", + "pool_ids": [ + "auto_generated_pool_enterprise", + "auto_generated_pool_forwarder", + "auto_generated_pool_free" + ], + "pool_suggestion": null, + "stack_ids": [ + "enterprise", + "forwarder", + "free" + ], + "warning_count": 0 + } + } + ], + "paging": { + "total": 1, + "perPage": 0, + "offset": 0 + }, + "messages": [] } \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_pools.json b/pkg/gateway/splunk/license-manager/fixture/license_pools.json index 41a8d026b..8f746870c 100644 --- a/pkg/gateway/splunk/license-manager/fixture/license_pools.json +++ b/pkg/gateway/splunk/license-manager/fixture/license_pools.json @@ -1,104 +1,156 @@ { - "links": {}, - "origin": "https://localhost:8089/services/cluster/manager/info", - "updated": "2022-07-18T23:54:50+00:00", - "generator": { - "build": "6818ac46f2ec", - "version": "9.0.0" - }, - "entry": [ - { - "name": "master", - "id": "https://localhost:8089/services/cluster/manager/info/master", - "updated": "1970-01-01T00:00:00+00:00", - "links": { - "alternate": "/services/cluster/manager/info/master", - "list": "/services/cluster/manager/info/master" - }, - "author": "system", - "acl": { - "app": "", - "can_list": true, - "can_write": true, - "modifiable": false, - "owner": "system", - "perms": { - "read": [ - "admin", - "splunk-system-role" - ], - "write": [ - "admin", - "splunk-system-role" - ] - }, - "removable": false, - "sharing": "system" - }, - "content": { - "active_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "apply_bundle_status": { - "invalid_bundle": { - "bundle_path": "", - "bundle_validation_errors_on_master": [], - "checksum": "", - "timestamp": 0 - }, - "reload_bundle_issued": false, - "status": "None" - }, - "available_sites": "[site1, site2]", - "backup_and_restore_primaries": false, - "controlled_rolling_restart_flag": false, - "eai:acl": null, - 
"forwarder_site_failover": "", - "indexing_ready_flag": true, - "initialized_flag": true, - "label": "splunk-cm-cluster-master-0", - "last_check_restart_bundle_result": false, - "last_dry_run_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "last_validated_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "is_valid_bundle": true, - "timestamp": 1657658326 - }, - "latest_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "maintenance_mode": false, - "multisite": true, - "previous_active_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "primaries_backup_status": "No on-going (or) completed primaries backup yet. Check back again in few minutes if you expect a backup.", - "quiet_period_flag": false, - "rolling_restart_flag": false, - "rolling_restart_or_upgrade": false, - "service_ready_flag": true, - "site_replication_factor": "{ origin:1, total:2 }", - "site_search_factor": "{ origin:1, total:2 }", - "start_time": 1657658831, - "summary_replication": "false" - } - } - ], - "paging": { - "total": 1, - "perPage": 30, - "offset": 0 - }, - "messages": [] + "links": { + "create": "/services/licenser/pools/_new", + "_reload": "/services/licenser/pools/_reload", + "_acl": "/services/licenser/pools/_acl" + }, + "origin": "https://localhost:8089/services/licenser/pools", + "updated": "2023-08-01T21:49:16+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "auto_generated_pool_enterprise", + "id": "https://localhost:8089/servicesNS/nobody/system/licenser/pools/auto_generated_pool_enterprise", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_enterprise", + "list": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_enterprise", + "_reload": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_enterprise/_reload", + "edit": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_enterprise", + "remove": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_enterprise" + }, + "author": "nobody", + "acl": { + "app": "system", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "nobody", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "description": "auto_generated_pool_enterprise", + "eai:acl": null, + "effective_quota": 53687091200, + "is_unlimited": false, + "peers": [], + "peers_usage_bytes": null, + "quota": "MAX", + "slaves": [], + "slaves_usage_bytes": null, + "stack_id": "enterprise", + "used_bytes": 0 + } + }, + { + "name": "auto_generated_pool_forwarder", + "id": "https://localhost:8089/servicesNS/nobody/system/licenser/pools/auto_generated_pool_forwarder", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_forwarder", + "list": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_forwarder", + "_reload": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_forwarder/_reload", + "edit": 
"/servicesNS/nobody/system/licenser/pools/auto_generated_pool_forwarder", + "remove": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_forwarder" + }, + "author": "nobody", + "acl": { + "app": "system", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "nobody", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "description": "auto_generated_pool_forwarder", + "eai:acl": null, + "effective_quota": 1048576, + "is_unlimited": false, + "peers": [], + "peers_usage_bytes": null, + "quota": "MAX", + "slaves": [], + "slaves_usage_bytes": null, + "stack_id": "forwarder", + "used_bytes": 0 + } + }, + { + "name": "auto_generated_pool_free", + "id": "https://localhost:8089/servicesNS/nobody/system/licenser/pools/auto_generated_pool_free", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_free", + "list": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_free", + "_reload": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_free/_reload", + "edit": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_free", + "remove": "/servicesNS/nobody/system/licenser/pools/auto_generated_pool_free" + }, + "author": "nobody", + "acl": { + "app": "system", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "nobody", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "description": "auto_generated_pool_free", + "eai:acl": null, + "effective_quota": 524288000, + "is_unlimited": false, + "peers": [], + "peers_usage_bytes": null, + "quota": "MAX", + "slaves": [], + "slaves_usage_bytes": null, + "stack_id": "free", + "used_bytes": 0 + } + } + ], + "paging": { + "total": 3, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] } \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_stack.json b/pkg/gateway/splunk/license-manager/fixture/license_stack.json index 41a8d026b..9d88adc99 100644 --- a/pkg/gateway/splunk/license-manager/fixture/license_stack.json +++ b/pkg/gateway/splunk/license-manager/fixture/license_stack.json @@ -1,104 +1,137 @@ { - "links": {}, - "origin": "https://localhost:8089/services/cluster/manager/info", - "updated": "2022-07-18T23:54:50+00:00", - "generator": { - "build": "6818ac46f2ec", - "version": "9.0.0" - }, - "entry": [ - { - "name": "master", - "id": "https://localhost:8089/services/cluster/manager/info/master", - "updated": "1970-01-01T00:00:00+00:00", - "links": { - "alternate": "/services/cluster/manager/info/master", - "list": "/services/cluster/manager/info/master" - }, - "author": "system", - "acl": { - "app": "", - "can_list": true, - "can_write": true, - "modifiable": false, - "owner": "system", - "perms": { - "read": [ - "admin", - "splunk-system-role" - ], - "write": [ - "admin", - "splunk-system-role" - ] - }, - "removable": false, - "sharing": "system" - }, - "content": { - "active_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "apply_bundle_status": { - "invalid_bundle": { - "bundle_path": "", - "bundle_validation_errors_on_master": [], 
- "checksum": "", - "timestamp": 0 - }, - "reload_bundle_issued": false, - "status": "None" - }, - "available_sites": "[site1, site2]", - "backup_and_restore_primaries": false, - "controlled_rolling_restart_flag": false, - "eai:acl": null, - "forwarder_site_failover": "", - "indexing_ready_flag": true, - "initialized_flag": true, - "label": "splunk-cm-cluster-master-0", - "last_check_restart_bundle_result": false, - "last_dry_run_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "last_validated_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "is_valid_bundle": true, - "timestamp": 1657658326 - }, - "latest_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "maintenance_mode": false, - "multisite": true, - "previous_active_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "primaries_backup_status": "No on-going (or) completed primaries backup yet. Check back again in few minutes if you expect a backup.", - "quiet_period_flag": false, - "rolling_restart_flag": false, - "rolling_restart_or_upgrade": false, - "service_ready_flag": true, - "site_replication_factor": "{ origin:1, total:2 }", - "site_search_factor": "{ origin:1, total:2 }", - "start_time": 1657658831, - "summary_replication": "false" - } - } - ], - "paging": { - "total": 1, - "perPage": 30, - "offset": 0 - }, - "messages": [] + "links": {}, + "origin": "https://localhost:8089/services/licenser/stacks", + "updated": "2023-08-01T21:50:11+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "enterprise", + "id": "https://localhost:8089/services/licenser/stacks/enterprise", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/stacks/enterprise", + "list": "/services/licenser/stacks/enterprise" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "cle_active": 1, + "eai:acl": null, + "is_unlimited": false, + "label": "Splunk Internal License DO NOT DISTRIBUTE", + "max_retention_size": 0, + "max_violations": 45, + "quota": 53687091200, + "type": "enterprise", + "window_period": 60 + } + }, + { + "name": "forwarder", + "id": "https://localhost:8089/services/licenser/stacks/forwarder", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/stacks/forwarder", + "list": "/services/licenser/stacks/forwarder" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "cle_active": 0, + "eai:acl": null, + "is_unlimited": false, + "label": "Splunk Forwarder", + "max_retention_size": 0, + "max_violations": 5, + "quota": 1048576, + "type": "forwarder", + "window_period": 30 + } + }, + { + "name": "free", + "id": 
"https://localhost:8089/services/licenser/stacks/free", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/stacks/free", + "list": "/services/licenser/stacks/free" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "admin", + "splunk-system-role" + ], + "write": [ + "admin", + "splunk-system-role" + ] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "cle_active": 0, + "eai:acl": null, + "is_unlimited": false, + "label": "Splunk Free", + "max_retention_size": 0, + "max_violations": 3, + "quota": 524288000, + "type": "free", + "window_period": 30 + } + } + ], + "paging": { + "total": 3, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] } \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/fixture/license_usage.json b/pkg/gateway/splunk/license-manager/fixture/license_usage.json index 41a8d026b..08c64b857 100644 --- a/pkg/gateway/splunk/license-manager/fixture/license_usage.json +++ b/pkg/gateway/splunk/license-manager/fixture/license_usage.json @@ -1,104 +1,48 @@ { - "links": {}, - "origin": "https://localhost:8089/services/cluster/manager/info", - "updated": "2022-07-18T23:54:50+00:00", - "generator": { - "build": "6818ac46f2ec", - "version": "9.0.0" - }, - "entry": [ - { - "name": "master", - "id": "https://localhost:8089/services/cluster/manager/info/master", - "updated": "1970-01-01T00:00:00+00:00", - "links": { - "alternate": "/services/cluster/manager/info/master", - "list": "/services/cluster/manager/info/master" - }, - "author": "system", - "acl": { - "app": "", - "can_list": true, - "can_write": true, - "modifiable": false, - "owner": "system", - "perms": { - "read": [ - "admin", - "splunk-system-role" - ], - "write": [ - "admin", - "splunk-system-role" - ] - }, - "removable": false, - "sharing": "system" - }, - "content": { - "active_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "apply_bundle_status": { - "invalid_bundle": { - "bundle_path": "", - "bundle_validation_errors_on_master": [], - "checksum": "", - "timestamp": 0 - }, - "reload_bundle_issued": false, - "status": "None" - }, - "available_sites": "[site1, site2]", - "backup_and_restore_primaries": false, - "controlled_rolling_restart_flag": false, - "eai:acl": null, - "forwarder_site_failover": "", - "indexing_ready_flag": true, - "initialized_flag": true, - "label": "splunk-cm-cluster-master-0", - "last_check_restart_bundle_result": false, - "last_dry_run_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "last_validated_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "is_valid_bundle": true, - "timestamp": 1657658326 - }, - "latest_bundle": { - "bundle_path": "/opt/splunk/var/run/splunk/cluster/remote-bundle/aabbb9c25a79c081cbd0b1aaf1c2425a-1657658326.bundle", - "checksum": "7351975980A20311463444E66492BDD5", - "timestamp": 1657658326 - }, - "maintenance_mode": false, - "multisite": true, - "previous_active_bundle": { - "bundle_path": "", - "checksum": "", - "timestamp": 0 - }, - "primaries_backup_status": "No on-going (or) completed primaries backup yet. 
Check back again in few minutes if you expect a backup.", - "quiet_period_flag": false, - "rolling_restart_flag": false, - "rolling_restart_or_upgrade": false, - "service_ready_flag": true, - "site_replication_factor": "{ origin:1, total:2 }", - "site_search_factor": "{ origin:1, total:2 }", - "start_time": 1657658831, - "summary_replication": "false" - } - } - ], - "paging": { - "total": 1, - "perPage": 30, - "offset": 0 - }, - "messages": [] + "links": {}, + "origin": "https://localhost:8089/services/licenser/usage", + "updated": "2023-08-01T21:50:49+00:00", + "generator": { + "build": "e9494146ae5c", + "version": "9.0.5" + }, + "entry": [ + { + "name": "license_usage", + "id": "https://localhost:8089/services/licenser/usage/license_usage", + "updated": "1970-01-01T00:00:00+00:00", + "links": { + "alternate": "/services/licenser/usage/license_usage", + "list": "/services/licenser/usage/license_usage" + }, + "author": "system", + "acl": { + "app": "", + "can_list": true, + "can_write": true, + "modifiable": false, + "owner": "system", + "perms": { + "read": [ + "*" + ], + "write": [] + }, + "removable": false, + "sharing": "system" + }, + "content": { + "eai:acl": null, + "peers_usage_bytes": 0, + "quota": 53687091200, + "slaves_usage_bytes": 0 + } + } + ], + "paging": { + "total": 1, + "perPage": 10000000, + "offset": 0 + }, + "messages": [] } \ No newline at end of file diff --git a/pkg/gateway/splunk/license-manager/implementation/license_impl.go b/pkg/gateway/splunk/license-manager/implementation/license_impl.go index cadcd281f..2018cbe56 100644 --- a/pkg/gateway/splunk/license-manager/implementation/license_impl.go +++ b/pkg/gateway/splunk/license-manager/implementation/license_impl.go @@ -2,6 +2,7 @@ package impl import ( "context" + "encoding/json" "net/http" "github.com/go-logr/logr" @@ -53,7 +54,15 @@ func (p *splunkGateway) GetLicenseGroup(ctx context.Context) (*[]licensemodel.Li contentList := []licensemodel.LicenseGroup{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.LicenseGroup) + var content licensemodel.LicenseGroup + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil @@ -86,7 +95,15 @@ func (p *splunkGateway) GetLicense(ctx context.Context) (*[]licensemodel.License contentList := []licensemodel.License{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.License) + var content licensemodel.License + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil @@ -152,7 +169,15 @@ func (p *splunkGateway) GetLicenseMessage(ctx context.Context) (*[]licensemodel. 
contentList := []licensemodel.LicenseMessage{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.LicenseMessage) + var content licensemodel.LicenseMessage + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil @@ -185,7 +210,15 @@ func (p *splunkGateway) GetLicensePools(ctx context.Context) (*[]licensemodel.Li contentList := []licensemodel.LicensePool{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.LicensePool) + var content licensemodel.LicensePool + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil @@ -218,7 +251,15 @@ func (p *splunkGateway) GetLicensePeers(context context.Context) (*[]licensemode contentList := []licensemodel.LicensePeer{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.LicensePeer) + var content licensemodel.LicensePeer + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil @@ -251,7 +292,15 @@ func (p *splunkGateway) GetLicenseUsage(ctx context.Context) (*[]licensemodel.Li contentList := []licensemodel.LicenseUsage{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.LicenseUsage) + var content licensemodel.LicenseUsage + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil @@ -284,7 +333,15 @@ func (p *splunkGateway) GetLicenseStacks(ctx context.Context) (*[]licensemodel.L contentList := []licensemodel.LicenseStack{} for _, entry := range envelop.Entry { - content := entry.Content.(licensemodel.LicenseStack) + var content licensemodel.LicenseStack + s, err := json.Marshal(entry.Content) + if err != nil { + return &contentList, nil + } + err = json.Unmarshal([]byte(s), &content) + if err != nil { + return &contentList, nil + } contentList = append(contentList, content) } return &contentList, nil diff --git a/pkg/gateway/splunk/model/services/license/license_types.go b/pkg/gateway/splunk/model/services/license/license_types.go index 05c0c650f..0870e8431 100644 --- a/pkg/gateway/splunk/model/services/license/license_types.go +++ b/pkg/gateway/splunk/model/services/license/license_types.go @@ -5,6 +5,8 @@ package license // A licenser group contains one or more licenser stacks that can operate concurrently. // Only one licenser group is active at any given time. type LicenseGroup struct { + IsActive string `json:"is_active,omitempty"` + StackIds []string `json:"stack_ids,omitempty"` } // https://:/services/licenser/licenses @@ -12,11 +14,94 @@ type LicenseGroup struct { // A license enables various features for a Splunk instance, including but not limited // to indexing quota, auth, search, forwarding. 
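Note on the license_fixture.go and license_impl.go hunks above: both gateways now convert entry.Content with a json.Marshal/json.Unmarshal round trip instead of a direct type assertion, presumably because Content is decoded from the REST envelope into a map[string]interface{}, so asserting it straight to a concrete model type would fail at runtime. The repeated block could be factored into a single helper; the sketch below is purely illustrative (it assumes a Go 1.18+ toolchain, and the package name is invented, not part of this patch), and unlike the patch it propagates decode failures instead of returning a nil error with a partial list.

package licenseutil // hypothetical location, shown only to illustrate the pattern

import "encoding/json"

// decodeEntryContent re-encodes a generically decoded entry body and
// unmarshals it into the requested model type. It captures the round-trip
// pattern repeated in the gateway hunks above.
func decodeEntryContent[T any](content interface{}) (T, error) {
	var out T
	raw, err := json.Marshal(content) // content is typically map[string]interface{}
	if err != nil {
		return out, err
	}
	err = json.Unmarshal(raw, &out)
	return out, err
}

A caller such as GetLicensePools could then use it as, for example:
content, err := decodeEntryContent[licensemodel.LicensePool](entry.Content)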
type License struct { + AddOns string `json:"add_ons,omitempty"` + AllowedRoles []string `json:"allowedRoles,omitempty"` + AssignableRoles []string `json:"assignableRoles,omitempty"` + CreationTime uint `json:"creation_time,omitempty"` + DisabledFeatures []string `json:"disabled_features,omitempty"` + ExpirationTime int `json:"expiration_time,omitempty"` + Features []string `json:"features,omitempty"` + GroupId string `json:"group_id,omitempty"` + Guid string `json:"guid,omitempty"` + IsUnlimited bool `json:"is_unlimited,omitempty"` + Label string `json:"label,omitempty"` + LicenseHash string `json:"license_hash,omitempty"` + MaxRetentionSize int `json:"max_retention_size,omitempty"` + MaxStackQuota float64 `json:"max_stack_quota,omitempty"` + MaxUsers int `json:"max_users,omitempty"` + MaxViolation int `json:"max_violations,omitempty"` + Notes string `json:"notes,omitempty"` + Quota int `json:"quota,omitempty"` + RelativeExpirationInterval int `json:"relative_expiration_interval,omitempty"` + RelativeExpirationStart int `json:"relative_expiration_start,omitempty"` + SourceTypes []string `json:"sourcetypes,omitempty"` + StackId string `json:"stack_id,omitempty"` + Status string `json:"status,omitempty"` + SubGroupId string `json:"subgroup_id,omitempty"` + Type string `json:"type,omitempty"` + WindowPeriod int `json:"window_period,omitempty"` +} + +type Features struct { + AWSMarketPlace string `json:"AWSMarketplace,omitempty"` + Acceleration string `json:"Acceleration,omitempty"` + AdvancedSearchCommands string `json:"AdvancedSearchCommands,omitempty"` + AdvanceXML string `json:"AdvancedXML,omitempty"` + Alerting string `json:"Alerting,omitempty"` + AllowDuplicateKeys string `json:"AllowDuplicateKeys,omitempty"` + ArchiveToHdfs string `json:"ArchiveToHdfs,omitempty"` + Auth string `json:"Auth,omitempty"` + CanBeRemoteMaster string `json:"CanBeRemoteMaster,omitempty"` + ConditionalLicensingEnforcement string `json:"ConditionalLicensingEnforcement,omitempty"` + CustomRoles string `json:"CustomRoles,omitempty"` + DeployClient string `json:"DeployClient,omitempty"` + DeployServer string `json:"DeployServer,omitempty"` + DisableQuotaEnforcement string `json:"DisableQuotaEnforcement,omitempty"` + DistSearch string `json:"DistSearch,omitempty"` + FwdData string `json:"FwdData,omitempty"` + GuestPass string `json:"GuestPass,omitempty"` + HideQuotaWarning string `json:"HideQuotaWarnings"` + KVStore string `json:"KVStore,omitempty"` + LDAPAuth string `json:"LDAPAuth,omitempty"` + LocalSearch string `json:"LocalSearch,omitempty"` + MultifactorAuth string `json:"MultifactorAuth,omitempty"` + MultisiteClustering string `json:"MultisiteClustering,omitempty"` + NontableLookups string `json:"NontableLookups,omitempty"` + RcvData string `json:"RcvData,omitempty"` + RcvSearch string `json:"RcvSearch,omitempty"` + ResetWarning string `json:"ResetWarnings,omitempty"` + RollingWindowAlert string `json:"RollingWindowAlerts,omitempty"` + SAMLAuth string `json:"SAMLAuth,omitempty"` + ScheduledAlert string `json:"ScheduledAlerts,omitempty"` + ScheduledReports string `json:"ScheduledReports,omitempty"` + ScheduledSearch string `json:"ScheduledSearch,omitempty"` + ScriptedAuth string `json:"ScriptedAuth,omitempty"` + SearchheadPooling string `json:"SearchheadPooling,omitempty"` + SigningProcessor string `json:"SigningProcessor,omitempty"` + SplunkWeb string `json:"SplunkWeb,omitempty"` + SubgroupId string `json:"SubgroupId,omitempty"` + SyslogOutputProcessor string `json:"SyslogOutputProcessor,omitempty"` + 
UnisiteClustring string `json:"UnisiteClustering,omitempty"` } // https://:/services/licenser/localpeer // Get license state information for the Splunk instance. type LicenseLocalPeer struct { + AddOns string `json:"add_ons,omitempty"` + ConnectionTimeout int `json:"connection_timeout,omitempty"` + Features Features `json:"features,omitempty"` + Guid []string `json:"guid"` + LastManagerContactAttemptTime int `json:"last_manager_contact_attempt_time,omitempty"` + LastManagerContactSuccessTime int `json:"last_manager_contact_success_time,omitempty"` + LastTrackDBServiceTime int `json:"last_trackerdb_service_time,omitempty"` + LicenseKeys []string `json:"license_keys,omitempty"` + ManagerGuid string `json:"manager_guid,omitempty"` + ManagerUri string `json:"manager_uri,omitempty"` + PeerId string `json:"peer_id,omitempty"` + PeerLabel string `json:"peer_label,omitempty"` + ReceiveTimeout int `json:"receive_timeout,omitempty"` + SendTimeout int `json:"send_timeout,omitempty"` + SquashThreshold int `json:"squash_threshold,omitempty"` } // https://:/services/licenser/messages @@ -24,6 +109,7 @@ type LicenseLocalPeer struct { // Messages may range from helpful warnings about being close to violations, licenses // expiring or more severe alerts regarding overages and exceeding license warning window. type LicenseMessage struct { + Messages []string `json:"messages,omitempty"` } // https://:/services/licenser/pools @@ -36,6 +122,12 @@ type LicensePool struct { // https://:/services/licenser/peers // Access license peer instances. type LicensePeer struct { + ActivePoolIds []string `json:"active_pool_ids,omitempty"` + Label string `json:"splunk-lm-license-manager-0,omitempty"` + PoolIds []string `json:"pool_ids,omitempty"` + PoolSuggestion string `json:"pool_suggestion,omitempty"` + StackIds []string `json:"stack_ids,omitempty"` + WarningCount string `json:"warning_count,omitempty"` } // https://:/services/licenser/stacks @@ -44,10 +136,21 @@ type LicensePeer struct { // The daily indexing quota of a license stack is additive, so a stack represents // the aggregate entitlement for a collection of licenses. type LicenseStack struct { + CleActive int `json:"cle_active,omitempty"` + IsUnlimited bool `json:"is_unlimited,omitempty"` + Label string `json:"label,omitempty"` + MaxRetentionSize int `json:"max_retention_size,omitempty"` + MaxViolations int `json:"max_violations,omitempty"` + Quota int `json:"quota,omitempty"` + Type string `json:"type,omitempty"` + WindowPeriod int `json:"window_period,omitempty"` } // LicenseUsage // https://:/services/licenser/usage // Get current license usage stats from the last minute. 
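Note on the model types above: they only carry json struct tags, so they can be exercised directly against payloads shaped like the licenser fixtures earlier in this patch. A minimal, self-contained sketch follows; the main package and the JSON literal are invented for illustration and only reuse field names that the structs above define.

package main

import (
	"encoding/json"
	"fmt"

	licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license"
)

func main() {
	// Invented payload shaped like one "content" object from the licenser
	// REST responses; field names follow the struct tags added above.
	raw := []byte(`{
		"group_id": "Enterprise",
		"guid": "2A327594-08E4-48BA-A001-3EB4C1475910",
		"label": "Splunk Internal License DO NOT DISTRIBUTE",
		"quota": 524288000,
		"stack_id": "enterprise",
		"status": "VALID",
		"subgroup_id": "Production",
		"type": "enterprise",
		"window_period": 60
	}`)

	var lic licensemodel.License
	if err := json.Unmarshal(raw, &lic); err != nil {
		panic(err)
	}
	fmt.Printf("%s license %s is %s\n", lic.Type, lic.Guid, lic.Status)
}

One thing worth watching when decoding real responses: field types have to match the wire types, and the fixtures above carry is_active as a boolean and warning_count as a number, so the string-typed IsActive and WarningCount fields may need adjusting if unmarshaling those payloads ever fails.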
type LicenseUsage struct { + PeerUsageBytes int `json:"peers_usage_bytes,omitempty"` + Quota int `json:"quota,omitempty"` + SlavesUsageBytes int `json:"slaves_usage_bytes,omitempty"` } diff --git a/pkg/provisioner/splunk/implementation/license.go b/pkg/provisioner/splunk/implementation/license.go index b41972579..cad54bff5 100644 --- a/pkg/provisioner/splunk/implementation/license.go +++ b/pkg/provisioner/splunk/implementation/license.go @@ -6,7 +6,8 @@ import ( licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license" provmodel "github.com/splunk/splunk-operator/pkg/provisioner/splunk/model" - //"k8s.io/apimachinery/pkg/api/meta" + + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -20,6 +21,16 @@ var callLicenseLocalPeer = func(ctx context.Context, p *splunkProvisioner) (*[]l return lminfo, err } +var callLicense = func(ctx context.Context, p *splunkProvisioner) (*[]licensemodel.License, error) { + lminfo, err := p.licensegateway.GetLicense(ctx) + if err != nil { + return nil, err + } else if lminfo == nil { + return nil, fmt.Errorf("license data is empty") + } + return lminfo, err +} + // GetClusterManagerStatus Access cluster node configuration details. func (p *splunkProvisioner) GetLicenseLocalPeer(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) { _, err = callLicenseLocalPeer(ctx, p) @@ -47,3 +58,30 @@ func (p *splunkProvisioner) GetLicenseLocalPeer(ctx context.Context, conditions }*/ return result, err } + +// GetClusterManagerStatus Access cluster node configuration details. +func (p *splunkProvisioner) GetLicense(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) { + _, err = callLicense(ctx, p) + lslistptr, err := callLicense(ctx, p) + if err != nil { + return result, err + } else { + lslist := *lslistptr + for _, peer := range lslist { + condition := metav1.Condition{ + Type: peer.GroupId, + Message: fmt.Sprintf("%s license %s is %s ", peer.Type, peer.Guid, peer.Status), + Reason: peer.SubGroupId, + } + if peer.Status == "VALID" { + condition.Status = metav1.ConditionTrue + } else { + condition.Status = metav1.ConditionFalse + + } + // set condition to existing conditions list + meta.SetStatusCondition(conditions, condition) + } + } + return result, err +} diff --git a/pkg/provisioner/splunk/implementation/license_test.go b/pkg/provisioner/splunk/implementation/license_test.go new file mode 100644 index 000000000..006754588 --- /dev/null +++ b/pkg/provisioner/splunk/implementation/license_test.go @@ -0,0 +1,49 @@ +package impl + +import ( + "context" + "testing" + + //splunkmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model" + //licensemodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/license" + //provisioner "github.com/splunk/splunk-operator/pkg/provisioner/splunk" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestGetGetLicense(t *testing.T) { + /*callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner) (*[]licensemodel.License, error) { + healthData := []licensemodel.ClusterManagerHealthContent{} + return &healthData, nil + }*/ + provisioner := setCreds(t) + conditions := &[]metav1.Condition{} + + ctx := context.TODO() + + _, err := provisioner.GetLicense(ctx, conditions) + if err != nil { + t.Errorf("fixture: error in set cluster manager %v", err) + } + if conditions == nil || len(*conditions) == 0 { + t.Errorf("fixture: error in conditions for lm %v", 
err) + } +} + +func TestGetLicenseLocalPeer(t *testing.T) { + /*callGetClusterManagerHealth = func(ctx context.Context, p *splunkProvisioner) (*[]licensemodel.LicenseLocalPeer, error) { + healthData := []licensemodel.ClusterManagerHealthContent{} + return &healthData, nil + }*/ + provisioner := setCreds(t) + conditions := &[]metav1.Condition{} + + ctx := context.TODO() + + _, err := provisioner.GetLicenseLocalPeer(ctx, conditions) + if err != nil { + t.Errorf("fixture: error in set cluster manager %v", err) + } + if conditions == nil || len(*conditions) == 0 { + t.Errorf("fixture: error in conditions for lm %v", err) + } +} diff --git a/pkg/provisioner/splunk/provisioner.go b/pkg/provisioner/splunk/provisioner.go index 8884500df..a2d9b78a6 100644 --- a/pkg/provisioner/splunk/provisioner.go +++ b/pkg/provisioner/splunk/provisioner.go @@ -29,4 +29,9 @@ type Provisioner interface { // IsClusterInMaintenanceMode IsClusterInMaintenanceMode(ctx context.Context) (bool, error) + + // GetLicenseLocalPeer + GetLicenseLocalPeer(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) + + GetLicense(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) } diff --git a/pkg/splunk/enterprise/clustermanager_test.go b/pkg/splunk/enterprise/clustermanager_test.go index 197c774fc..c605eb597 100644 --- a/pkg/splunk/enterprise/clustermanager_test.go +++ b/pkg/splunk/enterprise/clustermanager_test.go @@ -144,9 +144,8 @@ func TestApplyClusterManager(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager)) + manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager), cr.(*enterpriseApi.ClusterManager).Spec.CommonSplunkSpec) _, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) - //_, err := ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyClusterManager", ¤t, revised, createCalls, updateCalls, reconcile, true) @@ -170,7 +169,7 @@ func TestApplyClusterManager(t *testing.T) { c := spltest.NewMockClient() _ = errors.New(splcommon.Rerr) - manager := setCreds(t, c, ¤t, current.Spec.CommonSplunkSpec) + manager := setCreds(t, c, ¤t, current.Spec.CommonSplunkSpec) _, err := manager.ApplyClusterManager(ctx, c, ¤t) if err == nil { t.Errorf("Expected error") @@ -267,7 +266,7 @@ func TestApplyClusterManager(t *testing.T) { c.Create(ctx, &cmap) current.Spec.SmartStore.VolList[0].SecretRef = "" current.Spec.SmartStore.Defaults.IndexAndGlobalCommonSpec.VolName = "msos_s2s3_vol" - manager = setCreds(t, c, ¤t) + manager = setCreds(t, c, ¤t, current.Spec.CommonSplunkSpec) _, err = manager.ApplyClusterManager(ctx, c, ¤t) if err != nil { t.Errorf("Don't expected error here") @@ -642,7 +641,7 @@ func TestApplyClusterManagerWithSmartstore(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager), ) + manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager), current.Spec.CommonSplunkSpec) _, err := manager.ApplyClusterManager(ctx, c, cr.(*enterpriseApi.ClusterManager)) return err } @@ -897,7 +896,7 @@ func TestAppFrameworkApplyClusterManagerShouldNotFail(t *testing.T) { t.Errorf(err.Error()) } - manager := setCreds(t, client, &cm, 
cm.Spec.CommonSplunkSpec) + manager := setCreds(t, client, &cm, cm.Spec.CommonSplunkSpec) _, err = manager.ApplyClusterManager(ctx, client, &cm) if err != nil { t.Errorf("ApplyClusterManager should not have returned error here.") @@ -993,7 +992,7 @@ func TestApplyCLusterManagerDeletion(t *testing.T) { t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume) } - manager := setCreds(t, c, &cm, cm.Spec.CommonSplunkSpec) + manager := setCreds(t, c, &cm, cm.Spec.CommonSplunkSpec) _, err = manager.ApplyClusterManager(ctx, c, &cm) if err != nil { t.Errorf("ApplyClusterManager should not have returned error here.") @@ -1450,7 +1449,8 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) { } err := client.Create(ctx, &lm) - _, err = ApplyLicenseManager(ctx, client, &lm) + manager := setCreds(t, client, &lm, lm.Spec.CommonSplunkSpec) + _, err = manager.ApplyLicenseManager(ctx, client, &lm) if err != nil { t.Errorf("applyLicenseManager should not have returned error; err=%v", err) } @@ -1482,7 +1482,7 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) { } err = client.Create(ctx, &cm) - manager := setCreds(t, client, &cm) + manager = setCreds(t, client, &lm, lm.Spec.CommonSplunkSpec) _, err = manager.ApplyClusterManager(ctx, client, &cm) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) @@ -1490,7 +1490,8 @@ func TestIsClusterManagerReadyForUpgrade(t *testing.T) { cm.Spec.Image = "splunk2" lm.Spec.Image = "splunk2" - _, err = ApplyLicenseManager(ctx, client, &lm) + manager = setCreds(t, client, &lm, lm.Spec.CommonSplunkSpec) + _, err = manager.ApplyLicenseManager(ctx, client, &lm) clusterManager := &enterpriseApi.ClusterManager{} namespacedName := types.NamespacedName{ @@ -1557,7 +1558,8 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { // Create the instances client.Create(ctx, lm) - _, err := ApplyLicenseManager(ctx, client, lm) + manager := setCreds(t, client, lm, lm.Spec.CommonSplunkSpec) + _, err := manager.ApplyLicenseManager(ctx, client, lm) if err != nil { t.Errorf("applyLicenseManager should not have returned error; err=%v", err) } @@ -1568,7 +1570,7 @@ func TestChangeClusterManagerAnnotations(t *testing.T) { debug.PrintStack() } client.Create(ctx, cm) - manager := setCreds(t, client, cm) + manager = setCreds(t, client, cm, cm.Spec.CommonSplunkSpec) _, err = manager.ApplyClusterManager(ctx, client, cm) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) @@ -1710,7 +1712,7 @@ func TestClusterManagerWitReadyState(t *testing.T) { // simulate create clustermanager instance before reconcilation c.Create(ctx, clustermanager) - manager := setCreds(t, c, clustermanager) + manager := setCreds(t, c, clustermanager, clustermanager.Spec.CommonSplunkSpec) _, err := manager.ApplyClusterManager(ctx, c, clustermanager) if err != nil { t.Errorf("Unexpected error while running reconciliation for clustermanager with app framework %v", err) diff --git a/pkg/splunk/enterprise/configuration_test.go b/pkg/splunk/enterprise/configuration_test.go index 32e5f0d4a..312f87c9d 100644 --- a/pkg/splunk/enterprise/configuration_test.go +++ b/pkg/splunk/enterprise/configuration_test.go @@ -232,7 +232,7 @@ func TestSmartstoreApplyClusterManagerFailsOnInvalidSmartStoreConfig(t *testing. 
var client splcommon.ControllerClient
- manager := setCreds(t, client, &cr)
+ manager := setCreds(t, client, &cr, cr.Spec.CommonSplunkSpec)
 _, err := manager.ApplyClusterManager(context.Background(), client, &cr)
 if err == nil {
 t.Errorf("ApplyClusterManager should fail on invalid smartstore config")
diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go
index d8d0ef47d..0ccb98cd4 100644
--- a/pkg/splunk/enterprise/indexercluster_test.go
+++ b/pkg/splunk/enterprise/indexercluster_test.go
@@ -1636,7 +1636,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) {
 }
 // call reconciliation
- manager := setCreds(t, c, clustermanager)
+ manager := setCreds(t, c, clustermanager, clustermanager.Spec.CommonSplunkSpec)
 _, err = manager.ApplyClusterManager(ctx, c, clustermanager)
 if err != nil {
 t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go
index a5b144808..7bbe1de88 100644
--- a/pkg/splunk/enterprise/licensemanager.go
+++ b/pkg/splunk/enterprise/licensemanager.go
@@ -175,15 +175,15 @@ func (p *splunkManager) ApplyLicenseManager(ctx context.Context, client splcommo
 result = *finalResult
 // trigger ClusterManager reconcile by changing the splunk/image-tag annotation
- err = changeClusterManagerAnnotations(ctx, client, cr)
- if err != nil {
- return result, err
- }
+ //err = changeClusterManagerAnnotations(ctx, client, cr)
+ //if err != nil {
+ // return result, err
+ //}
 // Verification of splunk instance update CR status
 // We are using Conditions to update status information
 provResult := provmodel.Result{}
- provResult, err = p.provisioner.GetClusterManagerStatus(ctx, &cr.Status.Conditions)
+ provResult, err = p.provisioner.GetLicense(ctx, &cr.Status.Conditions)
 if err != nil {
 cr.Status.ErrorMessage = provResult.ErrorMessage
 }
diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go
index befe2f5c5..f43d5d285 100644
--- a/pkg/splunk/enterprise/licensemanager_test.go
+++ b/pkg/splunk/enterprise/licensemanager_test.go
@@ -88,7 +88,7 @@ func TestApplyLicenseManager(t *testing.T) {
 revised := current.DeepCopy()
 revised.Spec.Image = "splunk/test"
 reconcile := func(c *spltest.MockClient, cr interface{}) error {
- manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager))
+ manager := setCreds(t, c, cr.(*enterpriseApi.LicenseManager), cr.(*enterpriseApi.LicenseManager).Spec.CommonSplunkSpec)
 _, err := manager.ApplyLicenseManager(context.Background(), c, cr.(*enterpriseApi.LicenseManager))
 return err
 }
@@ -99,7 +99,7 @@
 revised.ObjectMeta.DeletionTimestamp = &currentTime
 revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"}
 deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) {
- manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager))
+ manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager), cr.(*enterpriseApi.ClusterManager).Spec.CommonSplunkSpec)
 _, err := manager.ApplyLicenseManager(context.Background(), c, cr.(*enterpriseApi.LicenseManager))
 return true, err
 }
@@ -109,7 +109,7 @@
 c := spltest.NewMockClient()
 ctx := context.TODO()
 current.Spec.LivenessInitialDelaySeconds = -1
- manager := setCreds(t, c, current.(*enterpriseApi.ClusterManager))
+ manager := setCreds(t, c, &current, current.Spec.CommonSplunkSpec)
 _, err := manager.ApplyLicenseManager(ctx, c, &current)
 if err == nil {
 t.Errorf("Expected error")
 }
@@ -118,7 +118,7 @@
 rerr := errors.New(splcommon.Rerr)
 current.Spec.LivenessInitialDelaySeconds = 5
 c.InduceErrorKind[splcommon.MockClientInduceErrorGet] = rerr
- _, err = ApplyLicenseManager(ctx, c, &current)
+ _, err = manager.ApplyLicenseManager(ctx, c, &current)
 if err == nil {
 t.Errorf("Expected error")
 }
@@ -130,7 +130,7 @@
 }
 c.Create(ctx, nsSec)
 c.InduceErrorKind[splcommon.MockClientInduceErrorCreate] = rerr
- _, err = ApplyLicenseManager(ctx, c, &current)
+ _, err = manager.ApplyLicenseManager(ctx, c, &current)
 if err == nil {
 t.Errorf("Expected error")
 }
@@ -253,7 +253,8 @@ func TestAppFrameworkApplyLicenseManagerShouldNotFail(t *testing.T) {
 t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume)
 }
- _, err = ApplyLicenseManager(ctx, client, &cr)
+ manager := setCreds(t, client, &cr, cr.Spec.CommonSplunkSpec)
+ _, err = manager.ApplyLicenseManager(ctx, client, &cr)
 if err != nil {
 t.Errorf("ApplyLicenseManager should be successful")
@@ -683,7 +684,8 @@ func TestApplyLicenseManagerDeletion(t *testing.T) {
 t.Errorf("Unable to create download directory for apps :%s", splcommon.AppDownloadVolume)
 }
- _, err = ApplyLicenseManager(ctx, c, &lm)
+ manager := setCreds(t, c, &lm, lm.Spec.CommonSplunkSpec)
+ _, err = manager.ApplyLicenseManager(ctx, c, &lm)
 if err != nil {
 t.Errorf("ApplyLicenseManager should not have returned error here.")
 }
@@ -917,7 +919,7 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
 }
 // call reconciliation
- manager := setCreds(t, c, clustermanager)
+ manager := setCreds(t, c, clustermanager, clustermanager.Spec.CommonSplunkSpec)
 _, err = manager.ApplyClusterManager(ctx, c, clustermanager)
 if err != nil {
 t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
@@ -1069,7 +1071,8 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
 // simulate create clustermanager instance before reconcilation
 c.Create(ctx, licensemanager)
- _, err = ApplyLicenseManager(ctx, c, licensemanager)
+ manager = setCreds(t, c, licensemanager, licensemanager.Spec.CommonSplunkSpec)
+ _, err = manager.ApplyLicenseManager(ctx, c, licensemanager)
 if err != nil {
 t.Errorf("Unexpected error while running reconciliation for indexer cluster %v", err)
 debug.PrintStack()
 }
@@ -1106,7 +1109,8 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
 }
 // call reconciliation
- _, err = ApplyLicenseManager(ctx, c, licensemanager)
+ manager = setCreds(t, c, licensemanager, licensemanager.Spec.CommonSplunkSpec)
+ _, err = manager.ApplyLicenseManager(ctx, c, licensemanager)
 if err != nil {
 t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
 debug.PrintStack()
 }
@@ -1221,7 +1225,8 @@ func TestLicenseManagerWithReadyState(t *testing.T) {
 }
 // call reconciliation
- _, err = ApplyLicenseManager(ctx, c, licensemanager)
+ manager = setCreds(t, c, licensemanager, licensemanager.Spec.CommonSplunkSpec)
+ _, err = manager.ApplyLicenseManager(ctx, c, licensemanager)
 if err != nil {
 t.Errorf("Unexpected error while running reconciliation for license manager with app framework %v", err)
 debug.PrintStack()
diff --git a/pkg/splunk/enterprise/licensemaster_test.go b/pkg/splunk/enterprise/licensemaster_test.go
index 9daedfca4..0d27155d8 100644
--- a/pkg/splunk/enterprise/licensemaster_test.go
+++ b/pkg/splunk/enterprise/licensemaster_test.go
@@ -924,7 +924,7 @@ func TestLicenseMasterWithReadyState(t *testing.T) {
 }
 // call reconciliation
- manager := setCreds(t, c, clustermanager)
+ manager := setCreds(t, c, clustermanager, clustermanager.Spec.CommonSplunkSpec)
 _, err = manager.ApplyClusterManager(ctx, c, clustermanager)
 if err != nil {
 t.Errorf("Unexpected error while running reconciliation for cluster manager with app framework %v", err)
diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go
index f9a0d1a91..a0f52f463 100644
--- a/pkg/splunk/enterprise/util_test.go
+++ b/pkg/splunk/enterprise/util_test.go
@@ -3172,7 +3172,7 @@ func TestGetCurrentImage(t *testing.T) {
 utilruntime.Must(enterpriseApi.AddToScheme(clientgoscheme.Scheme))
 err := client.Create(ctx, &current)
- manager := setCreds(t, client, &current)
+ manager := setCreds(t, client, &current, current.Spec.CommonSplunkSpec)
 _, err = manager.ApplyClusterManager(ctx, client, &current)
 if err != nil {
 t.Errorf("applyClusterManager should not have returned error; err=%v", err)
From 0b0989a6a8caf9156c25c1628f2f6bbc38a3bec4 Mon Sep 17 00:00:00 2001
From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com>
Date: Wed, 2 Aug 2023 09:57:14 -0700
Subject: [PATCH 80/85] working license manager verification code after PR review
Signed-off-by: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com>
---
 api/v4/clustermanager_types.go | 2 +-
 pkg/splunk/enterprise/licensemanager.go | 8 ++++----
 pkg/splunk/enterprise/licensemanager_test.go | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/api/v4/clustermanager_types.go b/api/v4/clustermanager_types.go
index 006b5c3e3..db2f485df 100644
--- a/api/v4/clustermanager_types.go
+++ b/api/v4/clustermanager_types.go
@@ -131,7 +131,7 @@ func (cmstr *ClusterManager) NewEvent(eventType, reason, message string) corev1.
Namespace: cmstr.Namespace, }, InvolvedObject: corev1.ObjectReference{ - Kind: "SearchHeadCluster", + Kind: "ClusterManager", Namespace: cmstr.GetNamespace(), Name: cmstr.GetName(), UID: cmstr.GetUID(), diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index 7bbe1de88..bfea285ee 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -175,10 +175,10 @@ func (p *splunkManager) ApplyLicenseManager(ctx context.Context, client splcommo result = *finalResult // trigger ClusterManager reconcile by changing the splunk/image-tag annotation - //err = changeClusterManagerAnnotations(ctx, client, cr) - //if err != nil { - // return result, err - //} + err = changeClusterManagerAnnotations(ctx, client, cr) + if err != nil { + return result, err + } // Verification of splunk instance update CR status // We are using Conditions to update status information diff --git a/pkg/splunk/enterprise/licensemanager_test.go b/pkg/splunk/enterprise/licensemanager_test.go index f43d5d285..0f0fcae99 100644 --- a/pkg/splunk/enterprise/licensemanager_test.go +++ b/pkg/splunk/enterprise/licensemanager_test.go @@ -99,7 +99,7 @@ func TestApplyLicenseManager(t *testing.T) { revised.ObjectMeta.DeletionTimestamp = ¤tTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - manager := setCreds(t, c, cr.(*enterpriseApi.ClusterManager), cr.(*enterpriseApi.ClusterManager).Spec.CommonSplunkSpec) + manager := setCreds(t, c, cr.(*enterpriseApi.LicenseManager), cr.(*enterpriseApi.LicenseManager).Spec.CommonSplunkSpec) _, err := manager.ApplyLicenseManager(context.Background(), c, cr.(*enterpriseApi.LicenseManager)) return true, err } From 74937ecec05137febf9d340626a1c3565d7704c2 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Wed, 23 Aug 2023 12:05:56 -0700 Subject: [PATCH 81/85] fixed some test cases Signed-off-by: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> --- pkg/splunk/enterprise/monitoringconsole_test.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go index e72750ec1..cee5ed297 100644 --- a/pkg/splunk/enterprise/monitoringconsole_test.go +++ b/pkg/splunk/enterprise/monitoringconsole_test.go @@ -1129,7 +1129,8 @@ func TestIsMonitoringConsoleReadyForUpgrade(t *testing.T) { } err := client.Create(ctx, &cm) - _, err = ApplyClusterManager(ctx, client, &cm) + manager := setCreds(t, client, &cm, cm.Spec.CommonSplunkSpec) + _, err = manager.ApplyClusterManager(ctx, client, &cm) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } @@ -1161,6 +1162,9 @@ func TestIsMonitoringConsoleReadyForUpgrade(t *testing.T) { } err = client.Create(ctx, &mc) + if err != nil { + t.Errorf("applyMonitoringConsole create mc failed error; err=%v", err) + } _, err = ApplyMonitoringConsole(ctx, client, &mc) if err != nil { t.Errorf("applyMonitoringConsole should not have returned error; err=%v", err) @@ -1168,7 +1172,7 @@ func TestIsMonitoringConsoleReadyForUpgrade(t *testing.T) { mc.Spec.Image = "splunk2" cm.Spec.Image = "splunk2" - _, err = ApplyClusterManager(ctx, client, &cm) + _, err = manager.ApplyClusterManager(ctx, client, &cm) monitoringConsole := &enterpriseApi.MonitoringConsole{} namespacedName := 
types.NamespacedName{ @@ -1235,7 +1239,8 @@ func TestChangeMonitoringConsoleAnnotations(t *testing.T) { // Create the instances client.Create(ctx, cm) - _, err := ApplyClusterManager(ctx, client, cm) + manager := setCreds(t, client, cm, cm.Spec.CommonSplunkSpec) + _, err := manager.ApplyClusterManager(ctx, client, cm) if err != nil { t.Errorf("applyClusterManager should not have returned error; err=%v", err) } From fe0cce890053c2c44c76f99b02e1158892b1b22c Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Mon, 11 Sep 2023 15:21:44 -0700 Subject: [PATCH 82/85] upgrading go 1.21 Signed-off-by: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> --- Dockerfile | 2 +- go.mod | 10 +++++----- go.sum | 8 ++++++++ 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/Dockerfile b/Dockerfile index b56602ab7..ea326a619 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.19 as builder +FROM golang:1.21 as builder WORKDIR /workspace # Copy the Go Modules manifests diff --git a/go.mod b/go.mod index 82b42825c..0b7903ca8 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/google/go-cmp v0.5.9 github.com/jinzhu/copier v0.3.5 github.com/minio/minio-go/v7 v7.0.16 - github.com/onsi/ginkgo/v2 v2.11.0 + github.com/onsi/ginkgo/v2 v2.12.0 github.com/onsi/gomega v1.27.10 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 @@ -71,13 +71,13 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/net v0.12.0 // indirect + golang.org/x/net v0.14.0 // indirect golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect - golang.org/x/sys v0.10.0 // indirect + golang.org/x/sys v0.11.0 // indirect golang.org/x/term v0.5.0 // indirect - golang.org/x/text v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.9.3 // indirect + golang.org/x/tools v0.12.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect diff --git a/go.sum b/go.sum index 1519f619e..caf194af9 100644 --- a/go.sum +++ b/go.sum @@ -260,6 +260,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.12.0 h1:UIVDowFPwpg6yMUpPjGkYvf06K3RAiJXUhCxEwQVHRI= +github.com/onsi/ginkgo/v2 v2.12.0/go.mod h1:ZNEzXISYlqpb8S36iN71ifqLi3vVD1rVJGvWRCJOUpQ= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= @@ -437,6 +439,8 @@ golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -452,6 +456,8 @@ golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -504,6 +510,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From fed6f8a18b32fef47758971e7a0c08992f52e767 Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Tue, 12 Sep 2023 10:47:52 -0700 Subject: [PATCH 83/85] changed ioutils to os Signed-off-by: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> --- .../license-manager/fixture/license_fixture.go | 18 +++++++++--------- .../implementation/license_test.go | 4 ++-- pkg/gateway/splunk/services/fixture/fixture.go | 15 ++++++++------- .../services/implementation/splunk_test.go | 11 ++++++----- .../splunk/implementation/license_test.go | 2 +- .../splunk/implementation/splunk.go | 2 -- 6 files changed, 26 insertions(+), 26 deletions(-) diff --git a/pkg/gateway/splunk/license-manager/fixture/license_fixture.go b/pkg/gateway/splunk/license-manager/fixture/license_fixture.go index d2ff5bd3b..a1b9a2940 100644 --- a/pkg/gateway/splunk/license-manager/fixture/license_fixture.go +++ b/pkg/gateway/splunk/license-manager/fixture/license_fixture.go @@ -9,7 +9,7 @@ import ( "path/filepath" //"encoding/json" - "io/ioutil" + "net/http" "github.com/go-logr/logr" @@ -94,7 +94,7 @@ func (p *fixtureGateway) GetLicenseGroup(ctx context.Context) (*[]licensemodel.L log.Error(err, "fixture: unable to find path") return nil, err } - content, err := ioutil.ReadFile(relativePath + "/license_group.json") + content, err := os.ReadFile(relativePath + "/license_group.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -142,7 +142,7 @@ func (p *fixtureGateway) GetLicense(ctx context.Context) (*[]licensemodel.Licens log.Error(err, "fixture: 
unable to find path") return nil, err } - content, err := ioutil.ReadFile(relativePath + "/license.json") + content, err := os.ReadFile(relativePath + "/license.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -199,7 +199,7 @@ func (p *fixtureGateway) GetLicenseLocalPeer(ctx context.Context) (*[]licensemod log.Error(err, "fixture: unable to find path") return nil, err } - content, err := ioutil.ReadFile(relativePath + "/license_local_peer.json") + content, err := os.ReadFile(relativePath + "/license_local_peer.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -255,7 +255,7 @@ func (p *fixtureGateway) GetLicenseMessage(ctx context.Context) (*[]licensemodel log.Error(err, "fixture: unable to find path") return nil, err } - content, err := ioutil.ReadFile(relativePath + "/license_message.json") + content, err := os.ReadFile(relativePath + "/license_message.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -311,7 +311,7 @@ func (p *fixtureGateway) GetLicensePools(ctx context.Context) (*[]licensemodel.L log.Error(err, "fixture: unable to find path") return nil, err } - content, err := ioutil.ReadFile(relativePath + "/license_pools.json") + content, err := os.ReadFile(relativePath + "/license_pools.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -367,7 +367,7 @@ func (p *fixtureGateway) GetLicensePeers(context context.Context) (*[]licensemod log.Error(err, "fixture: unable to find path") return nil, err } - content, err := ioutil.ReadFile(relativePath + "/license_peers.json") + content, err := os.ReadFile(relativePath + "/license_peers.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -423,7 +423,7 @@ func (p *fixtureGateway) GetLicenseUsage(ctx context.Context) (*[]licensemodel.L log.Error(err, "fixture: unable to find path") return nil, err } - content, err := ioutil.ReadFile(relativePath + "/license_usage.json") + content, err := os.ReadFile(relativePath + "/license_usage.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -479,7 +479,7 @@ func (p *fixtureGateway) GetLicenseStacks(ctx context.Context) (*[]licensemodel. 
log.Error(err, "fixture: unable to find path") return nil, err } - content, err := ioutil.ReadFile(relativePath + "/license_stack.json") + content, err := os.ReadFile(relativePath + "/license_stack.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err diff --git a/pkg/gateway/splunk/license-manager/implementation/license_test.go b/pkg/gateway/splunk/license-manager/implementation/license_test.go index 477d91e75..a168ee220 100644 --- a/pkg/gateway/splunk/license-manager/implementation/license_test.go +++ b/pkg/gateway/splunk/license-manager/implementation/license_test.go @@ -3,6 +3,7 @@ package impl import ( "context" "fmt" + "os" "time" "github.com/go-resty/resty/v2" @@ -12,7 +13,6 @@ import ( //managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" //peermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/peer" - "io/ioutil" "testing" logz "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -61,7 +61,7 @@ func GetLicenseGroup(t *testing.T) { ctx := context.TODO() sm := setCreds(t) httpmock.ActivateNonDefault(sm.client.GetClient()) - content, err := ioutil.ReadFile("../fixture/license_group.json") + content, err := os.ReadFile("../fixture/license_group.json") if err != nil { t.Errorf("fixture: error in get cluster manager health %v", err) } diff --git a/pkg/gateway/splunk/services/fixture/fixture.go b/pkg/gateway/splunk/services/fixture/fixture.go index 36d2c7086..9b55b66a5 100644 --- a/pkg/gateway/splunk/services/fixture/fixture.go +++ b/pkg/gateway/splunk/services/fixture/fixture.go @@ -9,7 +9,7 @@ import ( "path/filepath" //"encoding/json" - "io/ioutil" + "net/http" "github.com/go-logr/logr" @@ -19,6 +19,7 @@ import ( clustermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster" managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" model "github.com/splunk/splunk-operator/pkg/splunk/model" + // peermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/peer" // searchheadmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/searchhead" // commonmodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/common" @@ -101,7 +102,7 @@ func (p *fixtureGateway) GetClusterManagerInfo(ctx context.Context) (*[]managerm log.Error(err, "fixture: unable to find path") return nil, err } - content, err := ioutil.ReadFile(relativePath + "/cluster_config.json") + content, err := os.ReadFile(relativePath + "/cluster_config.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -150,7 +151,7 @@ func (p *fixtureGateway) GetClusterManagerPeers(ctx context.Context) (*[]manager } // Read entire file content, giving us little control but // making it very simple. No need to close the file. - content, err := ioutil.ReadFile(relativePath + "cluster_config.json") + content, err := os.ReadFile(relativePath + "cluster_config.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -204,7 +205,7 @@ func (p *fixtureGateway) GetClusterManagerHealth(ctx context.Context) (*[]manage // Read entire file content, giving us little control but // making it very simple. No need to close the file. 
- content, err := ioutil.ReadFile(relativePath + "cluster_config.json") + content, err := os.ReadFile(relativePath + "cluster_config.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -255,7 +256,7 @@ func (p *fixtureGateway) GetClusterManagerSites(ctx context.Context) (*[]manager } // Read entire file content, giving us little control but // making it very simple. No need to close the file. - content, err := ioutil.ReadFile(relativePath + "/cluster_config.json") + content, err := os.ReadFile(relativePath + "/cluster_config.json") if err != nil { log.Error(err, "fixture: error in get cluster config") return nil, err @@ -304,7 +305,7 @@ func (p *fixtureGateway) GetClusterManagerStatus(ctx context.Context) (*[]manage } // Read entire file content, giving us little control but // making it very simple. No need to close the file. - content, err := ioutil.ReadFile(relativePath + "/cluster_manager_status.json") + content, err := os.ReadFile(relativePath + "/cluster_manager_status.json") if err != nil { log.Error(err, "fixture: error in get cluster manager search heads") return nil, err @@ -355,7 +356,7 @@ func (p *fixtureGateway) SetClusterInMaintenanceMode(context context.Context, mo } // Read entire file content, giving us little control but // making it very simple. No need to close the file. - content, err := ioutil.ReadFile(relativePath + "/cluster_maintenance.json") + content, err := os.ReadFile(relativePath + "/cluster_maintenance.json") if err != nil { log.Error(err, "fixture: error in post cluster maintenance") return err diff --git a/pkg/gateway/splunk/services/implementation/splunk_test.go b/pkg/gateway/splunk/services/implementation/splunk_test.go index 03e1429d3..4faaed56f 100644 --- a/pkg/gateway/splunk/services/implementation/splunk_test.go +++ b/pkg/gateway/splunk/services/implementation/splunk_test.go @@ -3,6 +3,7 @@ package impl import ( "context" "fmt" + "os" "time" "github.com/go-resty/resty/v2" @@ -12,7 +13,7 @@ import ( //managermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/manager" //peermodel "github.com/splunk/splunk-operator/pkg/gateway/splunk/model/services/cluster/peer" - "io/ioutil" + "testing" logz "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -61,7 +62,7 @@ func TestGetClusterManagerHealth(t *testing.T) { ctx := context.TODO() sm := setCreds(t) httpmock.ActivateNonDefault(sm.client.GetClient()) - content, err := ioutil.ReadFile("../fixture/cluster_manager_health.json") + content, err := os.ReadFile("../fixture/cluster_manager_health.json") if err != nil { t.Errorf("fixture: error in get cluster manager health %v", err) } @@ -83,7 +84,7 @@ func TestGetClusterManagerInfo(t *testing.T) { ctx := context.TODO() sm := setCreds(t) httpmock.ActivateNonDefault(sm.client.GetClient()) - content, err := ioutil.ReadFile("../fixture/cluster_manager_info.json") + content, err := os.ReadFile("../fixture/cluster_manager_info.json") if err != nil { t.Errorf("fixture: error in get cluster manager info %v", err) } @@ -105,7 +106,7 @@ func TestGetClusterManagerPeers(t *testing.T) { ctx := context.TODO() sm := setCreds(t) httpmock.ActivateNonDefault(sm.client.GetClient()) - content, err := ioutil.ReadFile("../fixture/cluster_manager_peers.json") + content, err := os.ReadFile("../fixture/cluster_manager_peers.json") if err != nil { t.Errorf("fixture: error in get cluster manager peers %v", err) } @@ -130,7 +131,7 @@ func TestSetClusterInMaintenanceeMode(t *testing.T) { ctx := context.TODO() sm := 
setCreds(t) httpmock.ActivateNonDefault(sm.client.GetClient()) - content, err := ioutil.ReadFile("../fixture/cluster_maintenance.json") + content, err := os.ReadFile("../fixture/cluster_maintenance.json") if err != nil { t.Errorf("fixture: error in get cluster manager peers %v", err) } diff --git a/pkg/provisioner/splunk/implementation/license_test.go b/pkg/provisioner/splunk/implementation/license_test.go index 006754588..beac466d6 100644 --- a/pkg/provisioner/splunk/implementation/license_test.go +++ b/pkg/provisioner/splunk/implementation/license_test.go @@ -44,6 +44,6 @@ func TestGetLicenseLocalPeer(t *testing.T) { t.Errorf("fixture: error in set cluster manager %v", err) } if conditions == nil || len(*conditions) == 0 { - t.Errorf("fixture: error in conditions for lm %v", err) + t.Errorf("fixture: error in conditions for license manager %v", err) } } diff --git a/pkg/provisioner/splunk/implementation/splunk.go b/pkg/provisioner/splunk/implementation/splunk.go index d8fcbd829..ead2bea25 100644 --- a/pkg/provisioner/splunk/implementation/splunk.go +++ b/pkg/provisioner/splunk/implementation/splunk.go @@ -135,8 +135,6 @@ func (p *splunkProvisioner) GetClusterManagerStatus(ctx context.Context, conditi meta.SetStatusCondition(conditions, condition) } - // business logic starts here - //healthList, err := callGetClusterManagerHealth(ctx, p) healthList, err := callGetClusterManagerHealth(ctx, p) if err != nil { return result, err From ad0882ac6b340c4ffed83a011d3fbb853612b2de Mon Sep 17 00:00:00 2001 From: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> Date: Wed, 13 Sep 2023 14:18:59 -0700 Subject: [PATCH 84/85] changed few formatting issues Signed-off-by: vivekr-splunk <94569031+vivekr-splunk@users.noreply.github.com> --- pkg/provisioner/splunk/implementation/license.go | 4 ++-- pkg/provisioner/splunk/implementation/license_test.go | 2 +- pkg/provisioner/splunk/provisioner.go | 2 +- pkg/splunk/enterprise/licensemanager.go | 2 +- test/licensemanager/lm_s1_test.go | 3 +-- 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/pkg/provisioner/splunk/implementation/license.go b/pkg/provisioner/splunk/implementation/license.go index cad54bff5..77a1e65e5 100644 --- a/pkg/provisioner/splunk/implementation/license.go +++ b/pkg/provisioner/splunk/implementation/license.go @@ -59,8 +59,8 @@ func (p *splunkProvisioner) GetLicenseLocalPeer(ctx context.Context, conditions return result, err } -// GetClusterManagerStatus Access cluster node configuration details. -func (p *splunkProvisioner) GetLicense(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) { +// GetLicenseStatus Access cluster node configuration details. 
+func (p *splunkProvisioner) GetLicenseStatus(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) { _, err = callLicense(ctx, p) lslistptr, err := callLicense(ctx, p) if err != nil { diff --git a/pkg/provisioner/splunk/implementation/license_test.go b/pkg/provisioner/splunk/implementation/license_test.go index beac466d6..b2ca49306 100644 --- a/pkg/provisioner/splunk/implementation/license_test.go +++ b/pkg/provisioner/splunk/implementation/license_test.go @@ -20,7 +20,7 @@ func TestGetGetLicense(t *testing.T) { ctx := context.TODO() - _, err := provisioner.GetLicense(ctx, conditions) + _, err := provisioner.GetLicenseStatus(ctx, conditions) if err != nil { t.Errorf("fixture: error in set cluster manager %v", err) } diff --git a/pkg/provisioner/splunk/provisioner.go b/pkg/provisioner/splunk/provisioner.go index a2d9b78a6..25b64dff5 100644 --- a/pkg/provisioner/splunk/provisioner.go +++ b/pkg/provisioner/splunk/provisioner.go @@ -33,5 +33,5 @@ type Provisioner interface { // GetLicenseLocalPeer GetLicenseLocalPeer(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) - GetLicense(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) + GetLicenseStatus(ctx context.Context, conditions *[]metav1.Condition) (result provmodel.Result, err error) } diff --git a/pkg/splunk/enterprise/licensemanager.go b/pkg/splunk/enterprise/licensemanager.go index bfea285ee..0597eb58d 100644 --- a/pkg/splunk/enterprise/licensemanager.go +++ b/pkg/splunk/enterprise/licensemanager.go @@ -183,7 +183,7 @@ func (p *splunkManager) ApplyLicenseManager(ctx context.Context, client splcommo // Verification of splunk instance update CR status // We are using Conditions to update status information provResult := provmodel.Result{} - provResult, err = p.provisioner.GetLicense(ctx, &cr.Status.Conditions) + provResult, err = p.provisioner.GetLicenseStatus(ctx, &cr.Status.Conditions) if err != nil { cr.Status.ErrorMessage = provResult.ErrorMessage } diff --git a/test/licensemanager/lm_s1_test.go b/test/licensemanager/lm_s1_test.go index 6e2f3d6ee..d85dddb04 100644 --- a/test/licensemanager/lm_s1_test.go +++ b/test/licensemanager/lm_s1_test.go @@ -58,8 +58,7 @@ var _ = Describe("Licensemanager test", func() { // Download License File downloadDir := "licenseFolder" switch testenv.ClusterProvider { - case "eks": - licenseFilePath, err := testenv.DownloadLicenseFromS3Bucket() + case "eks": licenseFilePath, err := testenv.DownloadLicenseFromS3Bucket() Expect(err).To(Succeed(), "Unable to download license file from S3") // Create License Config Map testcaseEnvInst.CreateLicenseConfigMap(licenseFilePath) From a5ae87fe0c67d0463cde1cbe3aef1624418e3b79 Mon Sep 17 00:00:00 2001 From: vivekr-splunk Date: Wed, 27 Sep 2023 13:17:41 -0700 Subject: [PATCH 85/85] adding lm to upgrade testing Signed-off-by: vivekr-splunk --- kuttl/tests/upgrade/c3-with-operator/c3_config.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml b/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml index fd00ad06d..eaa4b9a9f 100644 --- a/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml +++ b/kuttl/tests/upgrade/c3-with-operator/c3_config.yaml @@ -16,6 +16,9 @@ sva: searchHeadClusters: - name: shc + licenseManager: + - name: lm + indexerCluster: enabled: true
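The series ends with the kuttl upgrade config gaining a licenseManager entry and the provisioner exposing GetLicenseStatus. The short Go program below is illustrative only and is not part of any patch above: it sketches how a caller might read back the status conditions that GetLicenseStatus populates, using the same k8s.io/apimachinery condition helpers the patches import. The condition type "Enterprise", the reason "Production", and the GUID in the message are assumed placeholder values, not output from a real license manager.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Conditions as they could appear on the LicenseManager CR status after
	// GetLicenseStatus has run: one entry per license reported by Splunk.
	conditions := []metav1.Condition{}

	// Simulate a single valid license. In the provisioner, Type comes from the
	// license GroupId, Reason from the SubGroupId, and Status is True only when
	// the license status string is "VALID". All literal values here are assumed.
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:    "Enterprise",
		Status:  metav1.ConditionTrue,
		Reason:  "Production",
		Message: "enterprise license 00000000-0000-0000-0000-000000000000 is VALID",
	})

	// A controller can then gate its upgrade or reconcile logic on the condition.
	if meta.IsStatusConditionTrue(conditions, "Enterprise") {
		fmt.Println("license manager reports a valid license")
	}
}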