From 37b23749ec3ee128cf6879eed26e5d0b79a6f3a2 Mon Sep 17 00:00:00 2001 From: Arjun Kondur Date: Tue, 17 Dec 2024 18:12:08 -0600 Subject: [PATCH 1/6] Initial changes to support deployer spec in SHC CRD --- api/v4/searchheadcluster_types.go | 3 + api/v4/zz_generated.deepcopy.go | 1 + ...erprise.splunk.com_searchheadclusters.yaml | 53 ++++++++++++++++ go.mod | 4 +- go.sum | 4 ++ pkg/splunk/enterprise/searchheadcluster.go | 18 ++++++ .../manager_custom_resource_crud_c3_test.go | 61 +++++++++++++++++++ test/testenv/verificationutils.go | 47 ++++++++++++++ 8 files changed, 189 insertions(+), 2 deletions(-) diff --git a/api/v4/searchheadcluster_types.go b/api/v4/searchheadcluster_types.go index 83b84fa0b..312674fcf 100644 --- a/api/v4/searchheadcluster_types.go +++ b/api/v4/searchheadcluster_types.go @@ -44,6 +44,9 @@ type SearchHeadClusterSpec struct { // Splunk Enterprise App repository. Specifies remote App location and scope for Splunk App management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` + + // Splunk Deployer resource spec + DeployerResourceSpec corev1.ResourceRequirements `json:"deployerResourceSpec,omitempty"` } // SearchHeadClusterMemberStatus is used to track the status of each search head cluster member diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index 4c10f8035..cc2c5c3b6 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -872,6 +872,7 @@ func (in *SearchHeadClusterSpec) DeepCopyInto(out *SearchHeadClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + in.DeployerResourceSpec.DeepCopyInto(&out.DeployerResourceSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterSpec. diff --git a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml index d4eeec141..c37ff5f0d 100644 --- a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml @@ -5295,6 +5295,59 @@ spec: will be installed on the CM, standalone, search head deployer or license manager instance. type: string + deployerResourceSpec: + description: Splunk Deployer resource spec + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object etcVolumeStorageConfig: description: Storage configuration for /opt/splunk/etc volume properties: diff --git a/go.mod b/go.mod index 966cf2f57..8d978b82a 100644 --- a/go.mod +++ b/go.mod @@ -13,8 +13,8 @@ require ( github.com/google/uuid v1.6.0 github.com/joho/godotenv v1.5.1 github.com/minio/minio-go/v7 v7.0.16 - github.com/onsi/ginkgo/v2 v2.21.0 - github.com/onsi/gomega v1.35.1 + github.com/onsi/ginkgo/v2 v2.22.0 + github.com/onsi/gomega v1.36.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/stretchr/testify v1.9.0 diff --git a/go.sum b/go.sum index edf4e3250..51e5583b2 100644 --- a/go.sum +++ b/go.sum @@ -291,8 +291,12 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/pkg/splunk/enterprise/searchheadcluster.go b/pkg/splunk/enterprise/searchheadcluster.go index 1c514e829..8c31a3d07 100644 --- a/pkg/splunk/enterprise/searchheadcluster.go +++ b/pkg/splunk/enterprise/searchheadcluster.go @@ -651,6 +651,21 @@ func getSearchHeadStatefulSet(ctx context.Context, client splcommon.ControllerCl return ss, nil } +// CSPL-3652 Configure deployer resources if configured +// Use default otherwise +func setDeployerResources(cr *enterpriseApi.SearchHeadCluster, podTemplate *corev1.PodTemplateSpec) { + depRes := cr.Spec.DeployerResourceSpec + for i := range podTemplate.Spec.Containers { + if len(depRes.Requests) != 0 { + podTemplate.Spec.Containers[i].Resources.Requests = cr.Spec.DeployerResourceSpec.Requests + } + + if len(depRes.Limits) != 0 { + podTemplate.Spec.Containers[i].Resources.Limits = cr.Spec.DeployerResourceSpec.Limits + } + } +} + // getDeployerStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise license manager. 
func getDeployerStatefulSet(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.SearchHeadCluster) (*appsv1.StatefulSet, error) { ss, err := getSplunkStatefulSet(ctx, client, cr, &cr.Spec.CommonSplunkSpec, SplunkDeployer, 1, getSearchHeadExtraEnv(cr, cr.Spec.Replicas)) @@ -658,6 +673,9 @@ func getDeployerStatefulSet(ctx context.Context, client splcommon.ControllerClie return ss, err } + // CSPL-3562 - Set deployer resources if configured + setDeployerResources(cr, &ss.Spec.Template) + // Setup App framework staging volume for apps setupAppsStagingVolume(ctx, client, cr, &ss.Spec.Template, &cr.Spec.AppFrameworkConfig) diff --git a/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go b/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go index a86a58322..facbb8c85 100644 --- a/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go +++ b/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go @@ -158,6 +158,67 @@ var _ = Describe("Crcrud test for SVA C3", func() { }) }) + Context("Search Head Cluster", func() { + It("managercrcrud, smoke, c3: can deploy Search Head Cluster with Deployer resource spec configured", func() { + shcName := fmt.Sprintf("%s-shc", deployment.GetName()) + _, err := deployment.DeploySearchHeadCluster(ctx, shcName, "", "", "", "") + if err != nil { + Expect(err).To(Succeed(), "Unable to deploy Search Head Cluster", "Shc", shcName) + } + + // Verify CPU limits on Search Heads and deployer before updating CR + searchHeadCount := 3 + for i := 0; i < searchHeadCount; i++ { + SearchHeadPodName := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), i) + testenv.VerifyCPULimits(deployment, testcaseEnvInst.GetName(), SearchHeadPodName, defaultCPULimits) + } + + DeployerPodName := fmt.Sprintf(testenv.DeployerPod, deployment.GetName()) + testenv.VerifyCPULimits(deployment, testcaseEnvInst.GetName(), DeployerPodName, defaultCPULimits) + + shc := &enterpriseApi.SearchHeadCluster{} + err = deployment.GetInstance(ctx, shcName, shc) + Expect(err).To(Succeed(), "Unable to fetch Search Head Cluster deployment") + + // Assign new resources for deployer pod only + newCPULimits = "4" + newCPURequests := "2" + newMemoryLimits := "14Gi" + newMemoryRequests := "12Gi" + + depResSpec := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse(newCPURequests), + "memory": resource.MustParse(newMemoryRequests), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse(newCPULimits), + "memory": resource.MustParse(newMemoryLimits), + }, + } + shc.Spec.DeployerResourceSpec = depResSpec + + err = deployment.UpdateCR(ctx, shc) + Expect(err).To(Succeed(), "Unable to deploy Search Head Cluster with updated CR") + + // Verify Search Head Cluster is updating + testenv.VerifySearchHeadClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseUpdating) + + // Verify Search Head go to ready state + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify CPU limits on Search Heads - Should be same as before + searchHeadCount = 3 + for i := 0; i < searchHeadCount; i++ { + SearchHeadPodName := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), i) + testenv.VerifyCPULimits(deployment, testcaseEnvInst.GetName(), SearchHeadPodName, defaultCPULimits) + } + + // Verify modified deployer spec + testenv.VerifyResourceConstraints(deployment, testcaseEnvInst.GetName(), DeployerPodName, depResSpec) + }) + }) + Context("Clustered deployment (C3 - clustered indexer, search 
head cluster)", func() { It("managercrcrud, integration, c3: can verify IDXC, CM and SHC PVCs are correctly deleted after the CRs deletion", func() { diff --git a/test/testenv/verificationutils.go b/test/testenv/verificationutils.go index d2fe8e802..507501941 100644 --- a/test/testenv/verificationutils.go +++ b/test/testenv/verificationutils.go @@ -25,6 +25,7 @@ import ( "time" gomega "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -41,6 +42,10 @@ type PodDetailsStruct struct { CPU string `json:"cpu"` Memory string `json:"memory"` } `json:"limits"` + Requests struct { + CPU string `json:"cpu"` + Memory string `json:"memory"` + } `json:"requests"` } `json:"resources"` } ServiceAccount string `json:"serviceAccount"` @@ -641,6 +646,48 @@ func VerifyCPULimits(deployment *Deployment, ns string, podName string, expected }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true)) } +// VerifyResourceConstraints verifies value of CPU limits is as expected +func VerifyResourceConstraints(deployment *Deployment, ns string, podName string, res corev1.ResourceRequirements) { + gomega.Eventually(func() bool { + output, err := exec.Command("kubectl", "get", "pods", "-n", ns, podName, "-o", "json").Output() + if err != nil { + cmd := fmt.Sprintf("kubectl get pods -n %s %s -o json", ns, podName) + logf.Log.Error(err, "Failed to execute command", "command", cmd) + return false + } + restResponse := PodDetailsStruct{} + err = json.Unmarshal([]byte(output), &restResponse) + if err != nil { + logf.Log.Error(err, "Failed to parse JSON") + return false + } + result := false + + for i := 0; i < len(restResponse.Spec.Containers); i++ { + if strings.Contains(restResponse.Spec.Containers[i].Resources.Limits.CPU, res.Limits.Cpu().String()) { + result = true + logf.Log.Info("Verifying CPU limits: ", "POD", podName, "FOUND", restResponse.Spec.Containers[0].Resources.Limits.CPU, "EXPECTED", res.Limits.Cpu().String()) + } + + if strings.Contains(restResponse.Spec.Containers[i].Resources.Limits.Memory, res.Limits.Memory().String()) { + result = true + logf.Log.Info("Verifying Memory limits: ", "POD", podName, "FOUND", restResponse.Spec.Containers[i].Resources.Limits.Memory, "EXPECTED", res.Limits.Memory().String()) + } + + if strings.Contains(restResponse.Spec.Containers[i].Resources.Requests.CPU, res.Requests.Cpu().String()) { + result = true + logf.Log.Info("Verifying CPU limits: ", "POD", podName, "FOUND", restResponse.Spec.Containers[i].Resources.Requests.CPU, "EXPECTED", res.Requests.Cpu().String()) + } + + if strings.Contains(restResponse.Spec.Containers[i].Resources.Requests.Memory, res.Requests.Memory().String()) { + result = true + logf.Log.Info("Verifying CPU limits: ", "POD", podName, "FOUND", restResponse.Spec.Containers[i].Resources.Requests.Memory, "EXPECTED", res.Requests.Memory().String()) + } + } + return result + }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true)) +} + // VerifyClusterManagerPhase verify phase of cluster manager func VerifyClusterManagerPhase(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, phase enterpriseApi.Phase) { cm := &enterpriseApi.ClusterManager{} From 9fc919f1fae29112bf902993b20aeff326a8d3ae Mon Sep 17 00:00:00 2001 From: Arjun Kondur Date: Wed, 18 Dec 2024 09:10:28 -0600 Subject: [PATCH 2/6] Integration testing enabled --- .github/workflows/int-test-workflow.yml | 1 + 
.../manager_custom_resource_crud_c3_test.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index cb0dd7d9f..64799f8e5 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -5,6 +5,7 @@ on: - develop - main - feature** + - CSPL_3256 jobs: build-operator-image: runs-on: ubuntu-latest diff --git a/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go b/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go index facbb8c85..9eef5ef7c 100644 --- a/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go +++ b/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go @@ -159,7 +159,7 @@ var _ = Describe("Crcrud test for SVA C3", func() { }) Context("Search Head Cluster", func() { - It("managercrcrud, smoke, c3: can deploy Search Head Cluster with Deployer resource spec configured", func() { + It("managercrcrud, integration, c3: can deploy Search Head Cluster with Deployer resource spec configured", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) _, err := deployment.DeploySearchHeadCluster(ctx, shcName, "", "", "", "") if err != nil { From 79d28de55c44420b06ed1b509a9db896966fce6b Mon Sep 17 00:00:00 2001 From: Arjun Kondur Date: Wed, 18 Dec 2024 14:27:45 -0600 Subject: [PATCH 3/6] Remove SHC updating phase check --- .github/workflows/int-test-workflow.yml | 18 +++++++++--------- .../manager_custom_resource_crud_c3_test.go | 3 --- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 64799f8e5..c7c12e151 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -56,16 +56,16 @@ jobs: matrix: test: [ - appframeworksS1, - managerappframeworkc3, - managerappframeworkm4, - managersecret, - managersmartstore, - managermc1, - managermc2, + #appframeworksS1, + #managerappframeworkc3, + #managerappframeworkm4, + #managersecret, + #managersmartstore, + #managermc1, + #managermc2, managercrcrud, - licensemanager, - managerdeletecr, + #licensemanager, + #managerdeletecr, ] runs-on: ubuntu-latest needs: build-operator-image diff --git a/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go b/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go index 9eef5ef7c..10b590913 100644 --- a/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go +++ b/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go @@ -201,9 +201,6 @@ var _ = Describe("Crcrud test for SVA C3", func() { err = deployment.UpdateCR(ctx, shc) Expect(err).To(Succeed(), "Unable to deploy Search Head Cluster with updated CR") - // Verify Search Head Cluster is updating - testenv.VerifySearchHeadClusterPhase(ctx, deployment, testcaseEnvInst, enterpriseApi.PhaseUpdating) - // Verify Search Head go to ready state testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) From 75ded11b7b14686cd02465d2cd23e049f7ba0128 Mon Sep 17 00:00:00 2001 From: Arjun Kondur Date: Wed, 18 Dec 2024 19:03:29 -0600 Subject: [PATCH 4/6] Fix int test bug --- .github/workflows/int-test-workflow.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 00f51ce2e..8f0b39a55 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -155,9 +155,6 @@ jobs: - 
name: Login to Amazon ECR id: login-ecr uses: aws-actions/amazon-ecr-login@v1 - - name: Pull Splunk Operator Image Locally and change name - run: | - docker tag ${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA ${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA - name: Tag and Push Splunk Enterprise Image to ECR run: | docker tag ${{ env.SPLUNK_ENTERPRISE_IMAGE }} ${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_ENTERPRISE_IMAGE }} From 502397de2f13f0b59f306c3252214013654ca2f4 Mon Sep 17 00:00:00 2001 From: Arjun Kondur Date: Thu, 19 Dec 2024 09:02:34 -0600 Subject: [PATCH 5/6] Add a comment, rename TC. --- .../manager_custom_resource_crud_c3_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go b/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go index 10b590913..826dc7dd9 100644 --- a/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go +++ b/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go @@ -159,7 +159,8 @@ var _ = Describe("Crcrud test for SVA C3", func() { })
 Context("Search Head Cluster", func() {
- It("managercrcrud, integration, c3: can deploy Search Head Cluster with Deployer resource spec configured", func() {
+ // CSPL-3256 - Adding the SHC-only test case under c3 as IDXC is irrelevant for this test case
+ It("managercrcrud, integration, shc: can deploy Search Head Cluster with Deployer resource spec configured", func() {
 shcName := fmt.Sprintf("%s-shc", deployment.GetName()) _, err := deployment.DeploySearchHeadCluster(ctx, shcName, "", "", "", "") if err != nil { From 478244ad053623129cb46b0b9670cf95152b1532 Mon Sep 17 00:00:00 2001 From: Arjun Kondur Date: Thu, 19 Dec 2024 10:07:14 -0600 Subject: [PATCH 6/6] Add UT and return error if not deployer sts --- pkg/splunk/enterprise/searchheadcluster.go | 15 ++++++- .../enterprise/searchheadcluster_test.go | 42 +++++++++++++++++++ 2 files changed, 55 insertions(+), 2 deletions(-) diff --git a/pkg/splunk/enterprise/searchheadcluster.go b/pkg/splunk/enterprise/searchheadcluster.go index 8c31a3d07..f2cb9d8c3 100644 --- a/pkg/splunk/enterprise/searchheadcluster.go +++ b/pkg/splunk/enterprise/searchheadcluster.go @@ -17,6 +17,7 @@ package enterprise import ( "context" + "errors" "fmt" "reflect" "strings" @@ -653,7 +654,12 @@ func getSearchHeadStatefulSet(ctx context.Context, client splcommon.ControllerCl // CSPL-3652 Configure deployer resources if configured // Use default otherwise
-func setDeployerResources(cr *enterpriseApi.SearchHeadCluster, podTemplate *corev1.PodTemplateSpec) {
+// Make sure to set the resources ONLY for the deployer
+func setDeployerResources(cr *enterpriseApi.SearchHeadCluster, podTemplate *corev1.PodTemplateSpec) error {
+ // Return early if this is not the deployer pod template
+ if !strings.Contains(podTemplate.Labels["app.kubernetes.io/name"], "deployer") {
+ return errors.New("not a deployer, skipping setting resources")
+ }
 depRes := cr.Spec.DeployerResourceSpec for i := range podTemplate.Spec.Containers { if len(depRes.Requests) != 0 { podTemplate.Spec.Containers[i].Resources.Requests = cr.Spec.DeployerResourceSpec.Requests @@ -664,6 +670,8 @@ func setDeployerResources(cr *enterpriseApi.SearchHeadCluster, podTemplate *core podTemplate.Spec.Containers[i].Resources.Limits = cr.Spec.DeployerResourceSpec.Limits } } + + return nil } // getDeployerStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise license manager. 
@@ -674,7 +682,10 @@ func getDeployerStatefulSet(ctx context.Context, client splcommon.ControllerClie } // CSPL-3562 - Set deployer resources if configured - setDeployerResources(cr, &ss.Spec.Template) + err = setDeployerResources(cr, &ss.Spec.Template) + if err != nil { + return ss, err + } // Setup App framework staging volume for apps setupAppsStagingVolume(ctx, client, cr, &ss.Spec.Template, &cr.Spec.AppFrameworkConfig) diff --git a/pkg/splunk/enterprise/searchheadcluster_test.go b/pkg/splunk/enterprise/searchheadcluster_test.go index 0ce729aa3..d63f04b58 100644 --- a/pkg/splunk/enterprise/searchheadcluster_test.go +++ b/pkg/splunk/enterprise/searchheadcluster_test.go @@ -22,6 +22,7 @@ import ( "net/http" "os" "path/filepath" + "reflect" "runtime/debug" "strings" "testing" @@ -31,6 +32,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -1892,3 +1894,43 @@ func TestSearchHeadClusterWithReadyState(t *testing.T) { t.Errorf("Unexpected error while running reconciliation for search head cluster with app framework. Error=%v", err) } } + +func TestSetDeployerResources(t *testing.T) { + ctx := context.TODO() + client := spltest.NewMockClient() + depResSpec := corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("14Gi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("7Gi"), + }, + } + + shc := enterpriseApi.SearchHeadCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Spec: enterpriseApi.SearchHeadClusterSpec{ + DeployerResourceSpec: depResSpec, + }, + } + + // Get deployer STS and set resources + depSts, err := getSplunkStatefulSet(ctx, client, &shc, &shc.Spec.CommonSplunkSpec, SplunkDeployer, 1, getSearchHeadExtraEnv(&shc, shc.Spec.Replicas)) + if err != nil { + t.Errorf("Failed to get deployer statefulset due to error=%s", err) + } + setDeployerResources(&shc, &depSts.Spec.Template) + if !reflect.DeepEqual(depResSpec.Limits, depSts.Spec.Template.Spec.Containers[0].Resources.Limits) { + t.Errorf("Failed to set deployer resources properly, limits are off") + } + + // Verify deployer resources are set properly + if !reflect.DeepEqual(depResSpec.Requests, depSts.Spec.Template.Spec.Containers[0].Resources.Requests) { + t.Errorf("Failed to set deployer resources properly, requests are off") + } +}
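
Usage note: with this series, spec.deployerResourceSpec is applied only to the deployer pod built by getDeployerStatefulSet; the search head pods keep the resources derived from the common Splunk spec, which is what the integration test above verifies. A minimal SearchHeadCluster manifest exercising the new field might look like the sketch below; the metadata name and namespace are illustrative, and the resource values simply mirror those used in the integration test.

apiVersion: enterprise.splunk.com/v4
kind: SearchHeadCluster
metadata:
  name: example-shc            # illustrative name
  namespace: splunk-operator   # illustrative namespace
spec:
  replicas: 3
  deployerResourceSpec:        # new field introduced by this patch series
    requests:
      cpu: "2"
      memory: 12Gi
    limits:
      cpu: "4"
      memory: 14Gi

If deployerResourceSpec is omitted, the deployer keeps its default resources, since setDeployerResources only overrides requests or limits when the corresponding list is non-empty.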