diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index e1079b464..8f0b39a55 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -5,6 +5,7 @@ on: - develop - main - feature** + - CSPL_3256 jobs: build-operator-image: runs-on: ubuntu-latest @@ -51,16 +52,16 @@ jobs: matrix: test: [ - appframeworksS1, - managerappframeworkc3, - managerappframeworkm4, - managersecret, - managersmartstore, - managermc1, - managermc2, + #appframeworksS1, + #managerappframeworkc3, + #managerappframeworkm4, + #managersecret, + #managersmartstore, + #managermc1, + #managermc2, managercrcrud, - licensemanager, - managerdeletecr, + #licensemanager, + #managerdeletecr, ] runs-on: ubuntu-latest needs: build-operator-image diff --git a/api/v4/searchheadcluster_types.go b/api/v4/searchheadcluster_types.go index 83b84fa0b..312674fcf 100644 --- a/api/v4/searchheadcluster_types.go +++ b/api/v4/searchheadcluster_types.go @@ -44,6 +44,9 @@ type SearchHeadClusterSpec struct { // Splunk Enterprise App repository. Specifies remote App location and scope for Splunk App management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` + + // Splunk Deployer resource spec + DeployerResourceSpec corev1.ResourceRequirements `json:"deployerResourceSpec,omitempty"` } // SearchHeadClusterMemberStatus is used to track the status of each search head cluster member diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index 4c10f8035..cc2c5c3b6 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -872,6 +872,7 @@ func (in *SearchHeadClusterSpec) DeepCopyInto(out *SearchHeadClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + in.DeployerResourceSpec.DeepCopyInto(&out.DeployerResourceSpec) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterSpec. diff --git a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml index d4eeec141..c37ff5f0d 100644 --- a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml @@ -5295,6 +5295,59 @@ spec: will be installed on the CM, standalone, search head deployer or license manager instance. type: string + deployerResourceSpec: + description: Splunk Deployer resource spec + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object etcVolumeStorageConfig: description: Storage configuration for /opt/splunk/etc volume properties: diff --git a/go.mod b/go.mod index 966cf2f57..8d978b82a 100644 --- a/go.mod +++ b/go.mod @@ -13,8 +13,8 @@ require ( github.com/google/uuid v1.6.0 github.com/joho/godotenv v1.5.1 github.com/minio/minio-go/v7 v7.0.16 - github.com/onsi/ginkgo/v2 v2.21.0 - github.com/onsi/gomega v1.35.1 + github.com/onsi/ginkgo/v2 v2.22.0 + github.com/onsi/gomega v1.36.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/stretchr/testify v1.9.0 diff --git a/go.sum b/go.sum index edf4e3250..51e5583b2 100644 --- a/go.sum +++ b/go.sum @@ -291,8 +291,12 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/pkg/splunk/enterprise/searchheadcluster.go b/pkg/splunk/enterprise/searchheadcluster.go index 1c514e829..f2cb9d8c3 100644 --- a/pkg/splunk/enterprise/searchheadcluster.go +++ b/pkg/splunk/enterprise/searchheadcluster.go @@ -17,6 +17,7 @@ package enterprise import ( "context" + "errors" "fmt" "reflect" "strings" @@ -651,6 +652,28 @@ func getSearchHeadStatefulSet(ctx context.Context, client splcommon.ControllerCl return ss, nil } +// CSPL-3652 Configure deployer resources if configured +// Use default otherwise +// Make sure to 
set the resources ONLY for the deployer +func setDeployerResources(cr *enterpriseApi.SearchHeadCluster, podTemplate *corev1.PodTemplateSpec) error { + // Break out if this is not a deployer + if !strings.Contains(podTemplate.Labels["app.kubernetes.io/name"], "deployer") { + return errors.New("not a deployer, skipping setting resources") + } + depRes := cr.Spec.DeployerResourceSpec + for i := range podTemplate.Spec.Containers { + if len(depRes.Requests) != 0 { + podTemplate.Spec.Containers[i].Resources.Requests = cr.Spec.DeployerResourceSpec.Requests + } + + if len(depRes.Limits) != 0 { + podTemplate.Spec.Containers[i].Resources.Limits = cr.Spec.DeployerResourceSpec.Limits + } + } + + return nil +} + // getDeployerStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise license manager. func getDeployerStatefulSet(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.SearchHeadCluster) (*appsv1.StatefulSet, error) { ss, err := getSplunkStatefulSet(ctx, client, cr, &cr.Spec.CommonSplunkSpec, SplunkDeployer, 1, getSearchHeadExtraEnv(cr, cr.Spec.Replicas)) @@ -658,6 +681,12 @@ func getDeployerStatefulSet(ctx context.Context, client splcommon.ControllerClie return ss, err } + + // CSPL-3256 - Set deployer resources if configured + err = setDeployerResources(cr, &ss.Spec.Template) + if err != nil { + return ss, err + } + // Setup App framework staging volume for apps setupAppsStagingVolume(ctx, client, cr, &ss.Spec.Template, &cr.Spec.AppFrameworkConfig) diff --git a/pkg/splunk/enterprise/searchheadcluster_test.go b/pkg/splunk/enterprise/searchheadcluster_test.go index 0ce729aa3..d63f04b58 100644 --- a/pkg/splunk/enterprise/searchheadcluster_test.go +++ b/pkg/splunk/enterprise/searchheadcluster_test.go @@ -22,6 +22,7 @@ import ( "net/http" "os" "path/filepath" + "reflect" "runtime/debug" "strings" "testing" @@ -31,6 +32,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -1892,3 +1894,43 @@ func TestSearchHeadClusterWithReadyState(t *testing.T) { t.Errorf("Unexpected error while running reconciliation for search head cluster with app framework. 
Error=%v", err) } } + +func TestSetDeployerResources(t *testing.T) { + ctx := context.TODO() + client := spltest.NewMockClient() + depResSpec := corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("14Gi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("7Gi"), + }, + } + + shc := enterpriseApi.SearchHeadCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Spec: enterpriseApi.SearchHeadClusterSpec{ + DeployerResourceSpec: depResSpec, + }, + } + + // Get deployer STS and set resources + depSts, err := getSplunkStatefulSet(ctx, client, &shc, &shc.Spec.CommonSplunkSpec, SplunkDeployer, 1, getSearchHeadExtraEnv(&shc, shc.Spec.Replicas)) + if err != nil { + t.Errorf("Failed to get deployer statefulset due to error=%s", err) + } + setDeployerResources(&shc, &depSts.Spec.Template) + if !reflect.DeepEqual(depResSpec.Limits, depSts.Spec.Template.Spec.Containers[0].Resources.Limits) { + t.Errorf("Failed to set deployer resources properly, limits are off") + } + + // Verify deployer resources are set properly + if !reflect.DeepEqual(depResSpec.Requests, depSts.Spec.Template.Spec.Containers[0].Resources.Requests) { + t.Errorf("Failed to set deployer resources properly, requests are off") + } +} diff --git a/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go b/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go index a86a58322..826dc7dd9 100644 --- a/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go +++ b/test/custom_resource_crud/manager_custom_resource_crud_c3_test.go @@ -158,6 +158,65 @@ var _ = Describe("Crcrud test for SVA C3", func() { }) }) + Context("Search Head Cluster", func() { + // CSPL-3256 - Adding the SHC only test case under c3 as IDXC is irrelevant for this test case + It("managercrcrud, integration, shc: can deploy Search Head Cluster with Deployer resource spec configured", func() { + shcName := fmt.Sprintf("%s-shc", deployment.GetName()) + _, err := deployment.DeploySearchHeadCluster(ctx, shcName, "", "", "", "") + if err != nil { + Expect(err).To(Succeed(), "Unable to deploy Search Head Cluster", "Shc", shcName) + } + + // Verify CPU limits on Search Heads and deployer before updating CR + searchHeadCount := 3 + for i := 0; i < searchHeadCount; i++ { + SearchHeadPodName := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), i) + testenv.VerifyCPULimits(deployment, testcaseEnvInst.GetName(), SearchHeadPodName, defaultCPULimits) + } + + DeployerPodName := fmt.Sprintf(testenv.DeployerPod, deployment.GetName()) + testenv.VerifyCPULimits(deployment, testcaseEnvInst.GetName(), DeployerPodName, defaultCPULimits) + + shc := &enterpriseApi.SearchHeadCluster{} + err = deployment.GetInstance(ctx, shcName, shc) + Expect(err).To(Succeed(), "Unable to fetch Search Head Cluster deployment") + + // Assign new resources for deployer pod only + newCPULimits = "4" + newCPURequests := "2" + newMemoryLimits := "14Gi" + newMemoryRequests := "12Gi" + + depResSpec := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "cpu": resource.MustParse(newCPURequests), + "memory": resource.MustParse(newMemoryRequests), + }, + Limits: corev1.ResourceList{ + "cpu": resource.MustParse(newCPULimits), + "memory": resource.MustParse(newMemoryLimits), + }, + } + shc.Spec.DeployerResourceSpec = depResSpec + + err = deployment.UpdateCR(ctx, shc) + 
Expect(err).To(Succeed(), "Unable to deploy Search Head Cluster with updated CR") + + // Verify Search Head Cluster goes to ready state + testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst) + + // Verify CPU limits on Search Heads - Should be same as before + searchHeadCount = 3 + for i := 0; i < searchHeadCount; i++ { + SearchHeadPodName := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), i) + testenv.VerifyCPULimits(deployment, testcaseEnvInst.GetName(), SearchHeadPodName, defaultCPULimits) + } + + // Verify modified deployer spec + testenv.VerifyResourceConstraints(deployment, testcaseEnvInst.GetName(), DeployerPodName, depResSpec) + }) + }) + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { It("managercrcrud, integration, c3: can verify IDXC, CM and SHC PVCs are correctly deleted after the CRs deletion", func() { diff --git a/test/testenv/verificationutils.go b/test/testenv/verificationutils.go index ee1b375e6..7a039833a 100644 --- a/test/testenv/verificationutils.go +++ b/test/testenv/verificationutils.go @@ -25,6 +25,7 @@ import ( "time" gomega "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" @@ -41,6 +42,10 @@ type PodDetailsStruct struct { CPU string `json:"cpu"` Memory string `json:"memory"` } `json:"limits"` + Requests struct { + CPU string `json:"cpu"` + Memory string `json:"memory"` + } `json:"requests"` } `json:"resources"` } ServiceAccount string `json:"serviceAccount"` @@ -641,6 +646,48 @@ func VerifyCPULimits(deployment *Deployment, ns string, podName string, expected }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true)) } +// VerifyResourceConstraints verifies that the pod's CPU and memory limits and requests match the expected resource requirements +func VerifyResourceConstraints(deployment *Deployment, ns string, podName string, res corev1.ResourceRequirements) { + gomega.Eventually(func() bool { + output, err := exec.Command("kubectl", "get", "pods", "-n", ns, podName, "-o", "json").Output() + if err != nil { + cmd := fmt.Sprintf("kubectl get pods -n %s %s -o json", ns, podName) + logf.Log.Error(err, "Failed to execute command", "command", cmd) + return false + } + restResponse := PodDetailsStruct{} + err = json.Unmarshal([]byte(output), &restResponse) + if err != nil { + logf.Log.Error(err, "Failed to parse JSON") + return false + } + result := false + + for i := 0; i < len(restResponse.Spec.Containers); i++ { + if strings.Contains(restResponse.Spec.Containers[i].Resources.Limits.CPU, res.Limits.Cpu().String()) { + result = true + logf.Log.Info("Verifying CPU limits: ", "POD", podName, "FOUND", restResponse.Spec.Containers[i].Resources.Limits.CPU, "EXPECTED", res.Limits.Cpu().String()) + } + + if strings.Contains(restResponse.Spec.Containers[i].Resources.Limits.Memory, res.Limits.Memory().String()) { + result = true + logf.Log.Info("Verifying Memory limits: ", "POD", podName, "FOUND", restResponse.Spec.Containers[i].Resources.Limits.Memory, "EXPECTED", res.Limits.Memory().String()) + } + + if strings.Contains(restResponse.Spec.Containers[i].Resources.Requests.CPU, res.Requests.Cpu().String()) { + result = true + logf.Log.Info("Verifying CPU requests: ", "POD", podName, "FOUND", restResponse.Spec.Containers[i].Resources.Requests.CPU, "EXPECTED", res.Requests.Cpu().String()) + } + + if strings.Contains(restResponse.Spec.Containers[i].Resources.Requests.Memory, res.Requests.Memory().String()) { + result = true + logf.Log.Info("Verifying Memory 
limits: ", "POD", podName, "FOUND", restResponse.Spec.Containers[i].Resources.Requests.Memory, "EXPECTED", res.Requests.Memory().String()) + } + } + return result + }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true)) +} + // VerifyClusterManagerPhase verify phase of cluster manager func VerifyClusterManagerPhase(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, phase enterpriseApi.Phase) { cm := &enterpriseApi.ClusterManager{}