Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

CSPL-3256 - Support to configure deployer resource spec in SHC CRD #1419

Open
wants to merge 8 commits into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 10 additions & 9 deletions .github/workflows/int-test-workflow.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ on:
- develop
- main
- feature**
- CSPL_3256
akondur marked this conversation as resolved.
Show resolved Hide resolved
jobs:
build-operator-image:
runs-on: ubuntu-latest
Expand Down Expand Up @@ -51,16 +52,16 @@ jobs:
matrix:
test:
[
appframeworksS1,
managerappframeworkc3,
managerappframeworkm4,
managersecret,
managersmartstore,
managermc1,
managermc2,
#appframeworksS1,
#managerappframeworkc3,
#managerappframeworkm4,
#managersecret,
#managersmartstore,
#managermc1,
#managermc2,
managercrcrud,
licensemanager,
managerdeletecr,
#licensemanager,
#managerdeletecr,
]
runs-on: ubuntu-latest
needs: build-operator-image
Expand Down
3 changes: 3 additions & 0 deletions api/v4/searchheadcluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,9 @@ type SearchHeadClusterSpec struct {

// Splunk Enterprise App repository. Specifies remote App location and scope for Splunk App management
AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"`

// Splunk Deployer resource spec
DeployerResourceSpec corev1.ResourceRequirements `json:"deployerResourceSpec,omitempty"`
}

// SearchHeadClusterMemberStatus is used to track the status of each search head cluster member
Expand Down
1 change: 1 addition & 0 deletions api/v4/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

53 changes: 53 additions & 0 deletions config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5295,6 +5295,59 @@ spec:
will be installed on the CM, standalone, search head deployer
or license manager instance.
type: string
deployerResourceSpec:
description: Splunk Deployer resource spec
properties:
claims:
description: |-
Claims lists the names of resources, defined in spec.resourceClaims,
that are used by this container.

This is an alpha field and requires enabling the
DynamicResourceAllocation feature gate.

This field is immutable.
items:
description: ResourceClaim references one entry in PodSpec.ResourceClaims.
properties:
name:
description: |-
Name must match the name of one entry in pod.spec.resourceClaims of
the Pod where this field is used. It makes that resource available
inside a container.
type: string
required:
- name
type: object
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
limits:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: |-
Limits describes the maximum amount of compute resources allowed.
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
type: object
requests:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: |-
Requests describes the minimum amount of compute resources required.
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
otherwise to an implementation-defined value.
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
type: object
type: object
etcVolumeStorageConfig:
description: Storage configuration for /opt/splunk/etc volume
properties:
Expand Down
4 changes: 2 additions & 2 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,8 @@ require (
github.com/google/uuid v1.6.0
github.com/joho/godotenv v1.5.1
github.com/minio/minio-go/v7 v7.0.16
github.com/onsi/ginkgo/v2 v2.21.0
github.com/onsi/gomega v1.35.1
github.com/onsi/ginkgo/v2 v2.22.0
github.com/onsi/gomega v1.36.1
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.14.0
github.com/stretchr/testify v1.9.0
Expand Down
4 changes: 4 additions & 0 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -291,8 +291,12 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
Expand Down
29 changes: 29 additions & 0 deletions pkg/splunk/enterprise/searchheadcluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ package enterprise

import (
"context"
"errors"
"fmt"
"reflect"
"strings"
Expand Down Expand Up @@ -651,13 +652,41 @@ func getSearchHeadStatefulSet(ctx context.Context, client splcommon.ControllerCl
return ss, nil
}

// CSPL-3256 - Configure deployer resources if configured in the CR,
// use the defaults inherited from the common Splunk spec otherwise.
// Make sure to set the resources ONLY for the deployer.
//
// Returns an error when the pod template does not belong to a deployer,
// which callers may treat as a skip signal.
func setDeployerResources(cr *enterpriseApi.SearchHeadCluster, podTemplate *corev1.PodTemplateSpec) error {
	// Break out if this is not a deployer. strings.Contains(s, substr)
	// checks whether the pod's name label contains "deployer" — the
	// previous argument order only matched a label that was exactly
	// a substring of "deployer".
	if !strings.Contains(podTemplate.Labels["app.kubernetes.io/name"], "deployer") {
		return errors.New("not a deployer, skipping setting resources")
	}
	depRes := cr.Spec.DeployerResourceSpec
	for i := range podTemplate.Spec.Containers {
		// Only override values the user actually configured, so an empty
		// requests or limits map keeps the existing container defaults.
		if len(depRes.Requests) != 0 {
			podTemplate.Spec.Containers[i].Resources.Requests = depRes.Requests
		}

		if len(depRes.Limits) != 0 {
			podTemplate.Spec.Containers[i].Resources.Limits = depRes.Limits
		}
	}

	return nil
}

// getDeployerStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise license manager.
func getDeployerStatefulSet(ctx context.Context, client splcommon.ControllerClient, cr *enterpriseApi.SearchHeadCluster) (*appsv1.StatefulSet, error) {
ss, err := getSplunkStatefulSet(ctx, client, cr, &cr.Spec.CommonSplunkSpec, SplunkDeployer, 1, getSearchHeadExtraEnv(cr, cr.Spec.Replicas))
if err != nil {
return ss, err
}

// CSPL-3562 - Set deployer resources if configured
err = setDeployerResources(cr, &ss.Spec.Template)
if err != nil {
return ss, err
}

// Setup App framework staging volume for apps
setupAppsStagingVolume(ctx, client, cr, &ss.Spec.Template, &cr.Spec.AppFrameworkConfig)

Expand Down
42 changes: 42 additions & 0 deletions pkg/splunk/enterprise/searchheadcluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (
"net/http"
"os"
"path/filepath"
"reflect"
"runtime/debug"
"strings"
"testing"
Expand All @@ -31,6 +32,7 @@ import (

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
Expand Down Expand Up @@ -1892,3 +1894,43 @@ func TestSearchHeadClusterWithReadyState(t *testing.T) {
t.Errorf("Unexpected error while running reconciliation for search head cluster with app framework. Error=%v", err)
}
}

// TestSetDeployerResources verifies that setDeployerResources copies the
// requests and limits from the CR's DeployerResourceSpec onto the containers
// of the deployer statefulset pod template.
func TestSetDeployerResources(t *testing.T) {
	ctx := context.TODO()
	client := spltest.NewMockClient()

	// Resource spec the deployer containers are expected to end up with.
	depResSpec := corev1.ResourceRequirements{
		Limits: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("4"),
			corev1.ResourceMemory: resource.MustParse("14Gi"),
		},
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("2"),
			corev1.ResourceMemory: resource.MustParse("7Gi"),
		},
	}

	shc := enterpriseApi.SearchHeadCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "stack1",
			Namespace: "test",
		},
		Spec: enterpriseApi.SearchHeadClusterSpec{
			DeployerResourceSpec: depResSpec,
		},
	}

	// Get deployer STS and set resources
	depSts, err := getSplunkStatefulSet(ctx, client, &shc, &shc.Spec.CommonSplunkSpec, SplunkDeployer, 1, getSearchHeadExtraEnv(&shc, shc.Spec.Replicas))
	if err != nil {
		t.Errorf("Failed to get deployer statefulset due to error=%s", err)
	}
	// The returned error was previously discarded; surface it so a
	// non-deployer template or future failure mode fails the test.
	if err := setDeployerResources(&shc, &depSts.Spec.Template); err != nil {
		t.Errorf("Failed to set deployer resources due to error=%s", err)
	}

	// Verify deployer limits are set properly
	if !reflect.DeepEqual(depResSpec.Limits, depSts.Spec.Template.Spec.Containers[0].Resources.Limits) {
		t.Errorf("Failed to set deployer resources properly, limits are off")
	}

	// Verify deployer requests are set properly
	if !reflect.DeepEqual(depResSpec.Requests, depSts.Spec.Template.Spec.Containers[0].Resources.Requests) {
		t.Errorf("Failed to set deployer resources properly, requests are off")
	}
}
59 changes: 59 additions & 0 deletions test/custom_resource_crud/manager_custom_resource_crud_c3_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,65 @@ var _ = Describe("Crcrud test for SVA C3", func() {
})
})

// CSPL-3256: verify that only the deployer pod picks up the resource spec
// configured on the SearchHeadCluster CR, while search head pods keep defaults.
Context("Search Head Cluster", func() {
// CSPL-3256 - Adding the SHC only test case under c3 as IDXC is irrelevant for this test case
It("managercrcrud, integration, shc: can deploy Search Head Cluster with Deployer resource spec configured", func() {
shcName := fmt.Sprintf("%s-shc", deployment.GetName())
_, err := deployment.DeploySearchHeadCluster(ctx, shcName, "", "", "", "")
if err != nil {
Expect(err).To(Succeed(), "Unable to deploy Search Head Cluster", "Shc", shcName)
}

// Verify CPU limits on Search Heads and deployer before updating CR
searchHeadCount := 3
for i := 0; i < searchHeadCount; i++ {
SearchHeadPodName := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), i)
testenv.VerifyCPULimits(deployment, testcaseEnvInst.GetName(), SearchHeadPodName, defaultCPULimits)
}

DeployerPodName := fmt.Sprintf(testenv.DeployerPod, deployment.GetName())
testenv.VerifyCPULimits(deployment, testcaseEnvInst.GetName(), DeployerPodName, defaultCPULimits)

// Fetch the live CR so the update below carries the current resourceVersion.
shc := &enterpriseApi.SearchHeadCluster{}
err = deployment.GetInstance(ctx, shcName, shc)
Expect(err).To(Succeed(), "Unable to fetch Search Head Cluster deployment")

// Assign new resources for deployer pod only
newCPULimits = "4"
newCPURequests := "2"
newMemoryLimits := "14Gi"
newMemoryRequests := "12Gi"

depResSpec := corev1.ResourceRequirements{
Requests: corev1.ResourceList{
"cpu":    resource.MustParse(newCPURequests),
"memory": resource.MustParse(newMemoryRequests),
},
Limits: corev1.ResourceList{
"cpu":    resource.MustParse(newCPULimits),
"memory": resource.MustParse(newMemoryLimits),
},
}
shc.Spec.DeployerResourceSpec = depResSpec

err = deployment.UpdateCR(ctx, shc)
Expect(err).To(Succeed(), "Unable to deploy Search Head Cluster with updated CR")

// Verify Search Head go to ready state
testenv.SearchHeadClusterReady(ctx, deployment, testcaseEnvInst)

// Verify CPU limits on Search Heads - Should be same as before,
// since the new spec applies only to the deployer pod.
searchHeadCount = 3
for i := 0; i < searchHeadCount; i++ {
SearchHeadPodName := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), i)
testenv.VerifyCPULimits(deployment, testcaseEnvInst.GetName(), SearchHeadPodName, defaultCPULimits)
}

// Verify modified deployer spec
testenv.VerifyResourceConstraints(deployment, testcaseEnvInst.GetName(), DeployerPodName, depResSpec)
})
})

Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() {
It("managercrcrud, integration, c3: can verify IDXC, CM and SHC PVCs are correctly deleted after the CRs deletion", func() {

Expand Down
47 changes: 47 additions & 0 deletions test/testenv/verificationutils.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ import (
"time"

gomega "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"

enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
enterpriseApi "github.com/splunk/splunk-operator/api/v4"
Expand All @@ -41,6 +42,10 @@ type PodDetailsStruct struct {
CPU string `json:"cpu"`
Memory string `json:"memory"`
} `json:"limits"`
Requests struct {
CPU string `json:"cpu"`
Memory string `json:"memory"`
} `json:"requests"`
} `json:"resources"`
}
ServiceAccount string `json:"serviceAccount"`
Expand Down Expand Up @@ -641,6 +646,48 @@ func VerifyCPULimits(deployment *Deployment, ns string, podName string, expected
}, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true))
}

// VerifyResourceConstraints verifies that the CPU and memory limits AND
// requests of every container in the named pod match the expected
// ResourceRequirements. All four values must match for the check to pass;
// the previous implementation passed if ANY single value matched.
func VerifyResourceConstraints(deployment *Deployment, ns string, podName string, res corev1.ResourceRequirements) {
	gomega.Eventually(func() bool {
		output, err := exec.Command("kubectl", "get", "pods", "-n", ns, podName, "-o", "json").Output()
		if err != nil {
			cmd := fmt.Sprintf("kubectl get pods -n %s %s -o json", ns, podName)
			logf.Log.Error(err, "Failed to execute command", "command", cmd)
			return false
		}
		restResponse := PodDetailsStruct{}
		err = json.Unmarshal([]byte(output), &restResponse)
		if err != nil {
			logf.Log.Error(err, "Failed to parse JSON")
			return false
		}

		// A pod with no containers yet (still being created) is not verified.
		if len(restResponse.Spec.Containers) == 0 {
			return false
		}

		for i := 0; i < len(restResponse.Spec.Containers); i++ {
			limits := restResponse.Spec.Containers[i].Resources.Limits
			requests := restResponse.Spec.Containers[i].Resources.Requests

			if !strings.Contains(limits.CPU, res.Limits.Cpu().String()) {
				logf.Log.Info("CPU limits mismatch", "POD", podName, "FOUND", limits.CPU, "EXPECTED", res.Limits.Cpu().String())
				return false
			}

			if !strings.Contains(limits.Memory, res.Limits.Memory().String()) {
				logf.Log.Info("Memory limits mismatch", "POD", podName, "FOUND", limits.Memory, "EXPECTED", res.Limits.Memory().String())
				return false
			}

			if !strings.Contains(requests.CPU, res.Requests.Cpu().String()) {
				logf.Log.Info("CPU requests mismatch", "POD", podName, "FOUND", requests.CPU, "EXPECTED", res.Requests.Cpu().String())
				return false
			}

			if !strings.Contains(requests.Memory, res.Requests.Memory().String()) {
				logf.Log.Info("Memory requests mismatch", "POD", podName, "FOUND", requests.Memory, "EXPECTED", res.Requests.Memory().String())
				return false
			}
		}
		return true
	}, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true))
}

// VerifyClusterManagerPhase verify phase of cluster manager
func VerifyClusterManagerPhase(ctx context.Context, deployment *Deployment, testenvInstance *TestCaseEnv, phase enterpriseApi.Phase) {
cm := &enterpriseApi.ClusterManager{}
Expand Down
Loading