Add test for recomputing similar nodegroups
rrangith committed Dec 16, 2024
1 parent de5d64f commit 66f7b3e
Showing 2 changed files with 110 additions and 74 deletions.
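At a glance: NewMockReportingStrategy (fixing the old NewMockRepotingStrategy typo) in cluster-autoscaler/core/test/common.go gains a third parameter that lets a test pin the SimilarNodeGroups returned with the chosen expansion option, and TestScaleUpBalanceGroups becomes table-driven to add a case where that list starts out empty. A minimal sketch of the new knob, using only names that appear in the diff below (existing call sites pass nil to keep the old behavior):

	// Make the mock expander pick "grow ng2 by 2" but report an empty
	// similar-node-groups list, so scale-up has to recompute similarity
	// itself before balancing.
	emptySimilarNodeGroups := []string{}
	expander := NewMockReportingStrategy(t, &GroupSizeChange{GroupName: "ng2", SizeChange: 2}, &emptySimilarNodeGroups)
	context.ExpanderStrategy = expander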
152 changes: 88 additions & 64 deletions cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go
@@ -1060,7 +1060,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestResult {
 	}
 	orchestrator := New()
 	orchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
-	expander := NewMockRepotingStrategy(t, config.ExpansionOptionToChoose)
+	expander := NewMockReportingStrategy(t, config.ExpansionOptionToChoose, nil)
 	context.ExpanderStrategy = expander

 	// scale up
@@ -1217,7 +1217,7 @@ func TestBinpackingLimiter(t *testing.T) {
 	suOrchestrator := New()
 	suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})

-	expander := NewMockRepotingStrategy(t, nil)
+	expander := NewMockReportingStrategy(t, nil, nil)
 	context.ExpanderStrategy = expander

 	scaleUpStatus, err := suOrchestrator.ScaleUp([]*apiv1.Pod{extraPod}, nodes, []*appsv1.DaemonSet{}, nodeInfos, false)
@@ -1433,80 +1433,104 @@ func TestComputeSimilarNodeGroups(t *testing.T) {
 }

 func TestScaleUpBalanceGroups(t *testing.T) {
-	provider := testprovider.NewTestCloudProvider(func(string, int) error {
-		return nil
-	}, nil)
+	// this will set the similar node groups to empty, it should be recomputed during scaleup
+	emptySimilarNodeGroups := []string{}
+	noSimilarNodeGroupsExpander := NewMockReportingStrategy(t, &GroupSizeChange{GroupName: "ng2", SizeChange: 2}, &emptySimilarNodeGroups)

-	type ngInfo struct {
-		min, max, size int
-	}
-	testCfg := map[string]ngInfo{
-		"ng1": {min: 1, max: 1, size: 1},
-		"ng2": {min: 1, max: 2, size: 1},
-		"ng3": {min: 1, max: 5, size: 1},
-		"ng4": {min: 1, max: 5, size: 3},
+	testCases := []struct {
+		name             string
+		expanderStrategy *MockReportingStrategy
+	}{
+		{
+			name: "balance groups",
+		},
+		{
+			name:             "balance groups recomputes similar nodegroups",
+			expanderStrategy: noSimilarNodeGroupsExpander,
+		},
 	}
-	podList := make([]*apiv1.Pod, 0, len(testCfg))
-	nodes := make([]*apiv1.Node, 0)

-	now := time.Now()
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			provider := testprovider.NewTestCloudProvider(func(string, int) error {
+				return nil
+			}, nil)

-	for gid, gconf := range testCfg {
-		provider.AddNodeGroup(gid, gconf.min, gconf.max, gconf.size)
-		for i := 0; i < gconf.size; i++ {
-			nodeName := fmt.Sprintf("%v-node-%v", gid, i)
-			node := BuildTestNode(nodeName, 100, 1000)
-			SetNodeReadyState(node, true, now.Add(-2*time.Minute))
-			nodes = append(nodes, node)
+			type ngInfo struct {
+				min, max, size int
+			}
+			testCfg := map[string]ngInfo{
+				"ng1": {min: 1, max: 1, size: 1},
+				"ng2": {min: 1, max: 2, size: 1},
+				"ng3": {min: 1, max: 5, size: 1},
+				"ng4": {min: 1, max: 5, size: 3},
+			}
+			podList := make([]*apiv1.Pod, 0, len(testCfg))
+			nodes := make([]*apiv1.Node, 0)

-			pod := BuildTestPod(fmt.Sprintf("%v-pod-%v", gid, i), 80, 0)
-			pod.Spec.NodeName = nodeName
-			podList = append(podList, pod)
+			now := time.Now()

-			provider.AddNode(gid, node)
-		}
-	}
+			for gid, gconf := range testCfg {
+				provider.AddNodeGroup(gid, gconf.min, gconf.max, gconf.size)
+				for i := 0; i < gconf.size; i++ {
+					nodeName := fmt.Sprintf("%v-node-%v", gid, i)
+					node := BuildTestNode(nodeName, 100, 1000)
+					SetNodeReadyState(node, true, now.Add(-2*time.Minute))
+					nodes = append(nodes, node)

-	podLister := kube_util.NewTestPodLister(podList)
-	listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)
+					pod := BuildTestPod(fmt.Sprintf("%v-pod-%v", gid, i), 80, 0)
+					pod.Spec.NodeName = nodeName
+					podList = append(podList, pod)

-	options := config.AutoscalingOptions{
-		EstimatorName:            estimator.BinpackingEstimatorName,
-		BalanceSimilarNodeGroups: true,
-		MaxCoresTotal:            config.DefaultMaxClusterCores,
-		MaxMemoryTotal:           config.DefaultMaxClusterMemory,
-	}
-	context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
-	assert.NoError(t, err)
-	err = context.ClusterSnapshot.SetClusterState(nodes, podList, drasnapshot.Snapshot{})
-	assert.NoError(t, err)
-	nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
-	clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
+					provider.AddNode(gid, node)
+				}
+			}

-	pods := make([]*apiv1.Pod, 0)
-	for i := 0; i < 2; i++ {
-		pods = append(pods, BuildTestPod(fmt.Sprintf("test-pod-%v", i), 80, 0))
-	}
+			podLister := kube_util.NewTestPodLister(podList)
+			listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil)

-	processors := processorstest.NewTestProcessors(&context)
-	suOrchestrator := New()
-	suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
-	scaleUpStatus, typedErr := suOrchestrator.ScaleUp(pods, nodes, []*appsv1.DaemonSet{}, nodeInfos, false)
+			options := config.AutoscalingOptions{
+				EstimatorName:            estimator.BinpackingEstimatorName,
+				BalanceSimilarNodeGroups: true,
+				MaxCoresTotal:            config.DefaultMaxClusterCores,
+				MaxMemoryTotal:           config.DefaultMaxClusterMemory,
+			}
+			context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
+			assert.NoError(t, err)
+			err = context.ClusterSnapshot.SetClusterState(nodes, podList, drasnapshot.Snapshot{})
+			assert.NoError(t, err)
+			nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
+			clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
+			clusterState.UpdateNodes(nodes, nodeInfos, time.Now())

-	assert.NoError(t, typedErr)
-	assert.True(t, scaleUpStatus.WasSuccessful())
-	groupMap := make(map[string]cloudprovider.NodeGroup, 3)
-	for _, group := range provider.NodeGroups() {
-		groupMap[group.Id()] = group
-	}
+			pods := make([]*apiv1.Pod, 0)
+			for i := 0; i < 2; i++ {
+				pods = append(pods, BuildTestPod(fmt.Sprintf("test-pod-%v", i), 80, 0))
+			}

-	ng2size, err := groupMap["ng2"].TargetSize()
-	assert.NoError(t, err)
-	ng3size, err := groupMap["ng3"].TargetSize()
-	assert.NoError(t, err)
-	assert.Equal(t, 2, ng2size)
-	assert.Equal(t, 2, ng3size)
+			if tc.expanderStrategy != nil {
+				context.ExpanderStrategy = tc.expanderStrategy
+			}
+			processors := processorstest.NewTestProcessors(&context)
+			suOrchestrator := New()
+			suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
+			scaleUpStatus, typedErr := suOrchestrator.ScaleUp(pods, nodes, []*appsv1.DaemonSet{}, nodeInfos, false)
+
+			assert.NoError(t, typedErr)
+			assert.True(t, scaleUpStatus.WasSuccessful())
+			groupMap := make(map[string]cloudprovider.NodeGroup, 3)
+			for _, group := range provider.NodeGroups() {
+				groupMap[group.Id()] = group
+			}
+
+			ng2size, err := groupMap["ng2"].TargetSize()
+			assert.NoError(t, err)
+			ng3size, err := groupMap["ng3"].TargetSize()
+			assert.NoError(t, err)
+			assert.Equal(t, 2, ng2size)
+			assert.Equal(t, 2, ng3size)
+		})
+	}
 }

 func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
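Note on the hunk above: the new "balance groups recomputes similar nodegroups" case wires noSimilarNodeGroupsExpander into the context, so the chosen option (grow ng2 by 2) arrives with an empty SimilarNodeGroups list. The assertions are identical for both cases: ng2 and ng3 must each reach target size 2, which only happens if ScaleUp recomputes node-group similarity rather than trusting the expander's empty list.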
32 changes: 22 additions & 10 deletions cluster-autoscaler/core/test/common.go
@@ -19,6 +19,7 @@ package test
 import (
 	"fmt"
 	"reflect"
+	"slices"
 	"testing"
 	"time"

@@ -341,19 +342,21 @@ type expanderResults struct {

 // MockReportingStrategy implements expander.Strategy
 type MockReportingStrategy struct {
-	defaultStrategy expander.Strategy
-	optionToChoose  *GroupSizeChange
-	t               *testing.T
-	results         *expanderResults
+	defaultStrategy           expander.Strategy
+	optionToChoose            *GroupSizeChange
+	similarNodeGroupsToChoose *[]string
+	t                         *testing.T
+	results                   *expanderResults
 }

 // NewMockRepotingStrategy creates an expander strategy with reporting and mocking capabilities.
-func NewMockRepotingStrategy(t *testing.T, optionToChoose *GroupSizeChange) *MockReportingStrategy {
+func NewMockReportingStrategy(t *testing.T, optionToChoose *GroupSizeChange, similarNodeGroupsToChoose *[]string) *MockReportingStrategy {
 	return &MockReportingStrategy{
-		defaultStrategy: random.NewStrategy(),
-		results:         &expanderResults{},
-		optionToChoose:  optionToChoose,
-		t:               t,
+		defaultStrategy:           random.NewStrategy(),
+		results:                   &expanderResults{},
+		optionToChoose:            optionToChoose,
+		similarNodeGroupsToChoose: similarNodeGroupsToChoose,
+		t:                         t,
 	}
 }

@@ -373,7 +376,16 @@ func (r *MockReportingStrategy) BestOption(options []expander.Option, nodeInfo m
 	for _, option := range options {
 		groupSizeChange := expanderOptionToGroupSizeChange(option)
 		if groupSizeChange == *r.optionToChoose {
-			return &option
+			bestOption := option
+			if r.similarNodeGroupsToChoose != nil {
+				bestOption.SimilarNodeGroups = []cloudprovider.NodeGroup{}
+				for _, nodeGroup := range options {
+					if slices.Contains(*r.similarNodeGroupsToChoose, nodeGroup.NodeGroup.Id()) {
+						bestOption.SimilarNodeGroups = append(bestOption.SimilarNodeGroups, nodeGroup.NodeGroup)
+					}
+				}
+			}
+			return &bestOption
 		}
 	}
 	assert.Fail(r.t, "did not find expansionOptionToChoose %+v", r.optionToChoose)
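Caller-side semantics of the new similarNodeGroupsToChoose parameter, as a short sketch (assumes the test-package helpers above; the nil-versus-pointer distinction is what makes the override opt-in):

	// nil leaves the chosen option untouched; BestOption returns it as-is.
	plain := NewMockReportingStrategy(t, &GroupSizeChange{GroupName: "ng2", SizeChange: 2}, nil)

	// A non-nil pointer replaces SimilarNodeGroups on the chosen option, so a
	// pointer to an empty slice means "report no similar groups at all".
	none := []string{}
	noSimilar := NewMockReportingStrategy(t, &GroupSizeChange{GroupName: "ng2", SizeChange: 2}, &none)

	// A non-empty list keeps only the named groups, in the order they appear
	// in the options passed to BestOption.
	similar := []string{"ng3"}
	onlyNg3 := NewMockReportingStrategy(t, &GroupSizeChange{GroupName: "ng2", SizeChange: 2}, &similar)

	// Any of these would then be installed via context.ExpanderStrategy.
	_, _, _ = plain, noSimilar, onlyNg3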
