Merge pull request #6169 from hbostan/master
Add HasNodeGroupStartedScaleUp to cluster state registry.
k8s-ci-robot authored Oct 13, 2023
2 parents 5c65c39 + 833e4cb commit 3065285
Showing 2 changed files with 54 additions and 0 deletions.
11 changes: 11 additions & 0 deletions cluster-autoscaler/clusterstate/clusterstate.go
@@ -501,6 +501,17 @@ func (csr *ClusterStateRegistry) IsNodeGroupScalingUp(nodeGroupName string) bool
return found
}

// HasNodeGroupStartedScaleUp returns true if the node group has started a scale-up, regardless
// of whether there are any upcoming nodes. This is useful when the node group's size reverts
// back to its previous size before the next UpdateNodes call and we still want to know
// whether a scale-up for the node group has started.
func (csr *ClusterStateRegistry) HasNodeGroupStartedScaleUp(nodeGroupName string) bool {
csr.Lock()
defer csr.Unlock()
_, found := csr.scaleUpRequests[nodeGroupName]
return found
}

// AcceptableRange contains information about acceptable size of a node group.
type AcceptableRange struct {
// MinNodes is the minimum number of nodes in the group.
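Not part of this commit: a minimal sketch of how a caller might contrast the existing IsNodeGroupScalingUp with the new HasNodeGroupStartedScaleUp. The package name and the helper describeScaleUpState are illustrative assumptions; the import path follows the repository's module layout, and only the two registry methods come from the code above.

package example

import "k8s.io/autoscaler/cluster-autoscaler/clusterstate"

// describeScaleUpState is a hypothetical helper, not part of the registry API.
func describeScaleUpState(csr *clusterstate.ClusterStateRegistry, nodeGroup string) string {
	switch {
	case csr.IsNodeGroupScalingUp(nodeGroup):
		// A scale-up request exists and the group still has upcoming nodes.
		return "scale-up in progress"
	case csr.HasNodeGroupStartedScaleUp(nodeGroup):
		// A scale-up was requested, but the group is no longer reported as
		// scaling up (e.g. its target size reverted before the next update).
		return "scale-up started, but not currently scaling up"
	default:
		return "no scale-up started"
	}
}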
43 changes: 43 additions & 0 deletions cluster-autoscaler/clusterstate/clusterstate_test.go
@@ -122,6 +122,7 @@ func TestEmptyOK(t *testing.T) {
assert.Empty(t, clusterstate.GetScaleUpFailures())
assert.True(t, clusterstate.IsNodeGroupHealthy("ng1"))
assert.False(t, clusterstate.IsNodeGroupScalingUp("ng1"))
assert.False(t, clusterstate.HasNodeGroupStartedScaleUp("ng1"))

provider.AddNodeGroup("ng1", 0, 10, 3)
clusterstate.RegisterOrUpdateScaleUp(provider.GetNodeGroup("ng1"), 3, now.Add(-3*time.Second))
@@ -133,6 +134,48 @@ func TestEmptyOK(t *testing.T) {
assert.True(t, clusterstate.IsClusterHealthy())
assert.True(t, clusterstate.IsNodeGroupHealthy("ng1"))
assert.True(t, clusterstate.IsNodeGroupScalingUp("ng1"))
assert.True(t, clusterstate.HasNodeGroupStartedScaleUp("ng1"))
}

func TestHasNodeGroupStartedScaleUp(t *testing.T) {
tests := map[string]struct {
initialSize int
delta int
}{
"Target size reverts back to zero": {
initialSize: 0,
delta: 3,
},
}
for tn, tc := range tests {
t.Run(tn, func(t *testing.T) {
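// Start with no scale-up requested; both queries report false.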
now := time.Now()
provider := testprovider.NewTestCloudProvider(nil, nil)
provider.AddNodeGroup("ng1", 0, 5, tc.initialSize)
fakeClient := &fake.Clientset{}
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
}, fakeLogRecorder, newBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: time.Minute}))
err := clusterstate.UpdateNodes([]*apiv1.Node{}, nil, now.Add(-5*time.Second))
assert.NoError(t, err)
assert.False(t, clusterstate.IsNodeGroupScalingUp("ng1"))
assert.False(t, clusterstate.HasNodeGroupStartedScaleUp("ng1"))

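// Grow the target size and register a scale-up; both queries now report true.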
provider.AddNodeGroup("ng1", 0, 5, tc.initialSize+tc.delta)
clusterstate.RegisterOrUpdateScaleUp(provider.GetNodeGroup("ng1"), tc.delta, now.Add(-3*time.Second))
err = clusterstate.UpdateNodes([]*apiv1.Node{}, nil, now)
assert.NoError(t, err)
assert.True(t, clusterstate.IsNodeGroupScalingUp("ng1"))
assert.True(t, clusterstate.HasNodeGroupStartedScaleUp("ng1"))

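// Revert the target size: the group is no longer scaling up, but the
// registered scale-up request is still remembered.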
provider.AddNodeGroup("ng1", 0, 5, tc.initialSize)
clusterstate.Recalculate()
assert.False(t, clusterstate.IsNodeGroupScalingUp("ng1"))
assert.True(t, clusterstate.HasNodeGroupStartedScaleUp("ng1"))
})
}
}

func TestOKOneUnreadyNode(t *testing.T) {
