refactor: remove clusterStateRegistry from delete_in_batch tests
- not needed anymore since we have `scaleStateNotifier`
Signed-off-by: vadasambar <[email protected]>
vadasambar committed Dec 20, 2023
1 parent 8cfc12a commit 0b77571
Showing 1 changed file with 0 additions and 7 deletions.
@@ -24,17 +24,13 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
clusterstate_utils "k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/deletiontracker"
. "k8s.io/autoscaler/cluster-autoscaler/core/test"
"k8s.io/autoscaler/cluster-autoscaler/observers/nodegroupchange"
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig"
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
kube_record "k8s.io/client-go/tools/record"
)

func TestAddNodeToBucket(t *testing.T) {
@@ -140,7 +136,6 @@ func TestRemove(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
test := test
fakeClient := &fake.Clientset{}
fakeLogRecorder, _ := clusterstate_utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")

failedNodeDeletion := make(map[string]bool)
deletedNodes := make(chan string, 10)
@@ -164,13 +159,11 @@
})

ctx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, fakeClient, nil, provider, nil, nil)
clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}))
if err != nil {
t.Fatalf("Couldn't set up autoscaling context: %v", err)
}

scaleStateNotifier := nodegroupchange.NewNodeGroupChangeObserversList()
scaleStateNotifier.RegisterForNotifications(clusterStateRegistry)

ng := "ng"
provider.AddNodeGroup(ng, 1, 10, test.numNodes)
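The commit message says the `ClusterStateRegistry` wiring is no longer needed because the tests already have `scaleStateNotifier`. The sketch below illustrates the underlying observer-list pattern: a notifier that fans notifications out to whatever observers are registered, so a test that only drives the notifier does not need a full registry hooked up to it. This is a minimal, hypothetical example; the names `ObserversList`, `ChangeObserver`, `RegisterScaleDown`, and `recordingObserver` are illustrative only and are not the cluster-autoscaler's actual `nodegroupchange` API (only `RegisterForNotifications` appears in the diff above).

```go
package main

import "fmt"

// ChangeObserver receives scale-state notifications from the list.
type ChangeObserver interface {
	RegisterScaleDown(nodeGroup string, delta int)
}

// ObserversList fans each notification out to every registered observer.
type ObserversList struct {
	observers []ChangeObserver
}

// RegisterForNotifications adds an observer to the list.
func (l *ObserversList) RegisterForNotifications(o ChangeObserver) {
	l.observers = append(l.observers, o)
}

// RegisterScaleDown notifies all registered observers of a scale-down.
func (l *ObserversList) RegisterScaleDown(nodeGroup string, delta int) {
	for _, o := range l.observers {
		o.RegisterScaleDown(nodeGroup, delta)
	}
}

// recordingObserver is a tiny test double; it stands in for whatever a test
// actually needs to observe. In the real test, a full ClusterStateRegistry was
// previously registered here even though nothing asserted on it.
type recordingObserver struct{ events []string }

func (r *recordingObserver) RegisterScaleDown(nodeGroup string, delta int) {
	r.events = append(r.events, fmt.Sprintf("%s:%d", nodeGroup, delta))
}

func main() {
	notifier := &ObserversList{}
	rec := &recordingObserver{}
	notifier.RegisterForNotifications(rec)
	notifier.RegisterScaleDown("ng", -1)
	fmt.Println(rec.events) // prints [ng:-1]
}
```

Under that reading, dropping the registry from the test setup removes an observer the test never inspected, without changing what the notifier-driven assertions exercise.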