From b5d2e255bf55225559c40405c2105a31035025e5 Mon Sep 17 00:00:00 2001 From: willie-yao Date: Tue, 29 Aug 2023 21:13:21 +0000 Subject: [PATCH] Add unit tests for blueprint, current state, & desired state. --- .../topology/cluster/blueprint_test.go | 87 ++- .../topology/cluster/current_state_test.go | 288 +++++++- .../topology/cluster/desired_state_test.go | 641 +++++++++++++++++- internal/test/builder/infrastructure.go | 4 + 4 files changed, 1002 insertions(+), 18 deletions(-) diff --git a/internal/controllers/topology/cluster/blueprint_test.go b/internal/controllers/topology/cluster/blueprint_test.go index 621328b62a49..886a5dcc3f7c 100644 --- a/internal/controllers/topology/cluster/blueprint_test.go +++ b/internal/controllers/topology/cluster/blueprint_test.go @@ -37,6 +37,8 @@ func TestGetBlueprint(t *testing.T) { builder.GenericInfrastructureClusterTemplateCRD, builder.GenericInfrastructureMachineTemplateCRD, builder.GenericInfrastructureMachineCRD, + builder.GenericInfrastructureMachinePoolTemplateCRD, + builder.GenericInfrastructureMachinePoolCRD, builder.GenericControlPlaneTemplateCRD, builder.GenericBootstrapConfigTemplateCRD, } @@ -56,6 +58,8 @@ func TestGetBlueprint(t *testing.T) { workerInfrastructureMachineTemplate := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "workerinframachinetemplate1"). Build() + workerInfrastructureMachinePoolTemplate := builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "workerinframachinepooltemplate1"). + Build() workerBootstrapTemplate := builder.BootstrapTemplate(metav1.NamespaceDefault, "workerbootstraptemplate1"). Build() machineHealthCheck := &clusterv1.MachineHealthCheckClass{ @@ -73,6 +77,14 @@ func TestGetBlueprint(t *testing.T) { mds := []clusterv1.MachineDeploymentClass{*machineDeployment} + machinePools := builder.MachinePoolClass("workerclass2"). + WithLabels(map[string]string{"foo": "bar"}). + WithAnnotations(map[string]string{"a": "b"}). + WithInfrastructureTemplate(workerInfrastructureMachinePoolTemplate). + WithBootstrapTemplate(workerBootstrapTemplate). + Build() + mps := []clusterv1.MachinePoolClass{*machinePools} + // Define test cases. tests := []struct { name string @@ -141,6 +153,7 @@ func TestGetBlueprint(t *testing.T) { Template: controlPlaneTemplate, }, MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{}, + MachinePools: map[string]*scope.MachinePoolBlueprint{}, }, }, { @@ -167,6 +180,7 @@ func TestGetBlueprint(t *testing.T) { InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate, }, MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{}, + MachinePools: map[string]*scope.MachinePoolBlueprint{}, }, }, { @@ -217,10 +231,11 @@ func TestGetBlueprint(t *testing.T) { MachineHealthCheck: machineHealthCheck, }, }, + MachinePools: map[string]*scope.MachinePoolBlueprint{}, }, }, { - name: "Fails if ClusterClass has a MachineDeploymentClass referencing a BootstrapTemplate that does not exist", + name: "Fails if ClusterClass has a MachineDeploymentClass referencing a BootstrapConfigTemplate that does not exist", clusterClass: builder.ClusterClass(metav1.NamespaceDefault, "class1"). WithInfrastructureClusterTemplate(infraClusterTemplate). WithControlPlaneTemplate(controlPlaneTemplate).
@@ -276,8 +291,75 @@ func TestGetBlueprint(t *testing.T) { MachineHealthCheck: machineHealthCheck, }, MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{}, + MachinePools: map[string]*scope.MachinePoolBlueprint{}, + }, + }, + { + name: "Should read a ClusterClass with a MachinePoolClass", + clusterClass: builder.ClusterClass(metav1.NamespaceDefault, "class1"). + WithInfrastructureClusterTemplate(infraClusterTemplate). + WithControlPlaneTemplate(controlPlaneTemplate). + WithWorkerMachinePoolClasses(mps...). + Build(), + objects: []client.Object{ + infraClusterTemplate, + controlPlaneTemplate, + workerInfrastructureMachinePoolTemplate, + workerBootstrapTemplate, + }, + want: &scope.ClusterBlueprint{ + ClusterClass: builder.ClusterClass(metav1.NamespaceDefault, "class1"). + WithInfrastructureClusterTemplate(infraClusterTemplate). + WithControlPlaneTemplate(controlPlaneTemplate). + WithWorkerMachinePoolClasses(mps...). + Build(), + InfrastructureClusterTemplate: infraClusterTemplate, + ControlPlane: &scope.ControlPlaneBlueprint{ + Template: controlPlaneTemplate, + }, + MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{}, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "workerclass2": { + Metadata: clusterv1.ObjectMeta{ + Labels: map[string]string{"foo": "bar"}, + Annotations: map[string]string{"a": "b"}, + }, + InfrastructureMachinePoolTemplate: workerInfrastructureMachinePoolTemplate, + BootstrapTemplate: workerBootstrapTemplate, + }, + }, }, }, + { + name: "Fails if ClusterClass has a MachinePoolClass referencing a BootstrapConfigTemplate that does not exist", + clusterClass: builder.ClusterClass(metav1.NamespaceDefault, "class1"). + WithInfrastructureClusterTemplate(infraClusterTemplate). + WithControlPlaneTemplate(controlPlaneTemplate). + WithWorkerMachinePoolClasses(mps...). + Build(), + objects: []client.Object{ + infraClusterTemplate, + controlPlaneTemplate, + workerInfrastructureMachinePoolTemplate, + // workerBootstrapTemplate is missing! + }, + wantErr: true, + }, + { + name: "Fails if ClusterClass has a MachinePoolClass referencing an InfrastructureMachinePoolTemplate that does not exist", + clusterClass: builder.ClusterClass(metav1.NamespaceDefault, "class1"). + WithInfrastructureClusterTemplate(infraClusterTemplate). + WithControlPlaneTemplate(controlPlaneTemplate). + WithWorkerMachinePoolClasses(mps...). + Build(), + objects: []client.Object{ + infraClusterTemplate, + controlPlaneTemplate, + workerBootstrapTemplate, + // workerInfrastructureMachinePoolTemplate is missing!
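+ // With the template absent from the fake client, GetBlueprint cannot resolve the MachinePoolClass references and is expected to return an error.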
+ }, + wantErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -333,7 +415,8 @@ func TestGetBlueprint(t *testing.T) { g.Expect(tt.want.ClusterClass).To(EqualObject(got.ClusterClass, IgnoreAutogeneratedMetadata)) g.Expect(tt.want.InfrastructureClusterTemplate).To(EqualObject(got.InfrastructureClusterTemplate), cmp.Diff(got.InfrastructureClusterTemplate, tt.want.InfrastructureClusterTemplate)) g.Expect(got.ControlPlane).To(BeComparableTo(tt.want.ControlPlane), cmp.Diff(got.ControlPlane, tt.want.ControlPlane)) - g.Expect(tt.want.MachineDeployments).To(BeComparableTo(got.MachineDeployments), got.MachineDeployments, tt.want.MachineDeployments) + g.Expect(tt.want.MachineDeployments).To(BeComparableTo(got.MachineDeployments), cmp.Diff(got.MachineDeployments, tt.want.MachineDeployments)) + g.Expect(tt.want.MachinePools).To(BeComparableTo(got.MachinePools), cmp.Diff(got.MachinePools, tt.want.MachinePools)) }) } } diff --git a/internal/controllers/topology/cluster/current_state_test.go b/internal/controllers/topology/cluster/current_state_test.go index cea202a5e82e..88908d9b7646 100644 --- a/internal/controllers/topology/cluster/current_state_test.go +++ b/internal/controllers/topology/cluster/current_state_test.go @@ -43,6 +43,8 @@ func TestGetCurrentState(t *testing.T) { builder.GenericBootstrapConfigTemplateCRD, builder.GenericInfrastructureMachineTemplateCRD, builder.GenericInfrastructureMachineCRD, + builder.GenericInfrastructureMachinePoolTemplateCRD, + builder.GenericInfrastructureMachinePoolCRD, } // The following is a block creating a number of objects for use in the test cases. @@ -157,6 +159,39 @@ func TestGetCurrentState(t *testing.T) { WithClusterName("cluster1"). Build() + // MachinePool and related objects. + emptyMachinePools := make(map[string]*scope.MachinePoolState) + + machinePoolInfrastructureTemplate := builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "infra2"). + Build() + machinePoolInfrastructure := builder.InfrastructureMachinePool(metav1.NamespaceDefault, "infra2"). + Build() + machinePoolInfrastructure.SetLabels(map[string]string{clusterv1.ClusterTopologyOwnedLabel: ""}) + machinePoolBootstrapTemplate := builder.BootstrapTemplate(metav1.NamespaceDefault, "bootstrap2"). + Build() + machinePoolBootstrap := builder.BootstrapConfig(metav1.NamespaceDefault, "bootstrap2"). + Build() + machinePoolBootstrap.SetLabels(map[string]string{clusterv1.ClusterTopologyOwnedLabel: ""}) + + machinePool := builder.MachinePool(metav1.NamespaceDefault, "mp1"). + WithLabels(map[string]string{ + clusterv1.ClusterNameLabel: "cluster1", + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "mp1", + }). + WithBootstrap(machinePoolBootstrap). + WithInfrastructure(machinePoolInfrastructure). + Build() + machinePool2 := builder.MachinePool(metav1.NamespaceDefault, "mp2"). + WithLabels(map[string]string{ + clusterv1.ClusterNameLabel: "cluster1", + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "mp2", + }). + WithBootstrap(machinePoolBootstrap). + WithInfrastructure(machinePoolInfrastructure). 
+ Build() + tests := []struct { name string cluster *clusterv1.Cluster @@ -177,6 +212,7 @@ func TestGetCurrentState(t *testing.T) { ControlPlane: &scope.ControlPlaneState{}, InfrastructureCluster: nil, MachineDeployments: emptyMachineDeployments, + MachinePools: emptyMachinePools, }, }, { @@ -233,7 +269,7 @@ func TestGetCurrentState(t *testing.T) { objects: []client.Object{ infraCluster, }, - // Expecting valid return with no ControlPlane or MachineDeployment state defined but with a valid Infrastructure state. + // Expecting valid return with no ControlPlane, MachineDeployment, or MachinePool state defined but with a valid Infrastructure state. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithInfrastructureCluster(infraCluster). @@ -241,6 +277,7 @@ func TestGetCurrentState(t *testing.T) { ControlPlane: &scope.ControlPlaneState{}, InfrastructureCluster: infraCluster, MachineDeployments: emptyMachineDeployments, + MachinePools: emptyMachinePools, }, }, { @@ -262,7 +299,7 @@ func TestGetCurrentState(t *testing.T) { clusterClassWithNoControlPlaneInfra, // Workers are missing! }, - // Expecting valid return with ControlPlane, no ControlPlane Infrastructure state, InfrastructureCluster state and no defined MachineDeployment state. + // Expecting valid return with ControlPlane, no ControlPlane Infrastructure state, InfrastructureCluster state, and no defined MachineDeployment or MachinePool state. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithControlPlane(controlPlane). @@ -271,6 +308,7 @@ func TestGetCurrentState(t *testing.T) { ControlPlane: &scope.ControlPlaneState{Object: controlPlane, InfrastructureMachineTemplate: nil}, InfrastructureCluster: infraCluster, MachineDeployments: emptyMachineDeployments, + MachinePools: emptyMachinePools, }, }, { @@ -312,7 +350,7 @@ func TestGetCurrentState(t *testing.T) { controlPlaneInfrastructureMachineTemplate, // Workers are missing! }, - // Expecting valid return with valid ControlPlane state, but no ControlPlane Infrastructure, InfrastructureCluster or MachineDeployment state defined. + // Expecting valid return with valid ControlPlane state, but no ControlPlane Infrastructure, InfrastructureCluster, MachineDeployment, or MachinePool state defined. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithControlPlane(controlPlaneWithInfra). @@ -320,6 +358,7 @@ func TestGetCurrentState(t *testing.T) { ControlPlane: &scope.ControlPlaneState{Object: controlPlaneWithInfra, InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate}, InfrastructureCluster: nil, MachineDeployments: emptyMachineDeployments, + MachinePools: emptyMachinePools, }, }, { @@ -343,7 +382,7 @@ func TestGetCurrentState(t *testing.T) { controlPlaneWithInfra, // Workers are missing! }, - // Expecting valid return with valid ControlPlane state, ControlPlane Infrastructure state and InfrastructureCluster state, but no defined MachineDeployment state. + // Expecting valid return with valid ControlPlane state, ControlPlane Infrastructure state and InfrastructureCluster state, but no defined MachineDeployment or MachinePool state. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithInfrastructureCluster(infraCluster). 
@@ -352,6 +391,7 @@ func TestGetCurrentState(t *testing.T) { ControlPlane: &scope.ControlPlaneState{Object: controlPlaneWithInfra, InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate}, InfrastructureCluster: infraCluster, MachineDeployments: emptyMachineDeployments, + MachinePools: emptyMachinePools, }, }, { @@ -362,6 +402,10 @@ func TestGetCurrentState(t *testing.T) { Class: "mdClass", Name: "md1", }). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). Build()). Build(), blueprint: &scope.ClusterBlueprint{ @@ -377,6 +421,12 @@ func TestGetCurrentState(t *testing.T) { InfrastructureMachineTemplate: machineDeploymentInfrastructure, }, }, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "mpClass": { + BootstrapTemplate: machinePoolBootstrap, + InfrastructureMachinePoolTemplate: machinePoolInfrastructure, + }, + }, }, objects: []client.Object{ infraCluster, @@ -386,8 +436,11 @@ func TestGetCurrentState(t *testing.T) { machineDeploymentInfrastructure, machineDeploymentBootstrap, machineDeployment, + machinePoolInfrastructure, + machinePoolBootstrap, + machinePool, }, - // Expecting valid return with valid ControlPlane, ControlPlane Infrastructure and InfrastructureCluster state, but no defined MachineDeployment state. + // Expecting valid return with valid ControlPlane, ControlPlane Infrastructure, InfrastructureCluster, MachineDeployment and MachinePool state. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithTopology(builder.ClusterTopology(). @@ -395,12 +448,18 @@ func TestGetCurrentState(t *testing.T) { Class: "mdClass", Name: "md1", }). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). Build()). Build(), ControlPlane: &scope.ControlPlaneState{}, InfrastructureCluster: nil, MachineDeployments: map[string]*scope.MachineDeploymentState{ "md1": {Object: machineDeployment, BootstrapTemplate: machineDeploymentBootstrap, InfrastructureMachineTemplate: machineDeploymentInfrastructure}}, + MachinePools: map[string]*scope.MachinePoolState{ + "mp1": {Object: machinePool, BootstrapObject: machinePoolBootstrap, InfrastructureMachinePoolObject: machinePoolInfrastructure}}, }, }, { @@ -443,7 +502,7 @@ func TestGetCurrentState(t *testing.T) { wantErr: true, }, { - name: "Should ignore unmanaged MachineDeployments and MachineDeployments belonging to other clusters", + name: "Should ignore unmanaged MachineDeployments/MachinePools and MachineDeployments/MachinePools belonging to other clusters", cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). Build(), blueprint: &scope.ClusterBlueprint{ClusterClass: clusterClassWithControlPlaneInfra}, @@ -466,14 +525,32 @@ func TestGetCurrentState(t *testing.T) { WithBootstrapTemplate(machineDeploymentBootstrap). WithInfrastructureTemplate(machineDeploymentInfrastructure). Build(), + builder.MachinePool(metav1.NamespaceDefault, "no-managed-label"). + WithLabels(map[string]string{ + clusterv1.ClusterNameLabel: "cluster1", + // topology.cluster.x-k8s.io/owned label is missing (unmanaged)! + }). + WithBootstrap(machinePoolBootstrap). + WithInfrastructure(machinePoolInfrastructure). + Build(), + builder.MachinePool(metav1.NamespaceDefault, "wrong-cluster-label"). + WithLabels(map[string]string{ + clusterv1.ClusterNameLabel: "another-cluster", + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "mp1", + }). + WithBootstrap(machinePoolBootstrap). 
+ WithInfrastructure(machinePoolInfrastructure). + Build(), }, - // Expect valid return with empty MachineDeployments properly filtered by label. + // Expect valid return with empty MachineDeployments and MachinePools properly filtered by label. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). Build(), ControlPlane: &scope.ControlPlaneState{}, InfrastructureCluster: nil, MachineDeployments: emptyMachineDeployments, + MachinePools: emptyMachinePools, }, }, { @@ -528,7 +605,58 @@ func TestGetCurrentState(t *testing.T) { wantErr: true, }, { - name: "Should read a full Cluster (With InfrastructureCluster, ControlPlane and ControlPlane Infrastructure, MachineDeployments)", + name: "Fails if there are MachinePools without the topology.cluster.x-k8s.io/pool-name label", + cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). + Build(), + blueprint: &scope.ClusterBlueprint{ClusterClass: clusterClassWithControlPlaneInfra}, + objects: []client.Object{ + clusterClassWithControlPlaneInfra, + builder.MachinePool(metav1.NamespaceDefault, "missing-topology-mp-labelName"). + WithLabels(map[string]string{ + clusterv1.ClusterNameLabel: "cluster1", + clusterv1.ClusterTopologyOwnedLabel: "", + // topology.cluster.x-k8s.io/pool-name label is missing! + }). + WithBootstrap(machinePoolBootstrap). + WithInfrastructure(machinePoolInfrastructure). + Build(), + }, + // Expect error to be thrown as no managed MachinePool is reconcilable unless it has a ClusterTopologyMachinePoolNameLabel. + wantErr: true, + }, + { + name: "Fails if there are MachinePools with the same topology.cluster.x-k8s.io/pool-name label", + cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). + WithTopology(builder.ClusterTopology(). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). + Build()). + Build(), + blueprint: &scope.ClusterBlueprint{ + ClusterClass: clusterClassWithControlPlaneInfra, + ControlPlane: &scope.ControlPlaneBlueprint{ + Template: controlPlaneTemplateWithInfrastructureMachine, + InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate, + }, + }, + objects: []client.Object{ + clusterClassWithControlPlaneInfra, + machinePoolInfrastructure, + machinePoolBootstrap, + machinePool, + builder.MachinePool(metav1.NamespaceDefault, "duplicate-labels"). + WithLabels(machinePool.Labels). // Another machine pool with the same labels. + WithBootstrap(machinePoolBootstrap). + WithInfrastructure(machinePoolInfrastructure). + Build(), + }, + // Expect error as two MachinePools with the same ClusterTopologyMachinePoolNameLabel should not exist for one cluster. + wantErr: true, + }, + { + name: "Should read a full Cluster (With InfrastructureCluster, ControlPlane and ControlPlane Infrastructure, MachineDeployments, MachinePools)", cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithInfrastructureCluster(infraCluster). WithControlPlane(controlPlaneWithInfra). @@ -537,6 +665,10 @@ func TestGetCurrentState(t *testing.T) { Class: "mdClass", Name: "md1", }). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). Build()).
Build(), blueprint: &scope.ClusterBlueprint{ @@ -552,6 +684,12 @@ func TestGetCurrentState(t *testing.T) { InfrastructureMachineTemplate: machineDeploymentInfrastructure, }, }, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "mpClass": { + BootstrapTemplate: machinePoolBootstrapTemplate, + InfrastructureMachinePoolTemplate: machinePoolInfrastructureTemplate, + }, + }, }, objects: []client.Object{ infraCluster, @@ -561,8 +699,11 @@ func TestGetCurrentState(t *testing.T) { machineDeploymentInfrastructure, machineDeploymentBootstrap, machineDeployment, + machinePoolInfrastructure, + machinePoolBootstrap, + machinePool, }, - // Expect valid return of full ClusterState with MachineDeployments properly filtered by label. + // Expect valid return of full ClusterState with MachineDeployments and MachinePools properly filtered by label. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithInfrastructureCluster(infraCluster). @@ -572,6 +713,10 @@ func TestGetCurrentState(t *testing.T) { Class: "mdClass", Name: "md1", }). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). Build()). Build(), ControlPlane: &scope.ControlPlaneState{Object: controlPlaneWithInfra, InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate}, @@ -583,10 +728,17 @@ func TestGetCurrentState(t *testing.T) { InfrastructureMachineTemplate: machineDeploymentInfrastructure, }, }, + MachinePools: map[string]*scope.MachinePoolState{ + "mp1": { + Object: machinePool, + BootstrapObject: machinePoolBootstrap, + InfrastructureMachinePoolObject: machinePoolInfrastructure, + }, + }, }, }, { - name: "Should read a full Cluster, even if a MachineDeployment topology has been deleted and the MachineDeployment still exists", + name: "Should read a full Cluster, even if the MachineDeployment & MachinePool topologies have been deleted and the MachineDeployment & MachinePool still exist", cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithInfrastructureCluster(infraCluster). WithControlPlane(controlPlaneWithInfra). @@ -595,6 +747,10 @@ func TestGetCurrentState(t *testing.T) { Class: "mdClass", Name: "md1", }). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). Build()). Build(), blueprint: &scope.ClusterBlueprint{ @@ -610,6 +766,12 @@ func TestGetCurrentState(t *testing.T) { InfrastructureMachineTemplate: machineDeploymentInfrastructure, }, }, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "mpClass": { + BootstrapTemplate: machinePoolBootstrapTemplate, + InfrastructureMachinePoolTemplate: machinePoolInfrastructureTemplate, + }, + }, }, objects: []client.Object{ infraCluster, @@ -620,8 +782,12 @@ func TestGetCurrentState(t *testing.T) { machineDeploymentBootstrap, machineDeployment, machineDeployment2, + machinePoolInfrastructure, + machinePoolBootstrap, + machinePool, + machinePool2, }, - // Expect valid return of full ClusterState with MachineDeployments properly filtered by label. + // Expect valid return of full ClusterState with MachineDeployments and MachinePools properly filtered by label. want: &scope.ClusterState{ Cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). WithInfrastructureCluster(infraCluster). @@ -631,6 +797,10 @@ func TestGetCurrentState(t *testing.T) { Class: "mdClass", Name: "md1", }). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). Build()).
Build(), ControlPlane: &scope.ControlPlaneState{Object: controlPlaneWithInfra, InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate}, @@ -647,6 +817,18 @@ func TestGetCurrentState(t *testing.T) { InfrastructureMachineTemplate: machineDeploymentInfrastructure, }, }, + MachinePools: map[string]*scope.MachinePoolState{ + "mp1": { + Object: machinePool, + BootstrapObject: machinePoolBootstrap, + InfrastructureMachinePoolObject: machinePoolInfrastructure, + }, + "mp2": { + Object: machinePool2, + BootstrapObject: machinePoolBootstrap, + InfrastructureMachinePoolObject: machinePoolInfrastructure, + }, + }, }, }, { @@ -681,13 +863,52 @@ func TestGetCurrentState(t *testing.T) { machineDeploymentInfrastructure, builder.MachineDeployment(metav1.NamespaceDefault, "no-bootstrap"). WithLabels(machineDeployment.Labels). - // No BootstrapTemplate reference! + // No BootstrapConfigTemplate reference! WithInfrastructureTemplate(machineDeploymentInfrastructure). Build(), }, // Expect error as Bootstrap Template not defined for MachineDeployments relevant to the Cluster. wantErr: true, }, + { + name: "Fails if a Cluster has a MachinePool without the BootstrapConfig ref", + cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). + WithTopology(builder.ClusterTopology(). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). + Build()). + Build(), + blueprint: &scope.ClusterBlueprint{ + ClusterClass: clusterClassWithControlPlaneInfra, + InfrastructureClusterTemplate: infraClusterTemplate, + ControlPlane: &scope.ControlPlaneBlueprint{ + Template: controlPlaneTemplateWithInfrastructureMachine, + InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate, + }, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "mpClass": { + BootstrapTemplate: machinePoolBootstrapTemplate, + InfrastructureMachinePoolTemplate: machinePoolInfrastructureTemplate, + }, + }, + }, + objects: []client.Object{ + infraCluster, + clusterClassWithControlPlaneInfra, + controlPlaneInfrastructureMachineTemplate, + controlPlaneWithInfra, + machinePoolInfrastructure, + builder.MachinePool(metav1.NamespaceDefault, "no-bootstrap"). + WithLabels(machinePool.Labels). + // No BootstrapConfig reference! + WithInfrastructure(machinePoolInfrastructure). + Build(), + }, + // Expect error as BootstrapConfig not defined for MachinePools relevant to the Cluster. + wantErr: true, + }, { name: "Fails if a Cluster has a MachineDeployments without the InfrastructureMachineTemplate ref", cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). @@ -727,6 +948,45 @@ func TestGetCurrentState(t *testing.T) { // Expect error as Infrastructure Template not defined for MachineDeployment relevant to the Cluster. wantErr: true, }, + { + name: "Fails if a Cluster has a MachinePool without the InfrastructureMachinePool ref", + cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). + WithTopology(builder.ClusterTopology(). + WithMachinePool(clusterv1.MachinePoolTopology{ + Class: "mpClass", + Name: "mp1", + }). + Build()).
+ Build(), + blueprint: &scope.ClusterBlueprint{ + ClusterClass: clusterClassWithControlPlaneInfra, + InfrastructureClusterTemplate: infraClusterTemplate, + ControlPlane: &scope.ControlPlaneBlueprint{ + Template: controlPlaneTemplateWithInfrastructureMachine, + InfrastructureMachineTemplate: controlPlaneInfrastructureMachineTemplate, + }, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "mpClass": { + BootstrapTemplate: machinePoolBootstrapTemplate, + InfrastructureMachinePoolTemplate: machinePoolInfrastructureTemplate, + }, + }, + }, + objects: []client.Object{ + infraCluster, + clusterClassWithControlPlaneInfra, + controlPlaneInfrastructureMachineTemplate, + controlPlaneWithInfra, + machinePoolBootstrap, + builder.MachinePool(metav1.NamespaceDefault, "no-infra"). + WithLabels(machinePool.Labels). + WithBootstrap(machinePoolBootstrap). + // No InfrastructureMachinePool reference! + Build(), + }, + // Expect error as InfrastructureMachinePool not defined for MachinePool relevant to the Cluster. + wantErr: true, + }, { name: "Pass reading a full Cluster with MachineHealthChecks for ControlPlane and MachineDeployment)", cluster: builder.Cluster(metav1.NamespaceDefault, "cluster1"). @@ -790,6 +1050,7 @@ func TestGetCurrentState(t *testing.T) { MachineHealthCheck: machineHealthCheckForMachineDeployment, }, }, + MachinePools: emptyMachinePools, }, }, } @@ -838,7 +1099,8 @@ func TestGetCurrentState(t *testing.T) { g.Expect(got.Cluster).To(EqualObject(tt.want.Cluster, IgnoreAutogeneratedMetadata)) g.Expect(got.InfrastructureCluster).To(EqualObject(tt.want.InfrastructureCluster)) g.Expect(got.ControlPlane).To(BeComparableTo(tt.want.ControlPlane), cmp.Diff(got.ControlPlane, tt.want.ControlPlane)) - g.Expect(got.MachineDeployments).To(BeComparableTo(tt.want.MachineDeployments)) + g.Expect(got.MachineDeployments).To(BeComparableTo(tt.want.MachineDeployments), cmp.Diff(got.MachineDeployments, tt.want.MachineDeployments)) + g.Expect(got.MachinePools).To(BeComparableTo(tt.want.MachinePools), cmp.Diff(got.MachinePools, tt.want.MachinePools)) }) } } diff --git a/internal/controllers/topology/cluster/desired_state_test.go b/internal/controllers/topology/cluster/desired_state_test.go index 94feb132d8a7..e402cd95e4b4 100644 --- a/internal/controllers/topology/cluster/desired_state_test.go +++ b/internal/controllers/topology/cluster/desired_state_test.go @@ -32,6 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" @@ -677,6 +678,7 @@ func TestComputeControlPlaneVersion(t *testing.T) { topologyVersion string controlPlaneObj *unstructured.Unstructured upgradingMachineDeployments []string + upgradingMachinePools []string expectedVersion string wantErr bool }{ @@ -743,7 +745,7 @@ func TestComputeControlPlaneVersion(t *testing.T) { expectedVersion: "v1.2.2", }, { - name: "should return controlplane.spec.version if control plane is not upgrading and not scaling and one of the machine deployments is upgrading", + name: "should return controlplane.spec.version if control plane is not upgrading and not scaling and one of the MachineDeployments and one of the MachinePools is upgrading", topologyVersion: "v1.2.3", controlPlaneObj: builder.ControlPlane("test1", "cp1"). 
WithSpecFields(map[string]interface{}{ "spec.version": "v1.2.2", "spec.replicas": int64(2), }). WithStatusFields(map[string]interface{}{ "status.version": "v1.2.2", "status.replicas": int64(2), "status.updatedReplicas": int64(2), "status.readyReplicas": int64(2), }). Build(), upgradingMachineDeployments: []string{"md1"}, + upgradingMachinePools: []string{"mp1"}, expectedVersion: "v1.2.2", }, { - name: "should return cluster.spec.topology.version if control plane is not upgrading and not scaling and none of the machine deployments are upgrading - hook returns non blocking response", + name: "should return cluster.spec.topology.version if control plane is not upgrading and not scaling and none of the MachineDeployments and MachinePools are upgrading - hook returns non blocking response", hookResponse: nonBlockingBeforeClusterUpgradeResponse, topologyVersion: "v1.2.3", controlPlaneObj: builder.ControlPlane("test1", "cp1"). WithSpecFields(map[string]interface{}{ "spec.version": "v1.2.2", "spec.replicas": int64(2), }). WithStatusFields(map[string]interface{}{ "status.version": "v1.2.2", "status.replicas": int64(2), "status.updatedReplicas": int64(2), "status.readyReplicas": int64(2), }). Build(), upgradingMachineDeployments: []string{}, + upgradingMachinePools: []string{}, expectedVersion: "v1.2.3", }, { @@ -847,6 +851,9 @@ func TestComputeControlPlaneVersion(t *testing.T) { if len(tt.upgradingMachineDeployments) > 0 { s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...) } + if len(tt.upgradingMachinePools) > 0 { + s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...) + } runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). WithCatalog(catalog). @@ -1508,7 +1515,7 @@ func TestComputeMachineDeployment(t *testing.T) { Replicas: &currentReplicas, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: pointer.String("v1.21.2"), + Version: pointer.String(version), Bootstrap: clusterv1.Bootstrap{ ConfigRef: contract.ObjToRef(workerBootstrapTemplate), }, @@ -1718,6 +1725,365 @@ func TestComputeMachineDeployment(t *testing.T) { }) } +func TestComputeMachinePool(t *testing.T) { + workerInfrastructureMachinePool := builder.InfrastructureMachinePool(metav1.NamespaceDefault, "linux-worker-inframachinepool"). + Build() + workerInfrastructureMachinePoolTemplate := builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "linux-worker-inframachinepooltemplate"). + Build() + workerBootstrapConfig := builder.BootstrapConfig(metav1.NamespaceDefault, "linux-worker-bootstrap"). + Build() + workerBootstrapTemplate := builder.BootstrapTemplate(metav1.NamespaceDefault, "linux-worker-bootstraptemplate"). + Build() + labels := map[string]string{"fizzLabel": "buzz", "fooLabel": "bar"} + annotations := map[string]string{"fizzAnnotation": "buzz", "fooAnnotation": "bar"} + + clusterClassDuration := metav1.Duration{Duration: 20 * time.Second} + clusterClassFailureDomains := []string{"A", "B"} + var clusterClassMinReadySeconds int32 = 20 + mp1 := builder.MachinePoolClass("linux-worker"). + WithLabels(labels). + WithAnnotations(annotations). + WithInfrastructureTemplate(workerInfrastructureMachinePoolTemplate). + WithBootstrapTemplate(workerBootstrapTemplate). + WithFailureDomains("A", "B"). + WithNodeDrainTimeout(&clusterClassDuration). + WithNodeVolumeDetachTimeout(&clusterClassDuration). + WithNodeDeletionTimeout(&clusterClassDuration). + WithMinReadySeconds(&clusterClassMinReadySeconds). + Build() + mcps := []clusterv1.MachinePoolClass{*mp1} + fakeClass := builder.ClusterClass(metav1.NamespaceDefault, "class1"). + WithWorkerMachinePoolClasses(mcps...).
+ Build() + + version := "v1.21.3" + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + Namespace: metav1.NamespaceDefault, + }, + Spec: clusterv1.ClusterSpec{ + Topology: &clusterv1.Topology{ + Version: version, + }, + }, + } + + blueprint := &scope.ClusterBlueprint{ + Topology: cluster.Spec.Topology, + ClusterClass: fakeClass, + MachinePools: map[string]*scope.MachinePoolBlueprint{ + "linux-worker": { + Metadata: clusterv1.ObjectMeta{ + Labels: labels, + Annotations: annotations, + }, + BootstrapTemplate: workerBootstrapTemplate, + InfrastructureMachinePoolTemplate: workerInfrastructureMachinePoolTemplate, + }, + }, + } + + replicas := int32(5) + topologyFailureDomains := []string{"A", "B"} + topologyDuration := metav1.Duration{Duration: 10 * time.Second} + var topologyMinReadySeconds int32 = 10 + mpTopology := clusterv1.MachinePoolTopology{ + Metadata: clusterv1.ObjectMeta{ + Labels: map[string]string{ + // Should overwrite the label from the MachinePool class. + "fooLabel": "baz", + }, + Annotations: map[string]string{ + // Should overwrite the annotation from the MachinePool class. + "fooAnnotation": "baz", + // These annotations should not be propagated to the MachinePool. + clusterv1.ClusterTopologyDeferUpgradeAnnotation: "", + clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "", + }, + }, + Class: "linux-worker", + Name: "big-pool-of-machines", + Replicas: &replicas, + FailureDomains: topologyFailureDomains, + NodeDrainTimeout: &topologyDuration, + NodeVolumeDetachTimeout: &topologyDuration, + NodeDeletionTimeout: &topologyDuration, + MinReadySeconds: &topologyMinReadySeconds, + } + + t.Run("Generates the machine pool and the referenced templates", func(t *testing.T) { + g := NewWithT(t) + scope := scope.New(cluster) + scope.Blueprint = blueprint + + actual, err := computeMachinePool(ctx, scope, mpTopology) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(actual.BootstrapObject.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachinePoolNameLabel, "big-pool-of-machines")) + + // Ensure Cluster ownership is added to generated BootstrapObject. + g.Expect(actual.BootstrapObject.GetOwnerReferences()).To(HaveLen(1)) + g.Expect(actual.BootstrapObject.GetOwnerReferences()[0].Kind).To(Equal("Cluster")) + g.Expect(actual.BootstrapObject.GetOwnerReferences()[0].Name).To(Equal(cluster.Name)) + + g.Expect(actual.InfrastructureMachinePoolObject.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachinePoolNameLabel, "big-pool-of-machines")) + + // Ensure Cluster ownership is added to generated InfrastructureMachinePool. 
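+ // The Cluster owner reference ties the generated object's lifecycle to the Cluster, so it is garbage collected when the Cluster is deleted.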
+ g.Expect(actual.InfrastructureMachinePoolObject.GetOwnerReferences()).To(HaveLen(1)) + g.Expect(actual.InfrastructureMachinePoolObject.GetOwnerReferences()[0].Kind).To(Equal("Cluster")) + g.Expect(actual.InfrastructureMachinePoolObject.GetOwnerReferences()[0].Name).To(Equal(cluster.Name)) + + actualMp := actual.Object + g.Expect(*actualMp.Spec.Replicas).To(Equal(replicas)) + g.Expect(*actualMp.Spec.MinReadySeconds).To(Equal(topologyMinReadySeconds)) + g.Expect(actualMp.Spec.FailureDomains).To(Equal(topologyFailureDomains)) + g.Expect(*actualMp.Spec.Template.Spec.NodeDrainTimeout).To(Equal(topologyDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(topologyDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(topologyDuration)) + g.Expect(actualMp.Spec.ClusterName).To(Equal("cluster1")) + g.Expect(actualMp.Name).To(ContainSubstring("cluster1")) + g.Expect(actualMp.Name).To(ContainSubstring("big-pool-of-machines")) + + expectedAnnotations := util.MergeMap(mpTopology.Metadata.Annotations, mp1.Template.Metadata.Annotations) + delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation) + delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation) + g.Expect(actualMp.Annotations).To(Equal(expectedAnnotations)) + g.Expect(actualMp.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations)) + + g.Expect(actualMp.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines", + }))) + g.Expect(actualMp.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines", + }))) + + g.Expect(actualMp.Spec.Template.Spec.InfrastructureRef.Name).ToNot(Equal("linux-worker-inframachinepooltemplate")) + g.Expect(actualMp.Spec.Template.Spec.Bootstrap.ConfigRef.Name).ToNot(Equal("linux-worker-bootstraptemplate")) + }) + t.Run("Generates the machine pool and the referenced templates using ClusterClass defaults", func(t *testing.T) { + g := NewWithT(t) + scope := scope.New(cluster) + scope.Blueprint = blueprint + + mpTopology := clusterv1.MachinePoolTopology{ + Metadata: clusterv1.ObjectMeta{ + Labels: map[string]string{"foo": "baz"}, + }, + Class: "linux-worker", + Name: "big-pool-of-machines", + Replicas: &replicas, + // missing FailureDomains, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds + } + + actual, err := computeMachinePool(ctx, scope, mpTopology) + g.Expect(err).ToNot(HaveOccurred()) + + // checking only values from CC defaults + actualMp := actual.Object + g.Expect(*actualMp.Spec.MinReadySeconds).To(Equal(clusterClassMinReadySeconds)) + g.Expect(actualMp.Spec.FailureDomains).To(Equal(clusterClassFailureDomains)) + g.Expect(*actualMp.Spec.Template.Spec.NodeDrainTimeout).To(Equal(clusterClassDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(clusterClassDuration)) + g.Expect(*actualMp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(clusterClassDuration)) + }) + + t.Run("If there is already a machine pool, it preserves the object name and the reference names", func(t *testing.T) {
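+ // Seed the current state with an existing MachinePool: computeMachinePool should then reuse the object name and the existing BootstrapConfig/InfrastructureMachinePool reference names instead of generating new ones.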
g := NewWithT(t) + s := scope.New(cluster) + s.Blueprint = blueprint + + currentReplicas := int32(3) + currentMp := &expv1.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-pool-1", + }, + Spec: expv1.MachinePoolSpec{ + Replicas: ¤tReplicas, + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Version: pointer.String(version), + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: contract.ObjToRef(workerBootstrapConfig), + }, + InfrastructureRef: *contract.ObjToRef(workerInfrastructureMachinePool), + }, + }, + }, + } + s.Current.MachinePools = map[string]*scope.MachinePoolState{ + "big-pool-of-machines": { + Object: currentMp, + BootstrapObject: workerBootstrapConfig, + InfrastructureMachinePoolObject: workerInfrastructureMachinePool, + }, + } + + actual, err := computeMachinePool(ctx, s, mpTopology) + g.Expect(err).ToNot(HaveOccurred()) + + actualMp := actual.Object + + g.Expect(*actualMp.Spec.Replicas).NotTo(Equal(currentReplicas)) + g.Expect(actualMp.Spec.FailureDomains).To(Equal(topologyFailureDomains)) + g.Expect(actualMp.Name).To(Equal("existing-pool-1")) + + expectedAnnotations := util.MergeMap(mpTopology.Metadata.Annotations, mp1.Template.Metadata.Annotations) + delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation) + delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation) + g.Expect(actualMp.Annotations).To(Equal(expectedAnnotations)) + g.Expect(actualMp.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations)) + + g.Expect(actualMp.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines", + }))) + g.Expect(actualMp.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines", + }))) + + g.Expect(actualMp.Spec.Template.Spec.InfrastructureRef.Name).To(Equal("linux-worker-inframachinepool")) + g.Expect(actualMp.Spec.Template.Spec.Bootstrap.ConfigRef.Name).To(Equal("linux-worker-bootstrap")) + }) + + t.Run("If a machine pool references a topology class that does not exist, machine pool generation fails", func(t *testing.T) { + g := NewWithT(t) + scope := scope.New(cluster) + scope.Blueprint = blueprint + + mpTopology = clusterv1.MachinePoolTopology{ + Metadata: clusterv1.ObjectMeta{ + Labels: map[string]string{"foo": "baz"}, + }, + Class: "windows-worker", + Name: "big-pool-of-machines", + } + + _, err := computeMachinePool(ctx, scope, mpTopology) + g.Expect(err).To(HaveOccurred()) + }) + + t.Run("Should choose the correct version for machine pool", func(t *testing.T) { + controlPlaneStable123 := builder.ControlPlane("test1", "cp1"). + WithSpecFields(map[string]interface{}{ + "spec.version": "v1.2.3", + "spec.replicas": int64(2), + }). + WithStatusFields(map[string]interface{}{ + "status.version": "v1.2.3", + "status.replicas": int64(2), + "status.updatedReplicas": int64(2), + "status.readyReplicas": int64(2), + }). + Build() + + // Note: in all the following tests we are setting it up so that the control plane is already + // stable at the topology version. + // A more extensive list of scenarios is tested in TestComputeMachinePoolVersion. 
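+ // The allowed upgrade concurrency is injected per test case via the ClusterTopologyUpgradeConcurrencyAnnotation set on the test Cluster below.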
+ tests := []struct { + name string + upgradingMachinePools []string + currentMPVersion *string + upgradeConcurrency string + topologyVersion string + expectedVersion string + }{ + { + name: "use cluster.spec.topology.version if creating a new machine pool", + upgradingMachinePools: []string{}, + upgradeConcurrency: "1", + currentMPVersion: nil, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + }, + { + name: "use cluster.spec.topology.version if creating a new machine pool while another machine pool is upgrading", + upgradingMachinePools: []string{"upgrading-mp1"}, + upgradeConcurrency: "1", + currentMPVersion: nil, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + }, + { + name: "use machine pool's spec.template.spec.version if one of the machine pools is upgrading, concurrency limit reached", + upgradingMachinePools: []string{"upgrading-mp1"}, + upgradeConcurrency: "1", + currentMPVersion: pointer.String("v1.2.2"), + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + }, + { + name: "use cluster.spec.topology.version if one of the machine pools is upgrading, concurrency limit not reached", + upgradingMachinePools: []string{"upgrading-mp1"}, + upgradeConcurrency: "2", + currentMPVersion: pointer.String("v1.2.2"), + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + testCluster := cluster.DeepCopy() + if testCluster.Annotations == nil { + testCluster.Annotations = map[string]string{} + } + testCluster.Annotations[clusterv1.ClusterTopologyUpgradeConcurrencyAnnotation] = tt.upgradeConcurrency + + s := scope.New(testCluster) + s.Blueprint = blueprint + s.Blueprint.Topology.Version = tt.topologyVersion + s.Blueprint.Topology.ControlPlane = clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + } + s.Blueprint.Topology.Workers = &clusterv1.WorkersTopology{} + + mpsState := scope.MachinePoolsStateMap{} + if tt.currentMPVersion != nil { + // testing a case with an existing machine pool + // add the stable machine pool to the current machine pools state + mp := builder.MachinePool("test-namespace", "big-pool-of-machines"). + WithReplicas(2). + WithVersion(*tt.currentMPVersion). + WithStatus(expv1.MachinePoolStatus{ + ObservedGeneration: 2, + Replicas: 2, + ReadyReplicas: 2, + AvailableReplicas: 2, + }). + Build() + mpsState = duplicateMachinePoolsState(mpsState) + mpsState["big-pool-of-machines"] = &scope.MachinePoolState{ + Object: mp, + } + } + s.Current.MachinePools = mpsState + s.Current.ControlPlane = &scope.ControlPlaneState{ + Object: controlPlaneStable123, + } + + mpTopology := clusterv1.MachinePoolTopology{ + Class: "linux-worker", + Name: "big-pool-of-machines", + Replicas: pointer.Int32(2), + } + s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...) + obj, err := computeMachinePool(ctx, s, mpTopology) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(*obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion)) + }) + } + }) +} + func TestComputeMachineDeploymentVersion(t *testing.T) { controlPlaneObj := builder.ControlPlane("test1", "cp1"). Build() @@ -1896,6 +2262,184 @@ func TestComputeMachineDeploymentVersion(t *testing.T) { } } +func TestComputeMachinePoolVersion(t *testing.T) { + controlPlaneObj := builder.ControlPlane("test1", "cp1"). 
+ Build() + + mpName := "mp-1" + currentMachinePoolState := &scope.MachinePoolState{Object: builder.MachinePool("test1", mpName).WithVersion("v1.2.2").Build()} + + tests := []struct { + name string + machinePoolTopology clusterv1.MachinePoolTopology + currentMachinePoolState *scope.MachinePoolState + upgradingMachinePools []string + upgradeConcurrency int + controlPlaneStartingUpgrade bool + controlPlaneUpgrading bool + controlPlaneScaling bool + controlPlaneProvisioning bool + afterControlPlaneUpgradeHookBlocking bool + topologyVersion string + expectedVersion string + expectPendingCreate bool + expectPendingUpgrade bool + }{ + { + name: "should return cluster.spec.topology.version if creating a new MachinePool and if control plane is stable - not marked as pending create", + currentMachinePoolState: nil, + machinePoolTopology: clusterv1.MachinePoolTopology{ + Name: "mp-topology-1", + }, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + expectPendingCreate: false, + }, + { + name: "should return cluster.spec.topology.version if creating a new MachinePool and if control plane is not stable - marked as pending create", + controlPlaneScaling: true, + machinePoolTopology: clusterv1.MachinePoolTopology{ + Name: "mp-topology-1", + }, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + expectPendingCreate: true, + }, + { + name: "should return MachinePool's spec.template.spec.version if upgrade is deferred", + machinePoolTopology: clusterv1.MachinePoolTopology{ + Metadata: clusterv1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.ClusterTopologyDeferUpgradeAnnotation: "", + }, + }, + }, + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{}, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + expectPendingUpgrade: true, + }, + { + // Control plane is considered upgrading if the control plane's spec.version and status.version are not equal. + name: "should return MachinePool's spec.template.spec.version if control plane is upgrading", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{}, + controlPlaneUpgrading: true, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + expectPendingUpgrade: true, + }, + { + // Control plane is considered ready to upgrade if spec.version of current and desired control planes are not equal. + name: "should return MachinePool's spec.template.spec.version if control plane is starting upgrade", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{}, + controlPlaneStartingUpgrade: true, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + expectPendingUpgrade: true, + }, + { + // Control plane is considered scaling if its spec.replicas is not equal to any of status.replicas, status.readyReplicas or status.updatedReplicas.
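+ // While the control plane is scaling, the MachinePool keeps its current version and is tracked as pending upgrade.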
+ name: "should return MachinePool's spec.template.spec.version if control plane is scaling", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{}, + controlPlaneScaling: true, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + expectPendingUpgrade: true, + }, + { + name: "should return cluster.spec.topology.version if the control plane is not upgrading, not scaling, not ready to upgrade and none of the MachinePools are upgrading", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{}, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + expectPendingUpgrade: false, + }, + { + name: "should return MachinePool's spec.template.spec.version if control plane is stable, other MachinePools are upgrading, concurrency limit not reached but AfterControlPlaneUpgrade hook is blocking", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{"upgrading-mp1"}, + upgradeConcurrency: 2, + afterControlPlaneUpgradeHookBlocking: true, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + expectPendingUpgrade: true, + }, + { + name: "should return cluster.spec.topology.version if control plane is stable, other MachinePools are upgrading, concurrency limit not reached", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{"upgrading-mp1"}, + upgradeConcurrency: 2, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.3", + expectPendingUpgrade: false, + }, + { + name: "should return MachinePool's spec.template.spec.version if control plane is stable, other MachinePools are upgrading, concurrency limit reached", + currentMachinePoolState: currentMachinePoolState, + upgradingMachinePools: []string{"upgrading-mp1", "upgrading-mp2"}, + upgradeConcurrency: 2, + topologyVersion: "v1.2.3", + expectedVersion: "v1.2.2", + expectPendingUpgrade: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + s := &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{ + Version: tt.topologyVersion, + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + Workers: &clusterv1.WorkersTopology{}, + }}, + Current: &scope.ClusterState{ + ControlPlane: &scope.ControlPlaneState{Object: controlPlaneObj}, + }, + UpgradeTracker: scope.NewUpgradeTracker(scope.MaxMPUpgradeConcurrency(tt.upgradeConcurrency)), + HookResponseTracker: scope.NewHookResponseTracker(), + } + if tt.afterControlPlaneUpgradeHookBlocking { + s.HookResponseTracker.Add(runtimehooksv1.AfterControlPlaneUpgrade, &runtimehooksv1.AfterControlPlaneUpgradeResponse{ + CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ + RetryAfterSeconds: 10, + }, + }) + } + s.UpgradeTracker.ControlPlane.IsStartingUpgrade = tt.controlPlaneStartingUpgrade + s.UpgradeTracker.ControlPlane.IsUpgrading = tt.controlPlaneUpgrading + s.UpgradeTracker.ControlPlane.IsScaling = tt.controlPlaneScaling + s.UpgradeTracker.ControlPlane.IsProvisioning = tt.controlPlaneProvisioning + s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...) + version := computeMachinePoolVersion(s, tt.machinePoolTopology, tt.currentMachinePoolState) + g.Expect(version).To(Equal(tt.expectedVersion)) + + if tt.currentMachinePoolState != nil { + // Verify that if the upgrade is pending it is captured in the upgrade tracker. 
+ if tt.expectPendingUpgrade { + g.Expect(s.UpgradeTracker.MachinePools.IsPendingUpgrade(mpName)).To(BeTrue(), "MachinePool should be marked as pending upgrade") + } else { + g.Expect(s.UpgradeTracker.MachinePools.IsPendingUpgrade(mpName)).To(BeFalse(), "MachinePool should not be marked as pending upgrade") + } + } else { + // Verify that if the create is pending it is captured in the tracker. + if tt.expectPendingCreate { + g.Expect(s.UpgradeTracker.MachinePools.IsPendingCreate(tt.machinePoolTopology.Name)).To(BeTrue(), "MachinePool topology should be marked as pending create") + } else { + g.Expect(s.UpgradeTracker.MachinePools.IsPendingCreate(tt.machinePoolTopology.Name)).To(BeFalse(), "MachinePool topology should not be marked as pending create") + } + } + }) + } +} + func TestIsMachineDeploymentDeferred(t *testing.T) { clusterTopology := &clusterv1.Topology{ Workers: &clusterv1.WorkersTopology{ @@ -1979,6 +2523,89 @@ func TestIsMachineDeploymentDeferred(t *testing.T) { } } +func TestIsMachinePoolDeferred(t *testing.T) { + clusterTopology := &clusterv1.Topology{ + Workers: &clusterv1.WorkersTopology{ + MachinePools: []clusterv1.MachinePoolTopology{ + { + Name: "mp-with-defer-upgrade", + Metadata: clusterv1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.ClusterTopologyDeferUpgradeAnnotation: "", + }, + }, + }, + { + Name: "mp-without-annotations", + }, + { + Name: "mp-with-hold-upgrade-sequence", + Metadata: clusterv1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "", + }, + }, + }, + { + Name: "mp-after-mp-with-hold-upgrade-sequence", + }, + }, + }, + } + + tests := []struct { + name string + mpTopology clusterv1.MachinePoolTopology + deferred bool + }{ + { + name: "MP with defer-upgrade annotation is deferred", + mpTopology: clusterv1.MachinePoolTopology{ + Name: "mp-with-defer-upgrade", + Metadata: clusterv1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.ClusterTopologyDeferUpgradeAnnotation: "", + }, + }, + }, + deferred: true, + }, + { + name: "MP without annotations is not deferred", + mpTopology: clusterv1.MachinePoolTopology{ + Name: "mp-without-annotations", + }, + deferred: false, + }, + { + name: "MP with hold-upgrade-sequence annotation is deferred", + mpTopology: clusterv1.MachinePoolTopology{ + Name: "mp-with-hold-upgrade-sequence", + Metadata: clusterv1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "", + }, + }, + }, + deferred: true, + }, + { + name: "MP after mp with hold-upgrade-sequence is deferred", + mpTopology: clusterv1.MachinePoolTopology{ + Name: "mp-after-mp-with-hold-upgrade-sequence", + }, + deferred: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + g.Expect(isMachinePoolDeferred(clusterTopology, tt.mpTopology)).To(Equal(tt.deferred)) + }) + } +} + func TestTemplateToObject(t *testing.T) { template := builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infrastructureClusterTemplate"). WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}). @@ -2192,6 +2819,14 @@ func duplicateMachineDeploymentsState(s scope.MachineDeploymentsStateMap) scope.
return n } +func duplicateMachinePoolsState(s scope.MachinePoolsStateMap) scope.MachinePoolsStateMap { + n := make(scope.MachinePoolsStateMap) + for k, v := range s { + n[k] = v + } + return n +} + func TestMergeMap(t *testing.T) { t.Run("Merge maps", func(t *testing.T) { g := NewWithT(t) diff --git a/internal/test/builder/infrastructure.go b/internal/test/builder/infrastructure.go index 8e0d78e197e5..88e00875493a 100644 --- a/internal/test/builder/infrastructure.go +++ b/internal/test/builder/infrastructure.go @@ -37,9 +37,13 @@ var ( // GenericInfrastructureMachinePoolTemplateKind is the Kind for the GenericInfrastructureMachinePoolTemplate. GenericInfrastructureMachinePoolTemplateKind = "GenericInfrastructureMachinePoolTemplate" + // GenericInfrastructureMachinePoolTemplateCRD is a generic infrastructure machine pool template CRD. + GenericInfrastructureMachinePoolTemplateCRD = untypedCRD(InfrastructureGroupVersion.WithKind(GenericInfrastructureMachinePoolTemplateKind)) // GenericInfrastructureMachinePoolKind is the Kind for the GenericInfrastructureMachinePool. GenericInfrastructureMachinePoolKind = "GenericInfrastructureMachinePool" + // GenericInfrastructureMachinePoolCRD is a generic infrastructure machine pool CRD. + GenericInfrastructureMachinePoolCRD = untypedCRD(InfrastructureGroupVersion.WithKind(GenericInfrastructureMachinePoolKind)) // GenericInfrastructureClusterKind is the kind for the GenericInfrastructureCluster type. GenericInfrastructureClusterKind = "GenericInfrastructureCluster"