diff --git a/cmd/main.go b/cmd/main.go index e6044fcd..d7088f0f 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -46,6 +46,7 @@ import ( "github.com/kubevirt/ipam-extensions/pkg/ipamclaimswebhook" "github.com/kubevirt/ipam-extensions/pkg/vminetworkscontroller" + "github.com/kubevirt/ipam-extensions/pkg/vmnetworkscontroller" //+kubebuilder:scaffold:imports ) @@ -156,6 +157,11 @@ func main() { os.Exit(1) } + if err = vmnetworkscontroller.NewVMReconciler(mgr).Setup(); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "VirtualMachine") + os.Exit(1) + } + if err = vminetworkscontroller.NewVMIReconciler(mgr).Setup(); err != nil { setupLog.Error(err, "unable to create controller", "controller", "VirtualMachineInstance") os.Exit(1) diff --git a/pkg/claims/claims.go b/pkg/claims/claims.go new file mode 100644 index 00000000..8c8a5370 --- /dev/null +++ b/pkg/claims/claims.go @@ -0,0 +1,43 @@ +package claims + +import ( + "context" + "fmt" + + apitypes "k8s.io/apimachinery/pkg/types" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + ipamclaimsapi "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + + virtv1 "kubevirt.io/api/core/v1" +) + +const KubevirtVMFinalizer = "kubevirt.io/persistent-ipam" + +func Cleanup(c client.Client, vmiKey apitypes.NamespacedName) error { + ipamClaims := &ipamclaimsapi.IPAMClaimList{} + listOpts := []client.ListOption{ + client.InNamespace(vmiKey.Namespace), + OwnedByVMLabel(vmiKey.Name), + } + if err := c.List(context.Background(), ipamClaims, listOpts...); err != nil { + return fmt.Errorf("could not get list of IPAMClaims owned by VM %q: %w", vmiKey.String(), err) + } + + for _, claim := range ipamClaims.Items { + if controllerutil.RemoveFinalizer(&claim, KubevirtVMFinalizer) { + if err := c.Update(context.Background(), &claim, &client.UpdateOptions{}); err != nil { + return client.IgnoreNotFound(err) + } + } + } + return nil +} + 
+func OwnedByVMLabel(vmiName string) client.MatchingLabels { + return map[string]string{ + virtv1.VirtualMachineLabel: vmiName, + } +} diff --git a/pkg/vminetworkscontroller/vmi_controller.go b/pkg/vminetworkscontroller/vmi_controller.go index 03b986dd..3681b4f3 100644 --- a/pkg/vminetworkscontroller/vmi_controller.go +++ b/pkg/vminetworkscontroller/vmi_controller.go @@ -8,6 +8,8 @@ import ( "github.com/go-logr/logr" + "k8s.io/utils/ptr" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -15,7 +17,6 @@ import ( controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -24,11 +25,10 @@ import ( virtv1 "kubevirt.io/api/core/v1" + "github.com/kubevirt/ipam-extensions/pkg/claims" "github.com/kubevirt/ipam-extensions/pkg/config" ) -const kubevirtVMFinalizer = "kubevirt.io/persistent-ipam" - // VirtualMachineInstanceReconciler reconciles a VirtualMachineInstance object type VirtualMachineInstanceReconciler struct { client.Client @@ -56,30 +56,25 @@ func (r *VirtualMachineInstanceReconciler) Reconcile( defer cancel() err := r.Client.Get(contextWithTimeout, request.NamespacedName, vmi) if apierrors.IsNotFound(err) { - r.Log.Info("could not retrieve VMI - will cleanup its IPAMClaims") - if err := r.Cleanup(request.NamespacedName); err != nil { - return controllerruntime.Result{}, fmt.Errorf("error removing the IPAMClaims finalizer: %w", err) - } - return controllerruntime.Result{}, nil + vmi = nil } else if err != nil { return controllerruntime.Result{}, err } - var ownerInfo metav1.OwnerReference - vm := &virtv1.VirtualMachine{} - contextWithTimeout, cancel = context.WithTimeout(ctx, time.Second) - defer cancel() - if err := r.Client.Get(contextWithTimeout, request.NamespacedName, vm); 
apierrors.IsNotFound(err) { - r.Log.Info("Corresponding VM not found", "vm", request.NamespacedName) - ownerInfo = metav1.OwnerReference{APIVersion: vmi.APIVersion, Kind: vmi.Kind, Name: vmi.Name, UID: vmi.UID} - } else if err == nil { - ownerInfo = metav1.OwnerReference{APIVersion: vm.APIVersion, Kind: vm.Kind, Name: vm.Name, UID: vm.UID} - } else { - return controllerruntime.Result{}, fmt.Errorf( - "error to get VMI %q corresponding VM: %w", - request.NamespacedName, - err, - ) + vm, err := getOwningVM(ctx, r.Client, request.NamespacedName) + if err != nil { + return controllerruntime.Result{}, err + } + + if shouldCleanFinalizers(vmi, vm) { + if err := claims.Cleanup(r.Client, request.NamespacedName); err != nil { + return controllerruntime.Result{}, fmt.Errorf("failed removing the IPAMClaims finalizer: %w", err) + } + return controllerruntime.Result{}, nil + } + + if vmi == nil { + return controllerruntime.Result{}, nil } vmiNetworks, err := r.vmiNetworksClaimingIPAM(ctx, vmi) @@ -87,6 +82,7 @@ func (r *VirtualMachineInstanceReconciler) Reconcile( return controllerruntime.Result{}, err } + ownerInfo := ownerReferenceFor(vmi, vm) for logicalNetworkName, netConfigName := range vmiNetworks { claimKey := fmt.Sprintf("%s.%s", vmi.Name, logicalNetworkName) ipamClaim := &ipamclaimsapi.IPAMClaim{ @@ -94,8 +90,8 @@ func (r *VirtualMachineInstanceReconciler) Reconcile( Name: claimKey, Namespace: vmi.Namespace, OwnerReferences: []metav1.OwnerReference{ownerInfo}, - Finalizers: []string{kubevirtVMFinalizer}, - Labels: ownedByVMLabel(vmi.Name), + Finalizers: []string{claims.KubevirtVMFinalizer}, + Labels: claims.OwnedByVMLabel(vmi.Name), }, Spec: ipamclaimsapi.IPAMClaimSpec{ Network: netConfigName, @@ -148,7 +144,7 @@ func onVMIPredicates() predicate.Funcs { return true }, UpdateFunc: func(updateEvent event.UpdateEvent) bool { - return false + return true }, GenericFunc: func(event.GenericEvent) bool { return false @@ -202,29 +198,47 @@ func (r 
*VirtualMachineInstanceReconciler) vmiNetworksClaimingIPAM( return vmiNets, nil } -func (r *VirtualMachineInstanceReconciler) Cleanup(vmiKey apitypes.NamespacedName) error { - ipamClaims := &ipamclaimsapi.IPAMClaimList{} - listOpts := []client.ListOption{ - client.InNamespace(vmiKey.Namespace), - ownedByVMLabel(vmiKey.Name), +func shouldCleanFinalizers(vmi *virtv1.VirtualMachineInstance, vm *virtv1.VirtualMachine) bool { + if vm != nil { + // VMI is gone and VM is marked for deletion + return vmi == nil && vm.DeletionTimestamp != nil + } else { + // VMI is gone or VMI is marked for deletion, virt-launcher is gone + return vmi == nil || (vmi.DeletionTimestamp != nil && len(vmi.Status.ActivePods) == 0) } - if err := r.Client.List(context.Background(), ipamClaims, listOpts...); err != nil { - return fmt.Errorf("could not get list of IPAMClaims owned by VM %q: %w", vmiKey.String(), err) +} + +func ownerReferenceFor(vmi *virtv1.VirtualMachineInstance, vm *virtv1.VirtualMachine) metav1.OwnerReference { + var obj client.Object + if vm != nil { + obj = vm + } else { + obj = vmi } - for _, claim := range ipamClaims.Items { - removedFinalizer := controllerutil.RemoveFinalizer(&claim, kubevirtVMFinalizer) - if removedFinalizer { - if err := r.Client.Update(context.Background(), &claim, &client.UpdateOptions{}); err != nil { - return client.IgnoreNotFound(err) - } - } + aPIVersion := obj.GetObjectKind().GroupVersionKind().Group + "/" + obj.GetObjectKind().GroupVersionKind().Version + return metav1.OwnerReference{ + APIVersion: aPIVersion, + Kind: obj.GetObjectKind().GroupVersionKind().Kind, + Name: obj.GetName(), + UID: obj.GetUID(), + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), } - return nil } -func ownedByVMLabel(vmiName string) client.MatchingLabels { - return map[string]string{ - virtv1.VirtualMachineLabel: vmiName, +// Gets the owning VM if any. 
For simplicity it just tries to fetch the VM, +// even when the VMI exists, instead of parsing ownerReferences and handling differently the nil VMI case. +func getOwningVM(ctx context.Context, c client.Client, name apitypes.NamespacedName) (*virtv1.VirtualMachine, error) { + contextWithTimeout, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + vm := &virtv1.VirtualMachine{} + if err := c.Get(contextWithTimeout, name, vm); err == nil { + return vm, nil + } else if apierrors.IsNotFound(err) { + return nil, nil + } else { + return nil, fmt.Errorf("failed getting VM %q: %w", name, err) + } } diff --git a/pkg/vminetworkscontroller/vmi_controller_test.go b/pkg/vminetworkscontroller/vmi_controller_test.go index 90e76d38..d9af04c2 100644 --- a/pkg/vminetworkscontroller/vmi_controller_test.go +++ b/pkg/vminetworkscontroller/vmi_controller_test.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" "testing" + "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -14,6 +15,7 @@ import ( apitypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "k8s.io/utils/ptr" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -27,11 +29,13 @@ import ( ipamclaimsapi "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + + "github.com/kubevirt/ipam-extensions/pkg/claims" ) func TestController(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Controller test suite") + RunSpecs(t, "VMI Controller test suite") } var ( @@ -48,7 +52,9 @@ type testConfig struct { expectedIPAMClaims []ipamclaimsapi.IPAMClaim } -var _ = Describe("vmi IPAM controller", Serial, func() { +const dummyUID = "dummyUID" + +var _ = Describe("VMI IPAM controller", Serial, func() { BeforeEach(func() { log.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) testEnv = 
&envtest.Environment{} @@ -69,7 +75,6 @@ var _ = Describe("vmi IPAM controller", Serial, func() { nadName = "ns1/superdupernad" namespace = "ns1" vmName = "vm1" - dummyUID = "dummyUID" unexpectedUID = "unexpectedUID" ) @@ -118,20 +123,20 @@ var _ = Describe("vmi IPAM controller", Serial, func() { mgr, err := controllerruntime.NewManager(&rest.Config{}, ctrlOptions) Expect(err).NotTo(HaveOccurred()) - reconcileMachine := NewVMIReconciler(mgr) + vmiReconciler := NewVMIReconciler(mgr) if config.expectedError != nil { - _, err := reconcileMachine.Reconcile(context.Background(), controllerruntime.Request{NamespacedName: vmiKey}) + _, err := vmiReconciler.Reconcile(context.Background(), controllerruntime.Request{NamespacedName: vmiKey}) Expect(err).To(MatchError(config.expectedError)) } else { Expect( - reconcileMachine.Reconcile(context.Background(), controllerruntime.Request{NamespacedName: vmiKey}), + vmiReconciler.Reconcile(context.Background(), controllerruntime.Request{NamespacedName: vmiKey}), ).To(Equal(config.expectedResponse)) } if len(config.expectedIPAMClaims) > 0 { ipamClaimList := &ipamclaimsapi.IPAMClaimList{} - Expect(mgr.GetClient().List(context.Background(), ipamClaimList, ownedByVMLabel(vmName))).To(Succeed()) + Expect(mgr.GetClient().List(context.Background(), ipamClaimList, claims.OwnedByVMLabel(vmName))).To(Succeed()) Expect(ipamClaimsCleaner(ipamClaimList.Items...)).To(ConsistOf(config.expectedIPAMClaims)) } }, @@ -143,11 +148,17 @@ var _ = Describe("vmi IPAM controller", Serial, func() { expectedIPAMClaims: []ipamclaimsapi.IPAMClaim{ { ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s.%s", vmName, "randomnet"), - Namespace: namespace, - Finalizers: []string{kubevirtVMFinalizer}, - Labels: ownedByVMLabel(vmName), - OwnerReferences: []metav1.OwnerReference{{Name: vmName}}, + Name: fmt.Sprintf("%s.%s", vmName, "randomnet"), + Namespace: namespace, + Finalizers: []string{claims.KubevirtVMFinalizer}, + Labels: claims.OwnedByVMLabel(vmName), + 
OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "kubevirt.io/v1", + Kind: "VirtualMachine", + Name: vmName, + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true)}, + }, }, Spec: ipamclaimsapi.IPAMClaimSpec{Network: "goodnet"}, }, @@ -186,14 +197,123 @@ var _ = Describe("vmi IPAM controller", Serial, func() { Entry("the VMI does not exist on the datastore - it might have been deleted in the meantime", testConfig{ expectedResponse: reconcile.Result{}, }), - Entry("the VMI was deleted, thus the existing IPAMClaims finalizers must be removed", testConfig{ + Entry("the VMI was deleted (VM doesnt exists as well), thus IPAMClaims finalizers must be removed", testConfig{ + expectedResponse: reconcile.Result{}, + existingIPAMClaim: &ipamclaimsapi.IPAMClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s.%s", vmName, "randomnet"), + Namespace: namespace, + Finalizers: []string{claims.KubevirtVMFinalizer}, + Labels: claims.OwnedByVMLabel(vmName), + }, + Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, + }, + expectedIPAMClaims: []ipamclaimsapi.IPAMClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "vm1.randomnet", + Namespace: "ns1", + Labels: claims.OwnedByVMLabel(vmName), + }, + Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, + }, + }, + }), + Entry("the VM was stopped, thus the existing IPAMClaims finalizers should be kept", testConfig{ + inputVM: dummyVM(dummyVMISpec(nadName)), + expectedResponse: reconcile.Result{}, + existingIPAMClaim: &ipamclaimsapi.IPAMClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s.%s", vmName, "randomnet"), + Namespace: namespace, + Finalizers: []string{claims.KubevirtVMFinalizer}, + Labels: claims.OwnedByVMLabel(vmName), + }, + Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, + }, + expectedIPAMClaims: []ipamclaimsapi.IPAMClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "vm1.randomnet", + Namespace: "ns1", + Finalizers: []string{claims.KubevirtVMFinalizer}, + 
Labels: claims.OwnedByVMLabel(vmName), + }, + Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, + }, + }, + }), + Entry("standalone VMI, marked for deletion, without pods, thus IPAMClaims finalizers must be removed", testConfig{ + inputVMI: dummyMarkedForDeletionVMI(nadName), + expectedResponse: reconcile.Result{}, + existingIPAMClaim: &ipamclaimsapi.IPAMClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s.%s", vmName, "randomnet"), + Namespace: namespace, + Finalizers: []string{claims.KubevirtVMFinalizer}, + Labels: claims.OwnedByVMLabel(vmName), + }, + Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, + }, + expectedIPAMClaims: []ipamclaimsapi.IPAMClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "vm1.randomnet", + Namespace: "ns1", + Labels: claims.OwnedByVMLabel(vmName), + }, + Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, + }, + }, + }), + Entry("standalone VMI which is marked for deletion, with active pods, should keep IPAMClaims finalizers", testConfig{ + inputVMI: dummyMarkedForDeletionVMIWithActivePods(nadName), + inputNAD: dummyNAD(nadName), + expectedResponse: reconcile.Result{}, + existingIPAMClaim: &ipamclaimsapi.IPAMClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s.%s", vmName, "randomnet"), + Namespace: namespace, + Finalizers: []string{claims.KubevirtVMFinalizer}, + Labels: claims.OwnedByVMLabel(vmName), + OwnerReferences: []metav1.OwnerReference{{ + Name: vmName, + UID: dummyUID, + Kind: "VirtualMachineInstance", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }}, + }, + Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, + }, + expectedIPAMClaims: []ipamclaimsapi.IPAMClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "vm1.randomnet", + Namespace: "ns1", + Finalizers: []string{claims.KubevirtVMFinalizer}, + Labels: claims.OwnedByVMLabel(vmName), + OwnerReferences: []metav1.OwnerReference{{ + Name: vmName, + UID: dummyUID, + Kind: "VirtualMachineInstance", + 
Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }}, + }, + Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, + }, + }, + }), + Entry("VM which is marked for deletion, without VMI, thus IPAMClaims finalizers must be removed", testConfig{ + inputVM: dummyMarkedForDeletionVM(nadName), expectedResponse: reconcile.Result{}, existingIPAMClaim: &ipamclaimsapi.IPAMClaim{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s.%s", vmName, "randomnet"), Namespace: namespace, - Finalizers: []string{kubevirtVMFinalizer}, - Labels: ownedByVMLabel(vmName), + Finalizers: []string{claims.KubevirtVMFinalizer}, + Labels: claims.OwnedByVMLabel(vmName), }, Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, }, @@ -202,7 +322,7 @@ var _ = Describe("vmi IPAM controller", Serial, func() { ObjectMeta: metav1.ObjectMeta{ Name: "vm1.randomnet", Namespace: "ns1", - Labels: ownedByVMLabel(vmName), + Labels: claims.OwnedByVMLabel(vmName), }, Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, }, @@ -237,8 +357,8 @@ var _ = Describe("vmi IPAM controller", Serial, func() { UID: dummyUID, }, }, - Labels: ownedByVMLabel(vmName), - Finalizers: []string{kubevirtVMFinalizer}, + Labels: claims.OwnedByVMLabel(vmName), + Finalizers: []string{claims.KubevirtVMFinalizer}, }, Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, }, @@ -248,7 +368,7 @@ var _ = Describe("vmi IPAM controller", Serial, func() { ObjectMeta: metav1.ObjectMeta{ Name: "vm1.randomnet", Namespace: "ns1", - Labels: ownedByVMLabel(vmName), + Labels: claims.OwnedByVMLabel(vmName), OwnerReferences: []metav1.OwnerReference{ { APIVersion: "v1", @@ -257,7 +377,7 @@ var _ = Describe("vmi IPAM controller", Serial, func() { UID: dummyUID, }, }, - Finalizers: []string{kubevirtVMFinalizer}, + Finalizers: []string{claims.KubevirtVMFinalizer}, }, Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, }, @@ -279,8 +399,8 @@ var _ = Describe("vmi IPAM controller", Serial, func() { UID: 
unexpectedUID, }, }, - Labels: ownedByVMLabel(vmName), - Finalizers: []string{kubevirtVMFinalizer}, + Labels: claims.OwnedByVMLabel(vmName), + Finalizers: []string{claims.KubevirtVMFinalizer}, }, Spec: ipamclaimsapi.IPAMClaimSpec{Network: "doesitmatter?"}, }, @@ -293,11 +413,17 @@ var _ = Describe("vmi IPAM controller", Serial, func() { expectedIPAMClaims: []ipamclaimsapi.IPAMClaim{ { ObjectMeta: metav1.ObjectMeta{ - Name: "vm1.randomnet", - Namespace: "ns1", - Labels: ownedByVMLabel(vmName), - Finalizers: []string{kubevirtVMFinalizer}, - OwnerReferences: []metav1.OwnerReference{{Name: vmName}}, + Name: "vm1.randomnet", + Namespace: "ns1", + Labels: claims.OwnedByVMLabel(vmName), + Finalizers: []string{claims.KubevirtVMFinalizer}, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "kubevirt.io/v1", + Kind: "VirtualMachineInstance", + Name: vmName, + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true)}, + }, }, Spec: ipamclaimsapi.IPAMClaimSpec{Network: "goodnet"}, }, @@ -308,6 +434,10 @@ var _ = Describe("vmi IPAM controller", Serial, func() { func dummyVM(vmiSpec virtv1.VirtualMachineInstanceSpec) *virtv1.VirtualMachine { return &virtv1.VirtualMachine{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "kubevirt.io/v1", + Kind: "VirtualMachine", + }, ObjectMeta: metav1.ObjectMeta{ Name: "vm1", Namespace: "ns1", @@ -322,6 +452,10 @@ func dummyVM(vmiSpec virtv1.VirtualMachineInstanceSpec) *virtv1.VirtualMachine { func dummyVMI(vmiSpec virtv1.VirtualMachineInstanceSpec) *virtv1.VirtualMachineInstance { return &virtv1.VirtualMachineInstance{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "kubevirt.io/v1", + Kind: "VirtualMachineInstance", + }, ObjectMeta: metav1.ObjectMeta{ Name: "vm1", Namespace: "ns1", @@ -402,3 +536,98 @@ func decorateVMWithUID(uid string, vm *virtv1.VirtualMachine) *virtv1.VirtualMac vm.UID = apitypes.UID(uid) return vm } +func dummyMarkedForDeletionVMI(nadName string) *virtv1.VirtualMachineInstance { + vmi := dummyVMI(dummyVMISpec(nadName)) 
+ vmi.DeletionTimestamp = &metav1.Time{Time: time.Now()} + vmi.ObjectMeta.Finalizers = []string{metav1.FinalizerDeleteDependents} + + return vmi +} + +func dummyMarkedForDeletionVMIWithActivePods(nadName string) *virtv1.VirtualMachineInstance { + vmi := dummyVMI(dummyVMISpec(nadName)) + vmi.DeletionTimestamp = &metav1.Time{Time: time.Now()} + vmi.ObjectMeta.Finalizers = []string{metav1.FinalizerDeleteDependents} + + vmi.Status.ActivePods = map[apitypes.UID]string{"podUID": "dummyNodeName"} + vmi.UID = apitypes.UID(dummyUID) + + return vmi +} + +func dummyMarkedForDeletionVM(nadName string) *virtv1.VirtualMachine { + vm := dummyVM(dummyVMISpec(nadName)) + vm.DeletionTimestamp = &metav1.Time{Time: time.Now()} + vm.ObjectMeta.Finalizers = []string{metav1.FinalizerDeleteDependents} + + return vm +} + +var _ = Describe("shouldCleanFinalizers", func() { + DescribeTable("should determine if finalizers should be cleaned up", + func(vmi *virtv1.VirtualMachineInstance, vm *virtv1.VirtualMachine, expectedResult bool) { + Expect(shouldCleanFinalizers(vmi, vm)).To(Equal(expectedResult)) + }, + Entry("VM exist, VMI gone, VM is marked for deletion", + nil, + &virtv1.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + }, + true, + ), + Entry("VM exist, VMI gone", + nil, + &virtv1.VirtualMachine{}, + false, + ), + Entry("VM exist, VMI exist", + &virtv1.VirtualMachineInstance{}, + &virtv1.VirtualMachine{}, + false), + Entry("VM exist, VMI exist, VM is marked for deletion", + &virtv1.VirtualMachineInstance{}, + &virtv1.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + }, + false, + ), + Entry("standalone VMI is gone", + nil, + nil, + true, + ), + Entry("standalone VMI, marked for deletion, without active pods", + &virtv1.VirtualMachineInstance{ + ObjectMeta: metav1.ObjectMeta{ + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + Status: 
virtv1.VirtualMachineInstanceStatus{ + ActivePods: map[apitypes.UID]string{}, + }, + }, + nil, + true, + ), + Entry("standalone VMI exist", + &virtv1.VirtualMachineInstance{}, + nil, + false, + ), + Entry("standalone VMI, marked for deletion, with ActivePods", + &virtv1.VirtualMachineInstance{ + ObjectMeta: metav1.ObjectMeta{ + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + Status: virtv1.VirtualMachineInstanceStatus{ + ActivePods: map[apitypes.UID]string{"uid": "node_name"}, + }, + }, + nil, + false, + ), + ) +}) diff --git a/pkg/vmnetworkscontroller/vm_controller.go b/pkg/vmnetworkscontroller/vm_controller.go new file mode 100644 index 00000000..f4ee9b9a --- /dev/null +++ b/pkg/vmnetworkscontroller/vm_controller.go @@ -0,0 +1,99 @@ +package vmnetworkscontroller + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + "github.com/kubevirt/ipam-extensions/pkg/claims" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + virtv1 "kubevirt.io/api/core/v1" +) + +// VirtualMachineReconciler reconciles a VirtualMachine object +type VirtualMachineReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + manager controllerruntime.Manager +} + +func NewVMReconciler(manager controllerruntime.Manager) *VirtualMachineReconciler { + return &VirtualMachineReconciler{ + Client: manager.GetClient(), + Log: controllerruntime.Log.WithName("controllers").WithName("VirtualMachine"), + Scheme: manager.GetScheme(), + manager: manager, + } +} + +func (r *VirtualMachineReconciler) Reconcile( + ctx context.Context, + request controllerruntime.Request, +) (controllerruntime.Result, error) { + vm := &virtv1.VirtualMachine{} + contextWithTimeout, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + 
shouldRemoveFinalizer := false + err := r.Client.Get(contextWithTimeout, request.NamespacedName, vm) + if apierrors.IsNotFound(err) { + shouldRemoveFinalizer = true + } else if err != nil { + return controllerruntime.Result{}, err + } + + if vm.DeletionTimestamp != nil { + vmi := &virtv1.VirtualMachineInstance{} + contextWithTimeout, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + err := r.Client.Get(contextWithTimeout, request.NamespacedName, vmi) + if apierrors.IsNotFound(err) { + shouldRemoveFinalizer = true + } else if err != nil { + return controllerruntime.Result{}, err + } + } + + if shouldRemoveFinalizer { + if err := claims.Cleanup(r.Client, request.NamespacedName); err != nil { + return controllerruntime.Result{}, fmt.Errorf("failed removing the IPAMClaims finalizer: %w", err) + } + } + + return controllerruntime.Result{}, nil +} + +// Setup sets up the controller with the Manager passed in the constructor. +func (r *VirtualMachineReconciler) Setup() error { + return controllerruntime.NewControllerManagedBy(r.manager). + For(&virtv1.VirtualMachine{}). + WithEventFilter(onVMPredicates()). + Complete(r) +} + +func onVMPredicates() predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(createEvent event.CreateEvent) bool { + return false + }, + DeleteFunc: func(event.DeleteEvent) bool { + return true + }, + UpdateFunc: func(updateEvent event.UpdateEvent) bool { + return true + }, + GenericFunc: func(event.GenericEvent) bool { + return false + }, + } +} diff --git a/pkg/vmnetworkscontroller/vm_controller_test.go b/pkg/vmnetworkscontroller/vm_controller_test.go new file mode 100644 index 00000000..d41581b1 --- /dev/null +++ b/pkg/vmnetworkscontroller/vm_controller_test.go @@ -0,0 +1,246 @@ +package vmnetworkscontroller_test + +import ( + "context" + "fmt" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + virtv1 "kubevirt.io/api/core/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apitypes "k8s.io/apimachinery/pkg/types" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + + "k8s.io/utils/ptr" + + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + ipamclaimsapi "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" + nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + + "github.com/kubevirt/ipam-extensions/pkg/claims" + "github.com/kubevirt/ipam-extensions/pkg/vmnetworkscontroller" +) + +func TestController(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "VM Controller test suite") +} + +type testConfig struct { + inputVM *virtv1.VirtualMachine + inputVMI *virtv1.VirtualMachineInstance + existingIPAMClaim *ipamclaimsapi.IPAMClaim + expectedError error + expectedResponse reconcile.Result + expectedIPAMClaims []ipamclaimsapi.IPAMClaim +} + +var ( + testEnv *envtest.Environment +) + +var _ = Describe("VM IPAM controller", Serial, func() { + BeforeEach(func() { + log.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + testEnv = &envtest.Environment{} + _, err := testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + + Expect(virtv1.AddToScheme(scheme.Scheme)).To(Succeed()) + Expect(nadv1.AddToScheme(scheme.Scheme)).To(Succeed()) + Expect(ipamclaimsapi.AddToScheme(scheme.Scheme)).To(Succeed()) + // +kubebuilder:scaffold:scheme + }) + + AfterEach(func() { + Expect(testEnv.Stop()).To(Succeed()) + }) + + const ( + nadName = "ns1/superdupernad" + namespace = "ns1" + vmName = "vm1" + ) + + DescribeTable("reconcile behavior is as expected", 
func(config testConfig) { + var initialObjects []client.Object + + var vmKey apitypes.NamespacedName + if config.inputVM != nil { + vmKey = apitypes.NamespacedName{ + Namespace: config.inputVM.Namespace, + Name: config.inputVM.Name, + } + initialObjects = append(initialObjects, config.inputVM) + } + + if config.inputVMI != nil { + initialObjects = append(initialObjects, config.inputVMI) + } + + if config.existingIPAMClaim != nil { + initialObjects = append(initialObjects, config.existingIPAMClaim) + } + + if vmKey.Namespace == "" && vmKey.Name == "" { + vmKey = apitypes.NamespacedName{ + Namespace: namespace, + Name: vmName, + } + } + + ctrlOptions := controllerruntime.Options{ + Scheme: scheme.Scheme, + NewClient: func(_ *rest.Config, _ client.Options) (client.Client, error) { + return fake.NewClientBuilder(). + WithScheme(scheme.Scheme). + WithObjects(initialObjects...). + Build(), nil + }, + } + + mgr, err := controllerruntime.NewManager(&rest.Config{}, ctrlOptions) + Expect(err).NotTo(HaveOccurred()) + + reconcileVM := vmnetworkscontroller.NewVMReconciler(mgr) + if config.expectedError != nil { + _, err := reconcileVM.Reconcile(context.Background(), controllerruntime.Request{NamespacedName: vmKey}) + Expect(err).To(MatchError(config.expectedError)) + } else { + Expect( + reconcileVM.Reconcile(context.Background(), controllerruntime.Request{NamespacedName: vmKey}), + ).To(Equal(config.expectedResponse)) + } + + if len(config.expectedIPAMClaims) > 0 { + ipamClaimList := &ipamclaimsapi.IPAMClaimList{} + + Expect(mgr.GetClient().List(context.Background(), ipamClaimList, claims.OwnedByVMLabel(vmName))).To(Succeed()) + Expect(ipamClaimsCleaner(ipamClaimList.Items...)).To(ConsistOf(config.expectedIPAMClaims)) + } + }, + Entry("when the VM is marked for deletion and its VMI is gone", testConfig{ + inputVM: dummyMarkedForDeletionVM(nadName), + inputVMI: nil, + existingIPAMClaim: dummyIPAMClaimWithFinalizer(namespace, vmName), + expectedResponse: reconcile.Result{}, + 
expectedIPAMClaims: []ipamclaimsapi.IPAMClaim{ + *dummyIPAMClaimmWithoutFinalizer(namespace, vmName), + }, + }), + Entry("when the VM is gone", testConfig{ + inputVM: nil, + inputVMI: nil, + existingIPAMClaim: dummyIPAMClaimWithFinalizer(namespace, vmName), + expectedResponse: reconcile.Result{}, + expectedIPAMClaims: []ipamclaimsapi.IPAMClaim{ + *dummyIPAMClaimmWithoutFinalizer(namespace, vmName), + }, + }), + Entry("when the VM is marked for deletion and its VMI still exist", testConfig{ + inputVM: dummyMarkedForDeletionVM(nadName), + inputVMI: dummyVMI(nadName), + existingIPAMClaim: dummyIPAMClaimWithFinalizer(namespace, vmName), + expectedResponse: reconcile.Result{}, + expectedIPAMClaims: []ipamclaimsapi.IPAMClaim{ + *dummyIPAMClaimWithFinalizer(namespace, vmName), + }, + }), + ) +}) + +func dummyMarkedForDeletionVM(nadName string) *virtv1.VirtualMachine { + vm := dummyVM(nadName) + vm.DeletionTimestamp = &metav1.Time{Time: time.Now()} + vm.ObjectMeta.Finalizers = []string{metav1.FinalizerDeleteDependents} + + return vm +} + +func dummyVM(nadName string) *virtv1.VirtualMachine { + return &virtv1.VirtualMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vm1", + Namespace: "ns1", + }, + Spec: virtv1.VirtualMachineSpec{ + Template: &virtv1.VirtualMachineInstanceTemplateSpec{ + Spec: dummyVMISpec(nadName), + }, + }, + } +} + +func dummyVMI(nadName string) *virtv1.VirtualMachineInstance { + return &virtv1.VirtualMachineInstance{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vm1", + Namespace: "ns1", + }, + Spec: dummyVMISpec(nadName), + } +} + +func dummyVMISpec(nadName string) virtv1.VirtualMachineInstanceSpec { + return virtv1.VirtualMachineInstanceSpec{ + Networks: []virtv1.Network{ + { + Name: "podnet", + NetworkSource: virtv1.NetworkSource{Pod: &virtv1.PodNetwork{}}, + }, + { + Name: "randomnet", + NetworkSource: virtv1.NetworkSource{ + Multus: &virtv1.MultusNetwork{ + NetworkName: nadName, + }, + }, + }, + }, + } +} + +func ipamClaimsCleaner(ipamClaims 
...ipamclaimsapi.IPAMClaim) []ipamclaimsapi.IPAMClaim { + for i := range ipamClaims { + ipamClaims[i].ObjectMeta.ResourceVersion = "" + } + return ipamClaims +} + +func dummyIPAMClaimWithFinalizer(namespace, vmName string) *ipamclaimsapi.IPAMClaim { + ipamClaim := dummyIPAMClaimmWithoutFinalizer(namespace, vmName) + ipamClaim.Finalizers = []string{claims.KubevirtVMFinalizer} + return ipamClaim +} + +func dummyIPAMClaimmWithoutFinalizer(namespace, vmName string) *ipamclaimsapi.IPAMClaim { + return &ipamclaimsapi.IPAMClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s.%s", vmName, "randomnet"), + Namespace: namespace, + Labels: claims.OwnedByVMLabel(vmName), + OwnerReferences: []metav1.OwnerReference{{ + Name: vmName, + Kind: "VirtualMachine", + Controller: ptr.To(true), + BlockOwnerDeletion: ptr.To(true), + }}, + }, + Spec: ipamclaimsapi.IPAMClaimSpec{ + Network: "goodnet", + }, + } +} diff --git a/test/e2e/persistentips_test.go b/test/e2e/persistentips_test.go index f75f6c33..6d22ac7c 100644 --- a/test/e2e/persistentips_test.go +++ b/test/e2e/persistentips_test.go @@ -21,6 +21,7 @@ package e2e import ( "context" + "encoding/json" "fmt" "os/exec" "time" @@ -28,6 +29,9 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" kubevirtv1 "kubevirt.io/api/core/v1" @@ -61,7 +65,7 @@ var _ = Describe("Persistent IPs", func() { }) Context("and a virtual machine using it is also created", func() { BeforeEach(func() { - By("Creating VM using the nad") + By("Creating VM with secondary attachments") Expect(testenv.Client.Create(context.Background(), vm)).To(Succeed()) By(fmt.Sprintf("Waiting for readiness at virtual machine %s", vm.Name)) @@ -98,7 +102,7 @@ var _ = Describe("Persistent IPs", func() { }) - It("should garbage collect IPAMClaims after virtual machine deletion", func() { + It("should garbage collect IPAMClaims after VM deletion", func() { Expect(testenv.Client.Delete(context.Background(), vm)).To(Succeed()) Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)). WithTimeout(time.Minute). @@ -106,6 +110,52 @@ var _ = Describe("Persistent IPs", func() { Should(BeEmpty()) }) + It("should garbage collect IPAMClaims after VM foreground deletion", func() { + Expect(testenv.Client.Delete(context.Background(), vm, foregroundDeleteOptions())).To(Succeed()) + Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)). + WithTimeout(time.Minute). + WithPolling(time.Second). + Should(BeEmpty()) + }) + + When("the VM is stopped", func() { + BeforeEach(func() { + By("Invoking virtctl stop") + output, err := exec.Command("virtctl", "stop", "-n", td.Namespace, vmi.Name).CombinedOutput() + Expect(err).NotTo(HaveOccurred(), output) + + By("Ensuring VM is not running") + Eventually(testenv.ThisVMI(vmi), 360*time.Second, 1*time.Second).Should( + SatisfyAll( + Not(testenv.BeCreated()), + Not(testenv.BeReady()), + )) + + Consistently(testenv.IPAMClaimsFromNamespace(vm.Namespace)). + WithTimeout(time.Minute). + WithPolling(time.Second). 
+ ShouldNot(BeEmpty()) + }) + + It("should garbage collect IPAMClaims after VM is deleted", func() { + By("Delete VM and check ipam claims are gone") + Expect(testenv.Client.Delete(context.Background(), vm)).To(Succeed()) + Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)). + WithTimeout(time.Minute). + WithPolling(time.Second). + Should(BeEmpty()) + }) + + It("should garbage collect IPAMClaims after VM is foreground deleted", func() { + By("Foreground delete VM and check ipam claims are gone") + Expect(testenv.Client.Delete(context.Background(), vm, foregroundDeleteOptions())).To(Succeed()) + Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)). + WithTimeout(time.Minute). + WithPolling(time.Second). + Should(BeEmpty()) + }) + }) + It("should keep ips after restart", func() { vmiIPsBeforeRestart := vmi.Status.Interfaces[0].IPs vmiUUIDBeforeRestart := vmi.UID @@ -130,6 +180,55 @@ var _ = Describe("Persistent IPs", func() { }) }) + When("requested for a VM whose VMI has extra finalizers", func() { + const testFinalizer = "testFinalizer" + + BeforeEach(func() { + By("Adding VMI custom finalizer to control VMI deletion") + vm.Spec.Template.ObjectMeta.Finalizers = []string{testFinalizer} + + By("Creating VM with secondary attachments") + Expect(testenv.Client.Create(context.Background(), vm)).To(Succeed()) + + By(fmt.Sprintf("Waiting for readiness at virtual machine %s", vm.Name)) + Eventually(testenv.ThisVMReadiness(vm)). + WithPolling(time.Second). + WithTimeout(5 * time.Minute). + Should(BeTrue()) + + By("Wait for IPAMClaim to get created") + Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)). + WithTimeout(time.Minute). + WithPolling(time.Second). 
+ ShouldNot(BeEmpty()) + + Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) + + Expect(vmi.Status.Interfaces).NotTo(BeEmpty()) + Expect(vmi.Status.Interfaces[0].IPs).NotTo(BeEmpty()) + }) + + It("should garbage collect IPAMClaims after VM foreground deletion, only after VMI is gone", func() { + By("Foreground delete the VM, and validate the IPAMClaim isn't deleted since VMI exists") + Expect(testenv.Client.Delete(context.Background(), vm, foregroundDeleteOptions())).To(Succeed()) + Consistently(testenv.IPAMClaimsFromNamespace(vm.Namespace)). + WithTimeout(time.Minute). + WithPolling(time.Second). + ShouldNot(BeEmpty()) + + By("Remove the finalizer (all the others are already deleted in this stage)") + patchData, err := removeFinalizersPatch() + Expect(err).NotTo(HaveOccurred()) + Expect(testenv.Client.Patch(context.TODO(), vmi, client.RawPatch(types.MergePatchType, patchData))).To(Succeed()) + + By("Check IPAMClaims are now deleted") + Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)). + WithTimeout(time.Minute). + WithPolling(time.Second). + Should(BeEmpty()) + }) + }) + Context("and a virtual machine instance using it is also created", func() { BeforeEach(func() { By("Creating VMI using the nad") @@ -163,14 +262,38 @@ var _ = Describe("Persistent IPs", func() { }) - It("should garbage collect IPAMClaims after virtual machine deletion", func() { + It("should garbage collect IPAMClaims after VMI deletion", func() { Expect(testenv.Client.Delete(context.Background(), vmi)).To(Succeed()) Eventually(testenv.IPAMClaimsFromNamespace(vmi.Namespace)). WithTimeout(time.Minute). WithPolling(time.Second). Should(BeEmpty()) }) + + It("should garbage collect IPAMClaims after VMI foreground deletion", func() { + Expect(testenv.Client.Delete(context.Background(), vmi, foregroundDeleteOptions())).To(Succeed()) + Eventually(testenv.IPAMClaimsFromNamespace(vmi.Namespace)). + WithTimeout(time.Minute).
+ WithPolling(time.Second). + Should(BeEmpty()) + }) }) }) }) + +func foregroundDeleteOptions() *client.DeleteOptions { + foreground := metav1.DeletePropagationForeground + return &client.DeleteOptions{ + PropagationPolicy: &foreground, + } +} + +func removeFinalizersPatch() ([]byte, error) { + patch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "finalizers": []string{}, + }, + } + return json.Marshal(patch) +} diff --git a/test/env/matcher.go b/test/env/matcher.go index 3e07e536..ec789ce6 100644 --- a/test/env/matcher.go +++ b/test/env/matcher.go @@ -12,6 +12,8 @@ import ( kubevirtv1 "kubevirt.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + ipamclaimsv1alpha1 "github.com/k8snetworkplumbingwg/ipamclaims/pkg/crd/ipamclaims/v1alpha1" ) @@ -19,7 +21,7 @@ import ( func IPAMClaimsFromNamespace(namespace string) func() ([]ipamclaimsv1alpha1.IPAMClaim, error) { return func() ([]ipamclaimsv1alpha1.IPAMClaim, error) { ipamClaimList := &ipamclaimsv1alpha1.IPAMClaimList{} - if err := Client.List(context.Background(), ipamClaimList); err != nil { + if err := Client.List(context.Background(), ipamClaimList, client.InNamespace(namespace)); err != nil { return nil, err } return ipamClaimList.Items, nil @@ -70,6 +72,22 @@ func BeRestarted(oldUID types.UID) gomegatypes.GomegaMatcher { })) } +func BeCreated() gomegatypes.GomegaMatcher { + return gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Status": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Created": BeTrue(), + }), + })) +} + +func BeReady() gomegatypes.GomegaMatcher { + return gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Status": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Ready": BeTrue(), + }), + })) +} + func ContainConditionVMIReady() gomegatypes.GomegaMatcher { return WithTransform(vmiStatusConditions, ContainElement(SatisfyAll(