diff --git a/controllers/virtualmachine/virtualmachine/virtualmachine_controller.go b/controllers/virtualmachine/virtualmachine/virtualmachine_controller.go
index 80965b00e..56568ac0d 100644
--- a/controllers/virtualmachine/virtualmachine/virtualmachine_controller.go
+++ b/controllers/virtualmachine/virtualmachine/virtualmachine_controller.go
@@ -316,28 +316,48 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 		return ctrl.Result{}, err
 	}
 
-	if err := r.ReconcileNormal(vmCtx); err != nil {
+	if err = r.ReconcileNormal(vmCtx); err != nil && !ignoredCreateErr(err) {
 		vmCtx.Logger.Error(err, "Failed to reconcile VirtualMachine")
 		return ctrl.Result{}, err
 	}
 
-	return ctrl.Result{RequeueAfter: requeueDelay(vmCtx)}, nil
+	// Requeue after an amount of time determined by the state of the VM.
+	return ctrl.Result{RequeueAfter: requeueDelay(vmCtx, err)}, nil
 }
 
-// Determine if we should request a non-zero requeue delay in order to trigger a non-rate limited reconcile
-// at some point in the future. Use this delay-based reconcile to trigger a specific reconcile to discovery the VM IP
-// address rather than relying on the resync period to do.
+// Determine if we should request a non-zero requeue delay in order to trigger a
+// non-rate limited reconcile at some point in the future.
 //
-// TODO: It would be much preferable to determine that a non-error resync is required at the source of the determination that
-// TODO: the VM IP isn't available rather than up here in the reconcile loop. However, in the interest of time, we are making
-// TODO: this determination here and will have to refactor at some later date.
-func requeueDelay(ctx *pkgctx.VirtualMachineContext) time.Duration {
-	// If the VM is in Creating phase, the reconciler has run out of threads to Create VMs on the provider. Do not queue
-	// immediately to avoid exponential backoff.
-	if !conditions.IsTrue(ctx.VM, vmopv1.VirtualMachineConditionCreated) {
+// When async signal is disabled, this is used to trigger a specific reconcile
+// to discover the VM IP address rather than relying on the resync period to
+// do so.
+//
+// TODO
+// It would be much preferable to determine that a non-error resync is required
+// at the source of the determination that the VM IP is not available rather
+// than up here in the reconcile loop. However, in the interest of time, we are
+// making this determination here and will have to refactor at some later date.
+func requeueDelay(
+	ctx *pkgctx.VirtualMachineContext,
+	err error) time.Duration {
+
+	// If there were too many concurrent create operations or if the VM is in
+	// Creating phase, the reconciler has run out of threads or goroutines to
+	// Create VMs on the provider. Do not queue immediately to avoid exponential
+	// backoff.
+	if ignoredCreateErr(err) ||
+		!conditions.IsTrue(ctx.VM, vmopv1.VirtualMachineConditionCreated) {
+
 		return pkgcfg.FromContext(ctx).CreateVMRequeueDelay
 	}
 
+	// Do not requeue for the IP address if async signal is enabled.
+	if pkgcfg.FromContext(ctx).Features.WorkloadDomainIsolation &&
+		!pkgcfg.FromContext(ctx).AsyncSignalDisabled {
+
+		return 0
+	}
+
 	if ctx.VM.Status.PowerState == vmopv1.VirtualMachinePowerStateOn {
 		networkSpec := ctx.VM.Spec.Network
 		if networkSpec != nil && !networkSpec.Disabled {
@@ -439,14 +459,54 @@ func (r *Reconciler) ReconcileNormal(ctx *pkgctx.VirtualMachineContext) (reterr
 	// Upgrade schema fields where needed
 	upgradeSchema(ctx)
 
-	err := r.VMProvider.CreateOrUpdateVirtualMachine(ctx, ctx.VM)
+	var (
+		err     error
+		chanErr <-chan error
+	)
+
+	if pkgcfg.FromContext(ctx).Features.WorkloadDomainIsolation &&
+		!pkgcfg.FromContext(ctx).AsyncSignalDisabled &&
+		!pkgcfg.FromContext(ctx).AsyncCreateDisabled {
+		//
+		// Non-blocking create
+		//
+		chanErr, err = r.VMProvider.CreateOrUpdateVirtualMachineAsync(ctx, ctx.VM)
+	} else {
+		//
+		// Blocking create
+		//
+		err = r.VMProvider.CreateOrUpdateVirtualMachine(ctx, ctx.VM)
+	}
 
 	switch {
-	case ctxop.IsCreate(ctx):
-		r.Recorder.EmitEvent(ctx.VM, "Create", err, false)
+	case ctxop.IsCreate(ctx) && !ignoredCreateErr(err):
+
+		if chanErr == nil {
+			//
+			// Blocking create
+			//
+			r.Recorder.EmitEvent(ctx.VM, "Create", err, false)
+		} else {
+			//
+			// Non-blocking create
+			//
+			if err != nil {
+				// Failed before goroutine.
+				r.Recorder.EmitEvent(ctx.VM, "Create", err, false)
+			} else {
+				// Emit event once goroutine is complete.
+				go func(obj client.Object) {
+					err := <-chanErr
+					r.Recorder.EmitEvent(obj, "Create", err, false)
+				}(ctx.VM.DeepCopy())
+			}
+		}
 	case ctxop.IsUpdate(ctx):
+
 		r.Recorder.EmitEvent(ctx.VM, "Update", err, false)
-	case err != nil:
+
+	case err != nil && !ignoredCreateErr(err):
+		// Catch all event for neither create nor update op.
 		r.Recorder.EmitEvent(ctx.VM, "ReconcileNormal", err, true)
 	}
 
@@ -468,3 +528,15 @@ func getIsDefaultVMClassController(ctx context.Context) bool {
 	}
 	return false
 }
+
+// ignoredCreateErr is written this way in order to illustrate coverage more
+// accurately.
+func ignoredCreateErr(err error) bool { + if err == providers.ErrDuplicateCreate { + return true + } + if err == providers.ErrTooManyCreates { + return true + } + return false +} diff --git a/controllers/virtualmachine/virtualmachine/virtualmachine_controller_intg_test.go b/controllers/virtualmachine/virtualmachine/virtualmachine_controller_intg_test.go index 36a606843..f8d6bad99 100644 --- a/controllers/virtualmachine/virtualmachine/virtualmachine_controller_intg_test.go +++ b/controllers/virtualmachine/virtualmachine/virtualmachine_controller_intg_test.go @@ -29,6 +29,7 @@ import ( pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config" "github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels" pkgctx "github.com/vmware-tanzu/vm-operator/pkg/context" + "github.com/vmware-tanzu/vm-operator/pkg/providers" providerfake "github.com/vmware-tanzu/vm-operator/pkg/providers/fake" "github.com/vmware-tanzu/vm-operator/pkg/util/kube/cource" "github.com/vmware-tanzu/vm-operator/pkg/util/ptr" @@ -105,11 +106,13 @@ func intgTestsReconcile() { dummyInstanceUUID := uuid.NewString() BeforeEach(func() { - intgFakeVMProvider.Lock() - intgFakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - return nil - } - intgFakeVMProvider.Unlock() + providerfake.SetCreateOrUpdateFunction( + ctx, + intgFakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + return nil + }, + ) }) AfterEach(func() { @@ -128,6 +131,19 @@ func intgTestsReconcile() { }) It("Reconciles after VirtualMachine creation", func() { + var createAttempts int32 + + By("Exceed number of allowed concurrent create operations", func() { + providerfake.SetCreateOrUpdateFunction( + ctx, + intgFakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + atomic.AddInt32(&createAttempts, 1) + return providers.ErrTooManyCreates + }, + ) + }) + vm.Spec.Network = &vmopv1.VirtualMachineNetworkSpec{} Expect(ctx.Client.Create(ctx, vm)).To(Succeed()) @@ -135,14 +151,43 @@ func intgTestsReconcile() { waitForVirtualMachineFinalizer(ctx, vmKey) }) + Eventually(func(g Gomega) { + g.Expect(atomic.LoadInt32(&createAttempts)).To(BeNumerically(">=", int32(3))) + g.Expect(conditions.IsTrue(vm, vmopv1.VirtualMachineConditionCreated)).To(BeFalse()) + }, "5s").Should( + Succeed(), + "waiting for reconcile to be requeued at least three times") + + atomic.StoreInt32(&createAttempts, 0) + + By("Causing duplicate creates", func() { + providerfake.SetCreateOrUpdateFunction( + ctx, + intgFakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + atomic.AddInt32(&createAttempts, 1) + return providers.ErrDuplicateCreate + }, + ) + }) + + Eventually(func(g Gomega) { + g.Expect(atomic.LoadInt32(&createAttempts)).To(BeNumerically(">=", int32(3))) + g.Expect(conditions.IsTrue(vm, vmopv1.VirtualMachineConditionCreated)).To(BeFalse()) + }, "5s").Should( + Succeed(), + "waiting for reconcile to be requeued at least three times") + By("Set InstanceUUID in CreateOrUpdateVM", func() { - intgFakeVMProvider.Lock() - intgFakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - // Just using InstanceUUID here for a field to update. - vm.Status.InstanceUUID = dummyInstanceUUID - return nil - } - intgFakeVMProvider.Unlock() + providerfake.SetCreateOrUpdateFunction( + ctx, + intgFakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + // Just using InstanceUUID here for a field to update. 
+ vm.Status.InstanceUUID = dummyInstanceUUID + return nil + }, + ) }) By("VirtualMachine should have InstanceUUID set", func() { @@ -155,13 +200,15 @@ func intgTestsReconcile() { }) By("Set Created condition in CreateOrUpdateVM", func() { - intgFakeVMProvider.Lock() - intgFakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - vm.Status.PowerState = vmopv1.VirtualMachinePowerStateOn - conditions.MarkTrue(vm, vmopv1.VirtualMachineConditionCreated) - return nil - } - intgFakeVMProvider.Unlock() + providerfake.SetCreateOrUpdateFunction( + ctx, + intgFakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + vm.Status.PowerState = vmopv1.VirtualMachinePowerStateOn + conditions.MarkTrue(vm, vmopv1.VirtualMachineConditionCreated) + return nil + }, + ) }) By("VirtualMachine should have Created condition set", func() { @@ -174,14 +221,16 @@ func intgTestsReconcile() { }) By("Set IP address in CreateOrUpdateVM", func() { - intgFakeVMProvider.Lock() - intgFakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - vm.Status.Network = &vmopv1.VirtualMachineNetworkStatus{ - PrimaryIP4: dummyIPAddress, - } - return nil - } - intgFakeVMProvider.Unlock() + providerfake.SetCreateOrUpdateFunction( + ctx, + intgFakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + vm.Status.Network = &vmopv1.VirtualMachineNetworkStatus{ + PrimaryIP4: dummyIPAddress, + } + return nil + }, + ) }) By("VirtualMachine should have IP address set", func() { @@ -213,13 +262,15 @@ func intgTestsReconcile() { biosUUID := uuid.NewString() BeforeEach(func() { - intgFakeVMProvider.Lock() - intgFakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - vm.Status.InstanceUUID = instanceUUID - vm.Status.BiosUUID = biosUUID - return nil - } - intgFakeVMProvider.Unlock() + providerfake.SetCreateOrUpdateFunction( + ctx, + intgFakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + vm.Status.InstanceUUID = instanceUUID + vm.Status.BiosUUID = biosUUID + return nil + }, + ) }) // NOTE: mutating webhook sets the default spec.instanceUUID, but is not run in this test - @@ -261,13 +312,15 @@ func intgTestsReconcile() { }) It("Reconciles after VirtualMachineClass change", func() { - intgFakeVMProvider.Lock() - intgFakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - // Set this so requeueDelay() returns 0. - conditions.MarkTrue(vm, vmopv1.VirtualMachineConditionCreated) - return nil - } - intgFakeVMProvider.Unlock() + providerfake.SetCreateOrUpdateFunction( + ctx, + intgFakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + // Set this so requeueDelay() returns 0. + conditions.MarkTrue(vm, vmopv1.VirtualMachineConditionCreated) + return nil + }, + ) Expect(ctx.Client.Create(ctx, vm)).To(Succeed()) // Wait for initial reconcile. 
@@ -283,12 +336,14 @@ func intgTestsReconcile() { instanceUUID := uuid.NewString() - intgFakeVMProvider.Lock() - intgFakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - vm.Status.InstanceUUID = instanceUUID - return nil - } - intgFakeVMProvider.Unlock() + providerfake.SetCreateOrUpdateFunction( + ctx, + intgFakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + vm.Status.InstanceUUID = instanceUUID + return nil + }, + ) vmClass := builder.DummyVirtualMachineClass(vm.Spec.ClassName) vmClass.Namespace = vm.Namespace @@ -390,6 +445,8 @@ var _ = Describe( ctx = pkgcfg.UpdateContext( ctx, func(config *pkgcfg.Config) { + config.AsyncSignalDisabled = false + config.AsyncCreateDisabled = false config.Features.WorkloadDomainIsolation = true }, ) @@ -400,10 +457,14 @@ var _ = Describe( provider.VSphereClientFn = func(ctx context.Context) (*vsclient.Client, error) { return vsclient.NewClient(ctx, vcSimCtx.VCClientConfig) } - provider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, obj *vmopv1.VirtualMachine) error { - atomic.AddInt32(&numCreateOrUpdateCalls, 1) - return nil - } + providerfake.SetCreateOrUpdateFunction( + ctx, + provider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + atomic.AddInt32(&numCreateOrUpdateCalls, 1) + return nil + }, + ) vcSimCtx = builder.NewIntegrationTestContextForVCSim( ctx, diff --git a/controllers/virtualmachine/virtualmachine/virtualmachine_controller_suite_test.go b/controllers/virtualmachine/virtualmachine/virtualmachine_controller_suite_test.go index 46eae179f..e31a5ba22 100644 --- a/controllers/virtualmachine/virtualmachine/virtualmachine_controller_suite_test.go +++ b/controllers/virtualmachine/virtualmachine/virtualmachine_controller_suite_test.go @@ -28,6 +28,8 @@ var suite = builder.NewTestSuiteForControllerWithContext( config.SyncPeriod = 60 * time.Minute config.CreateVMRequeueDelay = 1 * time.Second config.PoweredOnVMHasIPRequeueDelay = 1 * time.Second + config.AsyncSignalDisabled = true + config.AsyncCreateDisabled = true })), virtualmachine.AddToManager, func(ctx *pkgctx.ControllerManagerContext, _ ctrlmgr.Manager) error { diff --git a/controllers/virtualmachine/virtualmachine/virtualmachine_controller_unit_test.go b/controllers/virtualmachine/virtualmachine/virtualmachine_controller_unit_test.go index 4de219cfc..cb6d2f236 100644 --- a/controllers/virtualmachine/virtualmachine/virtualmachine_controller_unit_test.go +++ b/controllers/virtualmachine/virtualmachine/virtualmachine_controller_unit_test.go @@ -6,7 +6,6 @@ package virtualmachine_test import ( "context" "errors" - "fmt" "strings" . "github.com/onsi/ginkgo/v2" @@ -77,6 +76,7 @@ func unitTestsReconcile() { ctx = suite.NewUnitTestContextForController(initObjects...) 
pkgcfg.SetContext(ctx, func(config *pkgcfg.Config) { config.MaxDeployThreadsOnProvider = 16 + config.Features.WorkloadDomainIsolation = false }) fakeProbeManagerIf := proberfake.NewFakeProberManager() @@ -160,9 +160,13 @@ func unitTestsReconcile() { Context("ProberManager", func() { It("Should call add to Prober Manager if ReconcileNormal fails", func() { - fakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - return errors.New(providerError) - } + providerfake.SetCreateOrUpdateFunction( + vmCtx, + fakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + return errors.New(providerError) + }, + ) err := reconciler.ReconcileNormal(vmCtx) Expect(err).To(HaveOccurred()) @@ -179,50 +183,112 @@ func unitTestsReconcile() { }) }) - It("Should emit a CreateSuccess event if ReconcileNormal causes a successful VM creation", func() { - fakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - ctxop.MarkCreate(ctx) - return nil - } + When("blocking create", func() { + JustBeforeEach(func() { + pkgcfg.SetContext(vmCtx, func(config *pkgcfg.Config) { + config.AsyncCreateDisabled = true + }) + }) + It("Should emit a CreateSuccess event if ReconcileNormal causes a successful VM creation", func() { + providerfake.SetCreateOrUpdateFunction( + vmCtx, + fakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + ctxop.MarkCreate(ctx) + return nil + }, + ) + Expect(reconciler.ReconcileNormal(vmCtx)).Should(Succeed()) + expectEvents(ctx, "CreateSuccess") + }) - Expect(reconciler.ReconcileNormal(vmCtx)).Should(Succeed()) - expectEvents(ctx, "CreateSuccess") + It("Should emit CreateFailure event if ReconcileNormal causes a failed VM create", func() { + providerfake.SetCreateOrUpdateFunction( + vmCtx, + fakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + ctxop.MarkCreate(ctx) + return errors.New("fake") + }, + ) + + Expect(reconciler.ReconcileNormal(vmCtx)).ShouldNot(Succeed()) + expectEvents(ctx, "CreateFailure") + }) }) - It("Should emit CreateFailure event if ReconcileNormal causes a failed VM create", func() { - fakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - ctxop.MarkCreate(ctx) - return fmt.Errorf("fake") - } + When("non-blocking create", func() { + JustBeforeEach(func() { + pkgcfg.SetContext(vmCtx, func(config *pkgcfg.Config) { + config.Features.WorkloadDomainIsolation = true + config.AsyncSignalDisabled = false + config.AsyncCreateDisabled = false + }) + }) + It("Should emit a CreateSuccess event if ReconcileNormal causes a successful VM creation", func() { + providerfake.SetCreateOrUpdateFunction( + vmCtx, + fakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + ctxop.MarkCreate(ctx) + return nil + }, + ) + Expect(reconciler.ReconcileNormal(vmCtx)).Should(Succeed()) + expectEvents(ctx, "CreateSuccess") + }) - Expect(reconciler.ReconcileNormal(vmCtx)).ShouldNot(Succeed()) - expectEvents(ctx, "CreateFailure") + It("Should emit CreateFailure event if ReconcileNormal causes a failed VM create", func() { + providerfake.SetCreateOrUpdateFunction( + vmCtx, + fakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + ctxop.MarkCreate(ctx) + return errors.New("fake") + }, + ) + + Expect(reconciler.ReconcileNormal(vmCtx)).ShouldNot(Succeed()) + expectEvents(ctx, "CreateFailure") + }) }) It("Should emit UpdateSuccess event if 
ReconcileNormal causes a successful VM update", func() { - fakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - ctxop.MarkUpdate(ctx) - return nil - } + providerfake.SetCreateOrUpdateFunction( + vmCtx, + fakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + ctxop.MarkUpdate(ctx) + return nil + }, + ) Expect(reconciler.ReconcileNormal(vmCtx)).Should(Succeed()) expectEvents(ctx, "UpdateSuccess") }) It("Should emit UpdateFailure event if ReconcileNormal causes a failed VM update", func() { - fakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - ctxop.MarkUpdate(ctx) - return fmt.Errorf("fake") - } + providerfake.SetCreateOrUpdateFunction( + vmCtx, + fakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + ctxop.MarkUpdate(ctx) + return errors.New("fake") + }, + ) Expect(reconciler.ReconcileNormal(vmCtx)).ShouldNot(Succeed()) expectEvents(ctx, "UpdateFailure") }) It("Should emit ReconcileNormalFailure if ReconcileNormal fails for neither create or update op", func() { - fakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - return fmt.Errorf("fake") - } + providerfake.SetCreateOrUpdateFunction( + vmCtx, + fakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + return errors.New("fake") + }, + ) Expect(reconciler.ReconcileNormal(vmCtx)).ShouldNot(Succeed()) expectEvents(ctx, "ReconcileNormalFailure") diff --git a/controllers/virtualmachinereplicaset/virtualmachinereplicaset_controller_intg_test.go b/controllers/virtualmachinereplicaset/virtualmachinereplicaset_controller_intg_test.go index b7a032f39..be8bc6eaa 100644 --- a/controllers/virtualmachinereplicaset/virtualmachinereplicaset_controller_intg_test.go +++ b/controllers/virtualmachinereplicaset/virtualmachinereplicaset_controller_intg_test.go @@ -19,7 +19,7 @@ import ( vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha3" vmopv1common "github.com/vmware-tanzu/vm-operator/api/v1alpha3/common" "github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels" - + providerfake "github.com/vmware-tanzu/vm-operator/pkg/providers/fake" "github.com/vmware-tanzu/vm-operator/test/builder" ) @@ -124,13 +124,15 @@ func intgTestsReconcile() { dummyInstanceUUID := "instanceUUID1234" BeforeEach(func() { - intgFakeVMProvider.Lock() - intgFakeVMProvider.CreateOrUpdateVirtualMachineFn = func(ctx context.Context, vm *vmopv1.VirtualMachine) error { - // Used below just to check for something in the Status is updated. - vm.Status.InstanceUUID = dummyInstanceUUID - return nil - } - intgFakeVMProvider.Unlock() + providerfake.SetCreateOrUpdateFunction( + ctx, + intgFakeVMProvider, + func(ctx context.Context, vm *vmopv1.VirtualMachine) error { + // Used below just to check for something in the Status is + // updated. + vm.Status.InstanceUUID = dummyInstanceUUID + return nil + }) }) AfterEach(func() { diff --git a/pkg/config/config.go b/pkg/config/config.go index 0abe76ced..11f03bccd 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -103,7 +103,20 @@ type Config struct { // AsyncSignalDisabled may be set to false to disable the vm-watcher service // used to reconcile VirtualMachine objects if their backend state has // changed. + // + // Please note, this flag has no impact if Features.WorkloadDomainIsolation + // is false. + // + // Defaults to true. 
AsyncSignalDisabled bool + + // AsyncCreateDisabled may be set to false to disable non-blocking create + // operations. + // + // Please note, this flag has no impact if AsyncSignalDisabled is true. + // + // Defaults to true. + AsyncCreateDisabled bool } // GetMaxDeployThreadsOnProvider returns MaxDeployThreadsOnProvider if it is >0 diff --git a/pkg/config/default.go b/pkg/config/default.go index 257ebea6c..532fa64f0 100644 --- a/pkg/config/default.go +++ b/pkg/config/default.go @@ -38,6 +38,7 @@ func Default() Config { MaxCreateVMsOnProvider: 80, MaxConcurrentReconciles: 1, AsyncSignalDisabled: false, + AsyncCreateDisabled: false, CreateVMRequeueDelay: 10 * time.Second, PoweredOnVMHasIPRequeueDelay: 10 * time.Second, NetworkProviderType: NetworkProviderTypeNamed, diff --git a/pkg/config/env.go b/pkg/config/env.go index 8b3858e22..618ab2eb2 100644 --- a/pkg/config/env.go +++ b/pkg/config/env.go @@ -28,6 +28,7 @@ func FromEnv() Config { setStringSlice(env.PrivilegedUsers, &config.PrivilegedUsers) setBool(env.LogSensitiveData, &config.LogSensitiveData) setBool(env.AsyncSignalDisabled, &config.AsyncSignalDisabled) + setBool(env.AsyncCreateDisabled, &config.AsyncCreateDisabled) setDuration(env.InstanceStoragePVPlacementFailedTTL, &config.InstanceStorage.PVPlacementFailedTTL) setFloat64(env.InstanceStorageJitterMaxFactor, &config.InstanceStorage.JitterMaxFactor) diff --git a/pkg/config/env/env.go b/pkg/config/env/env.go index 6b1787844..b9774dac4 100644 --- a/pkg/config/env/env.go +++ b/pkg/config/env/env.go @@ -25,6 +25,7 @@ const ( JSONExtraConfig LogSensitiveData AsyncSignalDisabled + AsyncCreateDisabled InstanceStoragePVPlacementFailedTTL InstanceStorageJitterMaxFactor InstanceStorageSeedRequeueDuration @@ -110,6 +111,8 @@ func (n VarName) String() string { return "LOG_SENSITIVE_DATA" case AsyncSignalDisabled: return "ASYNC_SIGNAL_DISABLED" + case AsyncCreateDisabled: + return "ASYNC_CREATE_DISABLED" case InstanceStoragePVPlacementFailedTTL: return "INSTANCE_STORAGE_PV_PLACEMENT_FAILED_TTL" case InstanceStorageJitterMaxFactor: diff --git a/pkg/config/env_test.go b/pkg/config/env_test.go index 88d2c9fc2..f610e34b1 100644 --- a/pkg/config/env_test.go +++ b/pkg/config/env_test.go @@ -77,6 +77,7 @@ var _ = Describe( Expect(os.Setenv("SYNC_PERIOD", "113h")).To(Succeed()) Expect(os.Setenv("MAX_CONCURRENT_RECONCILES", "114")).To(Succeed()) Expect(os.Setenv("ASYNC_SIGNAL_DISABLED", "true")).To(Succeed()) + Expect(os.Setenv("ASYNC_CREATE_DISABLED", "true")).To(Succeed()) Expect(os.Setenv("LEADER_ELECTION_ID", "115")).To(Succeed()) Expect(os.Setenv("POD_NAME", "116")).To(Succeed()) Expect(os.Setenv("POD_NAMESPACE", "117")).To(Succeed()) @@ -126,6 +127,7 @@ var _ = Describe( SyncPeriod: 113 * time.Hour, MaxConcurrentReconciles: 114, AsyncSignalDisabled: true, + AsyncCreateDisabled: true, LeaderElectionID: "115", PodName: "116", PodNamespace: "117", diff --git a/pkg/providers/fake/fake_vm_provider.go b/pkg/providers/fake/fake_vm_provider.go index b30fcd6cc..167f4bb5c 100644 --- a/pkg/providers/fake/fake_vm_provider.go +++ b/pkg/providers/fake/fake_vm_provider.go @@ -16,6 +16,7 @@ import ( imgregv1a1 "github.com/vmware-tanzu/image-registry-operator-api/api/v1alpha1" vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha3" + pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config" "github.com/vmware-tanzu/vm-operator/pkg/providers" vsclient "github.com/vmware-tanzu/vm-operator/pkg/util/vsphere/client" ) @@ -28,9 +29,10 @@ import ( // expected to evolve as more tests get added in the future. 
type funcs struct { - CreateOrUpdateVirtualMachineFn func(ctx context.Context, vm *vmopv1.VirtualMachine) error - DeleteVirtualMachineFn func(ctx context.Context, vm *vmopv1.VirtualMachine) error - PublishVirtualMachineFn func(ctx context.Context, vm *vmopv1.VirtualMachine, + CreateOrUpdateVirtualMachineFn func(ctx context.Context, vm *vmopv1.VirtualMachine) error + CreateOrUpdateVirtualMachineAsyncFn func(ctx context.Context, vm *vmopv1.VirtualMachine) (<-chan error, error) + DeleteVirtualMachineFn func(ctx context.Context, vm *vmopv1.VirtualMachine) error + PublishVirtualMachineFn func(ctx context.Context, vm *vmopv1.VirtualMachine, vmPub *vmopv1.VirtualMachinePublishRequest, cl *imgregv1a1.ContentLibrary, actID string) (string, error) GetVirtualMachineGuestHeartbeatFn func(ctx context.Context, vm *vmopv1.VirtualMachine) (vmopv1.GuestHeartbeatStatus, error) GetVirtualMachinePropertiesFn func(ctx context.Context, vm *vmopv1.VirtualMachine, propertyPaths []string) (map[string]any, error) @@ -92,6 +94,16 @@ func (s *VMProvider) CreateOrUpdateVirtualMachine(ctx context.Context, vm *vmopv return nil } +func (s *VMProvider) CreateOrUpdateVirtualMachineAsync(ctx context.Context, vm *vmopv1.VirtualMachine) (<-chan error, error) { + s.Lock() + defer s.Unlock() + if s.CreateOrUpdateVirtualMachineAsyncFn != nil { + return s.CreateOrUpdateVirtualMachineAsyncFn(ctx, vm) + } + s.addToVMMap(vm) + return nil, nil +} + func (s *VMProvider) DeleteVirtualMachine(ctx context.Context, vm *vmopv1.VirtualMachine) error { s.Lock() defer s.Unlock() @@ -394,3 +406,35 @@ func NewVMProvider() *VMProvider { } return &provider } + +func SetCreateOrUpdateFunction( + ctx context.Context, + provider *VMProvider, + fn func( + ctx context.Context, + vm *vmopv1.VirtualMachine, + ) error, +) { + provider.Lock() + defer provider.Unlock() + + if pkgcfg.FromContext(ctx).Features.WorkloadDomainIsolation { + if !pkgcfg.FromContext(ctx).AsyncSignalDisabled { + + provider.CreateOrUpdateVirtualMachineAsyncFn = func( + ctx context.Context, + vm *vmopv1.VirtualMachine) (<-chan error, error) { + + chanErr := make(chan error) + close(chanErr) + + return chanErr, fn(ctx, vm) + } + + return + } + + } + + provider.CreateOrUpdateVirtualMachineFn = fn +} diff --git a/pkg/providers/vm_provider_interface.go b/pkg/providers/vm_provider_interface.go index 0e2a9df09..da841289a 100644 --- a/pkg/providers/vm_provider_interface.go +++ b/pkg/providers/vm_provider_interface.go @@ -5,6 +5,7 @@ package providers import ( "context" + "errors" "github.com/vmware/govmomi/vapi/library" vimtypes "github.com/vmware/govmomi/vim25/types" @@ -16,9 +17,22 @@ import ( "github.com/vmware-tanzu/vm-operator/pkg/util/vsphere/client" ) +var ( + // ErrTooManyCreates is returned from the CreateOrUpdateVirtualMachine and + // CreateOrUpdateVirtualMachineAsync functions when the number of create + // threads/goroutines have reached the allowed limit. + ErrTooManyCreates = errors.New("too many creates") + + // ErrDuplicateCreate is returned from the CreateOrUpdateVirtualMachineAsync + // function if it is called for a VM while a create goroutine for that VM is + // already executing. + ErrDuplicateCreate = errors.New("duplicate create") +) + // VirtualMachineProviderInterface is a pluggable interface for VM Providers. 
type VirtualMachineProviderInterface interface { CreateOrUpdateVirtualMachine(ctx context.Context, vm *vmopv1.VirtualMachine) error + CreateOrUpdateVirtualMachineAsync(ctx context.Context, vm *vmopv1.VirtualMachine) (<-chan error, error) DeleteVirtualMachine(ctx context.Context, vm *vmopv1.VirtualMachine) error PublishVirtualMachine(ctx context.Context, vm *vmopv1.VirtualMachine, vmPub *vmopv1.VirtualMachinePublishRequest, cl *imgregv1a1.ContentLibrary, actID string) (string, error) diff --git a/pkg/providers/vsphere/vmprovider_vm.go b/pkg/providers/vsphere/vmprovider_vm.go index a6f0ef6d0..8cfc497e7 100644 --- a/pkg/providers/vsphere/vmprovider_vm.go +++ b/pkg/providers/vsphere/vmprovider_vm.go @@ -22,6 +22,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" apierrorsutil "k8s.io/apimachinery/pkg/util/errors" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" imgregv1a1 "github.com/vmware-tanzu/image-registry-operator-api/api/v1alpha1" @@ -30,6 +31,8 @@ import ( "github.com/vmware-tanzu/vm-operator/pkg/conditions" pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config" pkgctx "github.com/vmware-tanzu/vm-operator/pkg/context" + ctxop "github.com/vmware-tanzu/vm-operator/pkg/context/operation" + "github.com/vmware-tanzu/vm-operator/pkg/providers" vcclient "github.com/vmware-tanzu/vm-operator/pkg/providers/vsphere/client" "github.com/vmware-tanzu/vm-operator/pkg/providers/vsphere/clustermodules" "github.com/vmware-tanzu/vm-operator/pkg/providers/vsphere/constants" @@ -76,6 +79,10 @@ var ( createCountLock sync.Mutex concurrentCreateCount int + // currentlyCreating tracks the VMs currently being created in a + // non-blocking goroutine. + currentlyCreating sync.Map + // SkipVMImageCLProviderCheck skips the checks that a VM Image has a Content Library item provider // since a VirtualMachineImage created for a VM template won't have either. This has been broken for // a long time but was otherwise masked on how the tests used to be organized. @@ -86,10 +93,31 @@ func (vs *vSphereVMProvider) CreateOrUpdateVirtualMachine( ctx context.Context, vm *vmopv1.VirtualMachine) error { + return vs.createOrUpdateVirtualMachine(ctx, vm, nil) +} + +func (vs *vSphereVMProvider) CreateOrUpdateVirtualMachineAsync( + ctx context.Context, + vm *vmopv1.VirtualMachine) (<-chan error, error) { + + chanErr := make(chan error) + err := vs.createOrUpdateVirtualMachine(ctx, vm, chanErr) + return chanErr, err +} + +func (vs *vSphereVMProvider) createOrUpdateVirtualMachine( + ctx context.Context, + vm *vmopv1.VirtualMachine, + chanErr chan error) error { + vmCtx := pkgctx.VirtualMachineContext{ - Context: context.WithValue(ctx, vimtypes.ID{}, vs.getOpID(vm, "createOrUpdateVM")), - Logger: log.WithValues("vmName", vm.NamespacedName()), - VM: vm, + Context: context.WithValue( + ctx, + vimtypes.ID{}, + vs.getOpID(vm, "createOrUpdateVM"), + ), + Logger: log.WithValues("vmName", vm.NamespacedName()), + VM: vm, } client, err := vs.getVcClient(vmCtx) @@ -98,36 +126,104 @@ func (vs *vSphereVMProvider) CreateOrUpdateVirtualMachine( } // Set the VC UUID annotation on the VM before attempting creation or - // update. Among other things, the annotation facilitates differential handling - // of restore and fail-over operations. + // update. Among other things, the annotation facilitates differential + // handling of restore and fail-over operations. 
if vm.Annotations == nil { vm.Annotations = make(map[string]string) } - vm.Annotations[vmopv1.ManagerID] = client.VimClient().ServiceContent.About.InstanceUuid + vCenterInstanceUUID := client.VimClient().ServiceContent.About.InstanceUuid + vm.Annotations[vmopv1.ManagerID] = vCenterInstanceUUID - vcVM, err := vs.getVM(vmCtx, client, false) + // Check to see if the VM can be found on the underlying platform. + foundVM, err := vs.getVM(vmCtx, client, false) if err != nil { return err } - if vcVM == nil { - var createArgs *VMCreateArgs + if foundVM != nil { + // Mark that this is an update operation. + ctxop.MarkUpdate(vmCtx) + + if chanErr != nil { + defer close(chanErr) + } + return vs.updateVirtualMachine(vmCtx, foundVM, client, nil) + } + + // Mark that this is a create operation. + ctxop.MarkCreate(vmCtx) + + createArgs, err := vs.getCreateArgs(vmCtx, client) + if err != nil { + return err + } - vcVM, createArgs, err = vs.createVirtualMachine(vmCtx, client) + // Do not allow more than N create threads/goroutines. + // + // - In blocking create mode, this ensures there are reconciler threads + // available to reconcile non-create operations. + // + // - In non-blocking create mode, this ensures the number of goroutines + // spawned to create VMs does not take up too much memory. + allowed, createDeferFn := vs.vmCreateConcurrentAllowed(vmCtx) + if !allowed { + return providers.ErrTooManyCreates + } + + if chanErr == nil { + + vmCtx.Logger.V(4).Info("Doing a blocking create") + + defer createDeferFn() + + newVM, err := vs.createVirtualMachine(vmCtx, client, createArgs) if err != nil { return err } - if vcVM == nil { - // Creation was not ready or blocked for some reason. We depend on the controller - // to eventually retry the create. - return nil + if newVM != nil { + // If the create actually occurred, fall-through to an update + // post-reconfigure. + return vs.createdVirtualMachineFallthroughUpdate( + vmCtx, + newVM, + client, + createArgs) } - - return vs.createdVirtualMachineFallthroughUpdate(vmCtx, vcVM, client, createArgs) } - return vs.updateVirtualMachine(vmCtx, vcVM, client, nil) + if _, ok := currentlyCreating.LoadOrStore( + vm.NamespacedName(), + struct{}{}); ok { + + // If the VM is already being created in a goroutine, then there is no + // need to create it again. + // + // However, we need to make sure we decrement the number of concurrent + // creates before returning. + createDeferFn() + return providers.ErrDuplicateCreate + } + + vmCtx.Logger.V(4).Info("Doing a non-blocking create") + + // Create a copy of the context and replace its VM with a copy to + // ensure modifications in the goroutine below are not impacted or + // impact the operations above us in the call stack. + copyOfCtx := vmCtx + copyOfCtx.VM = vmCtx.VM.DeepCopy() + + // Start a goroutine to create the VM in the background. + go vs.createVirtualMachineAsync( + copyOfCtx, + client, + createArgs, + chanErr, + createDeferFn) + + // Return with no error. The VM will be re-enqueued once the create + // completes with success or failure. 
+ return nil } func (vs *vSphereVMProvider) DeleteVirtualMachine( @@ -388,66 +484,117 @@ func (vs *vSphereVMProvider) vmCreatePathName( return nil } -func (vs *vSphereVMProvider) createVirtualMachine( +func (vs *vSphereVMProvider) getCreateArgs( vmCtx pkgctx.VirtualMachineContext, - vcClient *vcclient.Client) (*object.VirtualMachine, *VMCreateArgs, error) { + vcClient *vcclient.Client) (*VMCreateArgs, error) { createArgs, err := vs.vmCreateGetArgs(vmCtx, vcClient) if err != nil { - return nil, nil, err + return nil, err } - err = vs.vmCreateDoPlacement(vmCtx, vcClient, createArgs) - if err != nil { - return nil, nil, err + if err := vs.vmCreateDoPlacement(vmCtx, vcClient, createArgs); err != nil { + return nil, err } - err = vs.vmCreateGetFolderAndRPMoIDs(vmCtx, vcClient, createArgs) - if err != nil { - return nil, nil, err + if err := vs.vmCreateGetFolderAndRPMoIDs(vmCtx, vcClient, createArgs); err != nil { + return nil, err } - err = vs.vmCreatePathName(vmCtx, vcClient, createArgs) - if err != nil { - return nil, nil, err + if err := vs.vmCreatePathName(vmCtx, vcClient, createArgs); err != nil { + return nil, err } - err = vs.vmCreateFixupConfigSpec(vmCtx, vcClient, createArgs) - if err != nil { - return nil, nil, err + if err := vs.vmCreateFixupConfigSpec(vmCtx, vcClient, createArgs); err != nil { + return nil, err } - err = vs.vmCreateIsReady(vmCtx, vcClient, createArgs) - if err != nil { - return nil, nil, err + if err := vs.vmCreateIsReady(vmCtx, vcClient, createArgs); err != nil { + return nil, err } - // BMV: This is about where we used to do this check but it prb make more sense to do - // earlier, as to limit wasted work. Before DoPlacement() is likely the best place so - // the window between the placement decision and creating the VM on VC is small(ish). 
- allowed, createDeferFn := vs.vmCreateConcurrentAllowed(vmCtx) - if !allowed { - return nil, nil, nil - } - defer createDeferFn() + return createArgs, nil +} + +func (vs *vSphereVMProvider) createVirtualMachine( + ctx pkgctx.VirtualMachineContext, + vcClient *vcclient.Client, + args *VMCreateArgs) (*object.VirtualMachine, error) { moRef, err := vmlifecycle.CreateVirtualMachine( - vmCtx, + ctx, vcClient.RestClient(), vcClient.VimClient(), vcClient.Finder(), - &createArgs.CreateArgs) + &args.CreateArgs) if err != nil { - vmCtx.Logger.Error(err, "CreateVirtualMachine failed") - conditions.MarkFalse(vmCtx.VM, vmopv1.VirtualMachineConditionCreated, "Error", err.Error()) - return nil, nil, err + ctx.Logger.Error(err, "CreateVirtualMachine failed") + conditions.MarkFalse( + ctx.VM, + vmopv1.VirtualMachineConditionCreated, + "Error", + err.Error()) + return nil, err + } + + ctx.VM.Status.UniqueID = moRef.Reference().Value + conditions.MarkTrue(ctx.VM, vmopv1.VirtualMachineConditionCreated) + + return object.NewVirtualMachine(vcClient.VimClient(), *moRef), nil +} + +func (vs *vSphereVMProvider) createVirtualMachineAsync( + ctx pkgctx.VirtualMachineContext, + vcClient *vcclient.Client, + args *VMCreateArgs, + chanErr chan error, + createDeferFn func()) { + + defer func() { + close(chanErr) + createDeferFn() + currentlyCreating.Delete(ctx.VM.NamespacedName()) + }() + + moRef, vimErr := vmlifecycle.CreateVirtualMachine( + ctx, + vcClient.RestClient(), + vcClient.VimClient(), + vcClient.Finder(), + &args.CreateArgs) + + if vimErr != nil { + chanErr <- vimErr + ctx.Logger.Error(vimErr, "CreateVirtualMachine failed") } - vmCtx.VM.Status.UniqueID = moRef.Reference().Value - conditions.MarkTrue(vmCtx.VM, vmopv1.VirtualMachineConditionCreated) + _, k8sErr := controllerutil.CreateOrPatch( + ctx, + vs.k8sClient, + ctx.VM, + func() error { + + if vimErr != nil { + conditions.MarkFalse( + ctx.VM, + vmopv1.VirtualMachineConditionCreated, + "Error", + vimErr.Error()) + return nil //nolint:nilerr + } + + ctx.VM.Status.UniqueID = moRef.Reference().Value + conditions.MarkTrue(ctx.VM, vmopv1.VirtualMachineConditionCreated) - return object.NewVirtualMachine(vcClient.VimClient(), *moRef), createArgs, nil + return nil + }, + ) + + if k8sErr != nil { + chanErr <- k8sErr + ctx.Logger.Error(k8sErr, "Failed to patch VM status after create") + } } func (vs *vSphereVMProvider) createdVirtualMachineFallthroughUpdate( diff --git a/pkg/providers/vsphere/vmprovider_vm2_test.go b/pkg/providers/vsphere/vmprovider_vm2_test.go index e081a8837..0f786675f 100644 --- a/pkg/providers/vsphere/vmprovider_vm2_test.go +++ b/pkg/providers/vsphere/vmprovider_vm2_test.go @@ -70,6 +70,7 @@ func vmE2ETests() { JustBeforeEach(func() { ctx = suite.NewTestContextForVCSim(testConfig, initObjects...) 
pkgcfg.SetContext(ctx, func(config *pkgcfg.Config) { + config.AsyncSignalDisabled = true config.MaxDeployThreadsOnProvider = 1 }) vmProvider = vsphere.NewVSphereVMProviderFromClient(ctx, ctx.Client, ctx.Recorder) @@ -137,10 +138,10 @@ func vmE2ETests() { }) It("DoIt without an NPE", func() { - err := vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + err := createOrUpdateVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) - err = vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + err = createOrUpdateVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) Expect(vm.Status.UniqueID).ToNot(BeEmpty()) @@ -188,7 +189,7 @@ func vmE2ETests() { }) It("DoIt", func() { - err := vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + err := createOrUpdateVM(ctx, vmProvider, vm) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("network interface is not ready yet")) Expect(conditions.IsFalse(vm, vmopv1.VirtualMachineConditionNetworkReady)).To(BeTrue()) @@ -222,7 +223,7 @@ func vmE2ETests() { Expect(ctx.Client.Status().Update(ctx, netInterface)).To(Succeed()) }) - err = vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + err = createOrUpdateVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) By("has expected conditions", func() { @@ -296,7 +297,7 @@ func vmE2ETests() { }) It("DoIt", func() { - err := vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + err := createOrUpdateVM(ctx, vmProvider, vm) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("network interface is not ready yet")) Expect(conditions.IsFalse(vm, vmopv1.VirtualMachineConditionNetworkReady)).To(BeTrue()) @@ -331,7 +332,7 @@ func vmE2ETests() { Expect(ctx.Client.Status().Update(ctx, netInterface)).To(Succeed()) }) - err = vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + err = createOrUpdateVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) By("has expected conditions", func() { @@ -407,7 +408,7 @@ func vmE2ETests() { }) It("DoIt", func() { - err := vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + err := createOrUpdateVM(ctx, vmProvider, vm) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("subnetPort is not ready yet")) Expect(conditions.IsFalse(vm, vmopv1.VirtualMachineConditionNetworkReady)).To(BeTrue()) @@ -439,7 +440,7 @@ func vmE2ETests() { Expect(ctx.Client.Status().Update(ctx, subnetPort)).To(Succeed()) }) - err = vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + err = createOrUpdateVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) By("has expected conditions", func() { diff --git a/pkg/providers/vsphere/vmprovider_vm_resize_test.go b/pkg/providers/vsphere/vmprovider_vm_resize_test.go index ffeed3595..01e2e678f 100644 --- a/pkg/providers/vsphere/vmprovider_vm_resize_test.go +++ b/pkg/providers/vsphere/vmprovider_vm_resize_test.go @@ -10,7 +10,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/mo" vimtypes "github.com/vmware/govmomi/vim25/types" "k8s.io/apimachinery/pkg/api/resource" @@ -47,6 +46,7 @@ func vmResizeTests() { JustBeforeEach(func() { ctx = suite.NewTestContextForVCSim(testConfig, initObjects...) 
pkgcfg.SetContext(ctx, func(config *pkgcfg.Config) { + config.AsyncSignalDisabled = true config.MaxDeployThreadsOnProvider = 1 }) vmProvider = vsphere.NewVSphereVMProviderFromClient(ctx, ctx.Client, ctx.Recorder) @@ -68,21 +68,6 @@ func vmResizeTests() { return w.Bytes() } - createOrUpdateAndGetVcVM := func( - ctx *builder.TestContextForVCSim, - vm *vmopv1.VirtualMachine) (*object.VirtualMachine, error) { - - err := vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) - if err != nil { - return nil, err - } - - ExpectWithOffset(1, vm.Status.UniqueID).ToNot(BeEmpty()) - vcVM := ctx.GetVMFromMoID(vm.Status.UniqueID) - ExpectWithOffset(1, vcVM).ToNot(BeNil()) - return vcVM, nil - } - createVMClass := func(cs vimtypes.VirtualMachineConfigSpec, name ...string) *vmopv1.VirtualMachineClass { var class *vmopv1.VirtualMachineClass if len(name) == 1 { @@ -197,8 +182,7 @@ func vmResizeTests() { vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOff vm.Spec.StorageClass = ctx.StorageClassName - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) }) Context("Powered off VM", func() { @@ -216,7 +200,7 @@ func vmResizeTests() { newVMClass := createVMClass(newCS) vm.Spec.ClassName = newVMClass.Name - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -237,7 +221,7 @@ func vmResizeTests() { newVMClass := createVMClass(newCS) vm.Spec.ClassName = newVMClass.Name - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -274,7 +258,7 @@ func vmResizeTests() { updateVMClassPolicies(newVMClass, polices) By("Resize set reservations", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -288,7 +272,7 @@ func vmResizeTests() { By("Resize back to initial class removes reservations", func() { vm.Spec.ClassName = vmClass.Name - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -318,7 +302,7 @@ func vmResizeTests() { vm.Spec.ClassName = newVMClass.Name vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOn - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -355,7 +339,7 @@ func vmResizeTests() { updateVMClassPolicies(newVMClass, polices) vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOn - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -381,7 +365,7 @@ func vmResizeTests() { delete(vm.Annotations, vmopv1util.LastResizedAnnotationKey) vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOn - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -406,7 +390,7 @@ func vmResizeTests() { Expect(vmopv1util.SetLastResizedAnnotationClassName(vm, vmClass.Name)).To(Succeed()) vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOn - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, 
vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -429,7 +413,7 @@ func vmResizeTests() { updateVMClass(vmClass, newCS) vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOn - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -452,7 +436,7 @@ func vmResizeTests() { vm.Annotations[vmopv1.VirtualMachineSameVMClassResizeAnnotation] = "" vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOn - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -470,8 +454,7 @@ func vmResizeTests() { It("Resize Pending", func() { vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOn - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.PowerState).To(Equal(vmopv1.VirtualMachinePowerStateOn)) newCS := configSpec @@ -480,7 +463,7 @@ func vmResizeTests() { newVMClass := createVMClass(newCS) vm.Spec.ClassName = newVMClass.Name - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) By("Does not resize", func() { @@ -502,8 +485,7 @@ func vmResizeTests() { It("Has Same Class Resize Annotation", func() { vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOn - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.PowerState).To(Equal(vmopv1.VirtualMachinePowerStateOn)) newCS := configSpec @@ -512,7 +494,7 @@ func vmResizeTests() { updateVMClass(vmClass, newCS) vm.Annotations[vmopv1.VirtualMachineSameVMClassResizeAnnotation] = "" - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) By("Does not resize", func() { @@ -546,7 +528,7 @@ func vmResizeTests() { updateVMClass(vmClass, newCS) By("Does not resize without annotation", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -557,7 +539,7 @@ func vmResizeTests() { }) vm.Annotations[vmopv1.VirtualMachineSameVMClassResizeAnnotation] = "" - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -580,7 +562,7 @@ func vmResizeTests() { delete(vm.Annotations, vmopv1util.LastResizedAnnotationKey) By("Does not resize without same class annotation", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -594,7 +576,7 @@ func vmResizeTests() { }) vm.Annotations[vmopv1.VirtualMachineSameVMClassResizeAnnotation] = "" - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -608,8 +590,7 @@ func vmResizeTests() { It("Powered On brownfield VM", func() { vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOn - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) 
Expect(vm.Status.PowerState).To(Equal(vmopv1.VirtualMachinePowerStateOn)) // Remove annotation so the VM appears to be from before this feature. @@ -622,7 +603,7 @@ func vmResizeTests() { updateVMClass(vmClass, newCS) vm.Annotations[vmopv1.VirtualMachineSameVMClassResizeAnnotation] = "" - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) By("Does not resize powered on VM", func() { @@ -678,8 +659,7 @@ func vmResizeTests() { vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOff vm.Spec.StorageClass = ctx.StorageClassName - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) }) Context("ChangeBlockTracking", func() { @@ -688,7 +668,7 @@ func vmResizeTests() { ChangeBlockTracking: vimtypes.NewBool(true), } - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -711,7 +691,7 @@ func vmResizeTests() { ChangeBlockTracking: vimtypes.NewBool(true), } - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -739,7 +719,7 @@ func vmResizeTests() { ChangeBlockTracking: vimtypes.NewBool(true), } - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine diff --git a/pkg/providers/vsphere/vmprovider_vm_test.go b/pkg/providers/vsphere/vmprovider_vm_test.go index 899f59d3a..950ec579a 100644 --- a/pkg/providers/vsphere/vmprovider_vm_test.go +++ b/pkg/providers/vsphere/vmprovider_vm_test.go @@ -9,6 +9,7 @@ import ( "encoding/json" "fmt" "math/rand" + "sync" "github.com/google/uuid" . 
"github.com/onsi/ginkgo/v2" @@ -73,6 +74,10 @@ func vmTests() { BeforeEach(func() { parentCtx = ctxop.WithContext(pkgcfg.NewContext()) + pkgcfg.SetContext(parentCtx, func(config *pkgcfg.Config) { + config.AsyncCreateDisabled = true + config.AsyncSignalDisabled = true + }) testConfig = builder.VCSimTestConfig{ WithContentLibrary: true, WithWorkloadIsolation: true, @@ -158,21 +163,6 @@ func vmTests() { vsphere.SkipVMImageCLProviderCheck = false }) - createOrUpdateAndGetVcVM := func( - ctx *builder.TestContextForVCSim, - vm *vmopv1.VirtualMachine) (*object.VirtualMachine, error) { - - err := vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) - if err != nil { - return nil, err - } - - ExpectWithOffset(1, vm.Status.UniqueID).ToNot(BeEmpty()) - vcVM := ctx.GetVMFromMoID(vm.Status.UniqueID) - ExpectWithOffset(1, vcVM).ToNot(BeNil()) - return vcVM, nil - } - Context("VM Class and ConfigSpec", func() { var ( @@ -226,7 +216,7 @@ func vmTests() { if !skipCreateOrUpdateVM { var err error - vcVM, err = createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err = createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) } }) @@ -269,7 +259,7 @@ func vmTests() { assertClassNotFound := func(className string) { var err error - vcVM, err = createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err = createOrUpdateAndGetVcVM(ctx, vmProvider, vm) ExpectWithOffset(1, err).ToNot(BeNil()) ExpectWithOffset(1, err.Error()).To(ContainSubstring( fmt.Sprintf("virtualmachineclasses.vmoperator.vmware.com %q not found", className))) @@ -304,7 +294,7 @@ func vmTests() { assertPoweredOnNoVMClassCondition := func() { var err error - vcVM, err = createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err = createOrUpdateAndGetVcVM(ctx, vmProvider, vm) ExpectWithOffset(1, err).ToNot(HaveOccurred()) ExpectWithOffset(1, vm.Status.PowerState).To(Equal(vmopv1.VirtualMachinePowerStateOn)) powerState, err := vcVM.PowerState(ctx) @@ -337,7 +327,7 @@ func vmTests() { }) It("should return an error", func() { var err error - vcVM, err = createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err = createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).To(MatchError("cannot synthesize class from nil ConfigInfo")) Expect(vcVM).To(BeNil()) }) @@ -1096,7 +1086,7 @@ func vmTests() { } Expect(ctx.Client.Create(ctx, pvc1)).To(Succeed()) - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -1143,7 +1133,7 @@ func vmTests() { } Expect(ctx.Client.Create(ctx, pvc1)).To(Succeed()) - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -1274,7 +1264,7 @@ func vmTests() { }) It("Basic VM", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -1375,7 +1365,7 @@ func vmTests() { vm.Labels[kubeutil.CAPVClusterRoleLabelKey] = "" vm.Labels[kubeutil.CAPWClusterRoleLabelKey] = "" - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -1396,7 +1386,7 @@ func vmTests() { vm.Annotations[vmopv1.ForceEnableBackupAnnotation] = "true" - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -1486,17 +1476,104 @@ 
func vmTests() { Expect(m.MarkDefault(ctx, ctx.NativeKeyProviderID)).To(Succeed()) }) - It("should succeed", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) - Expect(vm.Status.Crypto).ToNot(BeNil()) - Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( - []vmopv1.VirtualMachineEncryptionType{ - vmopv1.VirtualMachineEncryptionTypeConfig, - })) - Expect(vm.Status.Crypto.ProviderID).To(Equal(ctx.NativeKeyProviderID)) - Expect(vm.Status.Crypto.KeyID).ToNot(BeEmpty()) - Expect(conditions.IsTrue(vm, vmopv1.VirtualMachineEncryptionSynced)).To(BeTrue()) + When("using sync create", func() { + BeforeEach(func() { + pkgcfg.SetContext(parentCtx, func(config *pkgcfg.Config) { + config.AsyncCreateDisabled = true + config.AsyncSignalDisabled = false + config.Features.WorkloadDomainIsolation = true + }) + }) + It("should succeed", func() { + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) + Expect(vm.Status.Crypto).ToNot(BeNil()) + Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( + []vmopv1.VirtualMachineEncryptionType{ + vmopv1.VirtualMachineEncryptionTypeConfig, + })) + Expect(vm.Status.Crypto.ProviderID).To(Equal(ctx.NativeKeyProviderID)) + Expect(vm.Status.Crypto.KeyID).ToNot(BeEmpty()) + Expect(conditions.IsTrue(vm, vmopv1.VirtualMachineEncryptionSynced)).To(BeTrue()) + }) + }) + + When("using async create", func() { + BeforeEach(func() { + pkgcfg.SetContext(parentCtx, func(config *pkgcfg.Config) { + config.AsyncCreateDisabled = false + config.AsyncSignalDisabled = false + config.Features.WorkloadDomainIsolation = true + }) + }) + It("should succeed", func() { + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) + Expect(vm.Status.Crypto).ToNot(BeNil()) + Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( + []vmopv1.VirtualMachineEncryptionType{ + vmopv1.VirtualMachineEncryptionTypeConfig, + })) + Expect(vm.Status.Crypto.ProviderID).To(Equal(ctx.NativeKeyProviderID)) + Expect(vm.Status.Crypto.KeyID).ToNot(BeEmpty()) + Expect(conditions.IsTrue(vm, vmopv1.VirtualMachineEncryptionSynced)).To(BeTrue()) + }) + + // Please note this test uses FlakeAttempts(5) due to the + // validation of some predictable-over-time behavior. + When("there is a duplicate create", FlakeAttempts(5), func() { + JustBeforeEach(func() { + pkgcfg.SetContext(ctx, func(config *pkgcfg.Config) { + config.MaxDeployThreadsOnProvider = 16 + }) + }) + It("should return ErrDuplicateCreate", func() { + var ( + errs []error + errsMu sync.Mutex + done sync.WaitGroup + start = make(chan struct{}) + ) + + // Set up five goroutines that race to + // create the VM first. 
+ for i := 0; i < 5; i++ { + done.Add(1) + go func(copyOfVM *vmopv1.VirtualMachine) { + defer done.Done() + <-start + err := createOrUpdateVM(ctx, vmProvider, copyOfVM) + if err != nil { + errsMu.Lock() + errs = append(errs, err) + errsMu.Unlock() + } else { + vm = copyOfVM + } + }(vm.DeepCopy()) + } + + close(start) + + done.Wait() + + Expect(errs).To(HaveLen(4)) + + Expect(errs).Should(ConsistOf( + providers.ErrDuplicateCreate, + providers.ErrDuplicateCreate, + providers.ErrDuplicateCreate, + providers.ErrDuplicateCreate, + )) + + Expect(vm.Status.Crypto).ToNot(BeNil()) + Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( + []vmopv1.VirtualMachineEncryptionType{ + vmopv1.VirtualMachineEncryptionTypeConfig, + })) + Expect(vm.Status.Crypto.ProviderID).To(Equal(ctx.NativeKeyProviderID)) + Expect(vm.Status.Crypto.KeyID).ToNot(BeEmpty()) + Expect(conditions.IsTrue(vm, vmopv1.VirtualMachineEncryptionSynced)).To(BeTrue()) + }) + }) }) }) @@ -1507,8 +1584,7 @@ func vmTests() { }) It("should succeed", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).ToNot(BeNil()) Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( []vmopv1.VirtualMachineEncryptionType{ @@ -1528,8 +1604,7 @@ func vmTests() { }) It("should succeed", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).ToNot(BeNil()) Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( []vmopv1.VirtualMachineEncryptionType{ @@ -1565,12 +1640,10 @@ func vmTests() { }) It("should succeed", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).To(BeNil()) - _, err = createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).ToNot(BeNil()) Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( @@ -1590,12 +1663,10 @@ func vmTests() { }) It("should succeed", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).To(BeNil()) - _, err = createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).ToNot(BeNil()) Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( @@ -1616,12 +1687,10 @@ func vmTests() { }) It("should succeed", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).To(BeNil()) - _, err = createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).ToNot(BeNil()) Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( @@ -1640,8 +1709,7 @@ func vmTests() { When("there is no vTPM", func() { It("should not error, but have condition", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).To(BeNil()) c := conditions.Get(vm, vmopv1.VirtualMachineEncryptionSynced) Expect(c).ToNot(BeNil()) @@ -1656,12 
+1724,10 @@ func vmTests() { hasVTPM = true }) It("should succeed", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).To(BeNil()) - _, err = createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).ToNot(BeNil()) Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( @@ -1707,12 +1773,10 @@ func vmTests() { }) It("should succeed", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).To(BeNil()) - _, err = createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).ToNot(BeNil()) Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( @@ -1733,12 +1797,10 @@ func vmTests() { }) It("should succeed", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).To(BeNil()) - _, err = createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).ToNot(BeNil()) Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( @@ -1760,12 +1822,10 @@ func vmTests() { }) It("should succeed", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).To(BeNil()) - _, err = createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).ToNot(BeNil()) Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( @@ -1787,12 +1847,10 @@ func vmTests() { }) It("should succeed", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).To(BeNil()) - _, err = createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.Crypto).ToNot(BeNil()) Expect(vm.Status.Crypto.Encrypted).To(HaveExactElements( @@ -1827,7 +1885,7 @@ func vmTests() { }) It("VM should not have PCI devices from VM Class", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -1847,7 +1905,7 @@ func vmTests() { It("Creates VM", func() { Expect(vm.Spec.StorageClass).To(BeEmpty()) - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -1870,7 +1928,7 @@ func vmTests() { // TODO: Dedupe this with "Basic VM" above It("Clones VM", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -1942,7 +2000,7 @@ func vmTests() { ctx.ContentLibraryItemTemplate("DC0_C0_RP0_VM0", imageName) vm.Spec.ImageName = imageName - _, err := createOrUpdateAndGetVcVM(ctx, vm) + _, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) 
Expect(err).ToNot(HaveOccurred()) }) @@ -1953,7 +2011,7 @@ func vmTests() { It("creates VM in placement selected zone", func() { Expect(vm.Labels).ToNot(HaveKey(topology.KubernetesTopologyZoneLabelKey)) - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) azName, ok := vm.Labels[topology.KubernetesTopologyZoneLabelKey] @@ -1975,7 +2033,7 @@ func vmTests() { azName := ctx.ZoneNames[rand.Intn(len(ctx.ZoneNames))] vm.Labels[topology.KubernetesTopologyZoneLabelKey] = azName - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) By("VM is created in the zone's ResourcePool", func() { @@ -2033,7 +2091,7 @@ func vmTests() { Expect(ctx.Client.Status().Update(ctx, pvc1)).To(Succeed()) vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOff - _, err := createOrUpdateAndGetVcVM(ctx, vm) + _, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) Expect(vm.Status.Zone).To(Equal(azName)) }) @@ -2069,7 +2127,7 @@ func vmTests() { } It("creates VM without instance storage", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) + _, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) }) @@ -2089,7 +2147,7 @@ func vmTests() { } Expect(ctx.Client.Update(ctx, vmClass)).To(Succeed()) - err := vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + _, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).To(MatchError("instance storage PVCs are not bound yet")) Expect(conditions.IsTrue(vm, vmopv1.VirtualMachineConditionCreated)).To(BeFalse()) @@ -2111,7 +2169,7 @@ func vmTests() { // Simulate what would be set by volume controller. 
vm.Annotations[constants.InstanceStoragePVCsBoundAnnotationKey] = "" - err = vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + _, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("status update pending for persistent volume: %s on VM", isVol0.Name))) @@ -2125,7 +2183,7 @@ func vmTests() { }) By("VM is now created", func() { - _, err = createOrUpdateAndGetVcVM(ctx, vm) + _, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) Expect(conditions.IsTrue(vm, vmopv1.VirtualMachineConditionCreated)).To(BeTrue()) }) @@ -2133,12 +2191,12 @@ func vmTests() { }) It("Powers VM off", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) - Expect(err).ToNot(HaveOccurred()) - + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.PowerState).To(Equal(vmopv1.VirtualMachinePowerStateOn)) + vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOff - Expect(vmProvider.CreateOrUpdateVirtualMachine(ctx, vm)).To(Succeed()) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) + Expect(err).ToNot(HaveOccurred()) Expect(vm.Status.PowerState).To(Equal(vmopv1.VirtualMachinePowerStateOff)) state, err := vcVM.PowerState(ctx) @@ -2148,7 +2206,7 @@ func vmTests() { It("returns error when StorageClass is required but none specified", func() { vm.Spec.StorageClass = "" - err := vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + err := createOrUpdateVM(ctx, vmProvider, vm) Expect(err).To(MatchError("StorageClass is required but not specified")) c := conditions.Get(vm, vmopv1.VirtualMachineConditionStorageReady) @@ -2161,14 +2219,14 @@ func vmTests() { }) It("Can be called multiple times", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine Expect(vcVM.Properties(ctx, vcVM.Reference(), nil, &o)).To(Succeed()) modified := o.Config.Modified - _, err = createOrUpdateAndGetVcVM(ctx, vm) + _, err = createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) Expect(vcVM.Properties(ctx, vcVM.Reference(), nil, &o)).To(Succeed()) @@ -2200,7 +2258,7 @@ func vmTests() { Transport: vmopv1.VirtualMachineMetadataExtraConfigTransport, } */ - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -2257,7 +2315,7 @@ func vmTests() { It("Should not have a nic", func() { Expect(vm.Spec.Network.Disabled).To(BeTrue()) - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) Expect(conditions.Has(vm, vmopv1.VirtualMachineConditionNetworkReady)).To(BeFalse()) @@ -2288,7 +2346,7 @@ func vmTests() { }) It("Has expected devices", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) Expect(conditions.IsTrue(vm, vmopv1.VirtualMachineConditionNetworkReady)).To(BeTrue()) @@ -2324,7 +2382,7 @@ func vmTests() { }) It("Succeeds", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -2341,7 +2399,7 @@ func vmTests() { }) It("Succeeds", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, 
vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -2362,7 +2420,7 @@ func vmTests() { }) It("Succeeds", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -2383,7 +2441,7 @@ func vmTests() { } vm.Spec.Advanced.BootDiskCapacity = &newSize vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOn - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var o mo.VirtualMachine @@ -2399,7 +2457,7 @@ func vmTests() { It("CSI Volumes workflow", func() { vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOff - _, err := createOrUpdateAndGetVcVM(ctx, vm) + _, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) vm.Spec.PowerState = vmopv1.VirtualMachinePowerStateOn @@ -2417,7 +2475,7 @@ func vmTests() { }, } - err := vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + err := createOrUpdateVM(ctx, vmProvider, vm) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("status update pending for persistent volume: %s on VM", cnsVolumeName))) Expect(vm.Status.PowerState).To(Equal(vmopv1.VirtualMachinePowerStateOff)) @@ -2434,7 +2492,7 @@ func vmTests() { }, } - err := vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + err := createOrUpdateVM(ctx, vmProvider, vm) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("persistent volume: %s not attached to VM", cnsVolumeName))) Expect(vm.Status.PowerState).To(Equal(vmopv1.VirtualMachinePowerStateOff)) @@ -2447,21 +2505,21 @@ func vmTests() { Attached: true, }, } - Expect(vmProvider.CreateOrUpdateVirtualMachine(ctx, vm)).To(Succeed()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Status.PowerState).To(Equal(vmopv1.VirtualMachinePowerStateOn)) }) }) }) It("Reverse lookups existing VM into correct zone", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) + _, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) Expect(vm.Labels).To(HaveKeyWithValue(topology.KubernetesTopologyZoneLabelKey, zoneName)) Expect(vm.Status.Zone).To(Equal(zoneName)) delete(vm.Labels, topology.KubernetesTopologyZoneLabelKey) - Expect(vmProvider.CreateOrUpdateVirtualMachine(ctx, vm)).To(Succeed()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) Expect(vm.Labels).To(HaveKeyWithValue(topology.KubernetesTopologyZoneLabelKey, zoneName)) Expect(vm.Status.Zone).To(Equal(zoneName)) }) @@ -2488,7 +2546,7 @@ func vmTests() { }) It("VM is created in child Folder and ResourcePool", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) By("has expected condition", func() { @@ -2513,7 +2571,7 @@ func vmTests() { }) It("Cluster Modules", func() { - vcVM, err := createOrUpdateAndGetVcVM(ctx, vm) + vcVM, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) var members []vimtypes.ManagedObjectReference @@ -2528,7 +2586,7 @@ func vmTests() { It("Returns error with non-existence cluster module", func() { vm.Annotations["vsphere-cluster-module-group"] = "bogusClusterMod" - err := vmProvider.CreateOrUpdateVirtualMachine(ctx, vm) + err := createOrUpdateVM(ctx, vmProvider, vm) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("ClusterModule 
bogusClusterMod not found")) }) @@ -2543,7 +2601,7 @@ func vmTests() { }) JustBeforeEach(func() { - Expect(vmProvider.CreateOrUpdateVirtualMachine(ctx, vm)).To(Succeed()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) }) Context("when the VM is off", func() { @@ -2579,7 +2637,7 @@ func vmTests() { }) It("returns NotFound when VM does not exist", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) + _, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) Expect(vmProvider.DeleteVirtualMachine(ctx, vm)).To(Succeed()) @@ -2588,7 +2646,7 @@ func vmTests() { }) It("Deletes existing VM when zone info is missing", func() { - _, err := createOrUpdateAndGetVcVM(ctx, vm) + _, err := createOrUpdateAndGetVcVM(ctx, vmProvider, vm) Expect(err).ToNot(HaveOccurred()) uniqueID := vm.Status.UniqueID @@ -2604,7 +2662,7 @@ func vmTests() { Context("Guest Heartbeat", func() { JustBeforeEach(func() { - Expect(vmProvider.CreateOrUpdateVirtualMachine(ctx, vm)).To(Succeed()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) }) It("return guest heartbeat", func() { @@ -2617,7 +2675,7 @@ func vmTests() { Context("Web console ticket", func() { JustBeforeEach(func() { - Expect(vmProvider.CreateOrUpdateVirtualMachine(ctx, vm)).To(Succeed()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) }) It("return ticket", func() { @@ -2630,7 +2688,7 @@ func vmTests() { Context("VM hardware version", func() { JustBeforeEach(func() { - Expect(vmProvider.CreateOrUpdateVirtualMachine(ctx, vm)).To(Succeed()) + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) }) It("return version", func() { @@ -2639,59 +2697,59 @@ func vmTests() { Expect(version).To(Equal(vimtypes.VMX9)) }) }) - }) - Context("Create/Update/Delete ISO backed VirtualMachine", func() { - var ( - vm *vmopv1.VirtualMachine - vmClass *vmopv1.VirtualMachineClass - ) - - BeforeEach(func() { - vmClass = builder.DummyVirtualMachineClassGenName() - vm = builder.DummyBasicVirtualMachine("test-vm", "") - - // Reduce diff from old tests: by default don't create an NIC. - if vm.Spec.Network == nil { - vm.Spec.Network = &vmopv1.VirtualMachineNetworkSpec{} - } - vm.Spec.Network.Disabled = true - }) - - JustBeforeEach(func() { - vmClass.Namespace = nsInfo.Namespace - Expect(ctx.Client.Create(ctx, vmClass)).To(Succeed()) + Context("Create/Update/Delete ISO backed VirtualMachine", func() { + var ( + vm *vmopv1.VirtualMachine + vmClass *vmopv1.VirtualMachineClass + ) - clusterVMImage := &vmopv1.ClusterVirtualMachineImage{} - Expect(ctx.Client.Get(ctx, client.ObjectKey{Name: ctx.ContentLibraryIsoImageName}, clusterVMImage)).To(Succeed()) + BeforeEach(func() { + vmClass = builder.DummyVirtualMachineClassGenName() + vm = builder.DummyBasicVirtualMachine("test-vm", "") - vm.Namespace = nsInfo.Namespace - vm.Spec.ClassName = vmClass.Name - vm.Spec.ImageName = clusterVMImage.Name - vm.Spec.Image.Kind = cvmiKind - vm.Spec.Image.Name = clusterVMImage.Name - vm.Spec.StorageClass = ctx.StorageClassName - vm.Spec.Cdrom = []vmopv1.VirtualMachineCdromSpec{{ - Name: "cdrom0", - Image: vmopv1.VirtualMachineImageRef{ - Name: cvmiKind, - Kind: clusterVMImage.Name, - }, - }} - }) + // Reduce diff from old tests: by default don't create an NIC. 
+ if vm.Spec.Network == nil { + vm.Spec.Network = &vmopv1.VirtualMachineNetworkSpec{} + } + vm.Spec.Network.Disabled = true + }) - Context("return config", func() { JustBeforeEach(func() { - Expect(vmProvider.CreateOrUpdateVirtualMachine(ctx, vm)).To(Succeed()) + vmClass.Namespace = nsInfo.Namespace + Expect(ctx.Client.Create(ctx, vmClass)).To(Succeed()) + + clusterVMImage := &vmopv1.ClusterVirtualMachineImage{} + Expect(ctx.Client.Get(ctx, client.ObjectKey{Name: ctx.ContentLibraryIsoImageName}, clusterVMImage)).To(Succeed()) + + vm.Namespace = nsInfo.Namespace + vm.Spec.ClassName = vmClass.Name + vm.Spec.ImageName = clusterVMImage.Name + vm.Spec.Image.Kind = cvmiKind + vm.Spec.Image.Name = clusterVMImage.Name + vm.Spec.StorageClass = ctx.StorageClassName + vm.Spec.Cdrom = []vmopv1.VirtualMachineCdromSpec{{ + Name: "cdrom0", + Image: vmopv1.VirtualMachineImageRef{ + Name: cvmiKind, + Kind: clusterVMImage.Name, + }, + }} }) - It("return config.files", func() { - vmPathName := "config.files.vmPathName" - props, err := vmProvider.GetVirtualMachineProperties(ctx, vm, []string{vmPathName}) - Expect(err).NotTo(HaveOccurred()) - var path object.DatastorePath - path.FromString(props[vmPathName].(string)) - Expect(path.Datastore).NotTo(BeEmpty()) + Context("return config", func() { + JustBeforeEach(func() { + Expect(createOrUpdateVM(ctx, vmProvider, vm)).To(Succeed()) + }) + + It("return config.files", func() { + vmPathName := "config.files.vmPathName" + props, err := vmProvider.GetVirtualMachineProperties(ctx, vm, []string{vmPathName}) + Expect(err).NotTo(HaveOccurred()) + var path object.DatastorePath + path.FromString(props[vmPathName].(string)) + Expect(path.Datastore).NotTo(BeEmpty()) + }) }) }) }) diff --git a/pkg/providers/vsphere/vsphere_suite_test.go b/pkg/providers/vsphere/vsphere_suite_test.go index f6b749b7d..039245788 100644 --- a/pkg/providers/vsphere/vsphere_suite_test.go +++ b/pkg/providers/vsphere/vsphere_suite_test.go @@ -4,10 +4,19 @@ package vsphere_test import ( + "fmt" "testing" . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/vmware/govmomi/object" + "sigs.k8s.io/controller-runtime/pkg/client" + + vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha3" + pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config" + ctxop "github.com/vmware-tanzu/vm-operator/pkg/context/operation" + "github.com/vmware-tanzu/vm-operator/pkg/providers" "github.com/vmware-tanzu/vm-operator/test/builder" ) @@ -31,3 +40,85 @@ func TestVSphereProvider(t *testing.T) { var _ = BeforeSuite(suite.BeforeSuite) var _ = AfterSuite(suite.AfterSuite) + +func createOrUpdateVM( + ctx *builder.TestContextForVCSim, + provider providers.VirtualMachineProviderInterface, + vm *vmopv1.VirtualMachine) error { + + if pkgcfg.FromContext(ctx).Features.WorkloadDomainIsolation { + if !pkgcfg.FromContext(ctx).AsyncSignalDisabled { + if !pkgcfg.FromContext(ctx).AsyncCreateDisabled { + By("non-blocking createOrUpdateVM") + return createOrUpdateVMAsync(ctx, provider, vm) + } + } + } + + By("blocking createOrUpdateVM") + return provider.CreateOrUpdateVirtualMachine(ctx, vm) +} + +func createOrUpdateAndGetVcVM( + ctx *builder.TestContextForVCSim, + provider providers.VirtualMachineProviderInterface, + vm *vmopv1.VirtualMachine) (*object.VirtualMachine, error) { + + if err := createOrUpdateVM(ctx, provider, vm); err != nil { + return nil, err + } + + ExpectWithOffset(1, vm.Status.UniqueID).ToNot(BeEmpty()) + vcVM := ctx.GetVMFromMoID(vm.Status.UniqueID) + ExpectWithOffset(1, vcVM).ToNot(BeNil()) + return vcVM, nil +} + +func createOrUpdateVMAsync( + testCtx *builder.TestContextForVCSim, + provider providers.VirtualMachineProviderInterface, + vm *vmopv1.VirtualMachine) error { + + // This ensures there is no current operation set on the context. + ctx := ctxop.WithContext(testCtx) + + chanErr, err := provider.CreateOrUpdateVirtualMachineAsync(ctx, vm) + if err != nil { + return err + } + + // Unlike the VM controller, this test helper blocks until the async + // parts of CreateOrUpdateVM are complete. This is to avoid a large + // refactor for now. + for err2 := range chanErr { + if err2 != nil { + if err == nil { + err = err2 + } else { + err = fmt.Errorf("%w,%w", err, err2) + } + } + } + if err != nil { + return err + } + + if ctxop.IsCreate(ctx) { + // The async create operation does not fall-through to the + // update logic, so we need to call CreateOrUpdateVirtualMachine + // a second time to cause the update. + ExpectWithOffset(1, testCtx.Client.Get( + ctx, + client.ObjectKeyFromObject(vm), + vm)).To(Succeed()) + + if _, err := provider.CreateOrUpdateVirtualMachineAsync( + ctx, + vm); err != nil { + + return err + } + } + + return nil +}