From 10f02aeae4cd702d615c55117bdf3bdad93f9f27 Mon Sep 17 00:00:00 2001
From: akutz
Date: Fri, 13 Dec 2024 15:52:34 -0600
Subject: [PATCH] Fast Deploy (Experimental)

This patch adds support for the Fast Deploy feature, i.e. the ability to quickly provision a VM as a linked clone, as an experimental feature that must be enabled manually. There are many things about this feature that may change prior to it being ready for production.

The patch notes below are broken down into several sections:

* Goals     -- What is currently supported
* Non-goals -- What is not on the table right now
* Architecture
  * Activation -- How to enable this experimental feature
  * Placement  -- Request datastore recommendations
  * Disk cache -- Per-datastore cache for Content Library item disk(s)
  * Create VM  -- Create linked clone directly from cached disk

-~= Goals =~-

The following goals are considered in-scope for this experimental feature at this time. Just because something is not listed here does not mean it will not be added before the feature is made generally available:

* Support all VM images that are OVFs
* Support multiple zones
* Support workload-domain isolation
* Support all datastore types, including host-local and vSAN

-~= Non-goals =~-

The following is a list of non-goals that are not in scope at this time, although most of them should be revisited prior to this feature graduating to production:

* Support VM encryption

  Child disks can only be encrypted if their parent disks are encrypted. Users *could* deploy an encrypted VM without using Fast Deploy, publish that VM as an image, and then use that image as the source for provisioning encrypted VMs using Fast Deploy. However, child disks must also use the same encryption key as their parent disks. This limitation flies in the face of the upcoming Bring Your Own Key (BYOK) provider feature.

  To accommodate encryption, online disk promotion will be an option once the VM is deployed. This means VMs will be deployed as linked clones, gaining the deploy speed a linked clone affords. However, once the VM is created, even if it is powered on, its disks will be promoted so they no longer point back to their parents. While the VM will no longer save the storage space a linked clone offers, it will be able to support encryption.

* Support VM images that are VM templates (VMTX)

  The architecture behind Fast Deploy makes it trivial to support deploying VM images that point to VM templates. While not in scope at this time, it is likely this becomes part of the feature prior to it graduating to production-ready.

* Support for backup/restore

  The qualified backup/restore workflows for VM Service VMs have never been validated with linked clones, as linked clones have not been supported by VM Service up until this point. Due to how the linked clones are created by this feature, users should not expect existing backup/restore software to work with VMs provisioned with Fast Deploy at this time.

  The same online disk promotion option described above for encryption applies here: once the VM's disks are promoted and no longer point back to their parents, the VM gives up the storage savings of a linked clone but is able to support backup/restore.

* Support for site replication

  Similar to backup/restore, site replication workflows may not work with linked clones created from bare disks either. Again, the online disk promotion option described above accommodates site replication at the cost of the storage savings a linked clone offers.

* Support for datastore maintenance/migration

  Existing datastore maintenance/migration workflows may not be aware of, or know how to handle, the top-level `.contentlib-cache` directories created to cache disks from Content Library items on recommended datastores.

  To accommodate these workflows, the goal is to transition the cached disks to First Class Disks (FCD), but that requires some features not yet available to FCDs, such as the ability to query for the existence of an FCD based on its metadata.

-~= Architecture =~-

The architecture is broken down into the following sections:

* Activation -- How to enable this experimental feature
* Placement  -- Request datastore recommendations
* Disk cache -- Per-datastore cache for Content Library item disk(s)
* Create VM  -- Create linked clone directly from cached disk

--~~== Activation ==~~--

Enabling the experimental Fast Deploy feature requires setting the environment variable `FSS_WCP_VMSERVICE_FAST_DEPLOY` to `true` in the VM Operator deployment.

Please note, even when the feature is activated, it is possible to bypass the feature altogether by specifying the following annotation on a VM: `vmoperator.vmware.com/fast-deploy: "false"`. This annotation is completely ignored unless the feature is already activated via the environment variable as described above.

--~~== Placement ==~~--

The following steps provide a broad overview of how placement works (a sketch of the kind of ConfigSpec described in step 1 follows these steps):

1. The ConfigSpec used to create/place the VM now includes:

   a. The disks and controllers used by the disks from the image. The disks also specify the storage policy ID that backs the VM spec's storage class.

   b. The image's guest ID, if none was specified by the VM class or VM spec.

   c. The root `VMProfile`, which now specifies the storage policy ID that backs the VM spec's storage class.

2. A placement recommendation for datastores is always required. The recommendation uses the storage policies specified in the ConfigSpec to recommend a compatible datastore.

3. A path is constructed that points to where the VM will be created on the recommended datastore, ex.: `[<DATASTORE_NAME>] <VM_UID>/<VM_NAME>.vmx`
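For illustration, here is a minimal sketch, using govmomi's `vim25/types`, of the shape of ConfigSpec described in step 1. The storage policy ID, capacity, guest ID, and function name are placeholder values for illustration, not code from this patch:

```go
package main

import "github.com/vmware/govmomi/vim25/types"

// buildPlacementConfigSpec sketches the ConfigSpec described in step 1:
// each image disk carries the storage policy ID that backs the VM spec's
// storage class (1a), the guest ID comes from the image when the class
// and spec leave it unset (1b), and the root VmProfile names the same
// storage policy (1c). All argument values are placeholders.
func buildPlacementConfigSpec(vmName, guestID, storagePolicyID string) types.VirtualMachineConfigSpec {
	profile := []types.BaseVirtualMachineProfileSpec{
		&types.VirtualMachineDefinedProfileSpec{ProfileId: storagePolicyID},
	}
	return types.VirtualMachineConfigSpec{
		Name:      vmName,
		GuestId:   guestID, // (1b)
		VmProfile: profile, // (1c)
		DeviceChange: []types.BaseVirtualDeviceConfigSpec{
			&types.VirtualDeviceConfigSpec{
				Operation:     types.VirtualDeviceConfigSpecOperationAdd,
				FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate,
				Profile:       profile, // (1a)
				Device: &types.VirtualDisk{
					CapacityInBytes: 16 * 1024 * 1024 * 1024, // placeholder size
					VirtualDevice: types.VirtualDevice{
						Key:        -42,
						UnitNumber: types.NewInt32(0),
						Backing:    &types.VirtualDiskFlatVer2BackingInfo{},
					},
				},
			},
		},
	}
}
```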
--~~== Disk cache ==~~--

The disk(s) from a Content Library item are cached on-demand on the recommended datastore:

1. The path(s) to the image's VMDK file(s) from the underlying Content Library item are retrieved.

2. A special, top-level directory named `.contentlib-cache` is created, if it does not already exist, at the root of the recommended datastore. Please note, this does support vSAN, and thus the top-level directory may actually be a UUID that is resolved to `.contentlib-cache`.

3. A path is constructed that points to where the disk(s) for the library item are expected to be cached on the recommended datastore, ex.: `[<DATASTORE_NAME>] .contentlib-cache/<LIB_ITEM_ID>/<LIB_ITEM_CONTENT_VERSION>`. If this path does not exist, it is created.

4. The following occurs for each of the library item's VMDK files (a sketch of the path construction follows this section):

   a. The first 17 characters of a SHA-1 sum of the VMDK file name are used to build the expected path to the VMDK file's cached location on the recommended datastore, ex.: `[<DATASTORE_NAME>] .contentlib-cache/<LIB_ITEM_ID>/<LIB_ITEM_CONTENT_VERSION>/<17_CHAR_SHA1_SUM>.vmdk`

   b. If there is no VMDK at the above path, the VMDK file is copied to the above path.

The cached disks and the entire cache folder structure are automatically removed once there are no longer any VMs deployed as linked clones using a cached disk. This will likely change in the future to prevent the need to re-cache a disk just because the VMs deployed from it are no longer using it; otherwise disks may need to be continuously re-cached, which reduces the value this feature provides.
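To make step 4a concrete, here is a minimal, self-contained sketch of how such a cache path can be derived. The function name and the example values are illustrative only; they are not the actual helpers added by this patch:

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"path"
)

// cachedDiskPath sketches step 4a: the cached VMDK path is rooted at the
// datastore's .contentlib-cache directory, namespaced by library item ID
// and content version, with a file name built from the first 17
// characters of the SHA-1 sum of the source VMDK file name.
func cachedDiskPath(datastoreName, itemID, contentVersion, srcVMDKName string) string {
	sum := sha1.Sum([]byte(srcVMDKName))
	name := hex.EncodeToString(sum[:])[:17]
	return fmt.Sprintf("[%s] %s.vmdk",
		datastoreName,
		path.Join(".contentlib-cache", itemID, contentVersion, name))
}

func main() {
	// Illustrative values only.
	fmt.Println(cachedDiskPath(
		"vsanDatastore",
		"8ff4fbbe-8ff0-4e1c-8ce4-ba2671d8efc1",
		"v1",
		"photon-disk1.vmdk"))
}
```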
--~~== Create VM ==~~--

1. The `VirtualDisk` devices in the ConfigSpec used to create the VM are updated with `VirtualDiskFlatVer2BackingInfo` backings that specify a parent backing (see the sketch below). This parent backing points to the appropriate, cached, base disk from above.

2. The `CreateVM_Task` VMODL1 API is used to create the VM. Because the VM's disks have parent backings, this new VM is effectively a linked clone.
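The following sketch shows, again with govmomi's `vim25/types`, what step 1 amounts to for a single disk. The function name, datastore reference, and file paths are placeholders rather than code from this patch:

```go
package main

import "github.com/vmware/govmomi/vim25/types"

// linkDiskToParent sketches step 1 above: point the child disk's flat
// backing at the VM's own directory and set its Parent to the cached,
// read-only base disk, which is what makes CreateVM_Task produce a
// linked clone. The datastore ref and paths are placeholder values.
func linkDiskToParent(disk *types.VirtualDisk, ds types.ManagedObjectReference, childPath, parentPath string) {
	disk.Backing = &types.VirtualDiskFlatVer2BackingInfo{
		VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
			Datastore: &ds,
			FileName:  childPath, // e.g. "[<DATASTORE_NAME>] <VM_UID>/<VM_NAME>-0.vmdk"
		},
		DiskMode:        string(types.VirtualDiskModePersistent),
		ThinProvisioned: types.NewBool(true),
		Parent: &types.VirtualDiskFlatVer2BackingInfo{
			VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
				Datastore: &ds,
				FileName:  parentPath, // the cached base disk from the Disk cache section
			},
			DiskMode:        string(types.VirtualDiskModePersistent),
			ThinProvisioned: types.NewBool(true),
		},
	}
}
```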
---
 .../wcp/vmoperator/manager_env_var_patch.yaml |   6 +
 .../virtualmachine_controller.go              |  20 +-
 pkg/config/config.go                          |   2 +
 pkg/config/env.go                             |   2 +-
 pkg/config/env/env.go                         |   4 +-
 pkg/config/env_test.go                        |   2 +
 pkg/providers/vsphere/client/client_test.go   |   4 +-
 .../contentlibrary/content_library_utils.go   |  12 +-
 .../vsphere/placement/cluster_placement.go    | 111 ++++-
 .../vsphere/placement/zone_placement.go       | 181 +++++++-
 .../vsphere/placement/zone_placement_test.go  | 165 +++++++-
 .../vsphere/virtualmachine/configspec.go      |  38 +-
 pkg/providers/vsphere/vmlifecycle/create.go   |  26 +-
 .../vmlifecycle/create_contentlibrary.go      |  16 +
 .../create_contentlibrary_linked_clone.go     | 202 +++++++++
 pkg/providers/vsphere/vmprovider_vm.go        | 231 ++++++++--
 pkg/util/configspec.go                        |  84 ++++
 pkg/util/configspec_test.go                   | 395 ++++++++++++++++++
 pkg/util/vmopv1/image.go                      | 206 +++++++++
 pkg/util/vsphere/client/client_test.go        |   4 +-
 pkg/util/vsphere/contentlibrary/item_cache.go | 243 +++++++++++
 pkg/util/vsphere/contentlibrary/item_sync.go  |  43 ++
 22 files changed, 1874 insertions(+), 123 deletions(-)
 create mode 100644 pkg/providers/vsphere/vmlifecycle/create_contentlibrary_linked_clone.go
 create mode 100644 pkg/util/vmopv1/image.go
 create mode 100644 pkg/util/vsphere/contentlibrary/item_cache.go
 create mode 100644 pkg/util/vsphere/contentlibrary/item_sync.go

diff --git a/config/wcp/vmoperator/manager_env_var_patch.yaml b/config/wcp/vmoperator/manager_env_var_patch.yaml
index 430cee9a4..ef2e0652b 100644
--- a/config/wcp/vmoperator/manager_env_var_patch.yaml
+++ b/config/wcp/vmoperator/manager_env_var_patch.yaml
@@ -112,6 +112,12 @@
     name: FSS_WCP_SUPERVISOR_ASYNC_UPGRADE
     value: ""

+- op: add
+  path: /spec/template/spec/containers/0/env/-
+  value:
+    name: FSS_WCP_VMSERVICE_FAST_DEPLOY
+    value: ""
+
 #
 # Feature state switch flags beneath this line are enabled on main and only
 # retained in this file because it is used by internal testing to determine the
diff --git a/controllers/virtualmachine/virtualmachine/virtualmachine_controller.go b/controllers/virtualmachine/virtualmachine/virtualmachine_controller.go
index f96e8be1c..36ee4e3cf 100644
--- a/controllers/virtualmachine/virtualmachine/virtualmachine_controller.go
+++ b/controllers/virtualmachine/virtualmachine/virtualmachine_controller.go
@@ -8,6 +8,7 @@ import (
 	"errors"
 	"fmt"
 	"reflect"
+	"strconv"
 	"strings"
 	"time"

@@ -288,9 +289,26 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}

+	logger := ctrl.Log.WithName("VirtualMachine").WithValues("name", vm.NamespacedName())
+
+	if pkgcfg.FromContext(ctx).Features.FastDeploy {
+		// Allow the use of an annotation to control whether fast-deploy is used
+		// per-VM to deploy the VM.
+		if val := vm.Annotations["vmoperator.vmware.com/fast-deploy"]; val != "" {
+			if ok, _ := strconv.ParseBool(val); !ok {
+				// Create a copy of the config so the feature-state for
+				// FastDeploy can also be influenced by a VM annotation.
+				cfg := pkgcfg.FromContext(ctx)
+				cfg.Features.FastDeploy = false
+				ctx = pkgcfg.WithContext(ctx, cfg)
+				logger.Info("Disabled fast-deploy for this VM")
+			}
+		}
+	}
+
 	vmCtx := &pkgctx.VirtualMachineContext{
 		Context: ctx,
-		Logger:  ctrl.Log.WithName("VirtualMachine").WithValues("name", vm.NamespacedName()),
+		Logger:  logger,
 		VM:      vm,
 	}

diff --git a/pkg/config/config.go b/pkg/config/config.go
index 469a8eccc..7ca54952c 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -147,6 +147,8 @@ type FeatureStates struct {
 	VMIncrementalRestore      bool // FSS_WCP_VMSERVICE_INCREMENTAL_RESTORE
 	BringYourOwnEncryptionKey bool // FSS_WCP_VMSERVICE_BYOK
 	SVAsyncUpgrade            bool // FSS_WCP_SUPERVISOR_ASYNC_UPGRADE
+	// TODO(akutz) This FSS is placeholder.
+	FastDeploy bool // FSS_WCP_VMSERVICE_FAST_DEPLOY
 }

 type InstanceStorage struct {
diff --git a/pkg/config/env.go b/pkg/config/env.go
index 9675b4135..a4cf0576d 100644
--- a/pkg/config/env.go
+++ b/pkg/config/env.go
@@ -63,7 +63,7 @@ func FromEnv() Config {
 	setBool(env.FSSVMImportNewNet, &config.Features.VMImportNewNet)
 	setBool(env.FSSVMIncrementalRestore, &config.Features.VMIncrementalRestore)
 	setBool(env.FSSBringYourOwnEncryptionKey, &config.Features.BringYourOwnEncryptionKey)
-
+	setBool(env.FSSFastDeploy, &config.Features.FastDeploy)
 	setBool(env.FSSSVAsyncUpgrade, &config.Features.SVAsyncUpgrade)
 	if !config.Features.SVAsyncUpgrade {
 		// When SVAsyncUpgrade is enabled, we'll later use the capability CM to determine if
diff --git a/pkg/config/env/env.go b/pkg/config/env/env.go
index 93453f0cf..e42c14f76 100644
--- a/pkg/config/env/env.go
+++ b/pkg/config/env/env.go
@@ -58,7 +58,7 @@ const (
 	FSSVMIncrementalRestore
 	FSSBringYourOwnEncryptionKey
 	FSSSVAsyncUpgrade
-
+	FSSFastDeploy
 	_varNameEnd
 )

@@ -176,6 +176,8 @@ func (n VarName) String() string {
 		return "FSS_WCP_VMSERVICE_BYOK"
 	case FSSSVAsyncUpgrade:
 		return "FSS_WCP_SUPERVISOR_ASYNC_UPGRADE"
+	case FSSFastDeploy:
+		return "FSS_WCP_VMSERVICE_FAST_DEPLOY"
 	}
 	panic("unknown environment variable")
 }
diff --git a/pkg/config/env_test.go b/pkg/config/env_test.go
index 33b633de1..47f2c021c 100644
--- a/pkg/config/env_test.go
+++ b/pkg/config/env_test.go
@@ -101,6 +101,7 @@ var _ = Describe(
 			Expect(os.Setenv("FSS_WCP_VMSERVICE_INCREMENTAL_RESTORE", "true")).To(Succeed())
 			Expect(os.Setenv("FSS_WCP_VMSERVICE_BYOK", "true")).To(Succeed())
 			Expect(os.Setenv("FSS_WCP_SUPERVISOR_ASYNC_UPGRADE", "false")).To(Succeed())
+			Expect(os.Setenv("FSS_WCP_VMSERVICE_FAST_DEPLOY", "true")).To(Succeed())
 			Expect(os.Setenv("CREATE_VM_REQUEUE_DELAY", "125h")).To(Succeed())
 			Expect(os.Setenv("POWERED_ON_VM_HAS_IP_REQUEUE_DELAY", "126h")).To(Succeed())
 		})
@@ -150,6 +151,7 @@ var _ = Describe(
 					BringYourOwnEncryptionKey: true,
 					SVAsyncUpgrade:            false, // Capability gate so tested below
 					WorkloadDomainIsolation:   true,
+					FastDeploy:                true,
 				},
 				CreateVMRequeueDelay:         125 * time.Hour,
 				PoweredOnVMHasIPRequeueDelay: 126 * time.Hour,
diff --git 
a/pkg/providers/vsphere/client/client_test.go b/pkg/providers/vsphere/client/client_test.go index 8749740f1..724845a83 100644 --- a/pkg/providers/vsphere/client/client_test.go +++ b/pkg/providers/vsphere/client/client_test.go @@ -89,6 +89,8 @@ var _ = Describe("Client", Label(testlabels.VCSim), Ordered /* Avoided race for serverCertFile = f } + datacenter := simulator.Map.Any("Datacenter") + cfg = &config.VSphereVMProviderConfig{ VcPNID: server.URL.Hostname(), VcPort: server.URL.Port(), @@ -98,7 +100,7 @@ var _ = Describe("Client", Label(testlabels.VCSim), Ordered /* Avoided race for }, CAFilePath: serverCertFile, InsecureSkipTLSVerify: false, - Datacenter: simulator.Map.Any("Datacenter").Reference().Value, + Datacenter: datacenter.Reference().Value, } }) diff --git a/pkg/providers/vsphere/contentlibrary/content_library_utils.go b/pkg/providers/vsphere/contentlibrary/content_library_utils.go index 9cbe637b5..b84d6f2d2 100644 --- a/pkg/providers/vsphere/contentlibrary/content_library_utils.go +++ b/pkg/providers/vsphere/contentlibrary/content_library_utils.go @@ -84,15 +84,13 @@ func initImageStatusFromOVFVirtualSystem( // Use operating system info from the first os section in the VM image, if one exists. if os := ovfVirtualSystem.OperatingSystem; os != nil { - o := os - osInfo := &imageStatus.OSInfo - osInfo.ID = strconv.Itoa(int(o.ID)) - if o.Version != nil { - osInfo.Version = *o.Version + osInfo.ID = strconv.Itoa(int(os.ID)) + if os.Version != nil { + osInfo.Version = *os.Version } - if o.OSType != nil { - osInfo.Type = *o.OSType + if os.OSType != nil { + osInfo.Type = *os.OSType } } diff --git a/pkg/providers/vsphere/placement/cluster_placement.go b/pkg/providers/vsphere/placement/cluster_placement.go index 9963df495..37550dd44 100644 --- a/pkg/providers/vsphere/placement/cluster_placement.go +++ b/pkg/providers/vsphere/placement/cluster_placement.go @@ -8,37 +8,107 @@ import ( "fmt" "strings" + "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25" vimtypes "github.com/vmware/govmomi/vim25/types" + pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config" pkgctx "github.com/vmware-tanzu/vm-operator/pkg/context" + "github.com/vmware-tanzu/vm-operator/pkg/util" ) // Recommendation is the info about a placement recommendation. type Recommendation struct { - PoolMoRef vimtypes.ManagedObjectReference - HostMoRef *vimtypes.ManagedObjectReference - // TODO: Datastore, whatever else as we need it. + PoolMoRef vimtypes.ManagedObjectReference + HostMoRef *vimtypes.ManagedObjectReference + Datastores []DatastoreResult } -func relocateSpecToRecommendation(relocateSpec *vimtypes.VirtualMachineRelocateSpec) *Recommendation { +func relocateSpecToRecommendation( + ctx context.Context, + relocateSpec *vimtypes.VirtualMachineRelocateSpec) *Recommendation { + // Instance Storage requires the host. 
if relocateSpec == nil || relocateSpec.Pool == nil || relocateSpec.Host == nil { return nil } - return &Recommendation{ + r := Recommendation{ PoolMoRef: *relocateSpec.Pool, HostMoRef: relocateSpec.Host, } + + if pkgcfg.FromContext(ctx).Features.FastDeploy { + if ds := relocateSpec.Datastore; ds != nil { + r.Datastores = append(r.Datastores, DatastoreResult{ + MoRef: *ds, + }) + } + for i := range relocateSpec.Disk { + d := relocateSpec.Disk[i] + r.Datastores = append(r.Datastores, DatastoreResult{ + MoRef: d.Datastore, + ForDisk: true, + DiskKey: d.DiskId, + }) + } + } + + return &r } -func clusterPlacementActionToRecommendation(action vimtypes.ClusterClusterInitialPlacementAction) *Recommendation { - return &Recommendation{ +func clusterPlacementActionToRecommendation( + ctx context.Context, + finder *find.Finder, + action vimtypes.ClusterClusterInitialPlacementAction) (*Recommendation, error) { + + r := Recommendation{ PoolMoRef: action.Pool, HostMoRef: action.TargetHost, } + + if pkgcfg.FromContext(ctx).Features.FastDeploy { + if cs := action.ConfigSpec; cs != nil { + // + // Get the recommended datastore for the VM. + // + if cs.Files != nil { + if dsn := util.DatastoreNameFromStorageURI(cs.Files.VmPathName); dsn != "" { + ds, err := finder.Datastore(ctx, dsn) + if err != nil { + return nil, fmt.Errorf("failed to get datastore for %q: %w", dsn, err) + } + if ds != nil { + r.Datastores = append(r.Datastores, DatastoreResult{ + Name: dsn, + MoRef: ds.Reference(), + }) + } + } + } + + // + // Get the recommended datastores for each disk. + // + for i := range cs.DeviceChange { + dcs := cs.DeviceChange[i].GetVirtualDeviceConfigSpec() + if disk, ok := dcs.Device.(*vimtypes.VirtualDisk); ok { + if bbi, ok := disk.Backing.(vimtypes.BaseVirtualDeviceFileBackingInfo); ok { + if bi := bbi.GetVirtualDeviceFileBackingInfo(); bi.Datastore != nil { + r.Datastores = append(r.Datastores, DatastoreResult{ + MoRef: *bi.Datastore, + ForDisk: true, + DiskKey: disk.Key, + }) + } + } + } + } + } + } + + return &r, nil } func CheckPlacementRelocateSpec(spec *vimtypes.VirtualMachineRelocateSpec) error { @@ -109,7 +179,7 @@ func CloneVMRelocateSpec( // PlaceVMForCreate determines the suitable placement candidates in the cluster. 
func PlaceVMForCreate( - ctx context.Context, + vmCtx pkgctx.VirtualMachineContext, cluster *object.ClusterComputeResource, configSpec vimtypes.VirtualMachineConfigSpec) ([]Recommendation, error) { @@ -118,11 +188,15 @@ func PlaceVMForCreate( ConfigSpec: &configSpec, } - resp, err := cluster.PlaceVm(ctx, placementSpec) + vmCtx.Logger.V(4).Info("PlaceVMForCreate request", "placementSpec", vimtypes.ToString(placementSpec)) + + resp, err := cluster.PlaceVm(vmCtx, placementSpec) if err != nil { return nil, err } + vmCtx.Logger.V(6).Info("PlaceVMForCreate response", "resp", vimtypes.ToString(resp)) + var recommendations []Recommendation for _, r := range resp.Recommendations { @@ -132,7 +206,7 @@ func PlaceVMForCreate( for _, a := range r.Action { if pa, ok := a.(*vimtypes.PlacementAction); ok { - if r := relocateSpecToRecommendation(pa.RelocateSpec); r != nil { + if r := relocateSpecToRecommendation(vmCtx, pa.RelocateSpec); r != nil { recommendations = append(recommendations, *r) } } @@ -146,9 +220,10 @@ func PlaceVMForCreate( func ClusterPlaceVMForCreate( vmCtx pkgctx.VirtualMachineContext, vcClient *vim25.Client, + finder *find.Finder, resourcePoolsMoRefs []vimtypes.ManagedObjectReference, configSpec vimtypes.VirtualMachineConfigSpec, - needsHost bool) ([]Recommendation, error) { + needHostPlacement, needDatastorePlacement bool) ([]Recommendation, error) { // Work around PlaceVmsXCluster bug that crashes vpxd when ConfigSpec.Files is nil. configSpec.Files = new(vimtypes.VirtualMachineFileInfo) @@ -160,17 +235,18 @@ func ClusterPlaceVMForCreate( ConfigSpec: configSpec, }, }, - HostRecommRequired: &needsHost, + HostRecommRequired: &needHostPlacement, + DatastoreRecommRequired: &needDatastorePlacement, } - vmCtx.Logger.V(4).Info("PlaceVmsXCluster request", "placementSpec", placementSpec) + vmCtx.Logger.V(4).Info("PlaceVmsXCluster request", "placementSpec", vimtypes.ToString(placementSpec)) resp, err := object.NewRootFolder(vcClient).PlaceVmsXCluster(vmCtx, placementSpec) if err != nil { return nil, err } - vmCtx.Logger.V(6).Info("PlaceVmsXCluster response", "resp", resp) + vmCtx.Logger.V(6).Info("PlaceVmsXCluster response", "resp", vimtypes.ToString(resp)) if len(resp.Faults) != 0 { var faultMgs []string @@ -194,7 +270,12 @@ func ClusterPlaceVMForCreate( for _, a := range info.Recommendation.Action { if ca, ok := a.(*vimtypes.ClusterClusterInitialPlacementAction); ok { - if r := clusterPlacementActionToRecommendation(*ca); r != nil { + r, err := clusterPlacementActionToRecommendation(vmCtx, finder, *ca) + if err != nil { + return nil, fmt.Errorf( + "failed to translate placement action to recommendation: %w", err) + } + if r != nil { recommendations = append(recommendations, *r) } } diff --git a/pkg/providers/vsphere/placement/zone_placement.go b/pkg/providers/vsphere/placement/zone_placement.go index 14be89037..1534b05c2 100644 --- a/pkg/providers/vsphere/placement/zone_placement.go +++ b/pkg/providers/vsphere/placement/zone_placement.go @@ -9,7 +9,9 @@ import ( "math/rand" "strings" + "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/property" "github.com/vmware/govmomi/vim25" vimtypes "github.com/vmware/govmomi/vim25/types" "golang.org/x/exp/maps" @@ -42,10 +44,26 @@ type Result struct { ZoneName string HostMoRef *vimtypes.ManagedObjectReference PoolMoRef vimtypes.ManagedObjectReference - // TODO: Datastore, whatever else as we need it. 
+ Datastores []DatastoreResult + + needZonePlacement bool + needHostPlacement bool + needDatastorePlacement bool +} + +type DatastoreResult struct { + Name string + MoRef vimtypes.ManagedObjectReference + URL string + TopLevelDirectoryCreateSupported bool + + // ForDisk is false if the recommendation is for the VM's home directory and + // true if for a disk. DiskKey is only valid if ForDisk is true. + ForDisk bool + DiskKey int32 } -func doesVMNeedPlacement(vmCtx pkgctx.VirtualMachineContext) (res Result, needZonePlacement, needInstanceStoragePlacement bool) { +func doesVMNeedPlacement(vmCtx pkgctx.VirtualMachineContext) (res Result) { res.ZonePlacement = true if zoneName := vmCtx.VM.Labels[topology.KubernetesTopologyZoneLabelKey]; zoneName != "" { @@ -53,7 +71,7 @@ func doesVMNeedPlacement(vmCtx pkgctx.VirtualMachineContext) (res Result, needZo res.ZoneName = zoneName } else { // VM does not have a zone already assigned so we need to select one. - needZonePlacement = true + res.needZonePlacement = true } if pkgcfg.FromContext(vmCtx).Features.InstanceStorage { @@ -65,11 +83,15 @@ func doesVMNeedPlacement(vmCtx pkgctx.VirtualMachineContext) (res Result, needZo res.HostMoRef = &vimtypes.ManagedObjectReference{Type: "HostSystem", Value: hostMoID} } else { // VM has InstanceStorage volumes so we need to select a host. - needInstanceStoragePlacement = true + res.needHostPlacement = true } } } + if pkgcfg.FromContext(vmCtx).Features.FastDeploy { + res.needDatastorePlacement = true + } + return } @@ -262,9 +284,10 @@ func getPlacementRecommendations( func getZonalPlacementRecommendations( vmCtx pkgctx.VirtualMachineContext, vcClient *vim25.Client, + finder *find.Finder, candidates map[string][]string, configSpec vimtypes.VirtualMachineConfigSpec, - needsHost bool) map[string][]Recommendation { + needHostPlacement, needDatastorePlacement bool) map[string][]Recommendation { rpMOToZone := map[vimtypes.ManagedObjectReference]string{} var candidateRPMoRefs []vimtypes.ManagedObjectReference @@ -282,7 +305,7 @@ func getZonalPlacementRecommendations( if len(candidateRPMoRefs) == 1 { // If there is only one candidate, we might be able to skip some work. - if needsHost { + if needHostPlacement || needDatastorePlacement { // This is a hack until PlaceVmsXCluster() supports instance storage disks. vmCtx.Logger.Info("Falling back into non-zonal placement since the only candidate needs host selected", "rpMoID", candidateRPMoRefs[0].Value) @@ -297,7 +320,14 @@ func getZonalPlacementRecommendations( } else { var err error - recs, err = ClusterPlaceVMForCreate(vmCtx, vcClient, candidateRPMoRefs, configSpec, needsHost) + recs, err = ClusterPlaceVMForCreate( + vmCtx, + vcClient, + finder, + candidateRPMoRefs, + configSpec, + needHostPlacement, + needDatastorePlacement) if err != nil { vmCtx.Logger.Error(err, "PlaceVmsXCluster failed") return nil @@ -338,15 +368,25 @@ func Placement( vmCtx pkgctx.VirtualMachineContext, client ctrlclient.Client, vcClient *vim25.Client, + finder *find.Finder, configSpec vimtypes.VirtualMachineConfigSpec, constraints Constraints) (*Result, error) { - existingRes, zonePlacement, instanceStoragePlacement := doesVMNeedPlacement(vmCtx) - if !zonePlacement && !instanceStoragePlacement { - return &existingRes, nil + curResult := doesVMNeedPlacement(vmCtx) + if !curResult.needZonePlacement && + !curResult.needHostPlacement && + !curResult.needDatastorePlacement { + + // VM does not require any type of placement, so we can return early. 
+ return &curResult, nil } - candidates, err := getPlacementCandidates(vmCtx, client, vcClient, zonePlacement, constraints.ChildRPName) + candidates, err := getPlacementCandidates( + vmCtx, + client, + vcClient, + curResult.needZonePlacement, + constraints.ChildRPName) if err != nil { return nil, err } @@ -383,12 +423,17 @@ func Placement( } // TBD: May want to get the host for vGPU and other passthru devices too. - needsHost := instanceStoragePlacement - var recommendations map[string][]Recommendation - if zonePlacement { - recommendations = getZonalPlacementRecommendations(vmCtx, vcClient, candidates, configSpec, needsHost) - } else /* instanceStoragePlacement */ { + if curResult.needZonePlacement { + recommendations = getZonalPlacementRecommendations( + vmCtx, + vcClient, + finder, + candidates, + configSpec, + curResult.needHostPlacement, + curResult.needDatastorePlacement) + } else /* needHostPlacement or needDatastorePlacement */ { recommendations = getPlacementRecommendations(vmCtx, vcClient, candidates, configSpec) } if len(recommendations) == 0 { @@ -396,15 +441,109 @@ func Placement( } zoneName, rec := MakePlacementDecision(recommendations) - vmCtx.Logger.V(5).Info("Placement decision result", "zone", zoneName, "recommendation", rec) + vmCtx.Logger.V(5).Info("Placement recommendation", "zone", zoneName, "recommendation", rec) + + if pkgcfg.FromContext(vmCtx).Features.FastDeploy { + // Get the name and type of the datastores. + if err := getDatastoreNameAndType(vmCtx, vcClient, &rec); err != nil { + return nil, err + } + } - result := &Result{ - ZonePlacement: zonePlacement, - InstanceStoragePlacement: instanceStoragePlacement, + result := Result{ + ZonePlacement: curResult.needZonePlacement, + InstanceStoragePlacement: curResult.needHostPlacement, ZoneName: zoneName, PoolMoRef: rec.PoolMoRef, HostMoRef: rec.HostMoRef, + Datastores: rec.Datastores, + } + + vmCtx.Logger.V(4).Info("Placement result", "result", result) + + return &result, nil +} + +func getDatastoreNameAndType( + vmCtx pkgctx.VirtualMachineContext, + vcClient *vim25.Client, + rec *Recommendation) error { + + var objSet []vimtypes.ObjectSpec + for i := range rec.Datastores { + d := rec.Datastores[i] + if d.Name == "" { + objSet = append(objSet, vimtypes.ObjectSpec{ + Obj: d.MoRef, + }) + } + } + + if len(objSet) == 0 { + return nil + } + + pc := property.DefaultCollector(vcClient) + res, err := pc.RetrieveProperties(vmCtx, vimtypes.RetrieveProperties{ + SpecSet: []vimtypes.PropertyFilterSpec{ + { + PropSet: []vimtypes.PropertySpec{ + { + Type: "Datastore", + PathSet: []string{ + "capability.topLevelDirectoryCreateSupported", + "info.url", + "name", + }, + }, + }, + ObjectSet: objSet, + }, + }, + }) + if err != nil { + return fmt.Errorf("failed to get datastore names: %w", err) + } + + for i := range res.Returnval { + r := res.Returnval[i] + for j := range rec.Datastores { + if r.Obj == rec.Datastores[j].MoRef { + for k := range r.PropSet { + p := r.PropSet[k] + switch p.Name { + case "capability.topLevelDirectoryCreateSupported": + switch tVal := p.Val.(type) { + case bool: + rec.Datastores[j].TopLevelDirectoryCreateSupported = tVal + default: + return fmt.Errorf( + "datastore %[1]s is not bool: %[2]T, %+[2]v", + p.Name, p.Val) + } + case "info.url": + switch tVal := p.Val.(type) { + case string: + rec.Datastores[j].URL = tVal + default: + return fmt.Errorf( + "datastore %[1]s is not string: %[2]T, %+[2]v", + p.Name, p.Val) + } + case "name": + switch tVal := p.Val.(type) { + case string: + rec.Datastores[j].Name = 
tVal + default: + return fmt.Errorf( + "datastore %[1]s is not string: %[2]T, %+[2]v", + p.Name, p.Val) + } + } + } + } + } } - return result, nil + return nil } diff --git a/pkg/providers/vsphere/placement/zone_placement_test.go b/pkg/providers/vsphere/placement/zone_placement_test.go index 53b40ac80..fa1cd8ddd 100644 --- a/pkg/providers/vsphere/placement/zone_placement_test.go +++ b/pkg/providers/vsphere/placement/zone_placement_test.go @@ -4,6 +4,8 @@ package placement_test import ( + "context" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -14,10 +16,12 @@ import ( vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha3" topologyv1 "github.com/vmware-tanzu/vm-operator/external/tanzu-topology/api/v1alpha1" + pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config" pkgctx "github.com/vmware-tanzu/vm-operator/pkg/context" "github.com/vmware-tanzu/vm-operator/pkg/providers/vsphere/constants" "github.com/vmware-tanzu/vm-operator/pkg/providers/vsphere/placement" "github.com/vmware-tanzu/vm-operator/pkg/topology" + "github.com/vmware-tanzu/vm-operator/pkg/util/ptr" "github.com/vmware-tanzu/vm-operator/test/builder" ) @@ -69,6 +73,7 @@ func vcSimPlacement() { var ( initObjects []client.Object + parentCtx context.Context ctx *builder.TestContextForVCSim nsInfo builder.WorkloadNamespaceInfo testConfig builder.VCSimTestConfig @@ -80,6 +85,7 @@ func vcSimPlacement() { ) BeforeEach(func() { + parentCtx = pkgcfg.NewContext() testConfig = builder.VCSimTestConfig{} vm = builder.DummyVirtualMachine() @@ -88,13 +94,35 @@ func vcSimPlacement() { // Other than the name ConfigSpec contents don't matter for vcsim. configSpec = vimtypes.VirtualMachineConfigSpec{ Name: vm.Name, + + // Add a disk to prompt a datastore assignment. + DeviceChange: []vimtypes.BaseVirtualDeviceConfigSpec{ + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + FileOperation: vimtypes.VirtualDeviceConfigSpecFileOperationCreate, + Device: &vimtypes.VirtualDisk{ + CapacityInBytes: 1024 * 1024, + VirtualDevice: vimtypes.VirtualDevice{ + Key: -42, + UnitNumber: ptr.To[int32](0), + Backing: &vimtypes.VirtualDiskFlatVer2BackingInfo{ + ThinProvisioned: ptr.To(true), + }, + }, + }, + }, + }, } constraints = placement.Constraints{} }) JustBeforeEach(func() { - ctx = suite.NewTestContextForVCSim(testConfig, initObjects...) + ctx = suite.NewTestContextForVCSimWithParentContext( + parentCtx, + testConfig, + initObjects...) 
+ nsInfo = ctx.CreateWorkloadNamespace() vm.Namespace = nsInfo.Namespace @@ -139,7 +167,7 @@ func vcSimPlacement() { }) It("returns success with same zone", func() { - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result).ToNot(BeNil()) Expect(result.ZonePlacement).To(BeTrue()) @@ -154,7 +182,7 @@ func vcSimPlacement() { It("returns success even if assigned zone is being deleted", func() { Expect(ctx.Client.Delete(ctx, zone)).To(Succeed()) - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).To(BeNil()) Expect(result).NotTo(BeNil()) }) @@ -166,14 +194,14 @@ func vcSimPlacement() { }) It("returns an error", func() { - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).To(MatchError("no zones in specified namespace")) Expect(result).To(BeNil()) }) }) It("no zone assigned, returns success", func() { - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.ZonePlacement).To(BeTrue()) @@ -192,7 +220,7 @@ func vcSimPlacement() { }) It("returns success", func() { - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.ZonePlacement).To(BeTrue()) @@ -211,7 +239,7 @@ func vcSimPlacement() { zone.Finalizers = []string{"test"} Expect(ctx.Client.Update(ctx, zone)).To(Succeed()) Expect(ctx.Client.Delete(ctx, zone)).To(Succeed()) - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).To(MatchError("no placement candidates available")) Expect(result).To(BeNil()) }) @@ -228,7 +256,7 @@ func vcSimPlacement() { } constraints.ChildRPName = childRPName - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.ZonePlacement).To(BeTrue()) @@ -244,7 +272,7 @@ func vcSimPlacement() { Context("Only allowed zone does not exist", func() { It("returns error", func() { constraints.Zones = sets.New("bogus-zone") - _, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + _, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).To(MatchError("no placement candidates available after applying zone constraints: bogus-zone")) }) }) @@ -252,7 +280,7 @@ func vcSimPlacement() { Context("Allowed zone exists", func() { It("returns success", func() { constraints.Zones = sets.New(ctx.ZoneNames[0]) - 
result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.ZoneName).To(Equal(ctx.ZoneNames[0])) }) @@ -276,7 +304,7 @@ func vcSimPlacement() { }) It("returns success with same host", func() { - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.InstanceStoragePlacement).To(BeTrue()) @@ -286,7 +314,7 @@ func vcSimPlacement() { }) It("returns success", func() { - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.ZonePlacement).To(BeTrue()) @@ -312,7 +340,7 @@ func vcSimPlacement() { } constraints.ChildRPName = childRPName - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.ZonePlacement).To(BeTrue()) @@ -349,7 +377,7 @@ func vcSimPlacement() { }) It("returns success with same zone", func() { - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result).ToNot(BeNil()) Expect(result.ZonePlacement).To(BeTrue()) @@ -369,14 +397,14 @@ func vcSimPlacement() { }) It("returns an error", func() { - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).To(MatchError("no placement candidates available")) Expect(result).To(BeNil()) }) }) It("returns success", func() { - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.ZonePlacement).To(BeTrue()) @@ -395,7 +423,7 @@ func vcSimPlacement() { }) It("returns success", func() { - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.ZonePlacement).To(BeTrue()) @@ -420,7 +448,7 @@ func vcSimPlacement() { } constraints.ChildRPName = childRPName - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.ZonePlacement).To(BeTrue()) @@ -437,7 +465,7 @@ func vcSimPlacement() { Context("Only allowed zone does not exist", func() { It("returns error", func() { constraints.Zones = sets.New("bogus-zone") - _, err := placement.Placement(vmCtx, ctx.Client, 
ctx.VCClient.Client, configSpec, constraints) + _, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).To(MatchError("no placement candidates available after applying zone constraints: bogus-zone")) }) }) @@ -445,7 +473,7 @@ func vcSimPlacement() { Context("Allowed zone exists", func() { It("returns success", func() { constraints.Zones = sets.New(ctx.ZoneNames[0]) - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.ZoneName).To(Equal(ctx.ZoneNames[0])) }) @@ -469,7 +497,7 @@ func vcSimPlacement() { }) It("returns success with same host", func() { - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.InstanceStoragePlacement).To(BeTrue()) @@ -479,7 +507,7 @@ func vcSimPlacement() { }) It("returns success", func() { - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.ZonePlacement).To(BeTrue()) @@ -505,7 +533,7 @@ func vcSimPlacement() { } constraints.ChildRPName = childRPName - result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, configSpec, constraints) + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) Expect(err).ToNot(HaveOccurred()) Expect(result.ZonePlacement).To(BeTrue()) @@ -522,4 +550,95 @@ func vcSimPlacement() { }) }) }) + + Describe("When FSS_WCP_VMSERVICE_FAST_DEPLOY enabled", func() { + BeforeEach(func() { + pkgcfg.SetContext(parentCtx, func(config *pkgcfg.Config) { + config.Features.FastDeploy = true + }) + }) + + It("returns success", func() { + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) + Expect(err).ToNot(HaveOccurred()) + + Expect(result.ZonePlacement).To(BeTrue()) + Expect(result.ZoneName).To(BeElementOf(ctx.ZoneNames)) + Expect(result.PoolMoRef).ToNot(BeZero()) + Expect(result.HostMoRef).To(BeNil()) + Expect(result.Datastores).ToNot(BeEmpty()) + Expect(result.Datastores[0].ForDisk).To(BeFalse()) + Expect(result.Datastores[0].DiskKey).To(BeZero()) + Expect(result.Datastores[0].Name).ToNot(BeEmpty()) + Expect(result.Datastores[0].MoRef).ToNot(BeZero()) + Expect(result.Datastores[0].URL).ToNot(BeZero()) + Expect(result.Datastores[0].TopLevelDirectoryCreateSupported).To(BeTrue()) + Expect(result.Datastores[1].ForDisk).To(BeTrue()) + Expect(result.Datastores[1].DiskKey).ToNot(BeZero()) + Expect(result.Datastores[1].Name).ToNot(BeEmpty()) + Expect(result.Datastores[1].MoRef).ToNot(BeZero()) + Expect(result.Datastores[1].URL).ToNot(BeZero()) + Expect(result.Datastores[1].TopLevelDirectoryCreateSupported).To(BeTrue()) + }) + + Context("Only one zone exists", func() { + BeforeEach(func() { + testConfig.NumFaultDomains = 1 + }) + + It("returns success", func() { + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) + Expect(err).ToNot(HaveOccurred()) + + 
Expect(result.ZonePlacement).To(BeTrue()) + Expect(result.ZoneName).To(BeElementOf(ctx.ZoneNames)) + Expect(result.PoolMoRef).ToNot(BeZero()) + Expect(result.HostMoRef).ToNot(BeNil()) + Expect(result.Datastores).ToNot(BeEmpty()) + Expect(result.Datastores[0].ForDisk).To(BeFalse()) + Expect(result.Datastores[0].DiskKey).To(BeZero()) + Expect(result.Datastores[0].Name).ToNot(BeEmpty()) + Expect(result.Datastores[0].MoRef).ToNot(BeZero()) + Expect(result.Datastores[0].URL).ToNot(BeZero()) + Expect(result.Datastores[0].TopLevelDirectoryCreateSupported).To(BeTrue()) + }) + }) + }) + + // TODO(akutz): Delete when FSS_WCP_VMSERVICE_FAST_DEPLOY is enabled. + Describe("When FSS_WCP_VMSERVICE_FAST_DEPLOY disabled", func() { + BeforeEach(func() { + pkgcfg.SetContext(parentCtx, func(config *pkgcfg.Config) { + config.Features.FastDeploy = false + }) + }) + + It("returns success", func() { + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) + Expect(err).ToNot(HaveOccurred()) + + Expect(result.ZonePlacement).To(BeTrue()) + Expect(result.ZoneName).To(BeElementOf(ctx.ZoneNames)) + Expect(result.PoolMoRef).ToNot(BeZero()) + Expect(result.HostMoRef).To(BeNil()) + Expect(result.Datastores).To(BeEmpty()) + }) + + Context("Only one zone exists", func() { + BeforeEach(func() { + testConfig.NumFaultDomains = 1 + }) + + It("returns success", func() { + result, err := placement.Placement(vmCtx, ctx.Client, ctx.VCClient.Client, ctx.Finder, configSpec, constraints) + Expect(err).ToNot(HaveOccurred()) + + Expect(result.ZonePlacement).To(BeTrue()) + Expect(result.ZoneName).To(BeElementOf(ctx.ZoneNames)) + Expect(result.PoolMoRef).ToNot(BeZero()) + Expect(result.HostMoRef).To(BeNil()) + Expect(result.Datastores).To(BeEmpty()) + }) + }) + }) } diff --git a/pkg/providers/vsphere/virtualmachine/configspec.go b/pkg/providers/vsphere/virtualmachine/configspec.go index bf1093763..67bc53acb 100644 --- a/pkg/providers/vsphere/virtualmachine/configspec.go +++ b/pkg/providers/vsphere/virtualmachine/configspec.go @@ -186,27 +186,29 @@ func CreateConfigSpecForPlacement( configSpec.DeviceChange = deviceChangeCopy - // Add a dummy disk for placement: PlaceVmsXCluster expects there to always be at least one disk. - // Until we're in a position to have the OVF envelope here, add a dummy disk satisfy it. - configSpec.DeviceChange = append(configSpec.DeviceChange, &vimtypes.VirtualDeviceConfigSpec{ - Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, - FileOperation: vimtypes.VirtualDeviceConfigSpecFileOperationCreate, - Device: &vimtypes.VirtualDisk{ - CapacityInBytes: 1024 * 1024, - VirtualDevice: vimtypes.VirtualDevice{ - Key: -42, - UnitNumber: ptr.To[int32](0), - Backing: &vimtypes.VirtualDiskFlatVer2BackingInfo{ - ThinProvisioned: ptr.To(true), + if !pkgcfg.FromContext(vmCtx).Features.FastDeploy { + // Add a dummy disk for placement: PlaceVmsXCluster expects there to always be at least one disk. + // Until we're in a position to have the OVF envelope here, add a dummy disk satisfy it. 
+ configSpec.DeviceChange = append(configSpec.DeviceChange, &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + FileOperation: vimtypes.VirtualDeviceConfigSpecFileOperationCreate, + Device: &vimtypes.VirtualDisk{ + CapacityInBytes: 1024 * 1024, + VirtualDevice: vimtypes.VirtualDevice{ + Key: -42, + UnitNumber: ptr.To[int32](0), + Backing: &vimtypes.VirtualDiskFlatVer2BackingInfo{ + ThinProvisioned: ptr.To(true), + }, }, }, - }, - Profile: []vimtypes.BaseVirtualMachineProfileSpec{ - &vimtypes.VirtualMachineDefinedProfileSpec{ - ProfileId: storageClassesToIDs[vmCtx.VM.Spec.StorageClass], + Profile: []vimtypes.BaseVirtualMachineProfileSpec{ + &vimtypes.VirtualMachineDefinedProfileSpec{ + ProfileId: storageClassesToIDs[vmCtx.VM.Spec.StorageClass], + }, }, - }, - }) + }) + } if pkgcfg.FromContext(vmCtx).Features.InstanceStorage { isVolumes := vmopv1util.FilterInstanceStorageVolumes(vmCtx.VM) diff --git a/pkg/providers/vsphere/vmlifecycle/create.go b/pkg/providers/vsphere/vmlifecycle/create.go index 431f45fd2..2532f929c 100644 --- a/pkg/providers/vsphere/vmlifecycle/create.go +++ b/pkg/providers/vsphere/vmlifecycle/create.go @@ -5,9 +5,11 @@ package vmlifecycle import ( "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vapi/rest" "github.com/vmware/govmomi/vim25" vimtypes "github.com/vmware/govmomi/vim25/types" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" pkgctx "github.com/vmware-tanzu/vm-operator/pkg/context" ) @@ -24,17 +26,39 @@ type CreateArgs struct { HostMoID string StorageProfileID string DatastoreMoID string // gce2e only: used only if StorageProfileID is unset + Datastores []DatastoreRef + ZoneName string +} + +type DatastoreRef struct { + Name string + MoRef vimtypes.ManagedObjectReference + URL string + TopLevelDirectoryCreateSupported bool + + // ForDisk is false if the recommendation is for the VM's home directory and + // true if for a disk. DiskKey is only valid if ForDisk is true. 
+ ForDisk bool + DiskKey int32 } func CreateVirtualMachine( vmCtx pkgctx.VirtualMachineContext, + k8sClient ctrlclient.Client, restClient *rest.Client, vimClient *vim25.Client, finder *find.Finder, + datacenter *object.Datacenter, createArgs *CreateArgs) (*vimtypes.ManagedObjectReference, error) { if createArgs.UseContentLibrary { - return deployFromContentLibrary(vmCtx, restClient, vimClient, createArgs) + return deployFromContentLibrary( + vmCtx, + k8sClient, + restClient, + vimClient, + datacenter, + createArgs) } return cloneVMFromInventory(vmCtx, finder, createArgs) diff --git a/pkg/providers/vsphere/vmlifecycle/create_contentlibrary.go b/pkg/providers/vsphere/vmlifecycle/create_contentlibrary.go index bc92824dc..462c21ac7 100644 --- a/pkg/providers/vsphere/vmlifecycle/create_contentlibrary.go +++ b/pkg/providers/vsphere/vmlifecycle/create_contentlibrary.go @@ -13,13 +13,17 @@ import ( "github.com/vmware/govmomi/vapi/vcenter" "github.com/vmware/govmomi/vim25" vimtypes "github.com/vmware/govmomi/vim25/types" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + pkgcfg "github.com/vmware-tanzu/vm-operator/pkg/config" pkgctx "github.com/vmware-tanzu/vm-operator/pkg/context" "github.com/vmware-tanzu/vm-operator/pkg/providers/vsphere/constants" "github.com/vmware-tanzu/vm-operator/pkg/providers/vsphere/contentlibrary" "github.com/vmware-tanzu/vm-operator/pkg/util" ) +var _ = deployOVF + func deployOVF( vmCtx pkgctx.VirtualMachineContext, restClient *rest.Client, @@ -112,8 +116,10 @@ func deployVMTX( func deployFromContentLibrary( vmCtx pkgctx.VirtualMachineContext, + k8sClient ctrlclient.Client, restClient *rest.Client, vimClient *vim25.Client, + datacenter *object.Datacenter, createArgs *CreateArgs) (*vimtypes.ManagedObjectReference, error) { // This call is needed to get the item type. We could avoid going to CL here, and @@ -126,6 +132,16 @@ func deployFromContentLibrary( switch item.Type { case library.ItemTypeOVF: + if pkgcfg.FromContext(vmCtx).Features.FastDeploy { + return linkedCloneOVF( + vmCtx, + k8sClient, + vimClient, + restClient, + datacenter, + item, + createArgs) + } return deployOVF(vmCtx, restClient, item, createArgs) case library.ItemTypeVMTX: return deployVMTX(vmCtx, restClient, item, createArgs) diff --git a/pkg/providers/vsphere/vmlifecycle/create_contentlibrary_linked_clone.go b/pkg/providers/vsphere/vmlifecycle/create_contentlibrary_linked_clone.go new file mode 100644 index 000000000..10fff0ce1 --- /dev/null +++ b/pkg/providers/vsphere/vmlifecycle/create_contentlibrary_linked_clone.go @@ -0,0 +1,202 @@ +// Copyright (c) 2024 VMware, Inc. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package vmlifecycle + +import ( + "errors" + "fmt" + "path" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vapi/library" + "github.com/vmware/govmomi/vapi/rest" + "github.com/vmware/govmomi/vim25" + vimtypes "github.com/vmware/govmomi/vim25/types" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + pkgctx "github.com/vmware-tanzu/vm-operator/pkg/context" + "github.com/vmware-tanzu/vm-operator/pkg/util/ptr" + vmopv1util "github.com/vmware-tanzu/vm-operator/pkg/util/vmopv1" + clsutil "github.com/vmware-tanzu/vm-operator/pkg/util/vsphere/contentlibrary" +) + +func linkedCloneOVF( + vmCtx pkgctx.VirtualMachineContext, + k8sClient ctrlclient.Client, + vimClient *vim25.Client, + restClient *rest.Client, + datacenter *object.Datacenter, + item *library.Item, + createArgs *CreateArgs) (*vimtypes.ManagedObjectReference, error) { + + logger := vmCtx.Logger.WithName("linkedCloneOVF") + + if len(createArgs.Datastores) == 0 { + return nil, errors.New("no compatible datastores") + } + + // Get the information required to do the linked clone. + imgInfo, err := getImageLinkedCloneInfo( + vmCtx, + k8sClient, + restClient, + item) + if err != nil { + return nil, err + } + + topLevelCacheDir, err := clsutil.GetTopLevelCacheDir( + vmCtx, + vimClient, + datacenter, + createArgs.Datastores[0].MoRef, + createArgs.Datastores[0].Name, + createArgs.Datastores[0].URL, + createArgs.Datastores[0].TopLevelDirectoryCreateSupported) + if err != nil { + return nil, fmt.Errorf("failed to create top-level cache dir: %w", err) + } + logger.Info("Got top-level cache dir", "topLevelCacheDir", topLevelCacheDir) + + dstDir := clsutil.GetCacheDirForLibraryItem( + topLevelCacheDir, + imgInfo.ItemID, + imgInfo.ItemContentVersion) + logger.Info("Got item cache dir", "dstDir", dstDir) + + dstURIs, err := clsutil.CacheStorageURIs( + vmCtx, + vimClient, + datacenter.Reference(), + datacenter.Reference(), + dstDir, + imgInfo.DiskURIs...) + if err != nil { + return nil, fmt.Errorf("failed to cache library item disks: %w", err) + } + logger.Info("Got parent disks", "dstURIs", dstURIs) + + vmDir := path.Dir(createArgs.ConfigSpec.Files.VmPathName) + logger.Info("Got vm dir", "vmDir", vmDir) + + // Update the ConfigSpec with the disk chains. + var disks []*vimtypes.VirtualDisk + for i := range createArgs.ConfigSpec.DeviceChange { + dc := createArgs.ConfigSpec.DeviceChange[i].GetVirtualDeviceConfigSpec() + if d, ok := dc.Device.(*vimtypes.VirtualDisk); ok { + disks = append(disks, d) + + // The profile is no longer needed since we have placement. + dc.Profile = nil + } + } + logger.Info("Got disks", "disks", disks) + + if a, b := len(dstURIs), len(disks); a != b { + return nil, fmt.Errorf( + "invalid disk count: len(uris)=%d, len(disks)=%d", a, b) + } + + for i := range disks { + d := disks[i] + if bfb, ok := d.Backing.(vimtypes.BaseVirtualDeviceFileBackingInfo); ok { + fb := bfb.GetVirtualDeviceFileBackingInfo() + fb.Datastore = &createArgs.Datastores[0].MoRef + fb.FileName = fmt.Sprintf("%s/%s-%d.vmdk", vmDir, vmCtx.VM.Name, i) + } + if fb, ok := d.Backing.(*vimtypes.VirtualDiskFlatVer2BackingInfo); ok { + fb.Parent = &vimtypes.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{ + Datastore: &createArgs.Datastores[0].MoRef, + FileName: dstURIs[i], + }, + DiskMode: string(vimtypes.VirtualDiskModePersistent), + ThinProvisioned: ptr.To(true), + } + } + } + + // The profile is no longer needed since we have placement. 
+ createArgs.ConfigSpec.VmProfile = nil + + vmCtx.Logger.Info( + "Deploying OVF Library Item as linked clone", + "itemID", item.ID, + "itemName", item.Name, + "configSpec", createArgs.ConfigSpec) + + folder := object.NewFolder( + vimClient, + vimtypes.ManagedObjectReference{ + Type: "Folder", + Value: createArgs.FolderMoID, + }) + pool := object.NewResourcePool( + vimClient, + vimtypes.ManagedObjectReference{ + Type: "ResourcePool", + Value: createArgs.ResourcePoolMoID, + }) + + createTask, err := folder.CreateVM( + vmCtx, + createArgs.ConfigSpec, + pool, + nil) + if err != nil { + return nil, fmt.Errorf("failed to call create task: %w", err) + } + + createTaskInfo, err := createTask.WaitForResult(vmCtx) + if err != nil { + return nil, fmt.Errorf("failed to wait for create task: %w", err) + } + + vmRef, ok := createTaskInfo.Result.(vimtypes.ManagedObjectReference) + if !ok { + return nil, fmt.Errorf( + "failed to assert create task result is ref: %[1]T %+[1]v", + createTaskInfo.Result) + } + + return &vmRef, nil +} + +func getImageLinkedCloneInfo( + vmCtx pkgctx.VirtualMachineContext, + k8sClient ctrlclient.Client, + restClient *rest.Client, + item *library.Item) (vmopv1util.ImageLinkedCloneInfo, error) { + + imgInfo, err := vmopv1util.GetImageLinkedCloneInfo( + vmCtx, + k8sClient, + *vmCtx.VM.Spec.Image, + vmCtx.VM.Namespace) + + if err != nil { + if err != vmopv1util.ErrImageNotSynced { + return vmopv1util.ImageLinkedCloneInfo{}, + fmt.Errorf("failed to get image linked clone info: %w", err) + } + if err := clsutil.SyncLibraryItem( + vmCtx, + restClient, + item.ID); err != nil { + + return vmopv1util.ImageLinkedCloneInfo{}, + fmt.Errorf("failed to sync library item: %w", err) + } + if imgInfo, err = vmopv1util.GetImageLinkedCloneInfo( + vmCtx, + k8sClient, + *vmCtx.VM.Spec.Image, + vmCtx.VM.Namespace); err != nil { + + return vmopv1util.ImageLinkedCloneInfo{}, + fmt.Errorf("failed to get image linked clone info after syncing library item: %w", err) + } + } + return imgInfo, nil +} diff --git a/pkg/providers/vsphere/vmprovider_vm.go b/pkg/providers/vsphere/vmprovider_vm.go index 8cfc497e7..ef809162c 100644 --- a/pkg/providers/vsphere/vmprovider_vm.go +++ b/pkg/providers/vsphere/vmprovider_vm.go @@ -5,6 +5,7 @@ package vsphere import ( "context" + "errors" "fmt" "maps" "math/rand" @@ -47,6 +48,7 @@ import ( pkgutil "github.com/vmware-tanzu/vm-operator/pkg/util" "github.com/vmware-tanzu/vm-operator/pkg/util/annotations" kubeutil "github.com/vmware-tanzu/vm-operator/pkg/util/kube" + "github.com/vmware-tanzu/vm-operator/pkg/util/ovfcache" vmopv1util "github.com/vmware-tanzu/vm-operator/pkg/util/vmopv1" "github.com/vmware-tanzu/vm-operator/pkg/vmconfig" ) @@ -153,11 +155,6 @@ func (vs *vSphereVMProvider) createOrUpdateVirtualMachine( // Mark that this is a create operation. ctxop.MarkCreate(vmCtx) - createArgs, err := vs.getCreateArgs(vmCtx, client) - if err != nil { - return err - } - // Do not allow more than N create threads/goroutines. // // - In blocking create mode, this ensures there are reconciler threads @@ -165,16 +162,24 @@ func (vs *vSphereVMProvider) createOrUpdateVirtualMachine( // // - In non-blocking create mode, this ensures the number of goroutines // spawned to create VMs does not take up too much memory. 
- allowed, createDeferFn := vs.vmCreateConcurrentAllowed(vmCtx) + allowed, decrementConcurrentCreatesFn := vs.vmCreateConcurrentAllowed(vmCtx) if !allowed { return providers.ErrTooManyCreates } - if chanErr == nil { + // cleanupFn tracks the function(s) that must be invoked upon leaving this + // function during a blocking create or after an async create. + cleanupFn := decrementConcurrentCreatesFn + if chanErr == nil { vmCtx.Logger.V(4).Info("Doing a blocking create") - defer createDeferFn() + defer cleanupFn() + + createArgs, err := vs.getCreateArgs(vmCtx, client) + if err != nil { + return err + } newVM, err := vs.createVirtualMachine(vmCtx, client, createArgs) if err != nil { @@ -192,21 +197,41 @@ func (vs *vSphereVMProvider) createOrUpdateVirtualMachine( } } + // Update the cleanup function to include closing the error channel. + cleanupFn = func() { + close(chanErr) + decrementConcurrentCreatesFn() + } + if _, ok := currentlyCreating.LoadOrStore( vm.NamespacedName(), struct{}{}); ok { // If the VM is already being created in a goroutine, then there is no // need to create it again. - // - // However, we need to make sure we decrement the number of concurrent - // creates before returning. - createDeferFn() + cleanupFn() return providers.ErrDuplicateCreate } vmCtx.Logger.V(4).Info("Doing a non-blocking create") + // Update the cleanup function to include indicating a concurrent create is + // no longer occurring. + cleanupFn = func() { + currentlyCreating.Delete(vmCtx.VM.NamespacedName()) + close(chanErr) + decrementConcurrentCreatesFn() + } + + // When doing async create, get the createArgs *after* we are guarded + // against duplicate creates. + createArgs, err := vs.getCreateArgs(vmCtx, client) + if err != nil { + // Before we return the error, we need to make sure we cleanup. + cleanupFn() + return err + } + // Create a copy of the context and replace its VM with a copy to // ensure modifications in the goroutine below are not impacted or // impact the operations above us in the call stack. @@ -219,7 +244,7 @@ func (vs *vSphereVMProvider) createOrUpdateVirtualMachine( client, createArgs, chanErr, - createDeferFn) + cleanupFn) // Return with no error. The VM will be re-enqueued once the create // completes with success or failure. 
@@ -435,6 +460,7 @@ func (vs *vSphereVMProvider) vmCreatePathName( if len(vmCtx.VM.Spec.Cdrom) == 0 { return nil // only needed when deploying ISO library items } + if createArgs.StorageProfileID == "" { return nil } @@ -484,6 +510,33 @@ func (vs *vSphereVMProvider) vmCreatePathName( return nil } +func (vs *vSphereVMProvider) vmCreatePathNameFromDatastoreRecommendation( + vmCtx pkgctx.VirtualMachineContext, + createArgs *VMCreateArgs) error { + + if createArgs.ConfigSpec.Files == nil { + createArgs.ConfigSpec.Files = &vimtypes.VirtualMachineFileInfo{} + } + if createArgs.ConfigSpec.Files.VmPathName != "" { + return nil + } + if len(createArgs.Datastores) == 0 { + return errors.New("no compatible datastores") + } + + createArgs.ConfigSpec.Files.VmPathName = fmt.Sprintf( + "[%s] %s/%s.vmx", + createArgs.Datastores[0].Name, + vmCtx.VM.UID, + vmCtx.VM.Name) + + vmCtx.Logger.Info( + "vmCreatePathName", + "VmPathName", createArgs.ConfigSpec.Files.VmPathName) + + return nil +} + func (vs *vSphereVMProvider) getCreateArgs( vmCtx pkgctx.VirtualMachineContext, vcClient *vcclient.Client) (*VMCreateArgs, error) { @@ -501,8 +554,14 @@ func (vs *vSphereVMProvider) getCreateArgs( return nil, err } - if err := vs.vmCreatePathName(vmCtx, vcClient, createArgs); err != nil { - return nil, err + if pkgcfg.FromContext(vmCtx).Features.FastDeploy { + if err := vs.vmCreatePathNameFromDatastoreRecommendation(vmCtx, createArgs); err != nil { + return nil, err + } + } else { + if err := vs.vmCreatePathName(vmCtx, vcClient, createArgs); err != nil { + return nil, err + } } if err := vs.vmCreateFixupConfigSpec(vmCtx, vcClient, createArgs); err != nil { @@ -523,9 +582,11 @@ func (vs *vSphereVMProvider) createVirtualMachine( moRef, err := vmlifecycle.CreateVirtualMachine( ctx, + vs.k8sClient, vcClient.RestClient(), vcClient.VimClient(), vcClient.Finder(), + vcClient.Datacenter(), &args.CreateArgs) if err != nil { @@ -535,12 +596,30 @@ func (vs *vSphereVMProvider) createVirtualMachine( vmopv1.VirtualMachineConditionCreated, "Error", err.Error()) + + if pkgcfg.FromContext(ctx).Features.FastDeploy { + conditions.MarkFalse( + ctx.VM, + vmopv1.VirtualMachineConditionPlacementReady, + "Error", + err.Error()) + } + return nil, err } ctx.VM.Status.UniqueID = moRef.Reference().Value conditions.MarkTrue(ctx.VM, vmopv1.VirtualMachineConditionCreated) + if pkgcfg.FromContext(ctx).Features.FastDeploy { + if zoneName := args.ZoneName; zoneName != "" { + if ctx.VM.Labels == nil { + ctx.VM.Labels = map[string]string{} + } + ctx.VM.Labels[topology.KubernetesTopologyZoneLabelKey] = zoneName + } + } + return object.NewVirtualMachine(vcClient.VimClient(), *moRef), nil } @@ -549,19 +628,17 @@ func (vs *vSphereVMProvider) createVirtualMachineAsync( vcClient *vcclient.Client, args *VMCreateArgs, chanErr chan error, - createDeferFn func()) { + cleanupFn func()) { - defer func() { - close(chanErr) - createDeferFn() - currentlyCreating.Delete(ctx.VM.NamespacedName()) - }() + defer cleanupFn() moRef, vimErr := vmlifecycle.CreateVirtualMachine( ctx, + vs.k8sClient, vcClient.RestClient(), vcClient.VimClient(), vcClient.Finder(), + vcClient.Datacenter(), &args.CreateArgs) if vimErr != nil { @@ -581,7 +658,25 @@ func (vs *vSphereVMProvider) createVirtualMachineAsync( vmopv1.VirtualMachineConditionCreated, "Error", vimErr.Error()) - return nil //nolint:nilerr + + if pkgcfg.FromContext(ctx).Features.FastDeploy { + conditions.MarkFalse( + ctx.VM, + vmopv1.VirtualMachineConditionPlacementReady, + "Error", + vimErr.Error()) + } + + return nil + } + + if 
pkgcfg.FromContext(ctx).Features.FastDeploy { + if zoneName := args.ZoneName; zoneName != "" { + if ctx.VM.Labels == nil { + ctx.VM.Labels = map[string]string{} + } + ctx.VM.Labels[topology.KubernetesTopologyZoneLabelKey] = zoneName + } } ctx.VM.Status.UniqueID = moRef.Reference().Value @@ -755,6 +850,7 @@ func (vs *vSphereVMProvider) vmCreateDoPlacement( vmCtx, vs.k8sClient, vcClient.VimClient(), + vcClient.Finder(), placementConfigSpec, constraints) if err != nil { @@ -769,6 +865,18 @@ func (vs *vSphereVMProvider) vmCreateDoPlacement( createArgs.HostMoID = result.HostMoRef.Value } + if pkgcfg.FromContext(vmCtx).Features.FastDeploy { + createArgs.Datastores = make([]vmlifecycle.DatastoreRef, len(result.Datastores)) + for i := range result.Datastores { + createArgs.Datastores[i].DiskKey = result.Datastores[i].DiskKey + createArgs.Datastores[i].ForDisk = result.Datastores[i].ForDisk + createArgs.Datastores[i].MoRef = result.Datastores[i].MoRef + createArgs.Datastores[i].Name = result.Datastores[i].Name + createArgs.Datastores[i].URL = result.Datastores[i].URL + createArgs.Datastores[i].TopLevelDirectoryCreateSupported = result.Datastores[i].TopLevelDirectoryCreateSupported + } + } + if result.InstanceStoragePlacement { hostMoID := createArgs.HostMoID @@ -789,12 +897,16 @@ func (vs *vSphereVMProvider) vmCreateDoPlacement( } if result.ZonePlacement { - if vmCtx.VM.Labels == nil { - vmCtx.VM.Labels = map[string]string{} + if pkgcfg.FromContext(vmCtx).Features.FastDeploy { + createArgs.ZoneName = result.ZoneName + } else { + if vmCtx.VM.Labels == nil { + vmCtx.VM.Labels = map[string]string{} + } + // Note if the VM create fails for some reason, but this label gets updated on the k8s VM, + // then this is the pre-assigned zone on later create attempts. + vmCtx.VM.Labels[topology.KubernetesTopologyZoneLabelKey] = result.ZoneName } - // Note if the VM create fails for some reason, but this label gets updated on the k8s VM, - // then this is the pre-assigned zone on later create attempts. - vmCtx.VM.Labels[topology.KubernetesTopologyZoneLabelKey] = result.ZoneName } conditions.MarkTrue(vmCtx.VM, vmopv1.VirtualMachineConditionPlacementReady) @@ -1233,6 +1345,17 @@ func (vs *vSphereVMProvider) vmCreateGenConfigSpec( createArgs.ImageStatus, minCPUFreq) + if pkgcfg.FromContext(vmCtx).Features.FastDeploy { + if err := vs.vmCreateGenConfigSpecImage(vmCtx, createArgs); err != nil { + return err + } + createArgs.ConfigSpec.VmProfile = []vimtypes.BaseVirtualMachineProfileSpec{ + &vimtypes.VirtualMachineDefinedProfileSpec{ + ProfileId: createArgs.StorageProfileID, + }, + } + } + // Get the encryption class details for the VM. 
 	if pkgcfg.FromContext(vmCtx).Features.BringYourOwnEncryptionKey {
 		for _, r := range vmconfig.FromContext(vmCtx) {
@@ -1249,24 +1372,65 @@
 		}
 	}
 
-	err := vs.vmCreateGenConfigSpecExtraConfig(vmCtx, createArgs)
-	if err != nil {
+	if err := vs.vmCreateGenConfigSpecExtraConfig(vmCtx, createArgs); err != nil {
 		return err
 	}
 
-	err = vs.vmCreateGenConfigSpecChangeBootDiskSize(vmCtx, createArgs)
-	if err != nil {
+	if err := vs.vmCreateGenConfigSpecChangeBootDiskSize(vmCtx, createArgs); err != nil {
 		return err
 	}
 
-	err = vs.vmCreateGenConfigSpecZipNetworkInterfaces(vmCtx, createArgs)
-	if err != nil {
+	if err := vs.vmCreateGenConfigSpecZipNetworkInterfaces(vmCtx, createArgs); err != nil {
 		return err
 	}
 
 	return nil
 }
 
+func (vs *vSphereVMProvider) vmCreateGenConfigSpecImage(
+	vmCtx pkgctx.VirtualMachineContext,
+	createArgs *VMCreateArgs) error {
+
+	if createArgs.ImageStatus.Type != "OVF" {
+		return nil
+	}
+
+	if createArgs.ImageStatus.ProviderItemID == "" {
+		return errors.New("empty image provider item id")
+	}
+	if createArgs.ImageStatus.ProviderContentVersion == "" {
+		return errors.New("empty image provider content version")
+	}
+
+	ovf, err := ovfcache.GetOVFEnvelope(
+		vmCtx,
+		createArgs.ImageStatus.ProviderItemID,
+		createArgs.ImageStatus.ProviderContentVersion)
+	if err != nil {
+		return fmt.Errorf("failed to get ovf from cache: %w", err)
+	}
+
+	ovfConfigSpec, err := ovf.ToConfigSpec()
+	if err != nil {
+		return fmt.Errorf("failed to transform ovf to config spec: %w", err)
+	}
+
+	if createArgs.ConfigSpec.GuestId == "" {
+		createArgs.ConfigSpec.GuestId = ovfConfigSpec.GuestId
+	}
+
+	// Inherit the image's vAppConfig.
+	createArgs.ConfigSpec.VAppConfig = ovfConfigSpec.VAppConfig
+
+	// Inherit the image's disks and their controllers.
+	pkgutil.CopyStorageControllersAndDisks(
+		&createArgs.ConfigSpec,
+		ovfConfigSpec,
+		createArgs.StorageProfileID)
+
+	return nil
+}
+
 func (vs *vSphereVMProvider) vmCreateGenConfigSpecExtraConfig(
 	vmCtx pkgctx.VirtualMachineContext,
 	createArgs *VMCreateArgs) error {
diff --git a/pkg/util/configspec.go b/pkg/util/configspec.go
index 056a4d80d..0ad112d7b 100644
--- a/pkg/util/configspec.go
+++ b/pkg/util/configspec.go
@@ -7,6 +7,7 @@ import (
 	"bytes"
 	"context"
 	"reflect"
+	"regexp"
 
 	"github.com/vmware/govmomi/vim25"
 	vimtypes "github.com/vmware/govmomi/vim25/types"
@@ -203,3 +204,86 @@ func SafeConfigSpecToString(
 
 	return vimtypes.ToString(in)
 }
+
+var dsNameRX = regexp.MustCompile(`^\[([^\]].+)\].*$`)
+
+// DatastoreNameFromStorageURI returns the datastore name from a storage URI,
+// ex.: [my-datastore-1] vm-name/vm-name.vmx. The previous URI would return the
+// value "my-datastore-1".
+// An empty string is returned if there is no match.
+func DatastoreNameFromStorageURI(s string) string {
+	m := dsNameRX.FindStringSubmatch(s)
+	if len(m) == 0 {
+		return ""
+	}
+	return m[1]
+}
+
+// CopyStorageControllersAndDisks copies the storage controllers and disks from
+// the source spec to the destination. This function does not attempt to handle
+// any conflicts -- it is a blind copy. If the provided storagePolicyID is
+// non-empty, it is assigned to all of the copied disks.
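+// Controllers copied from src that end up with no disks attached to them are
+// removed from dst before returning.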
+func CopyStorageControllersAndDisks(
+	dst *vimtypes.VirtualMachineConfigSpec,
+	src vimtypes.VirtualMachineConfigSpec,
+	storagePolicyID string) {
+
+	ctrlKeys := map[int32]struct{}{}
+	diskCtrlKeys := map[int32]struct{}{}
+
+	for i := range src.DeviceChange {
+		srcSpec := src.DeviceChange[i].GetVirtualDeviceConfigSpec()
+		if srcSpec.Operation == vimtypes.VirtualDeviceConfigSpecOperationAdd {
+
+			var dstSpec *vimtypes.VirtualDeviceConfigSpec
+
+			switch srcDev := srcSpec.Device.(type) {
+			case vimtypes.BaseVirtualSCSIController,
+				vimtypes.BaseVirtualSATAController,
+				*vimtypes.VirtualIDEController,
+				*vimtypes.VirtualNVMEController:
+
+				ctrlKeys[srcDev.GetVirtualDevice().Key] = struct{}{}
+
+				dstSpec = &vimtypes.VirtualDeviceConfigSpec{
+					Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd,
+					Device:    srcDev,
+				}
+
+			case *vimtypes.VirtualDisk:
+
+				diskCtrlKeys[srcDev.ControllerKey] = struct{}{}
+
+				dstSpec = &vimtypes.VirtualDeviceConfigSpec{
+					Operation:     vimtypes.VirtualDeviceConfigSpecOperationAdd,
+					FileOperation: vimtypes.VirtualDeviceConfigSpecFileOperationCreate,
+					Device:        srcDev,
+				}
+				if storagePolicyID != "" {
+					dstSpec.Profile = []vimtypes.BaseVirtualMachineProfileSpec{
+						&vimtypes.VirtualMachineDefinedProfileSpec{
+							ProfileId: storagePolicyID,
+						},
+					}
+				}
+			}
+
+			if dstSpec != nil {
+				dst.DeviceChange = append(dst.DeviceChange, dstSpec)
+			}
+		}
+	}
+
+	// Remove any controllers that came from the OVF but are not used by disks.
+	RemoveDevicesFromConfigSpec(dst, func(bvd vimtypes.BaseVirtualDevice) bool {
+		if bvc, ok := bvd.(vimtypes.BaseVirtualController); ok {
+			vc := bvc.GetVirtualController()
+			if _, ok := ctrlKeys[vc.Key]; ok {
+				if _, ok := diskCtrlKeys[vc.Key]; !ok {
+					return true
+				}
+			}
+		}
+		return false
+	})
+}
diff --git a/pkg/util/configspec_test.go b/pkg/util/configspec_test.go
index c00cecfb0..b68b52ac9 100644
--- a/pkg/util/configspec_test.go
+++ b/pkg/util/configspec_test.go
@@ -903,3 +903,398 @@ var _ = DescribeTable(
 		`{"_typeName":"VirtualMachineConfigSpec","vAppConfig":null}`,
 	),
 )
+
+var _ = DescribeTable(
+	"DatastoreNameFromStorageURI",
+	func(in, expected string) {
+		Expect(pkgutil.DatastoreNameFromStorageURI(in)).To(Equal(expected))
+	},
+	Entry(
+		"empty",
+		"",
+		"",
+	),
+	Entry(
+		"invalid",
+		"invalid",
+		"",
+	),
+	Entry(
+		"just the datastore",
+		"[my-datastore-1]",
+		"my-datastore-1",
+	),
+	Entry(
+		"a full path",
+		"[my-datastore-1] my-vm/my-vm.vmx",
+		"my-datastore-1",
+	),
+)
+
+var _ = DescribeTable(
+	"CopyStorageControllersAndDisks",
+	func(
+		src, dst vimtypes.VirtualMachineConfigSpec,
+		storagePolicy string,
+		expected vimtypes.VirtualMachineConfigSpec) {
+
+		pkgutil.CopyStorageControllersAndDisks(&dst, src, storagePolicy)
+
+		Expect(reflect.DeepEqual(expected, dst)).To(BeTrue(), cmp.Diff(expected, dst))
+	},
+	Entry(
+		"empty",
+		vimtypes.VirtualMachineConfigSpec{},
+		vimtypes.VirtualMachineConfigSpec{},
+		"",
+		vimtypes.VirtualMachineConfigSpec{},
+	),
+	Entry(
+		"src is empty",
+		vimtypes.VirtualMachineConfigSpec{},
+		vimtypes.VirtualMachineConfigSpec{
+			Name: "world",
+			DeviceChange: []vimtypes.BaseVirtualDeviceConfigSpec{
+				&vimtypes.VirtualDeviceConfigSpec{
+					Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd,
+					Device: &vimtypes.ParaVirtualSCSIController{
+						VirtualSCSIController: vimtypes.VirtualSCSIController{
+							VirtualController: vimtypes.VirtualController{
+								VirtualDevice: vimtypes.VirtualDevice{
+									Key: -1,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+		"",
+		vimtypes.VirtualMachineConfigSpec{
+			Name: "world",
+			DeviceChange: 
[]vimtypes.BaseVirtualDeviceConfigSpec{ + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.ParaVirtualSCSIController{ + VirtualSCSIController: vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -1, + }, + }, + }, + }, + }, + }, + }, + ), + Entry( + "src has a disk with no controller", + vimtypes.VirtualMachineConfigSpec{ + Name: "hello", + DeviceChange: []vimtypes.BaseVirtualDeviceConfigSpec{ + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + FileOperation: vimtypes.VirtualDeviceConfigSpecFileOperationCreate, + Device: &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + ControllerKey: -100, + Key: -200, + Backing: &vimtypes.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{}, + DiskMode: string(vimtypes.VirtualDiskModePersistent), + ThinProvisioned: ptr.To(true), + }, + }, + CapacityInBytes: 10 * 1024 * 1024 * 1024, + }, + }, + }, + }, + vimtypes.VirtualMachineConfigSpec{ + Name: "world", + DeviceChange: []vimtypes.BaseVirtualDeviceConfigSpec{ + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.ParaVirtualSCSIController{ + VirtualSCSIController: vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -1, + }, + }, + }, + }, + }, + }, + }, + "fake-storage-policy", + vimtypes.VirtualMachineConfigSpec{ + Name: "world", + DeviceChange: []vimtypes.BaseVirtualDeviceConfigSpec{ + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.ParaVirtualSCSIController{ + VirtualSCSIController: vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -1, + }, + }, + }, + }, + }, + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + FileOperation: vimtypes.VirtualDeviceConfigSpecFileOperationCreate, + Device: &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + ControllerKey: -100, + Key: -200, + Backing: &vimtypes.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{}, + DiskMode: string(vimtypes.VirtualDiskModePersistent), + ThinProvisioned: ptr.To(true), + }, + }, + CapacityInBytes: 10 * 1024 * 1024 * 1024, + }, + Profile: []vimtypes.BaseVirtualMachineProfileSpec{ + &vimtypes.VirtualMachineDefinedProfileSpec{ + ProfileId: "fake-storage-policy", + }, + }, + }, + }, + }, + ), + + Entry( + "src has a disk with controller", + vimtypes.VirtualMachineConfigSpec{ + Name: "hello", + DeviceChange: []vimtypes.BaseVirtualDeviceConfigSpec{ + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.VirtualAHCIController{ + VirtualSATAController: vimtypes.VirtualSATAController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -100, + }, + }, + }, + }, + }, + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + FileOperation: vimtypes.VirtualDeviceConfigSpecFileOperationCreate, + Device: &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + ControllerKey: -100, + Key: -200, + Backing: &vimtypes.VirtualDiskFlatVer2BackingInfo{ + 
VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{}, + DiskMode: string(vimtypes.VirtualDiskModePersistent), + ThinProvisioned: ptr.To(true), + }, + }, + CapacityInBytes: 10 * 1024 * 1024 * 1024, + }, + }, + }, + }, + vimtypes.VirtualMachineConfigSpec{ + Name: "world", + DeviceChange: []vimtypes.BaseVirtualDeviceConfigSpec{ + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.ParaVirtualSCSIController{ + VirtualSCSIController: vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -1, + }, + }, + }, + }, + }, + }, + }, + "fake-storage-policy", + vimtypes.VirtualMachineConfigSpec{ + Name: "world", + DeviceChange: []vimtypes.BaseVirtualDeviceConfigSpec{ + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.ParaVirtualSCSIController{ + VirtualSCSIController: vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -1, + }, + }, + }, + }, + }, + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.VirtualAHCIController{ + VirtualSATAController: vimtypes.VirtualSATAController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -100, + }, + }, + }, + }, + }, + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + FileOperation: vimtypes.VirtualDeviceConfigSpecFileOperationCreate, + Device: &vimtypes.VirtualDisk{ + VirtualDevice: vimtypes.VirtualDevice{ + ControllerKey: -100, + Key: -200, + Backing: &vimtypes.VirtualDiskFlatVer2BackingInfo{ + VirtualDeviceFileBackingInfo: vimtypes.VirtualDeviceFileBackingInfo{}, + DiskMode: string(vimtypes.VirtualDiskModePersistent), + ThinProvisioned: ptr.To(true), + }, + }, + CapacityInBytes: 10 * 1024 * 1024 * 1024, + }, + Profile: []vimtypes.BaseVirtualMachineProfileSpec{ + &vimtypes.VirtualMachineDefinedProfileSpec{ + ProfileId: "fake-storage-policy", + }, + }, + }, + }, + }, + ), + + Entry( + "all supported controllers", + vimtypes.VirtualMachineConfigSpec{ + Name: "hello", + DeviceChange: []vimtypes.BaseVirtualDeviceConfigSpec{ + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.VirtualAHCIController{ + VirtualSATAController: vimtypes.VirtualSATAController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -100, + }, + }, + }, + }, + }, + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.VirtualBusLogicController{ + VirtualSCSIController: vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -101, + }, + }, + }, + }, + }, + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.VirtualLsiLogicController{ + VirtualSCSIController: vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -102, + }, + }, + }, + }, + }, + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.VirtualLsiLogicSASController{ + VirtualSCSIController: vimtypes.VirtualSCSIController{ + VirtualController: 
vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -103, + }, + }, + }, + }, + }, + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.ParaVirtualSCSIController{ + VirtualSCSIController: vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -104, + }, + }, + }, + }, + }, + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.VirtualIDEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -105, + }, + }, + }, + }, + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.VirtualNVMEController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -106, + }, + }, + }, + }, + }, + }, + vimtypes.VirtualMachineConfigSpec{ + Name: "world", + DeviceChange: []vimtypes.BaseVirtualDeviceConfigSpec{ + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.ParaVirtualSCSIController{ + VirtualSCSIController: vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -1, + }, + }, + }, + }, + }, + }, + }, + "fake-storage-policy", + vimtypes.VirtualMachineConfigSpec{ + Name: "world", + DeviceChange: []vimtypes.BaseVirtualDeviceConfigSpec{ + &vimtypes.VirtualDeviceConfigSpec{ + Operation: vimtypes.VirtualDeviceConfigSpecOperationAdd, + Device: &vimtypes.ParaVirtualSCSIController{ + VirtualSCSIController: vimtypes.VirtualSCSIController{ + VirtualController: vimtypes.VirtualController{ + VirtualDevice: vimtypes.VirtualDevice{ + Key: -1, + }, + }, + }, + }, + }, + }, + }, + ), +) diff --git a/pkg/util/vmopv1/image.go b/pkg/util/vmopv1/image.go new file mode 100644 index 000000000..59b9de398 --- /dev/null +++ b/pkg/util/vmopv1/image.go @@ -0,0 +1,206 @@ +// Copyright (c) 2024 VMware, Inc. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package vmopv1 + +import ( + "context" + "errors" + "fmt" + "path" + + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + imgregv1a1 "github.com/vmware-tanzu/image-registry-operator-api/api/v1alpha1" + vmopv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha3" + "github.com/vmware-tanzu/vm-operator/pkg/conditions" +) + +type ImageLinkedCloneInfo struct { + ItemID string + ItemContentVersion string + DiskURIs []string +} + +var ErrImageNotSynced = errors.New("image not synced") + +// GetImageLinkedCloneInfo returns the information about a VirtualMachineImage +// or ClusterVirtualMachineImage required to perform a linked clone operation +// using the image's disk(s). 
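+// If the item's disks have not yet been cached by the content library,
+// ErrImageNotSynced is returned; callers may sync the item and try again.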
+func GetImageLinkedCloneInfo( + ctx context.Context, + k8sClient ctrlclient.Client, + imgRef vmopv1.VirtualMachineImageRef, + namespace string) (ImageLinkedCloneInfo, error) { + + img, err := GetImage(ctx, k8sClient, imgRef, namespace) + if err != nil { + return ImageLinkedCloneInfo{}, err + } + + if err := IsImageOVF(img); err != nil { + return ImageLinkedCloneInfo{}, err + } + if err := IsImageReady(img); err != nil { + return ImageLinkedCloneInfo{}, err + } + if err := IsImageProviderReady(img); err != nil { + return ImageLinkedCloneInfo{}, err + } + + item, err := GetContentLibraryItemForImage(ctx, k8sClient, img) + if err != nil { + return ImageLinkedCloneInfo{}, err + } + if err := IsLibraryItemSynced(item); err != nil { + return ImageLinkedCloneInfo{}, err + } + + diskURIs, err := GetStorageURIsForLibraryItemDisks(item) + if err != nil { + return ImageLinkedCloneInfo{}, err + } + + return ImageLinkedCloneInfo{ + DiskURIs: diskURIs, + ItemID: string(item.Spec.UUID), + ItemContentVersion: item.Status.ContentVersion, + }, nil +} + +// GetImage returns the VirtualMachineImage or ClusterVirtualMachineImage for +// the provided image reference. +func GetImage( + ctx context.Context, + k8sClient ctrlclient.Client, + imgRef vmopv1.VirtualMachineImageRef, + namespace string) (vmopv1.VirtualMachineImage, error) { + + var obj vmopv1.VirtualMachineImage + + switch imgRef.Kind { + case vmiKind: + // Namespace scope image. + if err := k8sClient.Get( + ctx, + ctrlclient.ObjectKey{ + Name: imgRef.Name, + Namespace: namespace, + }, + &obj); err != nil { + + return vmopv1.VirtualMachineImage{}, err + } + case cvmiKind: + // Cluster scope image. + var obj2 vmopv1.ClusterVirtualMachineImage + if err := k8sClient.Get( + ctx, + ctrlclient.ObjectKey{ + Name: imgRef.Name, + }, &obj2); err != nil { + + return vmopv1.VirtualMachineImage{}, err + } + obj = vmopv1.VirtualMachineImage(obj2) + default: + return vmopv1.VirtualMachineImage{}, + fmt.Errorf("unsupported image kind: %q", imgRef.Kind) + } + + return obj, nil +} + +func IsImageReady(img vmopv1.VirtualMachineImage) error { + if !conditions.IsTrue(&img, vmopv1.ReadyConditionType) { + return fmt.Errorf( + "image condition is not ready: %v", + conditions.Get(&img, vmopv1.ReadyConditionType)) + } + if img.Spec.ProviderRef == nil || img.Spec.ProviderRef.Name == "" { + return errors.New("image provider ref is empty") + } + return nil +} + +func IsImageOVF(img vmopv1.VirtualMachineImage) error { + if img.Status.Type != string(imgregv1a1.ContentLibraryItemTypeOvf) { + return fmt.Errorf( + "image type %q is not OVF", img.Status.Type) + } + return nil +} + +func IsImageProviderReady(img vmopv1.VirtualMachineImage) error { + if img.Spec.ProviderRef == nil { + return errors.New("image provider ref is empty") + } + if img.Spec.ProviderRef.Name == "" { + return errors.New("image provider ref name is empty") + } + return nil +} + +func IsLibraryItemSynced(item imgregv1a1.ContentLibraryItem) error { + if !item.Status.Cached || item.Status.SizeInBytes.Size() == 0 { + return ErrImageNotSynced + } + return nil +} + +func GetContentLibraryItemForImage( + ctx context.Context, + k8sClient ctrlclient.Client, + img vmopv1.VirtualMachineImage) (imgregv1a1.ContentLibraryItem, error) { + + var obj imgregv1a1.ContentLibraryItem + + if img.Namespace != "" { + // Namespace scope ContentLibraryItem. 
+ if err := k8sClient.Get( + ctx, + ctrlclient.ObjectKey{ + Name: img.Spec.ProviderRef.Name, + Namespace: img.Namespace, + }, + &obj); err != nil { + + return imgregv1a1.ContentLibraryItem{}, err + } + } else { + // Cluster scope ClusterContentLibraryItem. + var obj2 imgregv1a1.ClusterContentLibraryItem + if err := k8sClient.Get( + ctx, + ctrlclient.ObjectKey{Name: img.Spec.ProviderRef.Name}, + &obj2); err != nil { + + return imgregv1a1.ContentLibraryItem{}, err + } + obj = imgregv1a1.ContentLibraryItem(obj2) + } + + return obj, nil +} + +// GetStorageURIsForLibraryItemDisks returns the paths to the VMDK files from +// the provided library item. +func GetStorageURIsForLibraryItemDisks( + item imgregv1a1.ContentLibraryItem) ([]string, error) { + + var storageURIs []string + for i := range item.Status.FileInfo { + fi := item.Status.FileInfo[i] + if fi.StorageURI != "" { + if path.Ext(fi.StorageURI) == ".vmdk" { + storageURIs = append(storageURIs, fi.StorageURI) + } + } + } + if len(storageURIs) == 0 { + return nil, fmt.Errorf( + "no vmdk files found in the content library item status: %v", + item.Status) + } + return storageURIs, nil +} diff --git a/pkg/util/vsphere/client/client_test.go b/pkg/util/vsphere/client/client_test.go index 35dff12cb..f97228c9a 100644 --- a/pkg/util/vsphere/client/client_test.go +++ b/pkg/util/vsphere/client/client_test.go @@ -72,6 +72,8 @@ var _ = Describe("Client", Label(testlabels.VCSim), Ordered /* Avoided race for User: url.UserPassword(expectedUsername, expectedPassword), } + datacenter := simulator.Map.Any("Datacenter") + // Configure TLS. model.Service.TLS = tlsConfig @@ -92,7 +94,7 @@ var _ = Describe("Client", Label(testlabels.VCSim), Ordered /* Avoided race for Password: expectedPassword, CAFilePath: serverCertFile, Insecure: false, - Datacenter: simulator.Map.Any("Datacenter").Reference().Value, + Datacenter: datacenter.Reference().Value, } }) diff --git a/pkg/util/vsphere/contentlibrary/item_cache.go b/pkg/util/vsphere/contentlibrary/item_cache.go new file mode 100644 index 000000000..16901b83b --- /dev/null +++ b/pkg/util/vsphere/contentlibrary/item_cache.go @@ -0,0 +1,243 @@ +// Copyright (c) 2024 VMware, Inc. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package library + +import ( + "context" + "crypto/sha1" //nolint:gosec // used for creating directory name + "fmt" + "io" + "path" + "strings" + + "github.com/go-logr/logr" + "github.com/vmware/govmomi/fault" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + vimtypes "github.com/vmware/govmomi/vim25/types" +) + +type moRef = vimtypes.ManagedObjectReference + +// CacheStorageURIs copies the disk(s) from srcDiskURIs to dstDir and returns +// the path(s) to the copied disk(s). 
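+// Disks that are already present in the cache are not copied again.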
+func CacheStorageURIs(
+	ctx context.Context,
+	vimClient *vim25.Client,
+	dstDatacenterRef, srcDatacenterRef moRef,
+	dstDir string,
+	srcDiskURIs ...string) ([]string, error) {
+
+	var (
+		dstStorageURIs = make([]string, len(srcDiskURIs))
+		dstDatacenter  = object.NewDatacenter(vimClient, dstDatacenterRef)
+		srcDatacenter  = object.NewDatacenter(vimClient, srcDatacenterRef)
+
+		diskMgr = newVirtualDiskManager(vimClient)
+		fileMgr = object.NewFileManager(vimClient)
+	)
+
+	for i := range srcDiskURIs {
+		dstFilePath, err := copyDisk(
+			ctx,
+			diskMgr,
+			fileMgr,
+			dstDir,
+			srcDiskURIs[i],
+			dstDatacenter,
+			srcDatacenter)
+		if err != nil {
+			return nil, err
+		}
+		dstStorageURIs[i] = dstFilePath
+	}
+
+	return dstStorageURIs, nil
+}
+
+func copyDisk(
+	ctx context.Context,
+	diskMgr *virtualDiskManager,
+	fileMgr *object.FileManager,
+	dstDir, srcFilePath string,
+	dstDatacenter, srcDatacenter *object.Datacenter) (string, error) {
+
+	var (
+		srcFileName = path.Base(srcFilePath)
+		dstFileName = GetCachedFileNameForVMDK(srcFileName) + ".vmdk"
+		dstFilePath = path.Join(dstDir, dstFileName)
+	)
+
+	// Check to see if the disk is already cached.
+	_, queryDiskErr := diskMgr.QueryVirtualDiskUuid(
+		ctx,
+		dstFilePath,
+		dstDatacenter)
+	if queryDiskErr == nil {
+		// Disk exists, return the path to it.
+		return dstFilePath, nil
+	}
+	if !fault.Is(queryDiskErr, &vimtypes.FileNotFound{}) {
+		return "", fmt.Errorf("failed to query disk uuid: %w", queryDiskErr)
+	}
+
+	// Create the destination directory for the cached disk.
+	if err := fileMgr.MakeDirectory(
+		ctx,
+		dstDir,
+		dstDatacenter,
+		true); err != nil {
+
+		return "", fmt.Errorf("failed to create folder %q: %w", dstDir, err)
+	}
+
+	// The cached base disk does not exist yet, create it by copying the
+	// source disk.
+	copyDiskTask, err := diskMgr.CopyVirtualDisk(
+		ctx,
+		srcFilePath,
+		srcDatacenter,
+		dstFilePath,
+		dstDatacenter,
+		&vimtypes.FileBackedVirtualDiskSpec{
+			VirtualDiskSpec: vimtypes.VirtualDiskSpec{
+				AdapterType: string(vimtypes.VirtualDiskAdapterTypeLsiLogic),
+				DiskType:    string(vimtypes.VirtualDiskTypeThin),
+			},
+		},
+		false)
+	if err != nil {
+		return "", fmt.Errorf("failed to call copy disk: %w", err)
+	}
+	if err := copyDiskTask.Wait(ctx); err != nil {
+		return "", fmt.Errorf("failed to wait for copy disk: %w", err)
+	}
+
+	return dstFilePath, nil
+}
+
+const topLevelCacheDirName = ".contentlib-cache"
+
+// GetTopLevelCacheDir returns the top-level cache directory at the root of the
+// datastore.
+// If the datastore uses vSAN, this function also ensures the top-level
+// directory exists.
+func GetTopLevelCacheDir(
+	ctx context.Context,
+	vimClient *vim25.Client,
+	dstDatacenter *object.Datacenter,
+	dstDatastoreRef vimtypes.ManagedObjectReference,
+	dstDatastoreName, dstDatastoreURL string,
+	topLevelDirectoryCreateSupported bool) (string, error) {
+
+	logger := logr.FromContextOrDiscard(ctx).WithName("GetTopLevelCacheDir")
+
+	logger.V(4).Info(
+		"Args",
+		"dstDatastoreRef", dstDatastoreRef,
+		"dstDatastoreName", dstDatastoreName,
+		"dstDatastoreURL", dstDatastoreURL,
+		"topLevelDirectoryCreateSupported", topLevelDirectoryCreateSupported)
+
+	if topLevelDirectoryCreateSupported {
+		return fmt.Sprintf(
+			"[%s] %s", dstDatastoreName, topLevelCacheDirName), nil
+	}
+
+	// TODO(akutz) Figure out a way to test if the directory already exists
+	//             instead of trying to just create it again and using the
+	//             FileAlreadyExists error as signal.
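+	//
+	// Datastores that do not support creating top-level directories directly
+	// (e.g. vSAN) require the DatastoreNamespaceManager, which returns a
+	// UUID-based path for the newly created directory.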
+ + dstDatastorePath, _ := strings.CutPrefix(dstDatastoreURL, "ds://") + topLevelCacheDirPath := path.Join(dstDatastorePath, topLevelCacheDirName) + + logger.V(4).Info( + "vSAN CreateDir", + "dstDatastorePath", dstDatastorePath, + "topLevelCacheDirPath", topLevelCacheDirPath) + + dsNSMgr := object.NewDatastoreNamespaceManager(vimClient) + uuidTopLevelCacheDirPath, err := dsNSMgr.CreateDirectory( + ctx, + object.NewDatastore(vimClient, dstDatastoreRef), + topLevelCacheDirPath, + "") + if err != nil { + if !fault.Is(err, &vimtypes.FileAlreadyExists{}) { + return "", fmt.Errorf("failed to create directory: %w", err) + } + uuidTopLevelCacheDirPath, err = dsNSMgr.ConvertNamespacePathToUuidPath( + ctx, + dstDatacenter, + topLevelCacheDirPath) + if err != nil { + return "", fmt.Errorf( + "failed to convert namespace path=%q: %w", + topLevelCacheDirPath, err) + } + } + + topLevelCacheDirName := path.Base(uuidTopLevelCacheDirPath) + + return fmt.Sprintf("[%s] %s", dstDatastoreName, topLevelCacheDirName), nil +} + +// GetCacheDirForLibraryItem returns the cache directory for a library item +// beneath a top-level cache directory. +func GetCacheDirForLibraryItem( + topLevelCacheDir, itemUUID, contentVersion string) string { + + return path.Join(topLevelCacheDir, itemUUID, contentVersion) +} + +// GetCachedFileNameForVMDK returns the first 17 characters of a SHA-1 sum of +// a VMDK file name and extension, ex. my-disk.vmdk. +func GetCachedFileNameForVMDK(s string) string { + h := sha1.New() //nolint:gosec // used for creating directory name + _, _ = io.WriteString(h, s) + return fmt.Sprintf("%x", h.Sum(nil))[0:17] +} + +type virtualDiskManager struct { + *object.VirtualDiskManager +} + +func newVirtualDiskManager(c *vim25.Client) *virtualDiskManager { + m := virtualDiskManager{ + VirtualDiskManager: object.NewVirtualDiskManager(c), + } + return &m +} + +func (m virtualDiskManager) CopyVirtualDisk( + ctx context.Context, + sourceName string, sourceDatacenter *object.Datacenter, + destName string, destDatacenter *object.Datacenter, + destSpec vimtypes.BaseVirtualDiskSpec, force bool) (*object.Task, error) { + + req := vimtypes.CopyVirtualDisk_Task{ + This: m.Reference(), + SourceName: sourceName, + DestName: destName, + DestSpec: destSpec, + Force: vimtypes.NewBool(force), + } + + if sourceDatacenter != nil { + ref := sourceDatacenter.Reference() + req.SourceDatacenter = &ref + } + + if destDatacenter != nil { + ref := destDatacenter.Reference() + req.DestDatacenter = &ref + } + + res, err := methods.CopyVirtualDisk_Task(ctx, m.Client(), &req) + if err != nil { + return nil, err + } + + return object.NewTask(m.Client(), res.Returnval), nil +} diff --git a/pkg/util/vsphere/contentlibrary/item_sync.go b/pkg/util/vsphere/contentlibrary/item_sync.go new file mode 100644 index 000000000..31cb64692 --- /dev/null +++ b/pkg/util/vsphere/contentlibrary/item_sync.go @@ -0,0 +1,43 @@ +// Copyright (c) 2024 VMware, Inc. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 + +package library + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + "github.com/vmware/govmomi/vapi/library" + "github.com/vmware/govmomi/vapi/rest" +) + +// SyncLibraryItem issues a sync call to the provided library item. 
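+// This is primarily needed for items from subscribed libraries, whose files
+// may not be fully available until they are synced.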
+func SyncLibraryItem(
+	ctx context.Context,
+	client *rest.Client,
+	itemID string) error {
+
+	var (
+		mgr    = library.NewManager(client)
+		logger = logr.FromContextOrDiscard(ctx)
+	)
+
+	// A file from a library item that belongs to a subscribed library may not
+	// be fully available. Sync the file to ensure it is present.
+	logger.Info("Syncing content library item", "libraryItemID", itemID)
+	libItem, err := mgr.GetLibraryItem(ctx, itemID)
+	if err != nil {
+		return fmt.Errorf(
+			"error getting library item %s to sync: %w", itemID, err)
+	}
+	if err := mgr.SyncLibraryItem(ctx, libItem, true); err != nil {
+		return fmt.Errorf(
+			"error syncing library item %s: %w", itemID, err)
+	}
+	return nil
+}