MTV-1804 | Implement VDDK AIO buffer configuration #1280

Open · wants to merge 1 commit into base: main
26 changes: 22 additions & 4 deletions pkg/apis/forklift/v1beta1/provider.go
@@ -17,6 +17,8 @@ limitations under the License.
package v1beta1

import (
"strconv"

libcnd "github.com/konveyor/forklift-controller/pkg/lib/condition"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -59,10 +61,13 @@ const (

// Provider settings.
const (
VDDK = "vddkInitImage"
SDK = "sdkEndpoint"
VCenter = "vcenter"
ESXI = "esxi"
VDDK = "vddkInitImage"
SDK = "sdkEndpoint"
VCenter = "vcenter"
ESXI = "esxi"
UseVddkAioOptimization = "useVddkAioOptimization"
VddkAioBufSize = "vddkAioBufSize"
VddkAioBufCount = "vddkAioBufCount"
)

const OvaProviderFinalizer = "forklift/ova-provider"
@@ -147,3 +152,16 @@ func (p *Provider) HasReconciled() bool {
func (p *Provider) RequiresConversion() bool {
return p.Type() == VSphere || p.Type() == Ova
}

// UseVddkAioOptimization reports whether the provider's useVddkAioOptimization setting is enabled.
func (p *Provider) UseVddkAioOptimization() bool {
useVddkAioOptimization := p.Spec.Settings[UseVddkAioOptimization]
if useVddkAioOptimization == "" {
return false
}
parseBool, err := strconv.ParseBool(useVddkAioOptimization)
if err != nil {
return false
}
return parseBool
}
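
Worth noting: strconv.ParseBool accepts only "1", "t", "T", "TRUE", "true", "True" and their false counterparts, so any other value (including "yes") silently leaves the optimization off. A minimal, self-contained sketch of the same parsing behavior, not part of the diff (the settings map stands in for Provider.Spec.Settings):

package main

import (
	"fmt"
	"strconv"
)

// useVddkAioOptimization mirrors Provider.UseVddkAioOptimization above:
// empty or unparsable values are treated as "off".
func useVddkAioOptimization(settings map[string]string) bool {
	raw := settings["useVddkAioOptimization"]
	if raw == "" {
		return false
	}
	enabled, err := strconv.ParseBool(raw)
	if err != nil {
		return false
	}
	return enabled
}

func main() {
	for _, v := range []string{"", "true", "1", "yes", "false"} {
		settings := map[string]string{"useVddkAioOptimization": v}
		fmt.Printf("%q -> %v\n", v, useVddkAioOptimization(settings))
	}
}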
4 changes: 4 additions & 0 deletions pkg/controller/plan/adapter/base/doc.go
@@ -34,6 +34,10 @@ const (

// DV immediate bind to WaitForFirstConsumer storage class
AnnBindImmediate = "cdi.kubevirt.io/storage.bind.immediate.requested"

// Annotation naming an extra VDDK ConfigMap, which Forklift uses to pass AIO configuration to the VDDK.
// Related to https://github.com/kubevirt/containerized-data-importer/pull/3572
AnnVddkExtraArgs = "cdi.kubevirt.io/storage.pod.vddk.extraargs"
)

var VolumePopulatorNotSupportedError = liberr.New("provider does not support volume populators")
9 changes: 9 additions & 0 deletions pkg/controller/plan/adapter/vsphere/builder.go
@@ -81,6 +81,8 @@ const (
AnnImportBackingFile = "cdi.kubevirt.io/storage.import.backingFile"
)

const VddkConf = "vddk-conf"

// Map of vmware guest ids to osinfo ids.
var osMap = map[string]string{
"centos64Guest": "centos5.11",
@@ -144,6 +146,10 @@ type Builder struct {
macConflictsMap map[string]string
}

func genVddkConfConfigMapName(plan *api.Plan) string {
return fmt.Sprintf("%s-%s", plan.Name, VddkConf)
}

// Get list of destination VMs with mac addresses that would
// conflict with this VM, if any exist.
func (r *Builder) macConflicts(vm *model.VM) (conflictingVMs []string, err error) {
@@ -483,6 +489,9 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
dv.ObjectMeta.Annotations = make(map[string]string)
}
dv.ObjectMeta.Annotations[planbase.AnnDiskSource] = r.baseVolume(disk.File)
if !coldLocal && r.Source.Provider.UseVddkAioOptimization() {
dv.ObjectMeta.Annotations[planbase.AnnVddkExtraArgs] = genVddkConfConfigMapName(r.Plan)
}
dvs = append(dvs, *dv)
}
}
144 changes: 137 additions & 7 deletions pkg/controller/plan/kubevirt.go
@@ -95,6 +95,8 @@ const (
kApp = "forklift.app"
// LUKS
kLUKS = "isLUKS"
// Use label key (marks the purpose of a resource, e.g. the VDDK ConfigMap)
kUse = "use"
)

// User
@@ -111,7 +113,14 @@ const (
OvaPVLabel = "nfs-pv"
)

const ExtraV2vConf = "extra-v2v-conf"
// VDDK v2v configuration.
const (
ExtraV2vConf = "extra-v2v-conf"
VddkConf = "vddk-conf"

// Defaults for the VDDK AIO options; BufSizeIn64K counts 64 KiB units, so "16" means 1 MiB per buffer.
VddkAioBufSizeDefault = "16"
VddkAioBufCountDefault = "4"
)

// Map of VirtualMachines keyed by vmID.
type VirtualMachineMap map[string]VirtualMachine
@@ -242,6 +251,10 @@ func genExtraV2vConfConfigMapName(plan *api.Plan) string {
return fmt.Sprintf("%s-%s", plan.Name, ExtraV2vConf)
}

func genVddkConfConfigMapName(plan *api.Plan) string {
return fmt.Sprintf("%s-%s", plan.Name, VddkConf)
}

// Get the importer pod for a PersistentVolumeClaim.
func (r *KubeVirt) GetImporterPod(pvc core.PersistentVolumeClaim) (pod *core.Pod, found bool, err error) {
pod = &core.Pod{}
@@ -583,6 +596,12 @@ func (r *KubeVirt) DataVolumes(vm *plan.VMStatus) (dataVolumes []cdi.DataVolume,
if err != nil {
return
}
if r.Source.Provider.UseVddkAioOptimization() {
_, err = r.ensureVddkConfigMap()
if err != nil {
return nil, err
}
}

dataVolumes, err = r.dataVolumes(vm, secret, configMap)
if err != nil {
@@ -641,6 +660,83 @@ func (r *KubeVirt) EnsureDataVolumes(vm *plan.VMStatus, dataVolumes []cdi.DataVo
return
}

func (r *KubeVirt) vddkConfigMap(labels map[string]string) (*core.ConfigMap, error) {
data := make(map[string]string)
if r.Source.Provider.UseVddkAioOptimization() {
buffSize := r.Source.Provider.Spec.Settings[api.VddkAioBufSize]
if buffSize == "" {
buffSize = VddkAioBufSizeDefault
}
buffCount := r.Source.Provider.Spec.Settings[api.VddkAioBufCount]
if buffCount == "" {
buffCount = VddkAioBufCountDefault
}
data["vddk-config-file"] = fmt.Sprintf(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we make this the official ConfigMap key name? In the CDI pull request, I mount the ConfigMap as a volume and then look for the first file in that volume, but it seems like it would be less trouble to just look for this fixed name.

"VixDiskLib.nfcAio.Session.BufSizeIn64K=%s\n"+
"VixDiskLib.nfcAio.Session.BufCount=%s", buffSize, buffCount)
}
configMap := core.ConfigMap{
Data: data,
ObjectMeta: metav1.ObjectMeta{
Name: genVddkConfConfigMapName(r.Plan),
Namespace: r.Plan.Spec.TargetNamespace,
Labels: labels,
},
}
return &configMap, nil
}
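
With the defaults above, the ConfigMap's vddk-config-file entry renders as the following two lines (derived directly from the Sprintf; BufSizeIn64K=16 means each buffer is sixteen 64 KiB units, i.e. 1 MiB):

VixDiskLib.nfcAio.Session.BufSizeIn64K=16
VixDiskLib.nfcAio.Session.BufCount=4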

func (r *KubeVirt) ensureVddkConfigMap() (configMap *core.ConfigMap, err error) {
labels := r.vddkLabels()
newConfigMap, err := r.vddkConfigMap(labels)
if err != nil {
return
}

list := &core.ConfigMapList{}
err = r.Destination.Client.List(
context.TODO(),
list,
&client.ListOptions{
LabelSelector: k8slabels.SelectorFromSet(labels),
Namespace: r.Plan.Spec.TargetNamespace,
},
)
if err != nil {
err = liberr.Wrap(err)
return
}
if len(list.Items) > 0 {
configMap = &list.Items[0]
configMap.Data = newConfigMap.Data
err = r.Destination.Client.Update(context.TODO(), configMap)
if err != nil {
err = liberr.Wrap(err)
return
}
r.Log.V(1).Info(
"VDDK extra args configmap updated.",
"configmap",
path.Join(
configMap.Namespace,
configMap.Name))
} else {
configMap = newConfigMap
err = r.Destination.Client.Create(context.TODO(), configMap)
if err != nil {
err = liberr.Wrap(err)
return
}
r.Log.V(1).Info(
"VDDK extra args configmap created.",
"configmap",
path.Join(
configMap.Namespace,
configMap.Name))
}
return
}
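
A note on the ensure pattern above: the ConfigMap is looked up by its labels rather than by name, and its Data is updated in place when it already exists, so repeated reconciliations converge on the current provider settings instead of failing with an already-exists error on create.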

func (r *KubeVirt) EnsurePopulatorVolumes(vm *plan.VMStatus, pvcs []*core.PersistentVolumeClaim) (err error) {
var pendingPvcNames []string
for _, pvc := range pvcs {
@@ -850,12 +946,17 @@ func (r *KubeVirt) EnsureGuestConversionPod(vm *plan.VMStatus, vmCr *VirtualMach
return
}

configMap, err := r.ensureLibvirtConfigMap(vm.Ref, vmCr, pvcs)
libvirtConfigMap, err := r.ensureLibvirtConfigMap(vm.Ref, vmCr, pvcs)
if err != nil {
return
}

newPod, err := r.guestConversionPod(vm, vmCr.Spec.Template.Spec.Volumes, configMap, pvcs, v2vSecret)
vddkConfigMap, err := r.ensureVddkConfigMap()
if err != nil {
return
}

newPod, err := r.guestConversionPod(vm, vmCr.Spec.Template.Spec.Volumes, libvirtConfigMap, vddkConfigMap, pvcs, v2vSecret)
if err != nil {
return
}
@@ -1690,8 +1791,8 @@ func (r *KubeVirt) findTemplate(vm *plan.VMStatus) (tmpl *template.Template, err
return
}

func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, v2vSecret *core.Secret) (pod *core.Pod, err error) {
volumes, volumeMounts, volumeDevices, err := r.podVolumeMounts(vmVolumes, configMap, pvcs, vm)
func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume, libvirtConfigMap *core.ConfigMap, vddkConfigMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, v2vSecret *core.Secret) (pod *core.Pod, err error) {
volumes, volumeMounts, volumeDevices, err := r.podVolumeMounts(vmVolumes, libvirtConfigMap, vddkConfigMap, pvcs, vm)
if err != nil {
return
}
@@ -1892,7 +1993,7 @@ func (r *KubeVirt) guestConversionPod(vm *plan.VMStatus, vmVolumes []cnv.Volume,
return
}

func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, vm *plan.VMStatus) (volumes []core.Volume, mounts []core.VolumeMount, devices []core.VolumeDevice, err error) {
func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, libvirtConfigMap *core.ConfigMap, vddkConfigMap *core.ConfigMap, pvcs []*core.PersistentVolumeClaim, vm *plan.VMStatus) (volumes []core.Volume, mounts []core.VolumeMount, devices []core.VolumeDevice, err error) {
pvcsByName := make(map[string]*core.PersistentVolumeClaim)
for _, pvc := range pvcs {
pvcsByName[pvc.Name] = pvc
@@ -1930,7 +2031,7 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi
VolumeSource: core.VolumeSource{
ConfigMap: &core.ConfigMapVolumeSource{
LocalObjectReference: core.LocalObjectReference{
Name: configMap.Name,
Name: libvirtConfigMap.Name,
},
},
},
@@ -1949,6 +2050,19 @@
},
})
}
useVddkConf := r.Source.Provider.UseVddkAioOptimization()
if useVddkConf {
volumes = append(volumes, core.Volume{
Name: VddkConf,
VolumeSource: core.VolumeSource{
ConfigMap: &core.ConfigMapVolumeSource{
LocalObjectReference: core.LocalObjectReference{
Name: vddkConfigMap.Name,
},
},
},
})
}

switch r.Source.Provider.Type() {
case api.Ova:
Expand Down Expand Up @@ -2006,6 +2120,14 @@ func (r *KubeVirt) podVolumeMounts(vmVolumes []cnv.Volume, configMap *core.Confi
},
)
}
if useVddkConf {
mounts = append(mounts,
core.VolumeMount{
Name: VddkConf,
MountPath: fmt.Sprintf("/mnt/%s", VddkConf),
},
)
}
}

_, exists, err := r.findConfigMapInNamespace(Settings.VirtCustomizeConfigMap, r.Plan.Spec.TargetNamespace)
Expand Down Expand Up @@ -2389,6 +2511,14 @@ func (r *KubeVirt) vmLabels(vmRef ref.Ref) (labels map[string]string) {
return
}

// Labels for the VDDK ConfigMap.
// These distinguish the VDDK ConfigMap from the libvirt ConfigMap, which also uses the plan labels.
func (r *KubeVirt) vddkLabels() (labels map[string]string) {
labels = r.planLabels()
labels[kUse] = VddkConf
return
}

// Labels for a VM on a plan without migration label.
func (r *KubeVirt) vmAllButMigrationLabels(vmRef ref.Ref) (labels map[string]string) {
labels = r.vmLabels(vmRef)
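
One invariant to keep in mind when reading the next two files: the mount path built from VddkConf, joined with the vddk-config-file data key, must resolve to the same path as global.VDDK_CONF_FILE inside the virt-v2v image. A small standalone sketch of that path arithmetic, not part of the diff:

package main

import (
	"fmt"
	"path"
)

func main() {
	const vddkConf = "vddk-conf"   // volume name, from the VddkConf constant
	const key = "vddk-config-file" // ConfigMap data key written by vddkConfigMap
	mountPath := fmt.Sprintf("/mnt/%s", vddkConf)
	// Prints /mnt/vddk-conf/vddk-config-file, which must equal global.VDDK_CONF_FILE.
	fmt.Println(path.Join(mountPath, key))
}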
11 changes: 9 additions & 2 deletions virt-v2v/cmd/entrypoint.go
@@ -3,6 +3,7 @@ package main
import (
_ "embed"
"encoding/json"
"errors"
"fmt"
"io"
"os"
@@ -144,12 +145,18 @@ func virtV2vVsphereArgs() (args []string, err error) {
if err != nil {
return nil, err
}
if info, err := os.Stat(global.VDDK); err == nil && info.IsDir() {
if info, err := os.Stat(global.VDDK_LIB); err == nil && info.IsDir() {
args = append(args,
"-it", "vddk",
"-io", fmt.Sprintf("vddk-libdir=%s", global.VDDK),
"-io", fmt.Sprintf("vddk-libdir=%s", global.VDDK_LIB),
"-io", fmt.Sprintf("vddk-thumbprint=%s", os.Getenv("V2V_fingerprint")),
)
// Use the VDDK config file when it exists, but let V2V_extra_args override vddk-config for testing
if _, err := os.Stat(global.VDDK_CONF_FILE); !errors.Is(err, os.ErrNotExist) && os.Getenv("V2V_extra_args") == "" {
args = append(args,
"-io", fmt.Sprintf("vddk-config=%s", global.VDDK_CONF_FILE),
)
}
}

// When converting VM with name that do not meet DNS1123 RFC requirements,
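
Taken together: when the VDDK library directory is present, the config file exists, and V2V_extra_args is unset, the vSphere invocation gains flags along these lines (the thumbprint value comes from the V2V_fingerprint environment variable and is illustrative here):

-it vddk
-io vddk-libdir=/opt/vmware-vix-disklib-distrib
-io vddk-thumbprint=<V2V_fingerprint>
-io vddk-config=/mnt/vddk-conf/vddk-config-file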
17 changes: 9 additions & 8 deletions virt-v2v/pkg/global/variables.go
@@ -3,14 +3,15 @@ package global
type MountPath string

const (
OVA = "ova"
VSPHERE = "vSphere"
DIR = "/var/tmp/v2v"
INSPECTION = "/var/tmp/v2v/inspection.xml"
FS MountPath = "/mnt/disks/disk[0-9]*"
BLOCK MountPath = "/dev/block[0-9]*"
VDDK = "/opt/vmware-vix-disklib-distrib"
LUKSDIR = "/etc/luks"
OVA = "ova"
VSPHERE = "vSphere"
DIR = "/var/tmp/v2v"
INSPECTION = "/var/tmp/v2v/inspection.xml"
FS MountPath = "/mnt/disks/disk[0-9]*"
BLOCK MountPath = "/dev/block[0-9]*"
VDDK_LIB = "/opt/vmware-vix-disklib-distrib"
LUKSDIR = "/etc/luks"
VDDK_CONF_FILE = "/mnt/vddk-conf/vddk-config-file"

WIN_FIRSTBOOT_PATH = "/Program Files/Guestfs/Firstboot"
WIN_FIRSTBOOT_SCRIPTS_PATH = "/Program Files/Guestfs/Firstboot/scripts"