From bd282d94a1beee6cccd048f0585226dacc232e56 Mon Sep 17 00:00:00 2001 From: Prasad Desala Date: Tue, 27 Feb 2024 11:24:26 +0530 Subject: [PATCH] CNV automation: Classes and helper functions - Virtctl, VirtualMachine, VirtualMachineInstance and VM workloads (#9248) Signed-off-by: Prasad Desala --- ocs_ci/helpers/cnv_helpers.py | 330 ++++++++++++++ ocs_ci/ocs/cnv/virtctl.py | 310 +++++++++++++ ocs_ci/ocs/cnv/virtual_machine.py | 416 ++++++++++++++++++ ocs_ci/ocs/cnv/virtual_machine_instance.py | 195 ++++++++ ocs_ci/ocs/constants.py | 29 +- ocs_ci/ocs/exceptions.py | 4 + .../templates/cnv-deployment/vm-secret.yaml | 9 + .../vm-standalone-pvc/pvc.yaml | 17 + .../vm-standalone-pvc/source.yaml | 8 + .../cnv-vm-workload/vm-standalone-pvc/vm.yaml | 61 +++ .../workloads/cnv/test_vm_lifecycle_and_io.py | 68 +++ ..._vm_single_disk_hot_plugging_unplugging.py | 63 +++ 12 files changed, 1509 insertions(+), 1 deletion(-) create mode 100644 ocs_ci/helpers/cnv_helpers.py create mode 100644 ocs_ci/ocs/cnv/virtctl.py create mode 100644 ocs_ci/ocs/cnv/virtual_machine.py create mode 100644 ocs_ci/ocs/cnv/virtual_machine_instance.py create mode 100644 ocs_ci/templates/cnv-deployment/vm-secret.yaml create mode 100644 ocs_ci/templates/cnv-vm-workload/vm-standalone-pvc/pvc.yaml create mode 100644 ocs_ci/templates/cnv-vm-workload/vm-standalone-pvc/source.yaml create mode 100644 ocs_ci/templates/cnv-vm-workload/vm-standalone-pvc/vm.yaml create mode 100644 tests/functional/workloads/cnv/test_vm_lifecycle_and_io.py create mode 100644 tests/functional/workloads/cnv/test_vm_single_disk_hot_plugging_unplugging.py diff --git a/ocs_ci/helpers/cnv_helpers.py b/ocs_ci/helpers/cnv_helpers.py new file mode 100644 index 00000000000..0fbd0e390d2 --- /dev/null +++ b/ocs_ci/helpers/cnv_helpers.py @@ -0,0 +1,330 @@ +""" +Helper functions specific for CNV +""" +import os +import base64 +import logging + +from ocs_ci.helpers.helpers import create_unique_resource_name, create_resource +from ocs_ci.ocs import constants +from ocs_ci.utility import templating +from ocs_ci.ocs.cnv.virtual_machine import VirtualMachine +from ocs_ci.helpers.helpers import ( + wait_for_resource_state, + create_ocs_object_from_kind_and_name, +) +from ocs_ci.framework import config + +logger = logging.getLogger(__name__) + + +def create_vm_using_standalone_pvc( + namespace=constants.CNV_NAMESPACE, + vm_name=None, + pvc_size=None, + sc_name=None, + ssh=True, + running=True, + wait_for_vm_boot=True, + vm_dict_path=None, +): + """ + Create a Virtual Machine (VM) in the specified namespace using a standalone Persistent Volume Claim (PVC) + + Args: + namespace (str): The namespace in which to create the VM. + vm_name (str): Name for the VM. If not provided, a unique name will be generated. 
+ pvc_size (str): The size of the PVC to create + sc_name (str): Storageclass name to use + ssh (bool): If set to True, it adds a statically manged public SSH key during the VM creation at the first boot + running (bool): Set to True for the VM to start upon it's creation, False otherwise + wait_for_vm_boot (bool): If True and running is True, wait for the VM to finish booting and + ensure SSH connectivity + vm_dict_path (str): Path to the VM YAML file + + Returns: + vm_obj: The VirtualMachine object + + Raises: + CommandFailed: If an error occurs during the creation of the VM + + """ + namespace = ( + namespace if namespace else create_unique_resource_name("test-vm", "namespace") + ) + source_data_obj = create_volume_import_source() + pvc_data_obj = create_pvc_using_data_source( + source_name=source_data_obj.name, + pvc_size=pvc_size, + sc_name=sc_name, + namespace=namespace, + ) + + vm_dict_path = ( + vm_dict_path if vm_dict_path else constants.CNV_VM_STANDALONE_PVC_VM_YAML + ) + vm_data = templating.load_yaml(vm_dict_path) + vm_name = vm_name if vm_name else create_unique_resource_name("test", "vm") + vm_data["metadata"]["name"] = vm_name + vm_data["metadata"]["namespace"] = namespace + if not running: + vm_data["spec"]["running"] = False + vm_data["spec"]["template"]["spec"]["volumes"][0]["persistentVolumeClaim"][ + "claimName" + ] = pvc_data_obj.name + + if ssh: + ssh_secret = create_vm_secret(namespace=namespace) + ssh_secret_dict = [ + { + "sshPublicKey": { + "propagationMethod": {"noCloud": {}}, + "source": {"secret": {"secretName": f"{ssh_secret.name}"}}, + } + } + ] + vm_data["spec"]["template"]["spec"]["accessCredentials"] = ssh_secret_dict + + vm_ocs_obj = create_resource(**vm_data) + logger.info(f"Successfully created VM: {vm_ocs_obj.name}") + + wait_for_resource_state( + resource=pvc_data_obj, state=constants.STATUS_BOUND, timeout=300 + ) + vm_obj = VirtualMachine(vm_name=vm_ocs_obj.name, namespace=namespace) + if running: + vm_obj.wait_for_vm_status(status=constants.VM_RUNNING) + if wait_for_vm_boot: + vm_obj.wait_for_ssh_connectivity(timeout=1200) + + return vm_obj + + +def get_ssh_pub_key_with_filename(path=None): + """ + Retrieve the content of the SSH public key and its file name + + Args: + path (str): Path to the SSH public key file - Optional + + Returns: + tuple: A tuple containing the content of the SSH public key and the file name + + Raises: + FileNotFoundError: If the provided ssh pub key path does not exist + + """ + logger.info( + "Retrieving the content and file name of the SSH public key from the client machine" + ) + ssh_dir = os.path.expanduser("~/.ssh/") + if path: + if os.path.exists(path): + ssh_key_path = path + logger.info(f"The provided ssh pub key path:{path} exists") + else: + raise FileNotFoundError( + f"The provided ssh pub key path:{path} does not exist" + ) + else: + id_rsa_path = os.path.join(ssh_dir, "id_rsa.pub") + config_ssh_key = config.DEPLOYMENT.get("ssh_key") + config_ssh_key_path = os.path.expanduser(config_ssh_key) + if os.path.exists(id_rsa_path): + ssh_key_path = id_rsa_path + logger.info("Default id_rsa.pub exists") + elif config_ssh_key and os.path.exists(config_ssh_key_path): + ssh_key_path = config_ssh_key_path + logger.info(f"Using ssh key from ocs-ci default config: {config_ssh_key}") + else: + raise FileNotFoundError( + "Neither id_rsa.pub nor ssh_key in ocs-ci default config is present" + ) + + with open(ssh_key_path, "r") as ssh_key: + content = ssh_key.read().strip() + key_name = os.path.basename(ssh_key_path) + + return 
content, key_name
+
+
+def convert_ssh_key_to_base64(ssh_key):
+    """
+    Convert SSH key to base64 encoding
+
+    Args:
+        ssh_key (str): SSH key
+
+    Returns:
+        str: Base64 encoded SSH key
+
+    """
+    logger.info("Converting SSH key to base64")
+    base64_key = base64.b64encode(ssh_key.encode()).decode()
+    return base64_key
+
+
+def create_vm_secret(path=None, namespace=constants.CNV_NAMESPACE):
+    """
+    Create an SSH secret for the VM
+
+    Args:
+        path (str): Path to the SSH public key file - optional
+        namespace (str): Namespace in which to create the secret
+
+    Returns:
+        secret_obj: An OCS instance
+
+    """
+    secret_data = templating.load_yaml(constants.CNV_VM_SECRET_YAML)
+    secret_data["metadata"]["name"] = create_unique_resource_name("vm-test", "secret")
+    secret_data["metadata"]["namespace"] = namespace
+    ssh_pub_key, _ = get_ssh_pub_key_with_filename(path=path)
+    base64_key = convert_ssh_key_to_base64(ssh_key=ssh_pub_key)
+    secret_data["data"]["key"] = base64_key
+    secret_obj = create_resource(**secret_data)
+    logger.info(f"Successfully created an SSH secret for the VM - {secret_obj.name}")
+
+    return secret_obj
+
+
+def create_volume_import_source(name=None, url=None):
+    """
+    Create a VolumeImportSource object
+
+    Args:
+        name (str): Name for the VolumeImportSource. If not provided, a unique name will be generated
+        url (str): URL for the registry source
+
+    Returns:
+        source_data_obj: The created VolumeImportSource object
+
+    """
+    source_data = templating.load_yaml(constants.CNV_VM_STANDALONE_PVC_SOURCE_YAML)
+    name = name if name else create_unique_resource_name("source", "volumeimportsource")
+    source_data["metadata"]["name"] = name
+    if url:
+        source_data["spec"]["source"]["registry"]["url"] = url
+    source_data_obj = create_resource(**source_data)
+    logger.info(f"Successfully created VolumeImportSource - {source_data_obj.name}")
+
+    return source_data_obj
+
+
+def create_pvc_using_data_source(
+    source_name, pvc_size=None, sc_name=None, namespace=constants.CNV_NAMESPACE
+):
+    """
+    Create a PVC using a specified data source
+
+    Args:
+        source_name (str): Name of the data source (VolumeImportSource) for the PVC
+        pvc_size (str): Size of the PVC
+        sc_name (str): StorageClass name for the PVC
+        namespace (str): The namespace in which to create the PVC
+
+    Returns:
+        pvc_data_obj: PVC object
+
+    """
+    pvc_data = templating.load_yaml(constants.CNV_VM_STANDALONE_PVC_PVC_YAML)
+    pvc_name = create_unique_resource_name("test", "pvc")
+    pvc_data["metadata"]["name"] = pvc_name
+    pvc_data["metadata"]["namespace"] = namespace
+    pvc_data["spec"]["dataSourceRef"]["name"] = source_name
+    if pvc_size:
+        pvc_data["spec"]["resources"]["requests"]["storage"] = pvc_size
+    if sc_name:
+        pvc_data["spec"]["storageClassName"] = sc_name
+    pvc_data_obj = create_resource(**pvc_data)
+    logger.info(f"Successfully created PVC - {pvc_data_obj.name} using data source")
+
+    return pvc_data_obj
+
+
+def get_pvc_from_vm(vm_obj):
+    """
+    Get the PVC name from VM obj
+
+    Args:
+        vm_obj (VirtualMachine): The VM object
+
+    Returns:
+        ocs_ci.ocs.resources.ocs.OCS (obj): PVC in the form of ocs object
+
+    """
+    vm_data = vm_obj.get()
+    pvc_name = (
+        vm_data.get("spec")
+        .get("template")
+        .get("spec")
+        .get("volumes")[0]
+        .get("persistentVolumeClaim")
+        .get("claimName")
+    )
+    return create_ocs_object_from_kind_and_name(
+        kind=constants.PVC, resource_name=pvc_name, namespace=vm_obj.namespace
+    )
+
+
+def get_secret_from_vm(vm_obj):
+    """
+    Get the secret name from VM obj
+
+    Args:
+        vm_obj (VirtualMachine): The VM object
+
+    Returns:
+        ocs_ci.ocs.resources.ocs.OCS (obj): Secret in the form of ocs object
+
+    """
+    vm_data = vm_obj.get()
+    secret_name = (
+        vm_data.get("spec")
.get("template") + .get("spec") + .get("accessCredentials")[0] + .get("sshPublicKey") + .get("source") + .get("secret") + .get("secretName") + ) + return create_ocs_object_from_kind_and_name( + kind=constants.SECRET, resource_name=secret_name, namespace=vm_obj.namespace + ) + + +def get_volumeimportsource(pvc_obj): + """ + Get the volumeimportsource name from PVC obj + + Returns: + ocs_ci.ocs.resources.ocs.OCS (obj): volumeimportsource in the form of ocs object + + """ + pvc_data = pvc_obj.get() + volumeimportsource_name = pvc_data.get("spec").get("dataSource").get("name") + return create_ocs_object_from_kind_and_name( + kind=constants.VOLUME_IMPORT_SOURCE, + resource_name=volumeimportsource_name, + namespace=pvc_obj.namespace, + ) + + +def get_ssh_private_key_path(): + """ + Get the full path of the derived private key file from the associated SSH public key file + + Returns: + str: The full path of the derived private key file + + """ + ssh_dir = os.path.expanduser("~/.ssh/") + _, ssh_pub_key_name = get_ssh_pub_key_with_filename() + + # Derive private key path by replacing the extension (if present) + private_key_name, _ = os.path.splitext(ssh_pub_key_name) + private_key_path = os.path.join(ssh_dir, private_key_name) + + # Handling both with and without .pem file extension case + pem_private_key_path = private_key_path + ".pem" + if os.path.exists(pem_private_key_path): + private_key_path = pem_private_key_path + logger.info( + f"The private key used for authenticating to the server: {private_key_path}" + ) + + return private_key_path diff --git a/ocs_ci/ocs/cnv/virtctl.py b/ocs_ci/ocs/cnv/virtctl.py new file mode 100644 index 00000000000..6ecb7f6b683 --- /dev/null +++ b/ocs_ci/ocs/cnv/virtctl.py @@ -0,0 +1,310 @@ +""" +Virtctl class +""" +import json + +from ocs_ci.utility.utils import run_cmd + + +class Virtctl(object): + """ + Virtctl class for interacting with the virtctl command-line tool. + It provides various methods for managing KubeVirt VMs. + """ + + def __init__(self, namespace=None): + """ + Initialize the Virtctl object. + + Args: + namespace (str): The namespace for KubeVirt VMs. + + """ + self.namespace = namespace + self.base_command = f"virtctl --namespace {self.namespace}" + + def add_volume(self, vm_name, volume_name, persist=False, serial=None): + """ + Add a volume to a VM. + + Args: + vm_name (str): Name of the VM. + volume_name (str): Name of the volume to add. + persist (bool): True to persist the volume. + serial (str): Serial number for the volume. + + Returns: + str: stdout of command + + """ + base_command = ( + f"{self.base_command} addvolume {vm_name} --volume-name={volume_name}" + ) + optional_flags = [] + if persist: + optional_flags.append("--persist") + if serial: + optional_flags.append(f"--serial={serial}") + + command = f"{base_command} {' '.join(optional_flags)}" + + return run_cmd(command) + + def remove_volume(self, vm_name, volume_name): + """ + Remove a volume from a VM. + + Args: + vm_name (str): Name of the VM. + volume_name (str): Name of the volume to remove. + + Returns: + str: stdout of command + + """ + command = ( + f"{self.base_command} removevolume {vm_name} --volume-name={volume_name}" + ) + return run_cmd(command) + + def guest_os_info(self, vm_name, dict_out=True): + """ + Get information about the guest OS running on a VM. + + Args: + vm_name (str): Name of the VM. 
+ + Returns: + dict: output of the executed command in dict format else an ouput in json format + + """ + command = f"{self.base_command} guestosinfo {vm_name}" + json_out = run_cmd(command) + return json.loads(json_out) if dict_out else json_out + + def image_upload(self, dv_name, size, image_path, insecure=False): + """ + Upload an image to a DataVolume. + + Args: + dv_name (str): Name of the DataVolume. + size (int): Size of the image. + image_path (str): Path to the image file. + insecure (bool): True to upload the image insecurely. + + Returns: + str: stdout of command + + """ + base_command = f"{self.base_command} image-upload dv {dv_name} --size={size} --image-path={image_path}" + optional_params = ["--insecure"] if insecure else [] + command = f"{base_command} {' '.join(optional_params)}" + return run_cmd(command) + + def _pause(self, entity_type, entity_name): + """ + Pause a specified entity. + + Args: + entity_type (str): Type of the entity ('vm' or 'vmi'). + entity_name (str): Name of the entity. + + Returns: + str: stdout of command + + """ + command = f"{self.base_command} pause {entity_type} {entity_name}" + return run_cmd(command) + + def pause_vm(self, vm_name): + """ + Pause a VM. + + Args: + vm_name (str): Name of the VM. + + Returns: + str: stdout of command + + """ + return self._pause("vm", vm_name) + + def pause_vmi(self, vm_name): + """ + Pause a VirtualMachineInstance (VMI). + + Args: + vm_name (str): Name of the VMI. + + Returns: + str: stdout of command + + """ + return self._pause("vmi", vm_name) + + def restart_vm(self, vm_name): + """ + Restart a VM. + + Args: + vm_name (str): Name of the VM. + + Returns: + str: stdout of command + + """ + command = f"{self.base_command} restart {vm_name}" + return run_cmd(command) + + def scp( + self, + local_path, + vm_username, + vm_name, + identity_file=None, + vm_dest_path=None, + to_vm=True, + recursive=False, + ): + """ + Copy files between local and VM using SCP. + + Args: + local_path (str): Local path of the file or directory. + vm_username (str): Username to connect to the VM. + vm_name (str): Name of the VM. + identity_file (str): Path to the SSH private key. + vm_dest_path (str): Destination path on the VM. + to_vm (bool): True to copy to VM, False to copy from VM. + recursive (bool): True for recursive copying. + + Returns: + str: stdout of command + + """ + base_command = f"{self.base_command} scp" + + extra_params = ["--recursive"] if recursive else [] + extra_params.append("--local-ssh-opts='-o StrictHostKeyChecking=no'") + + if identity_file: + extra_params.append(f" --identity-file={identity_file}") + + vm_dest_path = vm_dest_path if vm_dest_path else "." + if to_vm: + mandatory_params = [ + f"{local_path}", + f"{vm_username}@{vm_name}:{vm_dest_path}", + ] + else: + mandatory_params = [ + f"{vm_username}@{vm_name}:{vm_dest_path} " f"{local_path}", + ] + + command = f"{base_command} {' '.join(extra_params + mandatory_params)}" + + return run_cmd(command) + + def run_ssh_command(self, vm, username, command, use_sudo=True, identity_file=None): + """ + SSH into a VM and execute a command + + Args: + vm (str): Name of the VM. + username (str): SSH username. + command (str): Command to run on the VM. + use_sudo (bool): True to run the command with sudo. + identity_file (str): Path to the SSH private key. 
+ + Returns: + str: stdout of command + + """ + base_command = f"{self.base_command} ssh {vm} " + base_command += "--local-ssh-opts='-o StrictHostKeyChecking=no'" + + if use_sudo: + command = f"sudo {command}" + + mandatory_flags = [ + f"--username={username}", + "--port=22", # Default port for VM + f'-c "{command}"', + ] + + if identity_file: + mandatory_flags.insert(1, f"--identity-file={identity_file}") + + full_command = f"{base_command} {' '.join(mandatory_flags)}" + out = run_cmd(full_command) + + return out + + def start_vm(self, vm_name): + """ + Start a VM. + + Args: + vm_name (str): Name of the VM. + + Returns: + str: stdout of command + + """ + command = f"{self.base_command} start {vm_name}" + return run_cmd(command) + + def stop_vm(self, vm_name, force=False): + """ + Stop a VM. + + Args: + vm_name (str): Name of the VM. + force (bool): True to forcefully stop the VM. + + Returns: + str: stdout of command + + """ + force_flag = "--force" if force else "" + command = f"{self.base_command} stop {vm_name} {force_flag}" + return run_cmd(command) + + def unpause_vm(self, vm_name): + """ + Unpause a VM. + + Args: + vm_name (str): Name of the VM. + + Returns: + str: stdout of command + + """ + command = f"{self.base_command} unpause vm {vm_name}" + return run_cmd(command) + + def unpause_vmi(self, vm_name): + """ + Unpause a VirtualMachineInstance (VMI). + + Args: + vm_name (str): Name of the VMI. + + Returns: + str: stdout of command + + """ + command = f"{self.base_command} unpause vmi {vm_name}" + return run_cmd(command) + + def version(self): + """ + Get the version information. + + Returns: + str: stdout of command + + """ + command = f"{self.base_command} version" + return run_cmd(command) diff --git a/ocs_ci/ocs/cnv/virtual_machine.py b/ocs_ci/ocs/cnv/virtual_machine.py new file mode 100644 index 00000000000..21cfd4e5943 --- /dev/null +++ b/ocs_ci/ocs/cnv/virtual_machine.py @@ -0,0 +1,416 @@ +""" +Virtual machine class +""" +import yaml +import logging + +from ocs_ci.ocs.ocp import OCP +from ocs_ci.ocs.cnv.virtctl import Virtctl +from ocs_ci.ocs.cnv.virtual_machine_instance import VirtualMachineInstance +from ocs_ci.ocs import constants +from ocs_ci.utility.utils import TimeoutSampler +from ocs_ci.ocs.exceptions import UsernameNotFoundException +from ocs_ci.helpers import cnv_helpers + + +logger = logging.getLogger(__name__) + + +class VirtualMachine(Virtctl): + """ + Virtual Machine class which provides VM information and handles various VM related operations + like start / stop / status / restart/ etc + """ + + def __init__( + self, + vm_name, + namespace=None, + ): + """ + Initialize the VirtualMachine object. + + Args: + vm_name (str): Name of the VirtualMachine. + namespace (str): Namespace for the VirtualMachine. + + """ + super().__init__(namespace=namespace) + self._vm_name = vm_name + self.vm_ocp_obj = OCP( + kind=constants.VIRTUAL_MACHINE, + namespace=namespace, + ) + self.vmi_ocp_obj = OCP( + kind=constants.VIRTUAL_MACHINE_INSTANCE, + namespace=namespace, + ) + self.vmi_obj = VirtualMachineInstance( + vmi_name=self._vm_name, namespace=namespace + ) + + @property + def name(self): + return self._vm_name + + def get(self, out_yaml_format=True): + """ + Get information about the VirtualMachine. + + Args: + out_yaml_format (bool): True to get the output in YAML format. + + Returns: + dict: Information about the VirtualMachine. 
+ + """ + return self.vm_ocp_obj.get( + resource_name=self._vm_name, out_yaml_format=out_yaml_format + ) + + def get_os_username(self): + """ + Retrieve the operating system username from the cloud-init data associated with the virtual machine + + Returns: + str: The operating system username + + Raises: + UsernameNotFoundException: If the 'user' key is not present in the VM userData + + """ + vm_get_out = self.get() + volumes = ( + vm_get_out.get("spec", {}) + .get("template", {}) + .get("spec", {}) + .get("volumes", []) + ) + for volume in volumes: + cloud_init_data = volume.get("cloudInitNoCloud") or volume.get( + "cloudInitConfigDrive" + ) + if cloud_init_data: + user_data = cloud_init_data.get("userData", {}) + user_data_dict = yaml.safe_load(user_data) + username = user_data_dict.get("user") + if username is not None: + return username + else: + raise UsernameNotFoundException( + f"Username not found in the {self.name} user data" + ) + + def wait_for_vm_status(self, status=constants.VM_RUNNING, timeout=600): + """ + Wait for the VirtualMachine to reach the specified status. + + Args: + status (str): The desired status to wait for - Running/Stopped/Paused. default is "Running"). + timeout (int): Timeout value in seconds. + + Raises: + TimeoutExpiredError: If the timeout is reached. + + """ + self.vm_ocp_obj.wait_for_resource( + resource_name=self._vm_name, condition=status, timeout=timeout + ) + + def start(self, timeout=600, wait=True): + """ + Start the VirtualMachine. + + Args: + timeout (int): Timeout value in seconds. + wait (bool): True to wait for the VirtualMachine to reach the "Running" status. + + """ + if ( + self.printableStatus() == constants.CNV_VM_STOPPED + and self.check_if_vmi_does_not_exist() + ): + logger.info( + f"{self._vm_name} is in stopped state and vmi does not exists, starting {self._vm_name}" + ) + elif not self.check_if_vmi_does_not_exist(): + logger.info( + f"VMI for this {self._vm_name} is still running, waiting for the vmi to " + f"delete before starting the {self._vm_name}" + ) + self.vmi_obj.wait_for_vmi_delete() + self.start_vm(self._vm_name) + logger.info(f"Successfully started VM: {self._vm_name}") + + if wait: + self.wait_for_vm_status(status=constants.VM_RUNNING, timeout=timeout) + logger.info(f"VM:{self._vm_name} reached Running state") + + def check_if_vmi_does_not_exist(self): + """ + Check if the VirtualMachineInstance (VMI) does not exist. + + Returns: + bool: True if the VMI does not exist. + + """ + status_conditions_out = self.get().get("status").get("conditions")[0] + return status_conditions_out["reason"] == "VMINotExists" + + def wait_for_ssh_connectivity(self, username=None, timeout=600): + """ + Wait for the SSH connectivity to establish to the virtual machine + + Args: + vm_obj (vm object): The virtual machine object. + username (str): The username to use for SSH. If None, it will use the OS username from vm_obj if exists + timeout (int): The maximum time to wait for SSH connectivity in seconds + + """ + username = username if username else self.get_os_username() + logger.info(f"Waiting for the SSH connectivity to establish to {self.name} ") + for sample in TimeoutSampler( + timeout=timeout, + sleep=30, + func=self.run_ssh_cmd, + username=username, + command="exit", + use_sudo=False, + ): + if sample == "": + logger.info(f"{self.name} is ready for SSH connection") + return + + def stop(self, force=False, wait=True): + """ + Stop the VirtualMachine. + + Args: + force (bool): True to forcefully stop the VirtualMachine. 
+ wait (bool): True to wait for the VirtualMachine to reach the "Stopped" status. + + """ + self.stop_vm(self._vm_name, force=force) + logger.info(f"Successfully stopped VM: {self._vm_name}") + if wait: + self.vmi_obj.wait_for_virt_launcher_pod_delete() + self.vmi_obj.wait_for_vmi_delete() + self.wait_for_vm_status(status=constants.CNV_VM_STOPPED) + logger.info(f"VM: {self._vm_name} reached Stopped state") + + def restart(self, wait=True): + """ + Restart the VirtualMachine. + + Args: + wait (bool): True to wait for the VirtualMachine to reach the "Running" status. + + """ + self.restart_vm(self._vm_name) + logger.info(f"Successfully restarted VM: {self._vm_name}") + if wait: + self.vmi_obj.wait_for_virt_launcher_pod_delete() + self.vmi_obj.wait_for_vmi_to_be_running() + logger.info( + f"VM: {self._vm_name} reached Running state state after restart operation" + ) + + def addvolme(self, volume_name, persist=True, serial=None): + """ + Add a volume to a VM + + Args: + volume_name (str): Name of the volume/PVC to add. + persist (bool): True to persist the volume. + serial (str): Serial number for the volume. + + Returns: + str: stdout of command + + """ + logger.info(f"Adding {volume_name} to {self._vm_name}") + self.add_volume( + vm_name=self._vm_name, + volume_name=volume_name, + persist=persist, + serial=serial, + ) + logger.info(f"Successfully HotPlugged disk {volume_name} to {self._vm_name}") + + def removevolume(self, volume_name): + """ + Remove a volume from a VM + + Args: + volume_name (str): Name of the volume to remove. + + Returns: + str: stdout of command + + """ + logger.info(f"Removing {volume_name} from {self._vm_name}") + self.remove_volume(vm_name=self._vm_name, volume_name=volume_name) + logger.info( + f"Successfully HotUnplugged disk {volume_name} from {self._vm_name}" + ) + + def scp_to_vm( + self, + local_path, + vm_username=None, + identity_file=None, + vm_dest_path=None, + recursive=False, + ): + """ + Copy files/directories from the local machine to the VirtualMachine using SCP. + + Args: + local_path (str): Path to the local file/directory. + vm_username (str): Username for SSH connection to the VirtualMachine. + identity_file (str): Path to the SSH private key file. + vm_dest_path (str): Destination path on the VirtualMachine. + recursive (bool): True to copy directories recursively. + + Returns: + str: stdout of command + + """ + vm_username = vm_username if vm_username else self.get_os_username() + vm_dest_path = vm_dest_path if vm_dest_path else "." + identity_file = ( + identity_file if identity_file else cnv_helpers.get_ssh_private_key_path() + ) + logger.info( + f"Starting scp from local machine path: {local_path} to VM path: {vm_dest_path}" + ) + return self.scp( + local_path, + vm_username, + self._vm_name, + identity_file=identity_file, + vm_dest_path=vm_dest_path, + to_vm=True, + recursive=recursive, + ) + + def scp_from_vm( + self, + local_path, + vm_src_path, + vm_username=None, + identity_file=None, + recursive=False, + ): + """ + Copy files/directories from the VirtualMachine to the local machine using SCP. + + Args: + local_path (str): Path to the local destination. + vm_username (str): Username for SSH connection to the VirtualMachine. + identity_file (str): Path to the SSH private key file. + vm_src_path (str): Source path on the VirtualMachine. + recursive (bool): True to copy directories recursively. 
+ + Returns: + str: stdout of command + + """ + vm_username = vm_username if vm_username else self.get_os_username() + identity_file = ( + identity_file if identity_file else cnv_helpers.get_ssh_private_key_path() + ) + logger.info( + f"Starting scp from VM path: {vm_src_path} to local machine path: {local_path}" + ) + return self.scp( + local_path, + vm_username, + self._vm_name, + identity_file=identity_file, + vm_dest_path=vm_src_path, + to_vm=False, + recursive=recursive, + ) + + def run_ssh_cmd(self, command, username=None, use_sudo=True, identity_file=None): + """ + Connect to the VirtualMachine using SSH and execute a command. + + Args: + username (str): SSH username for the VirtualMachine. + command (str): Command to execute + identity_file (str): Path to the SSH private key file. + use_sudo (bool): True to run the command with sudo. + + Returns: + str: stdout of command + + """ + logger.info(f"Executing {command} command on the {self._vm_name} VM using SSH") + username = username if username else self.get_os_username() + identity_file = ( + identity_file if identity_file else cnv_helpers.get_ssh_private_key_path() + ) + return self.run_ssh_command( + self._vm_name, + username, + command, + use_sudo=use_sudo, + identity_file=identity_file, + ) + + def pause(self, wait=True): + """ + Pause the VirtualMachine. + + Args: + wait (bool): True to wait for the VirtualMachine to reach the "Paused" status. + + """ + self._pause("vm", self._vm_name) + logger.info(f"Successfully Paused VM: {self._vm_name}") + if wait: + self.wait_for_vm_status(status=constants.VM_PAUSED) + logger.info(f"VM: {self._vm_name} reached Paused state") + + def unpause(self, wait=True): + """ + Unpause the VirtualMachine. + + Args: + wait (bool): True to wait for the VirtualMachine to reach the "Running" status. + + """ + self.unpause_vm(self._vm_name) + logger.info(f"Successfully UnPaused VM: {self._vm_name}") + if wait: + self.wait_for_vm_status(status=constants.VM_RUNNING) + logger.info(f"VM: {self._vm_name} reached Running state") + + def ready(self): + """ + Get the readiness status of the VirtualMachine. + + Returns: + bool: True if the VirtualMachine is ready. + + """ + return self.get().get("status", {}).get("ready") + + def printableStatus(self): + """ + Get the printable status of the VirtualMachine. + + Returns: + str: Printable status of the VirtualMachine. + + """ + return self.get().get("status").get("printableStatus") + + def delete(self): + """ + Delete the VirtualMachine + """ + self.vm_ocp_obj.delete(resource_name=self._vm_name) + self.vm_ocp_obj.wait_for_delete(resource_name=self._vm_name, timeout=180) diff --git a/ocs_ci/ocs/cnv/virtual_machine_instance.py b/ocs_ci/ocs/cnv/virtual_machine_instance.py new file mode 100644 index 00000000000..a9a0e2caa57 --- /dev/null +++ b/ocs_ci/ocs/cnv/virtual_machine_instance.py @@ -0,0 +1,195 @@ +""" +Virtual machine instance class +""" +import logging + +from ocs_ci.ocs.ocp import OCP +from ocs_ci.ocs.cnv.virtctl import Virtctl +from ocs_ci.ocs.resources.pod import Pod +from ocs_ci.ocs.resources.pod import get_pods_having_label +from ocs_ci.utility.utils import TimeoutSampler +from ocs_ci.ocs import constants + + +logger = logging.getLogger(__name__) + + +class VirtualMachineInstance: + """ + Virtual Machine Instance class for managing VMIs. + """ + + def __init__( + self, + vmi_name, + namespace=None, + ): + """ + Initialize the VirtualMachineInstance object. + + Args: + vmi_name (str): Name of the VirtualMachineInstance. 
+ namespace (str): Namespace for the VirtualMachineInstance. + + """ + self._namespace = namespace + self._vmi_name = vmi_name + self.ocp = OCP( + kind=constants.VIRTUAL_MACHINE_INSTANCE, + namespace=namespace, + ) + self._virtctl = Virtctl(namespace=self._namespace) + + def get(self, out_yaml_format=True): + """ + Get information about the VirtualMachineInstance. + + Args: + out_yaml_format (bool): True to get the output in YAML format. + + Returns: + dict: Information about the VirtualMachineInstance. + + """ + return self.ocp.get( + resource_name=self._vmi_name, out_yaml_format=out_yaml_format + ) + + def is_vmi_running(self): + """ + Check if the VirtualMachineInstance is in the 'Running' phase. + + Returns: + bool: True if the VirtualMachineInstance is in 'Running' phase, False otherwise. + + """ + return self.get().get("status").get("phase") == "Running" + + def node(self): + """ + Get the node information for the VirtualMachineInstance. + + Returns: + str: Node name. + + """ + return self.get().get("status").get("nodeName") + + def pause(self, wait=True): + """ + Pause the VirtualMachineInstance. + + Args: + wait (bool): True to wait for the VirtualMachineInstance to reach the 'Paused' status. + + """ + self._virtctl.pause_vmi(vm_name=self._vmi_name) + logger.info(f"Successfully Paused VMI: {self._vmi_name}") + if wait: + self.wait_for_vmi_condition_pause_status(pause=True) + logger.info(f"VMI: {self._vmi_name} reached Paused state") + + def unpause(self, wait=True): + """ + Unpause the VirtualMachineInstance. + + Args: + wait (bool): True to wait for the VirtualMachineInstance to reach the 'Running' status. + + """ + self._virtctl.unpause_vmi(vm_name=self._vmi_name) + logger.info(f"Successfully UnPaused VMI: {self._vmi_name}") + if wait: + self.wait_for_vmi_condition_pause_status(pause=False) + logger.info(f"VMI: {self._vmi_name} reached Running state") + + def virt_launcher_pod(self): + """ + Get the name of the Virt Launcher Pod associated with the VirtualMachineInstance. + + Returns: + str: Virt Launcher Pod name. + + """ + selector = f"vm.kubevirt.io/name={self._vmi_name}" + virt_launcher_pod = Pod(**get_pods_having_label(selector, self._namespace)[0]) + return virt_launcher_pod.name + + def wait_for_vmi_to_be_running(self, timeout=600): + """ + Wait for the VirtualMachineInstance to reach the 'Running' status. + + Args: + timeout (int): Timeout value in seconds. + + """ + self.ocp.wait_for_resource( + resource_name=self._vmi_name, + column="PHASE", + condition=constants.VM_RUNNING, + timeout=timeout, + ) + + def get_vmi_active_condition(self): + """ + Get the active condition of the VirtualMachineInstance. + + Returns: + list: Active condition of the VirtualMachineInstance. + + """ + out = self.get().get("status").get("conditions") + return [ + condition + for condition in out + if condition.get("lastTransitionTime") is not None + ] + + def get_vmi_pause_condition(self): + """ + Get the pause condition of the VirtualMachineInstance. + + Returns: + dict: Pause condition of the VirtualMachineInstance, or None if not found. + + """ + for pause_condition in self.get_vmi_active_condition(): + if ( + pause_condition.get("reason") == "PausedByUser" + and pause_condition.get("status") == "True" + ): + return pause_condition + + def wait_for_vmi_condition_pause_status(self, pause=True): + """ + Wait for the VirtualMachineInstance to reach the specified pause status. + + Args: + pause (bool): True to wait for 'Paused' status, False for 'Running' status. 
+ + """ + condition = "PausedByUser" if pause else None + for sample in TimeoutSampler( + timeout=600, sleep=10, func=self.get_vmi_pause_condition + ): + if not sample or (condition and sample.get("reason") == condition): + return + + def wait_for_vmi_delete(self, timeout=600): + """ + Wait for the deletion of the VirtualMachineInstance. + + Args: + timeout (int): Timeout value in seconds. + + """ + self.ocp.wait_for_delete(resource_name=self._vmi_name, timeout=timeout) + + def wait_for_virt_launcher_pod_delete(self): + """ + Wait for the deletion of the Virt Launcher Pod associated with the VirtualMachineInstance. + + """ + pod_obj = OCP(kind=constants.POD, namespace=self._namespace) + virt_launcher_pod = self.virt_launcher_pod() + pod_obj.wait_for_delete(virt_launcher_pod) diff --git a/ocs_ci/ocs/constants.py b/ocs_ci/ocs/constants.py index 338cfc3ad2b..378fb897356 100644 --- a/ocs_ci/ocs/constants.py +++ b/ocs_ci/ocs/constants.py @@ -70,6 +70,10 @@ ) TEMPLATE_AUTHENTICATION_DIR = os.path.join(TEMPLATE_DIR, "authentication") KREW_INSTALL_DIR = os.path.join(TEMPLATE_DIR, "krew_plugin") +TEMPLATE_CNV_VM_WORKLOAD_DIR = os.path.join(TEMPLATE_DIR, "cnv-vm-workload") +TEMPLATE_CNV_VM_STANDALONE_PVC_DIR = os.path.join( + TEMPLATE_CNV_VM_WORKLOAD_DIR, "vm-standalone-pvc" +) DATA_DIR = os.getenv("OCSCI_DATA_DIR") or os.path.join(TOP_DIR, "data") ROOK_REPO_DIR = os.path.join(DATA_DIR, "rook") ROOK_EXAMPLES_DIR = os.path.join( @@ -329,7 +333,7 @@ DEFAULT_VOLUMESNAPSHOTCLASS_CEPHFS_MS_PC = f"{DEFAULT_CLUSTERNAME}-cephfs" DEFAULT_VOLUMESNAPSHOTCLASS_RBD_MS_PC = f"{DEFAULT_CLUSTERNAME}-ceph-rbd" -# CNV constants +# CNV deployment constants CNV_NAMESPACE = "openshift-cnv" CNV_QUAY_NIGHTLY_IMAGE = "quay.io/openshift-cnv/nightly-catalog" HYPERCONVERGED = "HyperConverged" @@ -337,6 +341,16 @@ KUBEVIRT_HYPERCONVERGED = "kubevirt-hyperconverged" CNV_SELECTOR = "operators.coreos.com/kubevirt-hyperconverged.openshift-cnv" +# CNV VM constants +VIRTUAL_MACHINE = "VirtualMachine" +VIRTUAL_MACHINE_INSTANCE = "VirtualMachineInstance" +VM_RUNNING = "Running" +CNV_VM_STOPPED = "Stopped" +VM_PAUSED = "Paused" +DEFAULT_CNV_CEPH_RBD_SC = "ocs-storagecluster-ceph-rbd-virtualization" +VOLUME_IMPORT_SOURCE = "VolumeImportSource" + + # Virtctl constants VIRTCTL = "virtctl" VIRTCTL_CLI_DOWNLOADS = f"{VIRTCTL}-clidownloads-kubevirt-hyperconverged" @@ -760,6 +774,19 @@ TEMPLATE_DEPLOYMENT_DIR_CNV, "hyperconverged.yaml" ) +CNV_VM_SECRET_YAML = os.path.join(TEMPLATE_DEPLOYMENT_DIR_CNV, "vm-secret.yaml") + +# CNV VM workload yamls +CNV_VM_STANDALONE_PVC_SOURCE_YAML = os.path.join( + TEMPLATE_CNV_VM_STANDALONE_PVC_DIR, "source.yaml" +) +CNV_VM_STANDALONE_PVC_PVC_YAML = os.path.join( + TEMPLATE_CNV_VM_STANDALONE_PVC_DIR, "pvc.yaml" +) +CNV_VM_STANDALONE_PVC_VM_YAML = os.path.join( + TEMPLATE_CNV_VM_STANDALONE_PVC_DIR, "vm.yaml" +) + # Multus Networks MULTUS_PUBLIC_NET_YAML = os.path.join(TEMPLATE_DEPLOYMENT_DIR, "multus-public-net.yaml") MULTUS_CLUSTER_NET_YAML = os.path.join( diff --git a/ocs_ci/ocs/exceptions.py b/ocs_ci/ocs/exceptions.py index eac9659710c..7896658359f 100644 --- a/ocs_ci/ocs/exceptions.py +++ b/ocs_ci/ocs/exceptions.py @@ -682,3 +682,7 @@ class SameNameClusterAlreadyExistsException(Exception): class NoRunningCephToolBoxException(Exception): pass + + +class UsernameNotFoundException(Exception): + pass diff --git a/ocs_ci/templates/cnv-deployment/vm-secret.yaml b/ocs_ci/templates/cnv-deployment/vm-secret.yaml new file mode 100644 index 00000000000..3dd32167b1a --- /dev/null +++ 
b/ocs_ci/templates/cnv-deployment/vm-secret.yaml @@ -0,0 +1,9 @@ +--- +# Secret file used mainly for SSH to VM workload +apiVersion: v1 +kind: Secret +metadata: + name: new-my-secret + namespace: default +data: + key: PLACEHOLDER diff --git a/ocs_ci/templates/cnv-vm-workload/vm-standalone-pvc/pvc.yaml b/ocs_ci/templates/cnv-vm-workload/vm-standalone-pvc/pvc.yaml new file mode 100644 index 00000000000..274af457318 --- /dev/null +++ b/ocs_ci/templates/cnv-vm-workload/vm-standalone-pvc/pvc.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: sample-vm-pvc + namespace: default +spec: + dataSourceRef: + apiGroup: cdi.kubevirt.io + kind: VolumeImportSource + name: centos-source + accessModes: + - ReadWriteMany + resources: + requests: + storage: 30Gi + storageClassName: ocs-storagecluster-ceph-rbd-virtualization + volumeMode: Block diff --git a/ocs_ci/templates/cnv-vm-workload/vm-standalone-pvc/source.yaml b/ocs_ci/templates/cnv-vm-workload/vm-standalone-pvc/source.yaml new file mode 100644 index 00000000000..a958a700da5 --- /dev/null +++ b/ocs_ci/templates/cnv-vm-workload/vm-standalone-pvc/source.yaml @@ -0,0 +1,8 @@ +apiVersion: cdi.kubevirt.io/v1beta1 +kind: VolumeImportSource +metadata: + name: centos-source +spec: + source: + registry: + url: "docker://quay.io/kubevirt/centos8-container-disk-images" diff --git a/ocs_ci/templates/cnv-vm-workload/vm-standalone-pvc/vm.yaml b/ocs_ci/templates/cnv-vm-workload/vm-standalone-pvc/vm.yaml new file mode 100644 index 00000000000..66df5c21e67 --- /dev/null +++ b/ocs_ci/templates/cnv-vm-workload/vm-standalone-pvc/vm.yaml @@ -0,0 +1,61 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + name: centos-stream8 + namespace: default +spec: + running: true + template: + metadata: + annotations: + vm.kubevirt.io/flavor: small + vm.kubevirt.io/os: centos-stream8 + vm.kubevirt.io/workload: server + creationTimestamp: null + labels: + kubevirt.io/domain: centos-stream8 + kubevirt.io/size: small + spec: + architecture: amd64 + domain: + cpu: + cores: 1 + sockets: 1 + threads: 1 + devices: + disks: + - bootOrder: 1 + disk: + bus: virtio + name: rootdisk + - bootOrder: 2 + disk: + bus: virtio + name: cloudinitdisk + interfaces: + - name: default + masquerade: {} + model: virtio + networkInterfaceMultiqueue: true + rng: {} + machine: + type: pc-q35-rhel9.2.0 + memory: + guest: 2Gi + resources: {} + evictionStrategy: LiveMigrate + networks: + - name: default + pod: {} + terminationGracePeriodSeconds: 180 + volumes: + - name: rootdisk + persistentVolumeClaim: + claimName: sample-vm-pvc + - cloudInitNoCloud: + userData: |- + #cloud-config + user: centos + password: 7lua-f8o8-nk3t + chpasswd: { expire: False } + name: cloudinitdisk diff --git a/tests/functional/workloads/cnv/test_vm_lifecycle_and_io.py b/tests/functional/workloads/cnv/test_vm_lifecycle_and_io.py new file mode 100644 index 00000000000..c77e67b15aa --- /dev/null +++ b/tests/functional/workloads/cnv/test_vm_lifecycle_and_io.py @@ -0,0 +1,68 @@ +import logging +import pytest + +from ocs_ci.framework.pytest_customization.marks import magenta_squad +from ocs_ci.framework.testlib import E2ETest, workloads +from ocs_ci.helpers.cnv_helpers import ( + create_vm_using_standalone_pvc, + get_pvc_from_vm, + get_secret_from_vm, + get_volumeimportsource, +) +from ocs_ci.helpers.helpers import create_project + +log = logging.getLogger(__name__) + + +@magenta_squad +@workloads +@pytest.mark.polarion_id("OCS-5241") +class TestVmOperations(E2ETest): + """ + Tests for VM 
operations + """ + + @pytest.fixture(autouse=True) + def teardown(self, request): + """ + teardown function + """ + + def finalizer(): + pvc_obj = get_pvc_from_vm(self.vm_obj) + secret_obj = get_secret_from_vm(self.vm_obj) + volumeimportsource_obj = get_volumeimportsource(pvc_obj=pvc_obj) + self.vm_obj.delete() + pvc_obj.delete() + secret_obj.delete() + volumeimportsource_obj.delete() + self.proj_obj.delete(resource_name=self.proj_obj.namespace) + + request.addfinalizer(finalizer) + + def test_vm_lifecycle_and_io(self): + """ + This test performs the VM lifecycle operations and IO + + Steps: + 1) Create a VM using a standalone PVC + a) Create a cdi source with a registry url pointing to the source image + b) Create a PVC using this source image backed with an odf storageclass + c) Create a secret using a statically manged public SSH key and add this secret name to the VM spec for ssh + d) Create a VM using the above PVC + 2) Start the VM using virtctl command and wait for the VM to reach running state + 3) SSH to the VM and create some data on the PVC mount point + 4) SCP that create data in step-3 to localmachine + 5) Stop the VM + 6) Delete the VM (as part of teardown) + + """ + self.proj_obj = create_project() + self.vm_obj = create_vm_using_standalone_pvc( + running=True, namespace=self.proj_obj.namespace + ) + self.vm_obj.run_ssh_cmd( + command="dd if=/dev/zero of=/dd_file.txt bs=1024 count=102400" + ) + self.vm_obj.scp_from_vm(local_path="/tmp", vm_src_path="/dd_file.txt") + self.vm_obj.stop() diff --git a/tests/functional/workloads/cnv/test_vm_single_disk_hot_plugging_unplugging.py b/tests/functional/workloads/cnv/test_vm_single_disk_hot_plugging_unplugging.py new file mode 100644 index 00000000000..89219da58ab --- /dev/null +++ b/tests/functional/workloads/cnv/test_vm_single_disk_hot_plugging_unplugging.py @@ -0,0 +1,63 @@ +import logging +import pytest + +from ocs_ci.framework.pytest_customization.marks import magenta_squad +from ocs_ci.framework.testlib import E2ETest, workloads +from ocs_ci.helpers.cnv_helpers import ( + create_vm_using_standalone_pvc, + get_pvc_from_vm, + get_secret_from_vm, + get_volumeimportsource, +) +from ocs_ci.helpers.helpers import create_pvc, create_project +from ocs_ci.ocs import constants + +log = logging.getLogger(__name__) + + +@magenta_squad +@workloads +@pytest.mark.polarion_id("OCS-5243") +class TestVmOperations(E2ETest): + """ + Tests for VM operations + """ + + @pytest.fixture(autouse=True) + def teardown(self, request): + """ + teardown function + """ + + def finalizer(): + pvc_obj = get_pvc_from_vm(self.vm_obj) + secret_obj = get_secret_from_vm(self.vm_obj) + volumeimportsource_obj = get_volumeimportsource(pvc_obj=pvc_obj) + self.vm_obj.delete() + pvc_obj.delete() + secret_obj.delete() + volumeimportsource_obj.delete() + self.proj_obj.delete(resource_name=self.proj_obj.namespace) + + request.addfinalizer(finalizer) + + def test_vm_single_disk_hot_plugging_unplugging(self): + """ + Test for a single disk Hot Plugging and Hot Unplugging + """ + self.proj_obj = create_project() + self.vm_obj = create_vm_using_standalone_pvc( + running=True, namespace=self.proj_obj.namespace + ) + self.vm_obj.run_ssh_cmd( + command="dd if=/dev/zero of=/dd_file.txt bs=1024 count=102400" + ) + pvc_obj = create_pvc( + sc_name=constants.DEFAULT_CNV_CEPH_RBD_SC, + namespace=self.proj_obj.namespace, + size=20, + access_mode=constants.ACCESS_MODE_RWX, + volume_mode=constants.VOLUME_MODE_BLOCK, + ) + self.vm_obj.addvolme(volume_name=pvc_obj.name) + 
self.vm_obj.removevolume(volume_name=pvc_obj.name)
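
Usage sketch (illustrative, not part of the patch): the snippet below strings the new helpers and the VirtualMachine wrapper together the same way the lifecycle test does. It assumes a CNV-enabled cluster reachable by ocs-ci, the default virtualization storage class from the bundled PVC template, and an SSH public key available at ~/.ssh/id_rsa.pub or via the ocs-ci `ssh_key` deployment config; the "30Gi" size and the dd parameters are arbitrary example values.

from ocs_ci.helpers.cnv_helpers import (
    create_vm_using_standalone_pvc,
    get_pvc_from_vm,
    get_secret_from_vm,
    get_volumeimportsource,
)
from ocs_ci.helpers.helpers import create_project

proj = create_project()

# Import the CentOS disk image, carve a PVC from it and boot a VM with SSH access
vm = create_vm_using_standalone_pvc(
    namespace=proj.namespace,
    pvc_size="30Gi",
    running=True,
)

# Run I/O inside the guest over SSH, then copy the result back with SCP
vm.run_ssh_cmd(command="dd if=/dev/zero of=/dd_file.txt bs=1024 count=10240")
vm.scp_from_vm(local_path="/tmp", vm_src_path="/dd_file.txt")

# Lifecycle operations block until the VM reaches the expected printable status
vm.stop()
vm.start()

# Teardown mirrors the tests: resolve dependent objects before deleting the VM
pvc = get_pvc_from_vm(vm)
secret = get_secret_from_vm(vm)
source = get_volumeimportsource(pvc_obj=pvc)
vm.delete()
pvc.delete()
secret.delete()
source.delete()
proj.delete(resource_name=proj.namespace)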
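
A second, smaller sketch of the pause/unpause path and direct VMI queries exposed by the new classes. The VM name "centos-stream8" and the openshift-cnv namespace are placeholders taken from the bundled vm.yaml template and the CNV_NAMESPACE constant; substitute the names of an existing VM.

from ocs_ci.ocs.cnv.virtual_machine import VirtualMachine

vm = VirtualMachine(vm_name="centos-stream8", namespace="openshift-cnv")

vm.pause()      # waits for the Paused printable status by default
vm.unpause()    # waits for Running again

# Readiness and placement details come straight from the VM/VMI resources
print(vm.ready(), vm.printableStatus())
print(vm.vmi_obj.is_vmi_running(), vm.vmi_obj.node())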