IPI disk removal when cluster is destroyed #10916

Open · wants to merge 1 commit into base: master
ocs_ci/cleanup/vsphere/cleanup.py (6 changes: 5 additions & 1 deletion)
@@ -85,10 +85,14 @@ def delete_ipi_nodes(vsphere, cluster_name):

     vms_ipi = []
     for vm in vms_dc:
-        if cluster_name in vm.name:
+        if cluster_name in vm.name and "generated-zone" not in vm.name:
             vms_ipi.append(vm)
             logger.info(vm.name)
     if vms_ipi:
         vsphere.poweroff_vms(vms_ipi)
+        for vm in vms_ipi:
+            logger.info(f"removing disk for vm {vm.name}")
+            vsphere.remove_disks_with_main_disk(vm)
         vsphere.destroy_vms(vms_ipi)


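For context, the behavioural change in delete_ipi_nodes() is twofold: VMs whose names contain "generated-zone" are no longer collected for IPI cleanup, and each collected VM now has its disks removed (via remove_disks_with_main_disk()) after power-off and before destroy_vms(). Below is a minimal, self-contained sketch of the new name filter only; the VM names are hypothetical placeholders, since real names come from the datacenter inventory:

    # Standalone illustration of the updated selection filter (hypothetical names).
    cluster_name = "mycluster"
    vm_names = [
        "mycluster-abc12-master-0",
        "mycluster-abc12-worker-0",
        "mycluster-generated-zone-1",  # now excluded from IPI cleanup
        "othercluster-worker-0",       # never matched: different cluster name
    ]
    selected = [
        name
        for name in vm_names
        if cluster_name in name and "generated-zone" not in name
    ]
    # selected == ["mycluster-abc12-master-0", "mycluster-abc12-worker-0"]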
ocs_ci/deployment/vmware.py (9 changes: 9 additions & 0 deletions)
@@ -1612,6 +1612,15 @@ def destroy_cluster(self, log_level="DEBUG"):
             template_folder = get_infra_id(self.cluster_path)
         else:
             logger.warning("metadata.json file doesn't exist.")
+        vsphere = VSPHERE(
+            config.ENV_DATA["vsphere_server"],
+            config.ENV_DATA["vsphere_user"],
+            config.ENV_DATA["vsphere_password"],
+        )
+        all_vms = vsphere.get_vms_by_string(config.ENV_DATA["cluster_name"])
+        vsphere.stop_vms(all_vms)
+        for vm in all_vms:
+            vsphere.remove_disks_with_main_disk(vm)

         try:
             run_cmd(
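The nine lines added to destroy_cluster() above read naturally as one small helper: build a VSPHERE client from config.ENV_DATA, stop every VM whose name matches the cluster name, and strip all disks before the installer destroy runs. The sketch below restates that flow with a guard for the no-match case; it is only an illustration under the assumptions visible in this diff (the remove_cluster_disks name is hypothetical, and the imports follow the usual ocs_ci layout):

    # Sketch only: mirrors the calls introduced in this PR.
    import logging

    from ocs_ci.framework import config
    from ocs_ci.utility.vsphere import VSPHERE

    logger = logging.getLogger(__name__)

    def remove_cluster_disks(cluster_name):
        """Hypothetical helper: power off matching VMs and remove all their disks."""
        vsphere = VSPHERE(
            config.ENV_DATA["vsphere_server"],
            config.ENV_DATA["vsphere_user"],
            config.ENV_DATA["vsphere_password"],
        )
        vms = vsphere.get_vms_by_string(cluster_name)
        if not vms:
            logger.warning("No VMs matched %s; skipping disk removal", cluster_name)
            return
        vsphere.stop_vms(vms)
        for vm in vms:
            logger.info("Removing disks for VM %s", vm.name)
            vsphere.remove_disks_with_main_disk(vm)

Guarding on an empty result keeps the destroy path from issuing needless vCenter calls when nothing matching the cluster name was ever provisioned.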
ocs_ci/utility/vsphere.py (54 changes: 54 additions & 0 deletions)
@@ -819,6 +819,36 @@ def get_used_unit_number(self, vm):
             if hasattr(device.backing, "fileName") and device.unitNumber != 0
         ]

+    def remove_disks_with_main_disk(self, vm):
+        """
+        Removes all disks of a VM, including the main disk
+
+        Args:
+            vm (vim.VirtualMachine): VM instance
+
+        """
+        extra_disk_unit_numbers = self.get_used_unit_number_with_all_unit_number(vm)
+        if extra_disk_unit_numbers:
+            for each_disk_unit_number in extra_disk_unit_numbers:
+                self.remove_disk(vm=vm, identifier=each_disk_unit_number)
+
+    def get_used_unit_number_with_all_unit_number(self, vm):
+        """
+        Gets all used disk unit numbers for a VM, including the main disk
+
+        Args:
+            vm (vim.VirtualMachine): VM instance
+
+        Returns:
+            list: list of unit numbers
+
+        """
+        return [
+            device.unitNumber
+            for device in vm.config.hardware.device
+            if hasattr(device.backing, "fileName")
+        ]
+
     def check_folder_exists(self, name, cluster, dc):
         """
         Checks whether folder exists in Templates
@@ -1801,3 +1831,27 @@ def add_interface_to_compute_vms(
                     f"Task for configuring network for {vm.name} did not complete successfully."
                 )
             logger.info(f"Network adapter added to {vm.name} successfully.")
+
+    def get_vms_by_string(self, str_to_match):
+        """
+        Gets the VMs whose names contain the given search string
+
+        Args:
+            str_to_match (str): string to match against VM names
+
+        Returns:
+            list: matching VM instances
+
+        """
+
+        content = self.get_content
+        container = content.rootFolder
+        view_type = [vim.VirtualMachine]
+        recursive = True
+
+        container_view = content.viewManager.CreateContainerView(
+            container, view_type, recursive
+        )
+        vms = [vm for vm in container_view.view if str_to_match in vm.name]
+        container_view.Destroy()
+        return vms
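
A note on naming: the pre-existing get_used_unit_number() (whose tail is visible in the first hunk) deliberately skips unitNumber 0, i.e. the main boot disk, while the new get_used_unit_number_with_all_unit_number() keeps it; that is what lets remove_disks_with_main_disk() delete every virtual disk of a VM. A minimal sketch of the difference, using SimpleNamespace stand-ins instead of real pyVmomi device objects (the device layout is illustrative only):

    # Illustration of the two disk-enumeration predicates used in vsphere.py.
    from types import SimpleNamespace

    def make_disk(unit_number, file_name):
        # A disk is recognised purely by having a backing with a fileName attribute.
        return SimpleNamespace(
            unitNumber=unit_number,
            backing=SimpleNamespace(fileName=file_name),
        )

    devices = [
        make_disk(0, "[datastore] vm/vm.vmdk"),    # main (boot) disk
        make_disk(1, "[datastore] vm/vm_1.vmdk"),  # extra disk
        SimpleNamespace(unitNumber=7, backing=SimpleNamespace()),  # non-disk device
    ]

    # Existing get_used_unit_number(): extra disks only (unit 0 excluded).
    extra_only = [
        d.unitNumber
        for d in devices
        if hasattr(d.backing, "fileName") and d.unitNumber != 0
    ]
    # New get_used_unit_number_with_all_unit_number(): main disk included.
    all_disks = [d.unitNumber for d in devices if hasattr(d.backing, "fileName")]
    # extra_only == [1], all_disks == [0, 1]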