From 22ed0f049f2b015da8ae24e4f84507400d62e359 Mon Sep 17 00:00:00 2001
From: Daniel Osypenko
Date: Fri, 20 Oct 2023 11:21:44 +0300
Subject: [PATCH] cherry-pick test_reclaim_space_cronjob_with_annotation 4.12 (#8337)

* test_reclaim_chronjob_by_label__init

Signed-off-by: Daniel Osypenko
---
 ocs_ci/ocs/cluster.py                         |  22 +++
 .../test_rbd_reclaimspace_cronjob.py          | 153 +++++++++++++++++-
 2 files changed, 173 insertions(+), 2 deletions(-)

diff --git a/ocs_ci/ocs/cluster.py b/ocs_ci/ocs/cluster.py
index 29857a6df00..df19b6ce5fd 100644
--- a/ocs_ci/ocs/cluster.py
+++ b/ocs_ci/ocs/cluster.py
@@ -641,6 +641,28 @@ def get_ceph_capacity(self):
             # so, return 0 as usable capacity.
             return 0
 
+    def get_ceph_free_capacity(self):
+        """
+        Function to calculate the free capacity of a cluster
+
+        Returns:
+            float: The free capacity of a cluster (in GB)
+
+        """
+        replica = int(self.get_ceph_default_replica())
+        if replica > 0:
+            logger.info(f"Number of replicas: {replica}")
+            ct_pod = pod.get_ceph_tools_pod()
+            output = ct_pod.exec_ceph_cmd(ceph_cmd="ceph df")
+            total_avail = output.get("stats").get("total_bytes")
+            total_used = output.get("stats").get("total_used_raw_bytes")
+            total_free = total_avail - total_used
+            return total_free / replica / constants.BYTES_IN_GB
+        else:
+            # if the replica number is 0, usable capacity cannot be calculated,
+            # so, return 0 as usable capacity.
+            return 0
+
     def get_ceph_cluster_iops(self):
         """
         The function gets the IOPS from the ocs cluster
diff --git a/tests/manage/pv_services/space_reclaim/test_rbd_reclaimspace_cronjob.py b/tests/manage/pv_services/space_reclaim/test_rbd_reclaimspace_cronjob.py
index 3ac023803a3..98991256bb5 100644
--- a/tests/manage/pv_services/space_reclaim/test_rbd_reclaimspace_cronjob.py
+++ b/tests/manage/pv_services/space_reclaim/test_rbd_reclaimspace_cronjob.py
@@ -1,21 +1,27 @@
 import logging
+import random
 
 import pytest
 
+from ocs_ci.helpers import helpers
 from ocs_ci.ocs import constants
 from ocs_ci.framework.testlib import (
     skipif_ocs_version,
     ManageTest,
     tier1,
     polarion_id,
+    skipif_external_mode,
+    bugzilla,
 )
+from ocs_ci.ocs.cluster import CephCluster
 from ocs_ci.ocs.exceptions import (
     CommandFailed,
     TimeoutExpiredError,
     UnexpectedBehaviour,
 )
+from ocs_ci.ocs.ocp import OCP
 from ocs_ci.ocs.resources.pod import get_file_path, check_file_existence
-from ocs_ci.helpers.helpers import fetch_used_size
-from ocs_ci.utility.utils import TimeoutSampler
+from ocs_ci.helpers.helpers import fetch_used_size, create_unique_resource_name
+from ocs_ci.utility.utils import TimeoutSampler, exec_cmd
 
 log = logging.getLogger(__name__)
@@ -137,3 +143,146 @@ def test_rbd_space_reclaim_cronjob(self, pause_and_resume_cluster_load):
         log.info(check_file_existence(pod_obj=pod_obj, file_path=file_path))
         if check_file_existence(pod_obj=pod_obj, file_path=file_path):
             log.info(f"{fio_filename4} is intact")
+
+    @tier1
+    @bugzilla("2046677")
+    @skipif_external_mode
+    @pytest.mark.parametrize(
+        argnames=["replica", "compression", "volume_binding_mode", "pvc_status"],
+        argvalues=[
+            pytest.param(
+                *[
+                    3,
+                    "aggressive",
+                    constants.IMMEDIATE_VOLUMEBINDINGMODE,
+                    constants.STATUS_BOUND,
+                ],
+                marks=pytest.mark.polarion_id("OCS-4587"),
+            ),
+            pytest.param(
+                *[
+                    2,
+                    "none",
+                    constants.IMMEDIATE_VOLUMEBINDINGMODE,
+                    constants.STATUS_BOUND,
+                ],
+                marks=pytest.mark.polarion_id("OCS-4587"),
+            ),
+        ],
+    )
+    def test_reclaim_space_cronjob_with_annotation(
+        self,
+        replica,
+        compression,
+        volume_binding_mode,
+        pvc_status,
+        project_factory,
+        storageclass_factory_class,
+        pvc_factory,
+    ):
+        """
+        Test case to check that a reclaim space cron job is created for an RBD PVC with the reclaim space annotation
+
+        Steps:
+        1. Create a project
+        2. Create a storage class with reclaim policy as delete
+        3. Create a PVC with the above storage class
+        4. Run IO on the pod
+        5. Add the reclaim space annotation to the PVC
+        6. Validate the reclaim space cronjob
+        """
+
+        # get a random PVC size within the cluster's free capacity (GB)
+        ceph_cluster = CephCluster()
+        pvc_size = random.randint(1, int(ceph_cluster.get_ceph_free_capacity()))
+
+        # pick a random reclaim space schedule
+        schedule = ["hourly", "midnight", "weekly"]
+        schedule = random.choice(schedule)
+
+        self.namespace = create_unique_resource_name(
+            "reclaim-space-cronjob", "namespace"
+        )
+        project_obj = project_factory(project_name=self.namespace)
+
+        interface_type = constants.CEPHBLOCKPOOL
+        sc_obj = storageclass_factory_class(
+            interface=interface_type,
+            new_rbd_pool=True,
+            replica=replica,
+            compression=compression,
+            volume_binding_mode=volume_binding_mode,
+            pool_name="test-pool-cronjob",
+        )
+
+        pvc_obj = pvc_factory(
+            interface=constants.CEPHBLOCKPOOL,
+            project=project_obj,
+            storageclass=sc_obj,
+            size=pvc_size,
+            access_mode=constants.ACCESS_MODE_RWO,
+            status=pvc_status,
+            volume_mode=constants.VOLUME_MODE_BLOCK,
+        )
+
+        helpers.wait_for_resource_state(pvc_obj, pvc_status)
+
+        log.info("Add reclaimspace.csiaddons.openshift.io/schedule annotation to PVC")
+        OCP(kind=constants.PVC, namespace=self.namespace).annotate(
+            f"reclaimspace.csiaddons.openshift.io/schedule=@{schedule}", pvc_obj.name
+        )
+
+        pvc_to_chron_job_dict = self.wait_for_cronjobs(True, 60)
+        assert pvc_to_chron_job_dict, "Reclaim space cron job does not exist"
+
+        chron_job_name = (
+            pvc_obj.get()
+            .get("metadata")
+            .get("annotations")
+            .get("reclaimspace.csiaddons.openshift.io/cronjob")
+        )
+        chron_job_schedule = (
+            pvc_obj.get()
+            .get("metadata")
+            .get("annotations")
+            .get("reclaimspace.csiaddons.openshift.io/schedule")
+        )
+
+        assert (
+            pvc_to_chron_job_dict[chron_job_name] == chron_job_schedule
+        ), "Reclaim space cron job does not exist, or schedule is not correct"
+
+    def wait_for_cronjobs(self, cronjobs_exist, timeout=60):
+        """
+        Runs 'oc get reclaimspacecronjob' with the TimeoutSampler
+
+        Args:
+            cronjobs_exist (bool): Condition to be tested, True if cronjobs should exist, False otherwise
+            timeout (int): Timeout in seconds
+
+        Returns:
+            dict: Dictionary with the reclaim space cronjob name as key and its schedule as value,
+                None if no matching cronjobs exist
+
+        """
+        name_json_path = '{.items[*].metadata.name}{"\t"}{.items[*].spec.schedule}'
+        try:
+            for sample in TimeoutSampler(
+                timeout=timeout,
+                sleep=5,
+                func=exec_cmd,
+                cmd=f"oc get reclaimspacecronjob -n {self.namespace} -o jsonpath='{name_json_path}'",
+            ):
+                if len(sample.stderr) > 0:
+                    return None
+                if (len(sample.stdout) > 0 and cronjobs_exist) or (
+                    len(sample.stdout) == 0 and not cronjobs_exist
+                ):
+                    pvc_to_schedule_list = sample.stdout.decode().split()
+                    mid_index = len(pvc_to_schedule_list) // 2
+                    chronjob_schedules = pvc_to_schedule_list[mid_index:]
+                    chronjob_names = pvc_to_schedule_list[:mid_index]
+                    pvc_to_schedule_dict = dict(zip(chronjob_names, chronjob_schedules))
+                    return pvc_to_schedule_dict
+        except TimeoutExpiredError:
+            return None
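
The free-capacity calculation added in get_ceph_free_capacity() above reduces to
(total_bytes - total_used_raw_bytes) / replica, converted to GB. A minimal standalone
sketch of that arithmetic, separate from the patch, using hypothetical "ceph df" numbers
and assuming constants.BYTES_IN_GB equals 2**30:

    # Sketch of the get_ceph_free_capacity() math with made-up numbers.
    BYTES_IN_GB = 1024**3  # assumption: constants.BYTES_IN_GB is 2**30

    ceph_df_stats = {
        "total_bytes": 600 * BYTES_IN_GB,           # raw cluster capacity (hypothetical)
        "total_used_raw_bytes": 150 * BYTES_IN_GB,  # raw bytes already consumed (hypothetical)
    }
    replica = 3  # pool replica count, as returned by get_ceph_default_replica()

    total_free_raw = ceph_df_stats["total_bytes"] - ceph_df_stats["total_used_raw_bytes"]
    # Dividing raw free space by the replica factor gives usable capacity in GB.
    free_capacity_gb = total_free_raw / replica / BYTES_IN_GB
    print(f"Usable free capacity: {free_capacity_gb:.1f} GB")  # -> 150.0 GB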
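wait_for_cronjobs() relies on the jsonpath
'{.items[*].metadata.name}{"\t"}{.items[*].spec.schedule}' printing all ReclaimSpaceCronJob
names first and all schedules second, so splitting the output and zipping the two halves
pairs each cronjob with its schedule. A minimal standalone sketch of that parsing, separate
from the patch, using a hypothetical oc output string (real names and schedules will differ):

    # Sketch of the parsing done in wait_for_cronjobs(); sample_stdout is hypothetical.
    sample_stdout = b"pvc-a-1234 pvc-b-5678\t@hourly @weekly"

    tokens = sample_stdout.decode().split()  # names come first, then schedules
    mid_index = len(tokens) // 2
    names, schedules = tokens[:mid_index], tokens[mid_index:]
    cronjob_to_schedule = dict(zip(names, schedules))
    print(cronjob_to_schedule)  # {'pvc-a-1234': '@hourly', 'pvc-b-5678': '@weekly'}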