From 44fa2762a5cecf6a1982944f97f5b46cff503c36 Mon Sep 17 00:00:00 2001
From: Amrita Mahapatra <49347640+amr1ta@users.noreply.github.com>
Date: Fri, 14 Jun 2024 19:51:46 +0530
Subject: [PATCH] Added test cases for the improved storage allocation feature

Signed-off-by: Amrita Mahapatra <49347640+amr1ta@users.noreply.github.com>
---
 .../storage_client_deployment.py              |  11 +-
 ocs_ci/ocs/rados_utils.py                     |  70 ----
 ocs_ci/ocs/resources/storage_client.py        |  48 ++-
 ...n_and_distribution_of_storage_resources.py | 306 +++++++++++++++---
 4 files changed, 301 insertions(+), 134 deletions(-)

diff --git a/ocs_ci/deployment/provider_client/storage_client_deployment.py b/ocs_ci/deployment/provider_client/storage_client_deployment.py
index a6caa6e335eb..41dc84e2f14b 100644
--- a/ocs_ci/deployment/provider_client/storage_client_deployment.py
+++ b/ocs_ci/deployment/provider_client/storage_client_deployment.py
@@ -33,10 +33,6 @@
     verify_block_pool_exists,
 )
 from ocs_ci.ocs.exceptions import CommandFailed
-from ocs_ci.helpers.managed_services import (
-    verify_storageclient,
-    # verify_storageclient_storageclass_claims,
-)
 
 log = logging.getLogger(__name__)
 
@@ -228,12 +224,12 @@ def provider_and_native_client_installation(
             constants.DEFAULT_BLOCKPOOL
         ), f"{constants.DEFAULT_BLOCKPOOL} is not created"
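+        # The two checks below now call the module-level helpers from
+        # rados_utils; this patch removes the equivalent RadosHelper methods.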
         assert (
-            self.rados_utils.verify_cephblockpool_status()
+            verify_cephblockpool_status()
         ), "the cephblockpool is not in Ready phase"
 
         # Validate radosnamespace created and in 'Ready' status
         assert (
-            self.rados_utils.check_phase_of_rados_namespace()
+            check_phase_of_rados_namespace()
         ), "The radosnamespace is not in Ready phase"
 
         # Validate storageclassrequests created
@@ -286,9 +282,6 @@ def verify_provider_mode_deployment(self):
         """
         This method verifies provider mode deployment
 
-        Returns:
-            onboarding_token(str): client onboarding token
-
         """
 
         # Check ux server pod, ocs-provider server pod and rgw pods are up and running
diff --git a/ocs_ci/ocs/rados_utils.py b/ocs_ci/ocs/rados_utils.py
index b6c279b71ca8..cf3c8f8510c1 100644
--- a/ocs_ci/ocs/rados_utils.py
+++ b/ocs_ci/ocs/rados_utils.py
@@ -277,76 +277,6 @@ def get_mgr_proxy_container(self, node, docker_image, proxy_container="mgr_proxy
 
         return mgr_object
 
-    def verify_cephblockpool_status(
-        self,
-        pool_name=constants.DEFAULT_BLOCKPOOL,
-        namespace=None,
-        required_phase=constants.STATUS_READY,
-    ):
-        """
-        Verify the phase of cephblockpool
-
-        Args:
-            pool_name (str): The name of the Ceph block pool
-
-        Returns:
-            status: True if the Ceph block pool is in Ready status, False otherwise
-        """
-        if not namespace:
-            namespace = config.ENV_DATA["cluster_namespace"]
-        cmd = (
-            f"oc get {constants.CEPHBLOCKPOOL} {pool_name} -n {namespace} "
-            "-o=jsonpath='{.status.phase}'"
-        )
-        # phase = run_cmd(cmd=cmd)
-
-        phase = retry((CommandFailed), tries=20, delay=10,)(
-            run_cmd
-        )(cmd=cmd)
-
-        logger.info(f"{pool_name} is in {phase} phase")
-        logger.info(f"Required phase is {required_phase}")
-        if phase == required_phase:
-            return True
-        else:
-            return False
-
-    def fetch_rados_namespaces(self, namespace=None):
-        """
-        Verify if rados namespace exists
-
-        Returns:
-            bool: True if the radosnamespace exists, False otherwise
-        """
-        logger.info("Fetch radosnamespaces exist")
-        if not namespace:
-            namespace = config.ENV_DATA["cluster_namespace"]
-        rados_ns_obj = ocp.OCP(kind=constants.CEPHBLOCKPOOLRADOSNS, namespace=namespace)
-        result = rados_ns_obj.get()
-        sample = result["items"]
-        rados_ns_list = [item.get("metadata").get("name") for item in sample]
-        return rados_ns_list
-
-    def check_phase_of_rados_namespace(
-        self, namespace=None, required_phase=constants.STATUS_READY
-    ):
-        """
-        Verify if rados namespace exists
-
-        Returns:
-            bool: True if the radosnamespace exists, False otherwise
-        """
-        logger.info("Verifying if radosnamespace is in desired phase")
-        if not namespace:
-            namespace = config.ENV_DATA["cluster_namespace"]
-        for rados_namespace in self.fetch_rados_namespaces(namespace=namespace):
-            check_radosns_phase_cmd = (
-                f"oc get {constants.CEPHBLOCKPOOLRADOSNS} {rados_namespace} -n {namespace} "
-                "-o=jsonpath='{.status.phase}'"
-            )
-            phase = run_cmd(cmd=check_radosns_phase_cmd)
-        return True if phase == required_phase else False
-
 def verify_cephblockpool_status(
     pool_name=constants.DEFAULT_BLOCKPOOL,
diff --git a/ocs_ci/ocs/resources/storage_client.py b/ocs_ci/ocs/resources/storage_client.py
index 8d4bf6768657..30928481f367 100644
--- a/ocs_ci/ocs/resources/storage_client.py
+++ b/ocs_ci/ocs/resources/storage_client.py
@@ -124,10 +124,24 @@ def odf_installation_on_client(
         if enable_console:
             enable_console_plugin(value="[odf-client-console]")
 
+    def check_storageclient_availability(self, storage_client_name):
+        """
+        Check if the storageclient exists
+
+        Args:
+            storage_client_name (str): name of the storageclient
+
+        Returns:
+            bool: True if the storageclient exists, False otherwise
+        """
+        return self.storage_client_obj.check_resource_existence(
+            timeout=120,
+            resource_name=storage_client_name,
+            should_exist=True,
+        )
+
     def create_storage_client(
         self,
         storage_provider_endpoint=None,
         onboarding_token=None,
+        native_client=False,
     ):
         """
         This method creates storage clients
 
         Inputs:
             storage_provider_endpoint (str): storage provider endpoint details.
             onboarding_token (str): onboarding token
+            native_client (bool): True if the storageclient to create is the native client, False otherwise
 
         """
-        # Pull storage-client yaml data
-        log.info("Pulling storageclient CR data from yaml")
-        storage_client_data = templating.load_yaml(constants.STORAGE_CLIENT_YAML)
-        resource_name = storage_client_data["metadata"]["name"]
-        log.info(f"the resource name: {resource_name}")
+        if self.ocs_version < version.VERSION_4_16:
+            # Pull storage-client yaml data
+            log.info("Pulling storageclient CR data from yaml")
+            storage_client_data = templating.load_yaml(
+                constants.NATIVE_STORAGE_CLIENT_YAML
+            )
+        else:
+            log.info("Pulling storageclient CR data from yaml")
+            storage_client_data = templating.load_yaml(
+                constants.PROVIDER_MODE_STORAGE_CLIENT
+            )
+            # The native client reuses the storagecluster's name
+            if native_client:
+                storage_client_data["metadata"]["name"] = "ocs-storagecluster"
+        storage_client_name = storage_client_data["metadata"]["name"]
+        log.info(f"The storageclient resource name: {storage_client_name}")
 
         # Check storageclient is available or not
-        is_available = self.storage_client_obj.is_exist(
-            resource_name=resource_name,
+        is_available = self.check_storageclient_availability(
+            storage_client_name=storage_client_name,
         )
 
         if not is_available:
@@ -467,10 +497,12 @@ def create_native_storage_client(
             onboarding_token = storage_clients.generate_client_onboarding_ticket()
 
         # Create ODF subscription for storage-client
-        self.odf_installation_on_client()
+        if self.ocs_version < version.VERSION_4_16:
+            self.odf_installation_on_client()
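+        # native_client=True makes create_storage_client name the storageclient
+        # CR "ocs-storagecluster" (see create_storage_client above)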
         self.create_storage_client(
             storage_provider_endpoint=storage_provider_endpoint,
             onboarding_token=onboarding_token,
+            native_client=True,
         )
         if self.ocs_version < version.VERSION_4_16:
diff --git a/tests/functional/provider_client/test_improve_allocation_and_distribution_of_storage_resources.py b/tests/functional/provider_client/test_improve_allocation_and_distribution_of_storage_resources.py
index 08d72e0522a5..db6f0f2ac705 100644
--- a/tests/functional/provider_client/test_improve_allocation_and_distribution_of_storage_resources.py
+++ b/tests/functional/provider_client/test_improve_allocation_and_distribution_of_storage_resources.py
@@ -1,49 +1,24 @@
 import pytest
 import logging
 
-# import tempfile
-# import time
-
-
-# from ocs_ci.framework import config
+from ocs_ci.framework import config
 from ocs_ci.ocs import constants
-
-# from ocs_ci.deployment.helpers.lso_helpers import setup_local_storage
-# from ocs_ci.ocs.node import label_nodes, get_all_nodes, get_node_objs
-# from ocs_ci.utility.retry import retry
-# from ocs_ci.ocs.ui.validation_ui import ValidationUI
-# from ocs_ci.ocs.ui.base_ui import login_ui, close_browser
-# from ocs_ci.ocs.utils import (
-#     setup_ceph_toolbox,
-#     enable_console_plugin,
-#     run_cmd,
-# )
-# from ocs_ci.utility.utils import (
-#     wait_for_machineconfigpool_status,
-# )
-# from ocs_ci.utility import templating, version
-
-# from ocs_ci.deployment.deployment import Deployment, create_catalog_source
-# from ocs_ci.deployment.baremetal import clean_disk
-# from ocs_ci.ocs.resources.storage_cluster import (
-#     verify_storage_cluster,
-#     check_storage_client_status,
-# )
-# from ocs_ci.ocs.resources.catalog_source import CatalogSource
-# from ocs_ci.ocs.bucket_utils import check_pv_backingstore_type
-# from ocs_ci.ocs.resources import pod
+from ocs_ci.ocs.resources.storage_client import StorageClient
 from ocs_ci.helpers.helpers import (
     get_all_storageclass_names,
     verify_block_pool_exists,
+    create_storage_class,
+    get_cephfs_data_pool_name,
+    create_ceph_block_pool,
+)
+from ocs_ci.ocs.rados_utils import (
     verify_cephblockpool_status,
     check_phase_of_rados_namespace,
 )
-
-# from ocs_ci.ocs.exceptions import CommandFailed
-from ocs_ci.helpers.managed_services import verify_storageclient
 from ocs_ci.framework.testlib import (
     skipif_ocs_version,
     ManageTest,
+    green_squad,
     tier1,
     skipif_ocp_version,
     skipif_managed_service,
@@ -55,6 +30,7 @@
 
 
 @tier1
+@green_squad
 @skipif_ocs_version("<4.16")
 @skipif_ocp_version("<4.16")
 @skipif_external_mode
@@ -66,27 +42,219 @@
     def setup(self, request):
         """
         Setup method for the class
-        1. Create storageclient for the provider
         """
+        self.storage_client = StorageClient()
+        self.storage_class_claims = [
+            constants.CEPHBLOCKPOOL_SC,
+            constants.CEPHFILESYSTEM_SC,
+        ]
+        self.native_storageclient_name = "ocs-storagecluster"
+        # Interface used by secret_factory in the tests below; without this
+        # attribute the tests would fail with AttributeError on self.interface
+        self.interface = constants.CEPHBLOCKPOOL
+
+    def test_storage_allocation_for_a_storageclient_for_storageclaims_creation(
+        self,
+        secret_factory,
+    ):
+        """
+        This test verifies that storageclaims created for a storageclient get new
+        radosnamespaces, using the same or different storageprofiles.
+
+        Validate at the provider side:
+        1. Verify that only one CephBlockPool remains
+        2. Verify that each block type storageclaim creates a new radosnamespace
+        corresponding to the storageprofile, and that the radosnamespaces are in "Ready" status
+        3. Verify storageclassrequests get created for the storageclaim
+        4. Verify storageclasses get created
+        5. Verify storageclass creation works as expected.
+        6. Verify the same behavior for storageclaims created with different storageprofiles
+        7. Verify data is isolated between the consumers sharing the same blockpool
+
+        """
+        # Check if the native storageclient is available; create it otherwise
+        if not self.storage_client.check_storageclient_availability(
+            storage_client_name=self.native_storageclient_name
+        ):
+            self.storage_client.create_native_storage_client(
+                namespace_to_create_storage_client=config.ENV_DATA["cluster_namespace"]
+            )
+            self.storage_client.verify_native_storageclient()
+
+        # Validate cephblockpool created
+        assert verify_block_pool_exists(
+            constants.DEFAULT_BLOCKPOOL
+        ), f"{constants.DEFAULT_BLOCKPOOL} is not created"
+        assert verify_cephblockpool_status(), "the cephblockpool is not in Ready phase"
+
+        # Validate radosnamespace created and in 'Ready' status
+        assert (
+            check_phase_of_rados_namespace()
+        ), "The radosnamespace is not in Ready phase"
+
+        # Validate storageclassrequests created
+        assert self.storage_client.verify_storagerequest_exists(
+            storageclient_name=self.native_storageclient_name
+        ), "Storageclass requests are unavailable"
+
+        # Verify storageclasses get created
+        storage_classes = get_all_storageclass_names()
+        for storage_class in self.storage_class_claims:
+            assert (
+                storage_class in storage_classes
+            ), "Storage classes are not created as expected"
+
+        # Verify storageclass creation works as expected
+        secret = secret_factory(interface=self.interface)
+        sc_obj = create_storage_class(
+            interface_type=constants.CEPHBLOCKPOOL,
+            interface_name=get_cephfs_data_pool_name(),
+            secret_name=secret.name,
+        )
+        assert sc_obj, f"Failed to create {sc_obj.name} storage class"
+        log.info(f"Storage class: {sc_obj.name} created successfully")
+
+        # Verify the radosnamespace is displaying in ceph-csi-configs
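+        # A possible check for this step (sketch only; the ConfigMap name and
+        # the "radosNamespace" key are assumptions, and `ocp` would need to be
+        # imported from ocs_ci.ocs):
+        #   csi_cm = ocp.OCP(
+        #       kind=constants.CONFIGMAP,
+        #       namespace=config.ENV_DATA["cluster_namespace"],
+        #   ).get(resource_name="ceph-csi-configs")
+        #   assert "radosNamespace" in str(csi_cm), (
+        #       "no radosnamespace referenced in ceph-csi-configs"
+        #   )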
+
+        # Create a new blockpool
+        cbp_obj = create_ceph_block_pool()
+        assert cbp_obj, "Failed to create block pool"
+
+        # Create storageclaim with the created blockpool value
+        self.storage_client.create_storageclaim(
+            storageclaim_name="claim-created-on-added-blockpool",
+            type="block",
+            storage_client_name=self.native_storageclient_name,
+            storageprofile=cbp_obj.name,
+        )
+
+        # Verify storageclaim created successfully
+        self.storage_client.verify_storage_claim_status(
+            storage_client_name=self.native_storageclient_name
+        )
+
+        # Validate a new radosnamespace created and in 'Ready' status
+        assert (
+            check_phase_of_rados_namespace()
+        ), "The radosnamespace is not in Ready phase"
+
+        # Validate storageclassrequests created
+        assert self.storage_client.verify_storagerequest_exists(
+            storageclient_name=self.native_storageclient_name
+        ), "Storageclass requests are unavailable"
+
+    def test_storage_allocation_for_hcp_cluster_storageclient_for_storageclaims_creation(
+        self,
+        secret_factory,
+    ):
+        """
+        This test verifies that storageclaims created for an HCP cluster
+        storageclient get new radosnamespaces, using the same or different
+        storageprofiles.
+
+        Validate at the provider side:
+        1. Verify that only one CephBlockPool remains
+        2. Verify that each block type storageclaim creates a new radosnamespace
+        corresponding to the storageprofile, and that the radosnamespaces are in "Ready" status
+        3. Verify storageclassrequests get created for the storageclaim
+        4. Verify storageclasses get created
+        5. Verify storageclass creation works as expected.
+        6. Verify the same behavior for storageclaims created with different storageprofiles
+        7. Verify data is isolated between the consumers sharing the same blockpool
+
+        """
+        from tests.libtest.test_provider_create_hosted_cluster import TestProviderHosted
+
+        test_hosted_client = TestProviderHosted()
+        test_hosted_client.test_deploy_OCP_and_setup_ODF_client_on_hosted_clusters()
+        test_hosted_client.test_storage_client_connected()
+        # Validate cephblockpool created
+        assert verify_block_pool_exists(
+            constants.DEFAULT_BLOCKPOOL
+        ), f"{constants.DEFAULT_BLOCKPOOL} is not created"
+        assert verify_cephblockpool_status(), "the cephblockpool is not in Ready phase"
+
+        # Validate radosnamespace created and in 'Ready' status
+        assert (
+            check_phase_of_rados_namespace()
+        ), "The radosnamespace is not in Ready phase"
+
+        # Validate storageclassrequests created
+        assert self.storage_client.verify_storagerequest_exists(
+            storageclient_name=self.native_storageclient_name
+        ), "Storageclass requests are unavailable"
+
+        # Verify storageclasses get created
+        storage_classes = get_all_storageclass_names()
+        for storage_class in self.storage_class_claims:
+            assert (
+                storage_class in storage_classes
+            ), "Storage classes are not created as expected"
+
+        # Verify storageclass creation works as expected
+        secret = secret_factory(interface=self.interface)
+        sc_obj = create_storage_class(
+            interface_type=constants.CEPHBLOCKPOOL,
+            interface_name=get_cephfs_data_pool_name(),
+            secret_name=secret.name,
+        )
+        assert sc_obj, f"Failed to create {sc_obj.name} storage class"
+        log.info(f"Storage class: {sc_obj.name} created successfully")
+
+        # Verify the radosnamespace is displaying in ceph-csi-configs
 
-    def test_storage_allocation_for_multiple_storageclients_using_same_storageprofile_for_storageclaims(
+        # Create a new blockpool
+        cbp_obj = create_ceph_block_pool()
+        assert cbp_obj, "Failed to create block pool"
+
+        # Create storageclaim with the created blockpool value
+        self.storage_client.create_storageclaim(
+            storageclaim_name="claim-created-on-added-blockpool",
+            type="block",
+            storage_client_name=self.native_storageclient_name,
+            storageprofile=cbp_obj.name,
+        )
+
+        # Verify storageclaim created successfully
+        self.storage_client.verify_storage_claim_status(
+            storage_client_name=self.native_storageclient_name
+        )
+
+        # Validate a new radosnamespace created and in 'Ready' status
+        assert (
+            check_phase_of_rados_namespace()
+        ), "The radosnamespace is not in Ready phase"
+
+        # Validate storageclassrequests created
+        assert self.storage_client.verify_storagerequest_exists(
+            storageclient_name=self.native_storageclient_name
+        ), "Storageclass requests are unavailable"
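+        # Step 7 (data isolation between consumers) is not exercised above; a
+        # possible check (sketch only; the rbd namespace listing via the Ceph
+        # toolbox is an assumption, and `pod` would need to be imported from
+        # ocs_ci.ocs.resources):
+        #   tools = pod.get_ceph_tools_pod()
+        #   images = tools.exec_cmd_on_pod(
+        #       f"rbd ls {constants.DEFAULT_BLOCKPOOL}/<radosnamespace>",
+        #       out_yaml_format=False,
+        #   )
+        #   # assert that only this client's images appear in the listing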
+
+    def test_associated_radosnamespace_gets_deleted_after_deletion_of_storageclient(
         self,
+        secret_factory,
     ):
         """
-        For multiple clients to the same provider, and for each client StorageClaim is
-        created using the same Storage profile name.
-        at Provider side:
-        1. Verify storageclass creation works as expected
-        2. Verify that only one CephBlockPool gets created
-        3. Verify for each storageclaim creates a new radosnamespace corresponding to
+        This test verifies that the associated radosnamespace gets deleted after
+        deletion of a storageclient.
+
+        Validate at the provider side:
+        1. Verify that only one CephBlockPool remains
+        2. Verify that each block type storageclaim creates a new radosnamespace corresponding to
         the storageprofile check the radosnamespaces are in "Ready" status
-        Note: Storageclients are clusterscoped.
-        4. Verify data is isolated between the consumers sharing the same blockpool
+        3. Verify storageclassrequests get created for the storageclaim
+        4. Verify storageclasses get created
+        5. Verify storageclass creation works as expected.
+        6. Verify the same behavior for storageclaims created with different storageprofiles
+        7. Verify data is isolated between the consumers sharing the same blockpool
+
         """
+        # Check if the native storageclient is available; create it otherwise
+        if not self.storage_client.check_storageclient_availability(
+            storage_client_name=self.native_storageclient_name
+        ):
 
-        # Validate storageclaims are Ready and associated storageclasses are created
-        verify_storageclient()
+            self.storage_client.create_native_storage_client(
+                namespace_to_create_storage_client=config.ENV_DATA["cluster_namespace"]
+            )
+            self.storage_client.verify_native_storageclient()
 
         # Validate cephblockpool created
         assert verify_block_pool_exists(
@@ -100,8 +268,52 @@
             ), "The radosnamespace is not in Ready phase"
 
         # Validate storageclassrequests created
-        storage_class_classes = get_all_storageclass_names()
+        assert self.storage_client.verify_storagerequest_exists(
+            storageclient_name=self.native_storageclient_name
+        ), "Storageclass requests are unavailable"
+
+        # Verify storageclasses get created
+        storage_classes = get_all_storageclass_names()
         for storage_class in self.storage_class_claims:
             assert (
-                storage_class in storage_class_classes
+                storage_class in storage_classes
             ), "Storage classes ae not created as expected"
+
+        # Verify storageclass creation works as expected
+        secret = secret_factory(interface=self.interface)
+        sc_obj = create_storage_class(
+            interface_type=constants.CEPHBLOCKPOOL,
+            interface_name=get_cephfs_data_pool_name(),
+            secret_name=secret.name,
+        )
+        assert sc_obj, f"Failed to create {sc_obj.name} storage class"
+        log.info(f"Storage class: {sc_obj.name} created successfully")
+
+        # Verify the radosnamespace is displaying in ceph-csi-configs
+
+        # Create a new blockpool
+        cbp_obj = create_ceph_block_pool()
+        assert cbp_obj, "Failed to create block pool"
+
+        # Create storageclaim with the created blockpool value
+        self.storage_client.create_storageclaim(
+            storageclaim_name="claim-created-on-added-blockpool",
+            type="block",
+            storage_client_name=self.native_storageclient_name,
+            storageprofile=cbp_obj.name,
+        )
+
+        # Verify storageclaim created successfully
+        self.storage_client.verify_storage_claim_status(
+            storage_client_name=self.native_storageclient_name
+        )
+
+        # Validate a new radosnamespace created and in 'Ready' status
+        assert (
+            check_phase_of_rados_namespace()
+        ), "The radosnamespace is not in Ready phase"
+
+        # Validate storageclassrequests created
+        assert self.storage_client.verify_storagerequest_exists(
+            storageclient_name=self.native_storageclient_name
+        ), "Storageclass requests are unavailable"
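+
+        # The deletion scenario named in this test's title is not implemented
+        # yet; a possible shape (sketch only; delete_storage_client is an
+        # assumed helper, not part of this patch):
+        #   self.storage_client.delete_storage_client(
+        #       storage_client_name=self.native_storageclient_name
+        #   )
+        #   # then assert the claim's radosnamespace no longer appears, e.g.
+        #   # via fetch_rados_namespaces() from ocs_ci.ocs.rados_utils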