Create a new UI test 'test_ui_storage_size_post_resize_osd' and move 'create_pvcs_and_pods' from the 'setup' method to the 'prepare_data_before_resize_osd' method

Signed-off-by: Itzhak Kave <[email protected]>
Itzhak Kave committed Jun 27, 2024
1 parent 18d66d2 commit 67e67d3
Showing 2 changed files with 75 additions and 9 deletions.
48 changes: 47 additions & 1 deletion ocs_ci/helpers/osd_resize.py
@@ -23,7 +23,13 @@
    resize_osd,
)
from ocs_ci.ocs.cluster import check_ceph_osd_tree, CephCluster
from ocs_ci.utility.utils import ceph_health_check, TimeoutSampler, convert_device_size
from ocs_ci.ocs.ui.page_objects.page_navigator import PageNavigator
from ocs_ci.utility.utils import (
    ceph_health_check,
    TimeoutSampler,
    convert_device_size,
    human_to_bytes_ui,
)
from ocs_ci.ocs import constants
from ocs_ci.ocs.ocp import OCP
from ocs_ci.framework import config
@@ -411,3 +417,43 @@ def basic_resize_osd(old_storage_size):
    logger.info(f"Increase the osd size to {new_storage_size}")
    resize_osd(new_storage_size)
    return new_storage_size


def check_storage_size_is_reflected_in_ui():
"""
Check that the current total storage size is reflected in the
UI 'ocs-storagecluster-storagesystem' page.
"""
block_and_file = (
PageNavigator()
.nav_odf_default_page()
.nav_storage_systems_tab()
.nav_storagecluster_storagesystem_details()
.nav_block_and_file()
)
used, available = block_and_file.get_raw_capacity_card_values()
# Get the used, available and total size in bytes
used_size_bytes = human_to_bytes_ui(used)
available_size_bytes = human_to_bytes_ui(available)
total_size_bytes = used_size_bytes + available_size_bytes

# Convert the used, available and total size to GB
bytes_to_gb = 1024**3
used_size_gb = used_size_bytes / bytes_to_gb
available_size_gb = available_size_bytes / bytes_to_gb
total_size_gb = round(total_size_bytes / bytes_to_gb)
logger.info(f"Used size = {used_size_gb}Gi")
logger.info(f"Available size = {available_size_gb}Gi")
logger.info(f"Total size = {total_size_gb}Gi")

ceph_cluster = CephCluster()
ceph_capacity = ceph_cluster.get_ceph_capacity()
expected_total_size_gb = ceph_capacity * len(get_osd_pods())
logger.info(f"expected total size = {expected_total_size_gb}Gi")

assert total_size_gb == expected_total_size_gb, (
f"The total size {total_size_gb}Gi is not equal to the "
f"expected total size {expected_total_size_gb}Gi"
)
logger.info("The total size is equal to the expected total size")
36 changes: 28 additions & 8 deletions tests/functional/z_cluster/cluster_expansion/test_resize_osd.py
@@ -13,6 +13,7 @@
    skipif_managed_service,
    skipif_hci_provider_and_client,
    brown_squad,
    black_squad,
)
from ocs_ci.framework.testlib import (
    ignore_leftovers,
@@ -21,6 +22,7 @@
    tier4b,
    tier4c,
    tier4a,
    tier4,
)
from ocs_ci.ocs.constants import VOLUME_MODE_BLOCK, OSD, ROOK_OPERATOR, MON_DAEMON
from ocs_ci.helpers.osd_resize import (
@@ -29,6 +31,7 @@
    check_resize_osd_pre_conditions,
    update_resize_osd_count,
    basic_resize_osd,
    check_storage_size_is_reflected_in_ui,
)
from ocs_ci.ocs.resources.pod import (
    get_osd_pods,
@@ -101,14 +104,6 @@ def setup(self, request, create_pvcs_and_pods):

        self.pod_file_name = "fio_test"
        self.sanity_helpers = Sanity()
        pvc_size = random.randint(3, 7)
        self.pvcs1, self.pods_for_integrity_check = create_pvcs_and_pods(
            pvc_size=pvc_size, num_of_rbd_pvc=6, num_of_cephfs_pvc=6
        )
        pvc_size = random.randint(3, 8)
        self.pvcs2, self.pods_for_run_io = create_pvcs_and_pods(
            pvc_size=pvc_size, num_of_rbd_pvc=5, num_of_cephfs_pvc=5
        )

    @pytest.fixture(autouse=True)
    def teardown(self, request):
@@ -154,6 +149,14 @@ def prepare_data_before_resize_osd(self):
        Prepare the data before resizing the osd
        """
        pvc_size = random.randint(3, 7)
        self.pvcs1, self.pods_for_integrity_check = self.create_pvcs_and_pods(
            pvc_size=pvc_size, num_of_rbd_pvc=6, num_of_cephfs_pvc=6
        )
        pvc_size = random.randint(3, 8)
        self.pvcs2, self.pods_for_run_io = self.create_pvcs_and_pods(
            pvc_size=pvc_size, num_of_rbd_pvc=5, num_of_cephfs_pvc=5
        )
        logger.info("Run IO on the pods for integrity check")
        self.run_io_on_pods(self.pods_for_integrity_check)
        logger.info("Calculate the md5sum of the pods for integrity check")
@@ -296,3 +299,20 @@ def test_resize_osd_for_large_diff(self, size_to_increase):
        self.prepare_data_before_resize_osd()
        resize_osd(self.new_storage_size)
        self.verification_steps_post_resize_osd()

    @tier1
    @tier4
    @black_squad
    @pytest.mark.last
    @polarion_id("OCS-5800")
    def test_ui_storage_size_post_resize_osd(self, setup_ui_session):
        """
        Test the new total storage size is reflected in the UI post resize osd
        """
        if config.RUN["resize_osd_count"] < 1:
            pytest.skip(
                "No resize osd has been performed in the current test run. "
                "The test should run only post resize osd"
            )
        check_storage_size_is_reflected_in_ui()
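
Note (not part of the commit): the guard above assumes some earlier resize test in the same run has bumped config.RUN["resize_osd_count"] (presumably via update_resize_osd_count, imported at the top of this file), and @pytest.mark.last presumably comes from a pytest ordering plugin so this test executes after the resize tests. A minimal, self-contained sketch of the guard pattern with a stand-in RUN mapping:

import pytest

# Stand-in for ocs_ci.framework.config.RUN: per-run mutable bookkeeping.
RUN = {"resize_osd_count": 0}


def record_osd_resize():
    # Hypothetical counterpart of update_resize_osd_count(): remember that an
    # OSD resize happened so later tests can key off it.
    RUN["resize_osd_count"] += 1


def test_post_resize_check_sketch():
    if RUN["resize_osd_count"] < 1:
        pytest.skip("No OSD resize happened in this run; nothing to verify")
    # ...UI verification (check_storage_size_is_reflected_in_ui) would go here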
