Test the new total storage size is reflected in the UI post resize osd #9975

Merged
56 changes: 55 additions & 1 deletion ocs_ci/helpers/osd_resize.py
@@ -23,7 +23,13 @@
resize_osd,
)
from ocs_ci.ocs.cluster import check_ceph_osd_tree, CephCluster
from ocs_ci.utility.utils import ceph_health_check, TimeoutSampler, convert_device_size
from ocs_ci.ocs.ui.page_objects.page_navigator import PageNavigator
from ocs_ci.utility.utils import (
ceph_health_check,
TimeoutSampler,
convert_device_size,
human_to_bytes_ui,
)
from ocs_ci.ocs import constants
from ocs_ci.ocs.ocp import OCP
from ocs_ci.framework import config
@@ -411,3 +417,51 @@ def basic_resize_osd(old_storage_size):
logger.info(f"Increase the osd size to {new_storage_size}")
resize_osd(new_storage_size)
return new_storage_size


def check_storage_size_is_reflected_in_ui():
"""
Check that the current total storage size is reflected in the
UI 'ocs-storagecluster-storagesystem' page.

"""
block_and_file = (
PageNavigator()
.nav_odf_default_page()
.nav_storage_systems_tab()
.nav_storagecluster_storagesystem_details()
.nav_block_and_file()
)
used, available = block_and_file.get_raw_capacity_card_values()
# Get the used, available and total size in bytes
used_size_bytes = human_to_bytes_ui(used)
available_size_bytes = human_to_bytes_ui(available)
total_size_bytes = used_size_bytes + available_size_bytes

# Convert the used, available and total size to GB
bytes_to_gb = 1024**3
used_size_gb = used_size_bytes / bytes_to_gb
available_size_gb = available_size_bytes / bytes_to_gb
total_size_gb = round(total_size_bytes / bytes_to_gb)
logger.info(f"Used size = {used_size_gb}Gi")
logger.info(f"Available size = {available_size_gb}Gi")
logger.info(f"Total size = {total_size_gb}Gi")

ceph_cluster = CephCluster()
ceph_capacity = int(ceph_cluster.get_ceph_capacity())
ceph_total_size = ceph_capacity * len(get_osd_pods())

# There could be a small gap between the total size in the UI and the actual Ceph total size.
# So, instead of checking the accurate size, we check that the total size is within the expected range.
max_gap = 6 if ceph_total_size < 1500 else 12
expected_total_size_range_gb = range(
ceph_total_size - max_gap, ceph_total_size + max_gap
)
logger.info(
f"Check that the total UI size {total_size_gb}Gi is in the "
f"expected total size range {expected_total_size_range_gb}Gi"
)
assert total_size_gb in expected_total_size_range_gb, (
f"The total UI size {total_size_gb}Gi is not in the "
f"expected total size range {expected_total_size_range_gb}Gi"
)
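
For illustration, a minimal standalone sketch of the tolerance check above, using hypothetical raw-capacity card values; parse_ui_size is a simplified stand-in for human_to_bytes_ui, and the per-OSD capacity and OSD count replace the CephCluster.get_ceph_capacity() and get_osd_pods() lookups:

# Minimal sketch of the UI-vs-Ceph total size check (hypothetical values).
UNITS = {"KiB": 1024, "MiB": 1024**2, "GiB": 1024**3, "TiB": 1024**4}


def parse_ui_size(value):
    """Convert a UI capacity string such as '1.2 TiB' to bytes (stand-in for human_to_bytes_ui)."""
    number, unit = value.split()
    return float(number) * UNITS[unit]


# Hypothetical raw-capacity card values and cluster layout
used, available = "310 GiB", "1226 GiB"
per_osd_capacity_gb = 512  # stand-in for ceph_cluster.get_ceph_capacity()
num_osds = 3  # stand-in for len(get_osd_pods())

total_size_gb = round((parse_ui_size(used) + parse_ui_size(available)) / 1024**3)
ceph_total_size = per_osd_capacity_gb * num_osds  # 1536

# Same tolerance rule as above: allow a small gap between the UI and Ceph totals
max_gap = 6 if ceph_total_size < 1500 else 12
expected_range = range(ceph_total_size - max_gap, ceph_total_size + max_gap)
assert total_size_gb in expected_range, f"{total_size_gb}Gi not in {expected_range}"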
36 changes: 28 additions & 8 deletions tests/functional/z_cluster/cluster_expansion/test_resize_osd.py
@@ -13,6 +13,7 @@
skipif_managed_service,
skipif_hci_provider_and_client,
brown_squad,
black_squad,
)
from ocs_ci.framework.testlib import (
ignore_leftovers,
@@ -21,6 +22,7 @@
tier4b,
tier4c,
tier4a,
tier4,
)
from ocs_ci.ocs.constants import VOLUME_MODE_BLOCK, OSD, ROOK_OPERATOR, MON_DAEMON
from ocs_ci.helpers.osd_resize import (
@@ -29,6 +31,7 @@
check_resize_osd_pre_conditions,
update_resize_osd_count,
basic_resize_osd,
check_storage_size_is_reflected_in_ui,
)
from ocs_ci.ocs.resources.pod import (
get_osd_pods,
@@ -101,14 +104,6 @@ def setup(self, request, create_pvcs_and_pods):

self.pod_file_name = "fio_test"
self.sanity_helpers = Sanity()
pvc_size = random.randint(3, 7)
self.pvcs1, self.pods_for_integrity_check = create_pvcs_and_pods(
pvc_size=pvc_size, num_of_rbd_pvc=6, num_of_cephfs_pvc=6
)
pvc_size = random.randint(3, 8)
self.pvcs2, self.pods_for_run_io = create_pvcs_and_pods(
pvc_size=pvc_size, num_of_rbd_pvc=5, num_of_cephfs_pvc=5
)

@pytest.fixture(autouse=True)
def teardown(self, request):
@@ -154,6 +149,14 @@ def prepare_data_before_resize_osd(self):
Prepare the data before resizing the osd

"""
pvc_size = random.randint(3, 7)
self.pvcs1, self.pods_for_integrity_check = self.create_pvcs_and_pods(
pvc_size=pvc_size, num_of_rbd_pvc=6, num_of_cephfs_pvc=6
)
pvc_size = random.randint(3, 8)
self.pvcs2, self.pods_for_run_io = self.create_pvcs_and_pods(
pvc_size=pvc_size, num_of_rbd_pvc=5, num_of_cephfs_pvc=5
)
logger.info("Run IO on the pods for integrity check")
self.run_io_on_pods(self.pods_for_integrity_check)
logger.info("Calculate the md5sum of the pods for integrity check")
@@ -296,3 +299,20 @@ def test_resize_osd_for_large_diff(self, size_to_increase):
self.prepare_data_before_resize_osd()
resize_osd(self.new_storage_size)
self.verification_steps_post_resize_osd()

@tier1
@tier4
@black_squad
@pytest.mark.last
@polarion_id("OCS-5800")
def test_ui_storage_size_post_resize_osd(self, setup_ui_session):
"""
Test the new total storage size is reflected in the UI post resize osd

"""
if config.RUN["resize_osd_count"] < 1:
pytest.skip(
"No resize osd has been performed in the current test run. "
"The test should run only post resize osd"
)
check_storage_size_is_reflected_in_ui()
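
For context, a minimal sketch of the guard pattern this last test relies on, assuming the resize helpers increment config.RUN["resize_osd_count"] via update_resize_osd_count (imported above, but its body is not part of this diff); the RUN dict below is a hypothetical stand-in for ocs_ci.framework.config.RUN:

import pytest

# Hypothetical stand-in for ocs_ci.framework.config.RUN
RUN = {"resize_osd_count": 0}


def record_osd_resize():
    # Assumed behaviour: each completed OSD resize bumps the counter
    RUN["resize_osd_count"] += 1


def skip_unless_resize_happened():
    # The UI capacity check is only meaningful after at least one OSD resize
    # in the current run, so skip rather than fail when none has happened.
    if RUN["resize_osd_count"] < 1:
        pytest.skip("No resize osd has been performed in the current test run")

Combined with @pytest.mark.last, this keeps the UI verification running after any resize tests in the same session.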