Test to automate validation of performance profiles and change the performance profiles post deployment #9490

Merged
merged 8 commits on Jun 6, 2024
99 changes: 99 additions & 0 deletions ocs_ci/ocs/constants.py
@@ -1056,6 +1056,105 @@
"https://api.openshift.com/api/upgrades_info/v1/graph?channel={channel}"
)

# Performance profile related constants
PERFORMANCE_PROFILE_LEAN = "lean"
PERFORMANCE_PROFILE_BALANCED = "balanced"
PERFORMANCE_PROFILE_PERFORMANCE = "performance"

LEAN_PROFILE_REQUEST_CPU_VALUES = {
"mgr": "500m",
"mon": "500m",
"osd": "1500m",
"mds": "1",
"rgw": "1",
}

LEAN_PROFILE_REQUEST_MEMORY_VALUES = {
"mgr": "1Gi",
"mon": "1Gi",
"osd": "3Gi",
"mds": "2Gi",
"rgw": "1Gi",
}

BALANCED_PROFILE_REQUEST_CPU_VALUES = {
"mgr": "1",
"mon": "1",
"osd": "2",
"mds": "2",
"rgw": "2",
}

BALANCED_PROFILE_REQUEST_MEMORY_VALUES = {
"mgr": "1536Mi",
"mon": "2Gi",
"osd": "5Gi",
"mds": "6Gi",
"rgw": "2Gi",
}

PERFORMANCE_PROFILE_REQUEST_CPU_VALUES = {
"mgr": "1500m'",
"mon": "1500",
"osd": "4",
"mds": "3",
"rgw": "2",
}
PERFORMANCE_PROFILE_REQUEST_MEMORY_VALUES = {
"mgr": "2Gi",
"mon": "2Gi",
"osd": "8Gi",
"mds": "8Gi",
"rgw": "4Gi",
}

LEAN_PROFILE_CPU_LIMIT_VALUES = {
"mgr": "1",
"mon": "500m",
"osd": "1500m",
"mds": "1",
"rgw": "1",
}

LEAN_PROFILE_MEMORY_LIMIT_VALUES = {
"mgr": "2Gi",
"mon": "1Gi",
"osd": "3Gi",
"mds": "2Gi",
"rgw": "1Gi",
}

BALANCED_PROFILE_CPU_LIMIT_VALUES = {
"mgr": "2",
"mon": "1",
"osd": "2",
"mds": "2",
"rgw": "2",
}

BALANCED_PROFILE_MEMORY_LIMIT_VALUES = {
"mgr": "3Gi",
"mon": "2Gi",
"osd": "5Gi",
"mds": "6Gi",
"rgw": "2Gi",
}

PERFORMANCE_PROFILE_CPU_LIMIT_VALUES = {
"mgr": "3",
"mon": "1500m",
"osd": "4",
"mds": "3",
"rgw": "2",
}
PERFORMANCE_PROFILE_MEMORY_LIMIT_VALUES = {
"mgr": "4Gi",
"mon": "2Gi",
"osd": "8Gi",
"mds": "8Gi",
"rgw": "4Gi",
}

# Podsecurity admission policies
PSA_PRIVILEGED = "privileged"
PSA_BASELINE = "baseline"
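For reference, the test added below selects among these dictionaries with an if/elif chain; a minimal sketch of an equivalent table-driven lookup (hypothetical helper, not part of this PR) could look like:

# Hypothetical mapping from profile name to its expected
# (request CPU, request memory, CPU limit, memory limit) dictionaries
EXPECTED_PROFILE_VALUES = {
    PERFORMANCE_PROFILE_LEAN: (
        LEAN_PROFILE_REQUEST_CPU_VALUES,
        LEAN_PROFILE_REQUEST_MEMORY_VALUES,
        LEAN_PROFILE_CPU_LIMIT_VALUES,
        LEAN_PROFILE_MEMORY_LIMIT_VALUES,
    ),
    PERFORMANCE_PROFILE_BALANCED: (
        BALANCED_PROFILE_REQUEST_CPU_VALUES,
        BALANCED_PROFILE_REQUEST_MEMORY_VALUES,
        BALANCED_PROFILE_CPU_LIMIT_VALUES,
        BALANCED_PROFILE_MEMORY_LIMIT_VALUES,
    ),
    PERFORMANCE_PROFILE_PERFORMANCE: (
        PERFORMANCE_PROFILE_REQUEST_CPU_VALUES,
        PERFORMANCE_PROFILE_REQUEST_MEMORY_VALUES,
        PERFORMANCE_PROFILE_CPU_LIMIT_VALUES,
        PERFORMANCE_PROFILE_MEMORY_LIMIT_VALUES,
    ),
}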
5 changes: 5 additions & 0 deletions ocs_ci/ocs/resources/storage_cluster.py
@@ -920,6 +920,11 @@ def verify_storage_cluster():
log.info(f"Check if StorageCluster: {storage_cluster_name} is in Succeeded phase")
if config.ENV_DATA.get("platform") == constants.FUSIONAAS_PLATFORM:
timeout = 1000
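# A pending resourceProfile change takes longer to reconcile, so allow a larger timeout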
elif (
storage_cluster.data["spec"]["resourceProfile"]
!= storage_cluster.data["status"]["lastAppliedResourceProfile"]
):
timeout = 1200
else:
timeout = 600
storage_cluster.wait_for_phase(phase="Ready", timeout=timeout)
159 changes: 159 additions & 0 deletions tests/functional/z_cluster/test_performance_profile_validation.py
@@ -0,0 +1,159 @@
import logging
import pytest

from ocs_ci.framework.pytest_customization.marks import (
tier4a,
skipif_ocs_version,
brown_squad,
)
from ocs_ci.framework.testlib import (
ManageTest,
skipif_external_mode,
skipif_managed_service,
)
from ocs_ci.framework import config
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources.storage_cluster import verify_storage_cluster
from ocs_ci.ocs import constants
from ocs_ci.ocs.resources.pod import (
get_pods_having_label,
Pod,
)
from ocs_ci.utility.utils import run_cmd
from ocs_ci.ocs.resources.storage_cluster import StorageCluster


log = logging.getLogger(__name__)


@brown_squad
@tier4a
@skipif_external_mode
@skipif_managed_service
@skipif_ocs_version("<4.15")
@pytest.mark.polarion_id("XXXX")
class TestProfileDefaultValuesCheck(ManageTest):
def test_validate_cluster_resource_profile(self):
"""
Test case to validate that osd, mgr, mon, mds and rgw pod memory and CPU
values match the predefined set of values after a profile update

"""
pv_pod_obj = []
log.info("Obtaining the performance profile values from the cluster")
storage_cluster_name = config.ENV_DATA["storage_cluster_name"]
storage_cluster = StorageCluster(
resource_name=storage_cluster_name,
namespace=config.ENV_DATA["cluster_namespace"],
)
performance_profile = storage_cluster.data["spec"]["resourceProfile"]
if performance_profile == constants.PERFORMANCE_PROFILE_LEAN:
expected_cpu_request_values = constants.LEAN_PROFILE_REQUEST_CPU_VALUES
expected_memory_request_values = (
constants.LEAN_PROFILE_REQUEST_MEMORY_VALUES
)
expected_cpu_limit_values = constants.LEAN_PROFILE_CPU_LIMIT_VALUES
expected_memory_limit_values = constants.LEAN_PROFILE_MEMORY_LIMIT_VALUES
elif performance_profile == constants.PERFORMANCE_PROFILE_BALANCED:
expected_cpu_request_values = constants.BALANCED_PROFILE_REQUEST_CPU_VALUES
expected_memory_request_values = (
constants.BALANCED_PROFILE_REQUEST_MEMORY_VALUES
)
expected_cpu_limit_values = constants.BALANCED_PROFILE_CPU_LIMIT_VALUES
expected_memory_limit_values = (
constants.BALANCED_PROFILE_MEMORY_LIMIT_VALUES
)
elif performance_profile == constants.PERFORMANCE_PROFILE_PERFORMANCE:
expected_cpu_request_values = (
constants.PERFORMANCE_PROFILE_REQUEST_CPU_VALUES
)
expected_memory_request_values = (
constants.PERFORMANCE_PROFILE_REQUEST_MEMORY_VALUES
)
expected_cpu_limit_values = constants.PERFORMANCE_PROFILE_CPU_LIMIT_VALUES
expected_memory_limit_values = (
constants.PERFORMANCE_PROFILE_MEMORY_LIMIT_VALUES
)
else:
raise ValueError(
f"'{performance_profile}' does not match any known performance profile"
)

log.info(f"Cluster is running with the '{performance_profile}' performance profile")

label_selector = list(expected_cpu_limit_values.keys())

for label in label_selector:
for pod in get_pods_having_label(
label=label, namespace=config.ENV_DATA["cluster_namespace"]
):
# Build the Pod object once and reuse it instead of constructing it twice
pod_obj = Pod(**pod)
pv_pod_obj.append(pod_obj)
log.info(pod_obj.name)
# Only the first container's resources are checked for each pod
resource_dict = OCP(
namespace=config.ENV_DATA["cluster_namespace"], kind="pod"
).get(resource_name=pod_obj.name)["spec"]["containers"][0]["resources"]

assert (
resource_dict["limits"]["cpu"] == expected_cpu_limit_values[label]
and resource_dict["limits"]["memory"]
== expected_memory_limit_values[label]
and resource_dict["requests"]["cpu"]
== expected_cpu_request_values[label]
and resource_dict["requests"]["memory"]
== expected_memory_request_values[label]
), f"Resource values arent reflecting actual values for {label} pod "
log.info("All the memory and CPU values are matching in the profile")

@pytest.mark.parametrize(
argnames=["perf_profile"],
argvalues=[
pytest.param(*["performance"], marks=pytest.mark.polarion_id("OCS-XXXX")),
pytest.param(*["lean"], marks=pytest.mark.polarion_id("OCS-XXXX")),
pytest.param(*["balanced"], marks=pytest.mark.polarion_id("OCS-XXXX")),
],
Contributor review comment: Add polarion ID for the tests
)
def test_change_cluster_resource_profile(self, perf_profile):
"""
Test case to change the cluster resource profile, verify that the storage
cluster reaches Ready state with the new profile, and then revert it

"""
namespace = config.ENV_DATA["cluster_namespace"]
self.perf_profile = perf_profile
log.info("Obtaining the performance profile values from the cluster")
storage_cluster_name = config.ENV_DATA["storage_cluster_name"]
storage_cluster = StorageCluster(
resource_name=storage_cluster_name,
namespace=config.ENV_DATA["cluster_namespace"],
)
exist_performance_profile = storage_cluster.data["spec"]["resourceProfile"]
if exist_performance_profile == self.perf_profile:
log.info("Performance profile is same as profile that is already present")
else:
ptch = f'{{"spec": {{"resourceProfile":"{self.perf_profile}"}}}}'
ptch_cmd = (
f"oc patch storagecluster {storage_cluster.data.get('metadata').get('name')} "
f"-n {namespace} --type merge --patch '{ptch}'"
)
run_cmd(ptch_cmd)
log.info("Verify storage cluster is on Ready state")

verify_storage_cluster()

# Verify that the storage cluster is updated with the new profile;
# reload the resource data so the check does not see a stale cached spec
storage_cluster.reload()

assert (
self.perf_profile == storage_cluster.data["spec"]["resourceProfile"]
), f"Performance profile was not updated successfully to {self.perf_profile}"
log.info(
f"Performance profile was successfully updated to {self.perf_profile}"
)
log.info("Reverting profile changes")
ptch = f'{{"spec": {{"resourceProfile":"{exist_performance_profile}"}}}}'

# Reverting the performance profile back to the original
ptch_cmd = (
f"oc patch storagecluster {storage_cluster.data.get('metadata').get('name')}"
f" -n {namespace} --type merge --patch '{ptch}'"
)
run_cmd(ptch_cmd)
verify_storage_cluster()
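As a usage note, the patch here shells out through run_cmd; a minimal alternative sketch (assuming the OCP wrapper's patch() method applies here — it is not exercised by this PR) could look like:

from ocs_ci.framework import config
from ocs_ci.ocs.ocp import OCP

def set_resource_profile(profile):
    """Patch the StorageCluster resourceProfile via the OCP wrapper."""
    sc = OCP(
        kind="storagecluster",
        namespace=config.ENV_DATA["cluster_namespace"],
    )
    # Merge-patch only the resourceProfile field, leaving the rest of the spec intact
    sc.patch(
        resource_name=config.ENV_DATA["storage_cluster_name"],
        params=f'{{"spec": {{"resourceProfile": "{profile}"}}}}',
        format_type="merge",
    )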