Commit

4.14 System test: Test MCG object expiration with disruptions like node drain, restart, nb db recovery (#8367)


Signed-off-by: Mahesh Shetty <[email protected]>
mashetty330 authored Feb 26, 2024
1 parent 67e2ebe commit b9fa3ce
Showing 8 changed files with 613 additions and 106 deletions.
2 changes: 2 additions & 0 deletions ocs_ci/helpers/e2e_helpers.py
@@ -142,6 +142,7 @@ def validate_mcg_bucket_replicaton(
download_dir=bidi_downloaded_objs_dir_1,
amount=object_amount,
pattern=f"FirstBiDi-{uuid4().hex}",
prefix="bidi_1",
wait_for_replication=True,
second_bucket_name=second_bucket.name,
mcg_obj=mcg_obj_session,
@@ -156,6 +157,7 @@ def validate_mcg_bucket_replicaton(
download_dir=bidi_downloaded_objs_dir_2,
amount=object_amount,
pattern=f"SecondBiDi-{uuid4().hex}",
prefix="bidi_2",
wait_for_replication=True,
second_bucket_name=first_bucket.name,
mcg_obj=mcg_obj_session,
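The new prefix arguments thread through to the upload and download paths in bucket_utils (see the hunks below), so each bi-directional replication pass reads and writes under its own key prefix. A minimal sketch of the paths this produces, with a hypothetical bucket name:

# Sketch only: mirrors the path logic in write_random_test_objects_to_bucket
# and the download sync; "first-bucket" is a hypothetical bucket name.
bucket_to_write = "first-bucket"
prefix = "bidi_1"

upload_path = f"s3://{bucket_to_write}"
if prefix:
    upload_path += f"/{prefix}/"

download_src = f"s3://{bucket_to_write}/{prefix}" if prefix else f"s3://{bucket_to_write}"

print(upload_path)   # s3://first-bucket/bidi_1/
print(download_src)  # s3://first-bucket/bidi_1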
76 changes: 69 additions & 7 deletions ocs_ci/ocs/bucket_utils.py
@@ -24,6 +24,7 @@
exec_nb_db_query,
)
from ocs_ci.helpers.helpers import create_resource
from ocs_ci.utility import version

logger = logging.getLogger(__name__)

@@ -1859,6 +1860,7 @@ def write_random_test_objects_to_bucket(
file_dir,
amount=1,
pattern="ObjKey-",
prefix=None,
bs="1M",
mcg_obj=None,
s3_creds=None,
@@ -1884,6 +1886,8 @@
# Verify that the needed directory exists
io_pod.exec_cmd_on_pod(f"mkdir -p {file_dir}")
full_object_path = f"s3://{bucket_to_write}"
if prefix:
full_object_path += f"/{prefix}/"
obj_lst = write_random_objects_in_pod(io_pod, file_dir, amount, pattern, bs)
sync_object_directory(
io_pod,
@@ -1945,7 +1949,10 @@ def write_random_test_objects_to_s3_path(
)


def patch_replication_policy_to_bucket(bucket_name, rule_id, destination_bucket_name):
def patch_replication_policy_to_bucket(
bucket_name, rule_id, destination_bucket_name, prefix=""
):
"""
Patches replication policy to a bucket
@@ -1954,9 +1961,25 @@ def patch_replication_policy_to_bucket(bucket_name, rule_id, destination_bucket_
rule_id (str): The ID of the replication rule
destination_bucket_name (str): The name of the replication destination bucket
"""
replication_policy = {
"rules": [{"rule_id": rule_id, "destination_bucket": destination_bucket_name}]
}

if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_12:
replication_policy = {
"rules": [
{
"rule_id": rule_id,
"destination_bucket": destination_bucket_name,
"filter": {"prefix": prefix},
}
]
}
else:
replication_policy = [
{
"rule_id": rule_id,
"destination_bucket": destination_bucket_name,
"filter": {"prefix": prefix},
}
]
replication_policy_patch_dict = {
"spec": {
"additionalConfig": {"replicationPolicy": json.dumps(replication_policy)}
@@ -2024,6 +2047,7 @@ def random_object_round_trip_verification(
download_dir,
amount=1,
pattern="RandomObject-",
prefix=None,
wait_for_replication=False,
second_bucket_name=None,
mcg_obj=None,
@@ -2070,17 +2094,20 @@
file_dir=upload_dir,
amount=amount,
pattern=pattern,
prefix=prefix,
mcg_obj=mcg_obj,
s3_creds=s3_creds,
)
written_objects = io_pod.exec_cmd_on_pod(f"ls -A1 {upload_dir}").split(" ")
if wait_for_replication:
compare_bucket_object_list(mcg_obj, bucket_name, second_bucket_name, **kwargs)
assert compare_bucket_object_list(
mcg_obj, bucket_name, second_bucket_name, **kwargs
), f"Objects in the buckets {bucket_name} and {second_bucket_name} are not same"
bucket_name = second_bucket_name
# Download the random objects that were uploaded to the bucket
sync_object_directory(
podobj=io_pod,
src=f"s3://{bucket_name}",
src=f"s3://{bucket_name}/{prefix}" if prefix else f"s3://{bucket_name}",
target=download_dir,
s3_obj=mcg_obj,
signed_request_creds=s3_creds,
@@ -2175,6 +2202,25 @@ def create_aws_bs_using_cli(
)


def upload_bulk_buckets(s3_obj, buckets, amount=1, object_key="obj-key", prefix=None):
    """
    Upload a given amount of objects with sequential keys to multiple buckets

    Args:
        s3_obj: OBC/MCG object
        buckets (list): list of bucket objects to upload to
        amount (int, optional): number of objects to upload per bucket
        object_key (str, optional): base object key
        prefix (str, optional): optional prefix for the object keys

    """
    for bucket in buckets:
        for index in range(amount):
            # Avoid a literal "None/" prefix when no prefix is given
            key = f"{prefix}/{object_key}-{index}" if prefix else f"{object_key}-{index}"
            s3_put_object(s3_obj, bucket.name, key, object_key)
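A hedged usage sketch of the helper above; it assumes an ocs-ci test context where bucket_factory and mcg_obj fixtures are available, and the buckets passed in are bucket objects (the helper reads bucket.name):

# Assumed fixtures from an ocs-ci test; not runnable outside a cluster.
buckets = bucket_factory(amount=5, interface="OC")
upload_bulk_buckets(
    mcg_obj,
    buckets,
    amount=50,
    object_key="obj-key",
    prefix="to_expire",  # hypothetical prefix; keys become to_expire/obj-key-<n>
)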


def change_objects_creation_date_in_noobaa_db(
bucket_name, object_keys=[], new_creation_time=0
):
@@ -2204,7 +2250,6 @@ def change_objects_creation_date_in_noobaa_db(
if object_keys:
psql_query += f" AND data->>'key' = ANY(ARRAY{object_keys})"
psql_query += ";"

exec_nb_db_query(psql_query)


@@ -2542,3 +2587,20 @@ def delete_object_tags(
),
out_yaml_format=False,
)


def bulk_s3_put_bucket_lifecycle_config(mcg_obj, buckets, lifecycle_config):
    """
    Apply a lifecycle configuration to multiple buckets

    Args:
        mcg_obj: An MCG object containing the MCG S3 connection credentials
        buckets (list): list of bucket objects to apply the lifecycle rule to
        lifecycle_config (dict): a dict following the expected AWS JSON structure of a config file

    """
    for bucket in buckets:
        mcg_obj.s3_client.put_bucket_lifecycle_configuration(
            Bucket=bucket.name, LifecycleConfiguration=lifecycle_config
        )
    logger.info("Applied lifecycle rule on all the buckets")
4 changes: 4 additions & 0 deletions ocs_ci/ocs/resources/mcg.py
@@ -1014,8 +1014,12 @@ def reset_core_pod(self):
Delete the noobaa-core pod and wait for it to come up again
"""
from ocs_ci.ocs.resources.pod import wait_for_pods_by_label_count

self.core_pod.delete(wait=True)
wait_for_pods_by_label_count(
label=constants.NOOBAA_CORE_POD_LABEL, exptected_count=1
)
self.core_pod = Pod(
**get_pods_having_label(constants.NOOBAA_CORE_POD_LABEL, self.namespace)[0]
)
37 changes: 36 additions & 1 deletion ocs_ci/ocs/resources/pod.py
@@ -945,8 +945,10 @@ def get_noobaa_operator_pod(
def get_noobaa_db_pod():
"""
Get noobaa db pod obj
Returns:
Pod object: Noobaa db pod object
"""
nb_db = get_pods_having_label(
label=constants.NOOBAA_DB_LABEL_47_AND_ABOVE,
@@ -1515,7 +1517,12 @@ def run_io_and_verify_mount_point(pod_obj, bs="10M", count="950"):
return used_percentage


def get_pods_having_label(label, namespace, cluster_config=None, statuses=None):
def get_pods_having_label(
label,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
cluster_config=None,
statuses=None,
):
"""
Fetches pod resources with given label in given namespace
@@ -2079,6 +2086,34 @@ def wait_for_storage_pods(timeout=200):
helpers.wait_for_resource_state(resource=pod_obj, state=state, timeout=timeout)


def wait_for_noobaa_pods_running(timeout=300, sleep=10):
"""
Wait until all the noobaa pods have reached status RUNNING
Args:
timeout (int): Timeout in seconds
sleep (int): Sleep interval in seconds between the pod status checks
"""

def _check_nb_pods_status():
nb_pod_labels = [
constants.NOOBAA_CORE_POD_LABEL,
constants.NOOBAA_ENDPOINT_POD_LABEL,
constants.NOOBAA_OPERATOR_POD_LABEL,
constants.NOOBAA_DB_LABEL_47_AND_ABOVE,
]
nb_pods_running = list()
for pod_label in nb_pod_labels:
pods = get_pods_having_label(pod_label, statuses=[constants.STATUS_RUNNING])
if len(pods) == 0:
return False
nb_pods_running.append(pod_label)
return set(nb_pod_labels) == set(nb_pods_running)

sampler = TimeoutSampler(timeout=timeout, sleep=sleep, func=_check_nb_pods_status)
sampler.wait_for_func_status(True)
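In the disruption scenarios this commit targets (node drain/restart, NooBaa db recovery), the helper gives tests a single wait point; a hedged sketch, where the node helpers are assumed to come from ocs_ci.ocs.node:

from ocs_ci.ocs.node import drain_nodes, schedule_nodes  # assumed ocs-ci helpers

node_name = "compute-0"  # hypothetical node running noobaa pods
drain_nodes([node_name])      # disrupt: evict pods from the node
schedule_nodes([node_name])   # mark the node schedulable again
wait_for_noobaa_pods_running(timeout=600, sleep=15)  # block until noobaa recovers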


def verify_pods_upgraded(old_images, selector, count=1, timeout=720):
"""
Verify that all pods do not have old image.
47 changes: 47 additions & 0 deletions tests/conftest.py
@@ -7296,3 +7296,50 @@ def finalizer():

request.addfinalizer(finalizer)
return _override_nb_default_backingstore_implementation


@pytest.fixture(scope="session")
def scale_noobaa_resources_session():
"""
Session scoped fixture to scale noobaa resources
"""
scale_noobaa_resources()


@pytest.fixture()
def scale_noobaa_resources_fixture():
"""
Fixture to scale noobaa resources
"""
scale_noobaa_resources()


def scale_noobaa_resources():
"""
Scale the noobaa pod resources and scale endpoint count
"""

storagecluster_obj = OCP(
kind="storagecluster",
resource_name="ocs-storagecluster",
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
)

scale_endpoint_pods_param = (
'{"spec": {"multiCloudGateway": {"endpoints": {"minCount": 3,"maxCount": 10}}}}'
)
scale_noobaa_resources_param = (
'{"spec": {"resources": {"noobaa-core": {"limits": {"cpu": "3","memory": "4Gi"},'
'"requests": {"cpu": "3","memory": "4Gi"}},"noobaa-db": {"limits": {"cpu": "3","memory": "4Gi"},'
'"requests": {"cpu": "3","memory": "4Gi"}},"noobaa-endpoint": {"limits": {"cpu": "3","memory": "4Gi"},'
'"requests": {"cpu": "3","memory": "4Gi"}}}}}'
)
storagecluster_obj.patch(params=scale_endpoint_pods_param, format_type="merge")
log.info("Scaled noobaa endpoint counts")
storagecluster_obj.patch(params=scale_noobaa_resources_param, format_type="merge")
log.info("Scaled noobaa pod resources")
# Allow time for the patched resource limits and endpoint counts to take effect
time.sleep(60)
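A test opts into the scaling by requesting one of the fixtures; a minimal sketch (test name and marker are hypothetical):

import pytest

@pytest.mark.usefixtures("scale_noobaa_resources_session")
def test_mcg_object_expiration_under_disruption():
    # Hypothetical test body; noobaa resources are already scaled
    # once per session before this test runs.
    ...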
