4.14 System test: Test log based replication deletion sync feature with noobaa related disruptive ops 💣 (#8534)

Signed-off-by: Mahesh Shetty <[email protected]>
mashetty330 authored Mar 18, 2024
1 parent 6959977 commit e444e7e
Showing 6 changed files with 358 additions and 23 deletions.
36 changes: 36 additions & 0 deletions ocs_ci/ocs/bucket_utils.py
@@ -2604,3 +2604,39 @@ def bulk_s3_put_bucket_lifecycle_config(mcg_obj, buckets, lifecycle_config):
Bucket=bucket.name, LifecycleConfiguration=lifecycle_config
)
logger.info("Applied lifecyle rule on all the buckets")


def upload_test_objects_to_source_and_wait_for_replication(
mcg_obj, source_bucket, target_bucket, mockup_logger, timeout
):
"""
Upload a set of objects to the source bucket, logs the operations and wait for the replication to complete.
"""
logger.info("Uploading test objects and waiting for replication to complete")
mockup_logger.upload_test_objs_and_log(source_bucket.name)

assert compare_bucket_object_list(
mcg_obj,
source_bucket.name,
target_bucket.name,
timeout=timeout,
), f"Standard replication failed to complete in {timeout} seconds"


def delete_objects_from_source_and_wait_for_deletion_sync(
mcg_obj, source_bucket, target_bucket, mockup_logger, timeout
):
"""
Delete all objects from the source bucket,logs the operations and wait for the deletion sync to complete.
"""
logger.info("Deleting source objects and waiting for deletion sync with target")
mockup_logger.delete_all_objects_and_log(source_bucket.name)

assert compare_bucket_object_list(
mcg_obj,
source_bucket.name,
target_bucket.name,
timeout=timeout,
), f"Deletion sync failed to complete in {timeout} seconds"
2 changes: 2 additions & 0 deletions ocs_ci/ocs/constants.py
@@ -288,6 +288,7 @@
DEFAULT_NOOBAA_BACKINGSTORE = "noobaa-default-backing-store"
DEFAULT_NOOBAA_BUCKETCLASS = "noobaa-default-bucket-class"
NOOBAA_RESOURCE_NAME = "noobaa"
NOOBAA_DB_PVC_NAME = "db-noobaa-db-pg-0"
MIN_PV_BACKINGSTORE_SIZE_IN_GB = 17
JENKINS_BUILD = "jax-rs-build"
JENKINS_BUILD_COMPLETE = "Complete"
@@ -1109,6 +1110,7 @@
FUSIONAAS_PLATFORM,
]
BAREMETAL_PLATFORMS = [BAREMETAL_PLATFORM, BAREMETALPSI_PLATFORM]
DEFAULT_AWS_REGION = "us-east-2"

HCI_PROVIDER_CLIENT_PLATFORMS = [
HCI_BAREMETAL,
28 changes: 28 additions & 0 deletions ocs_ci/ocs/resources/mockup_bucket_logger.py
@@ -56,6 +56,10 @@ def __init__(self, awscli_pod, mcg_obj, bucket_factory, platform, region):
f"ls -A1 {constants.AWSCLI_TEST_OBJ_DIR}"
).split(" ")

@property
def standard_test_obj_list(self):
"""
Returns:
list: Names of the standard test objects that the logger uploads
"""
return self._standard_test_obj_list

def upload_test_objs_and_log(self, bucket_name):
"""
Uploads files from files_dir to the MCG bucket and writes matching
@@ -97,6 +101,30 @@ def upload_arbitrary_object_and_log(self, bucket_name):

self._upload_mockup_logs(bucket_name, [obj_name], "PUT")

def delete_objs_and_log(self, bucket_name, objs):
"""
Delete list of objects from the MCG bucket and write
matching mockup logs
Args:
bucket_name(str): Name of the MCG bucket
objs(list): List of the objects to delete
"""
logger.info(f"Deleting the {objs} from the bucket")
obj_list = list_objects_from_bucket(
self.awscli_pod,
f"s3://{bucket_name}",
s3_obj=self.mcg_obj,
)
# Delete and log only if all the requested objects exist in the bucket
if set(objs).issubset(set(obj_list)):
for obj in objs:
s3cmd = craft_s3_command(
f"rm s3://{bucket_name}/{obj}", self.mcg_obj
)
self.awscli_pod.exec_cmd_on_pod(s3cmd)
self._upload_mockup_logs(bucket_name, objs, "DELETE")

def delete_all_objects_and_log(self, bucket_name):
"""
Deletes all objects from the MCG bucket and writes matching mockup logs
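
A short sketch of how the new delete_objs_and_log method and the standard_test_obj_list property might be used to simulate a partial deletion. The fixture objects (awscli_pod, mcg_obj, bucket_factory) and the bucket name are assumptions, not part of this change.

# Illustrative only -- awscli_pod, mcg_obj and bucket_factory stand in for
# the usual ocs-ci fixtures, and "source-bucket" is a placeholder name.
from ocs_ci.ocs import constants
from ocs_ci.ocs.resources.mockup_bucket_logger import MockupBucketLogger

mockup_logger = MockupBucketLogger(
    awscli_pod=awscli_pod,
    mcg_obj=mcg_obj,
    bucket_factory=bucket_factory,
    platform=constants.AWS_PLATFORM,
    region=constants.DEFAULT_AWS_REGION,
)

# Delete every other standard test object and write matching mockup DELETE
# logs, so deletion sync can be verified for a subset of the objects
objs_to_delete = mockup_logger.standard_test_obj_list[::2]
mockup_logger.delete_objs_and_log("source-bucket", objs_to_delete)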
82 changes: 82 additions & 0 deletions tests/conftest.py
@@ -56,6 +56,8 @@
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pvc
from ocs_ci.ocs.resources.bucket_policy import gen_bucket_policy
from ocs_ci.ocs.resources.mcg_replication_policy import AwsLogBasedReplicationPolicy
from ocs_ci.ocs.resources.mockup_bucket_logger import MockupBucketLogger
from ocs_ci.ocs.scale_lib import FioPodScale
from ocs_ci.ocs.utils import (
setup_ceph_toolbox,
@@ -7439,3 +7441,83 @@ def finalizer():

request.addfinalizer(finalizer)
return factory


@pytest.fixture()
def reduce_replication_delay_setup(add_env_vars_to_noobaa_core_class):
"""
A fixture to reduce the replication delay to one minute.
Args:
new_delay_in_miliseconds (function): A function to add env vars to the noobaa-core pod
"""
log.warning("Reducing replication delay")

def factory(new_delay_in_miliseconds=60 * 1000):
new_env_var_tuples = [
(constants.BUCKET_REPLICATOR_DELAY_PARAM, new_delay_in_miliseconds),
(constants.BUCKET_LOG_REPLICATOR_DELAY_PARAM, new_delay_in_miliseconds),
]
add_env_vars_to_noobaa_core_class(new_env_var_tuples)

return factory


@pytest.fixture()
def aws_log_based_replication_setup(
awscli_pod_session, mcg_obj_session, bucket_factory, reduce_replication_delay_setup
):
"""
A fixture to set up standard log-based replication with deletion sync.
Args:
awscli_pod_session(Pod): A pod running the AWS CLI
mcg_obj_session(MCG): An MCG object
bucket_factory: A bucket factory fixture
Returns:
MockupBucketLogger: A MockupBucketLogger object
Bucket: The source bucket
Bucket: The target bucket
"""

reduce_replication_delay_setup()

def factory(bucketclass_dict=None):
log.info("Starting log-based replication setup")
if bucketclass_dict is None:
bucketclass_dict = {
"interface": "OC",
"namespace_policy_dict": {
"type": "Single",
"namespacestore_dict": {
constants.AWS_PLATFORM: [(1, constants.DEFAULT_AWS_REGION)]
},
},
}
target_bucket = bucket_factory(bucketclass=bucketclass_dict)[0]

mockup_logger = MockupBucketLogger(
awscli_pod=awscli_pod_session,
mcg_obj=mcg_obj_session,
bucket_factory=bucket_factory,
platform=constants.AWS_PLATFORM,
region=constants.DEFAULT_AWS_REGION,
)
replication_policy = AwsLogBasedReplicationPolicy(
destination_bucket=target_bucket.name,
sync_deletions=True,
logs_bucket=mockup_logger.logs_bucket_uls_name,
)

source_bucket = bucket_factory(
1, bucketclass=bucketclass_dict, replication_policy=replication_policy
)[0]

log.info("log-based replication setup complete")

return mockup_logger, source_bucket, target_bucket

return factory
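
The factory also accepts a custom bucketclass_dict. Below is a hedged example of overriding the default namespacestore region inside a test body; the region value is purely illustrative.

# Hypothetical override of the default bucketclass passed to the factory.
from ocs_ci.ocs import constants

custom_bucketclass = {
    "interface": "OC",
    "namespace_policy_dict": {
        "type": "Single",
        "namespacestore_dict": {
            constants.AWS_PLATFORM: [(1, "us-west-1")]  # illustrative region
        },
    },
}
mockup_logger, source_bucket, target_bucket = aws_log_based_replication_setup(
    bucketclass_dict=custom_bucketclass
)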
69 changes: 46 additions & 23 deletions tests/cross_functional/conftest.py
@@ -319,31 +319,10 @@ def finalizer():


@pytest.fixture()
def noobaa_db_backup_and_recovery(request, snapshot_factory):
"""
Verify noobaa backup and recovery
1. Take a snapshot of the db-noobaa-db-0 PVC and restore it to a new PVC
2. Scale down the statefulset noobaa-db
3. Get the yaml of the current PVC, db-noobaa-db-0, and
change the parameter persistentVolumeReclaimPolicy to Retain for the restored PVC
4. Delete both PVCs; the PV for the original claim db-noobaa-db-0 will be removed.
The PV for claim db-noobaa-db-0-snapshot-restore will move to 'Released'
5. Edit the restored PV again and remove the claimRef section.
The volume will transition to Available.
6. Edit the yaml db-noobaa-db-0.yaml and change the setting volumeName to the restored PVC.
7. Scale up the statefulset again and the pod should be running
"""
def noobaa_db_backup(request, snapshot_factory):
restore_pvc_objs = []

def factory(snapshot_factory=snapshot_factory):
# Get noobaa pods before execution
noobaa_pods = pod.get_noobaa_pods()

# Get noobaa PVC before execution
noobaa_pvc_obj = pvc.get_pvc_objs(pvc_names=["db-noobaa-db-pg-0"])
noobaa_pv_name = noobaa_pvc_obj[0].get("spec").get("spec").get("volumeName")
def factory(noobaa_pvc_obj):

# Take snapshot db-noobaa-db-0 PVC
logger.info(f"Creating snapshot of the {noobaa_pvc_obj[0].name} PVC")
@@ -381,6 +360,15 @@ def factory(snapshot_factory=snapshot_factory):
f"Succeesfuly created PVC {restore_pvc_obj.name} "
f"from snapshot {snap_obj.name}"
)
return restore_pvc_objs, snap_obj

return factory


@pytest.fixture()
def noobaa_db_recovery_from_backup(request):
def factory(snap_obj, noobaa_pvc_obj, noobaa_pods):
noobaa_pv_name = noobaa_pvc_obj[0].get("spec").get("spec").get("volumeName")

# Scale down the statefulset noobaa-db
modify_statefulset_replica_count(
@@ -474,6 +462,41 @@ def factory(snapshot_factory=snapshot_factory):
"Changed the parameter persistentVolumeReclaimPolicy to Delete again"
)

return factory


@pytest.fixture()
def noobaa_db_backup_and_recovery(
request, snapshot_factory, noobaa_db_backup, noobaa_db_recovery_from_backup
):
"""
Verify noobaa backup and recovery
1. Take a snapshot of the db-noobaa-db-0 PVC and restore it to a new PVC
2. Scale down the statefulset noobaa-db
3. Get the yaml of the current PVC, db-noobaa-db-0, and
change the parameter persistentVolumeReclaimPolicy to Retain for the restored PVC
4. Delete both PVCs; the PV for the original claim db-noobaa-db-0 will be removed.
The PV for claim db-noobaa-db-0-snapshot-restore will move to 'Released'
5. Edit the restored PV again and remove the claimRef section.
The volume will transition to Available.
6. Edit the yaml db-noobaa-db-0.yaml and change the setting volumeName to the restored PVC.
7. Scale up the statefulset again and the pod should be running
"""
restore_pvc_objs = []

def factory(snapshot_factory=snapshot_factory):
nonlocal restore_pvc_objs
# Get noobaa pods before execution
noobaa_pods = pod.get_noobaa_pods()

# Get noobaa PVC before execution
noobaa_pvc_obj = pvc.get_pvc_objs(pvc_names=["db-noobaa-db-pg-0"])

restore_pvc_objs, snap_obj = noobaa_db_backup(noobaa_pvc_obj)
noobaa_db_recovery_from_backup(snap_obj, noobaa_pvc_obj, noobaa_pods)

def finalizer():
# Get the statefulset replica count
sst_obj = OCP(
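
A sketch of how the split noobaa_db_backup and noobaa_db_recovery_from_backup factories could bracket disruptive operations in a test, mirroring how noobaa_db_backup_and_recovery wires them together. The test name and the elided middle section are placeholders, not part of this commit.

# Hypothetical disruptive flow built from the two new factories.
from ocs_ci.ocs import constants
from ocs_ci.ocs.resources import pod, pvc


def test_deletion_sync_across_db_recovery_sketch(
    noobaa_db_backup, noobaa_db_recovery_from_backup
):
    noobaa_pods = pod.get_noobaa_pods()
    noobaa_pvc_obj = pvc.get_pvc_objs(pvc_names=[constants.NOOBAA_DB_PVC_NAME])

    # Snapshot the NooBaa DB PVC and restore it to a new PVC
    restore_pvc_objs, snap_obj = noobaa_db_backup(noobaa_pvc_obj)

    # ... run uploads, deletions and other disruptive operations here ...

    # Roll the NooBaa DB back to the snapshot and verify it comes up again
    noobaa_db_recovery_from_backup(snap_obj, noobaa_pvc_obj, noobaa_pods)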
