From 5848d3519d558540078be9257a8a9b0c89a8b062 Mon Sep 17 00:00:00 2001
From: Mahesh Shetty
Date: Thu, 26 Oct 2023 15:02:25 +0530
Subject: [PATCH] rebase the code to use newly added system test entry
 criteria fixtures

Signed-off-by: Mahesh Shetty
---
 ocs_ci/ocs/bucket_utils.py                     |  46 +--
 tests/conftest.py                              |  25 +-
 tests/e2e/conftest.py                          | 281 +-----------------
 .../e2e/system_test/test_object_expiration.py  |  88 ++++--
 4 files changed, 86 insertions(+), 354 deletions(-)

diff --git a/ocs_ci/ocs/bucket_utils.py b/ocs_ci/ocs/bucket_utils.py
index ae4d5dbe1a31..1b4402b01a13 100644
--- a/ocs_ci/ocs/bucket_utils.py
+++ b/ocs_ci/ocs/bucket_utils.py
@@ -6,7 +6,6 @@
 import os
 import shlex
 from uuid import uuid4
-from datetime import date
 
 import boto3
 from botocore.handlers import disable_signing
@@ -1929,37 +1928,6 @@ def upload_bulk_buckets(s3_obj, buckets, amount=1, object_key="obj-key-0", prefi
         s3_put_object(s3_obj, bucket.name, f"{prefix}/{object_key}-{i}", object_key)
 
 
-def change_expiration_query_interval(new_interval):
-    """
-    Change how often noobaa should check for object expiration
-    By default it will be 8 hours
-
-    Args:
-        new_interval (int): New interval in minutes
-
-    """
-
-    from ocs_ci.ocs.resources.pod import (
-        get_noobaa_core_pod,
-        wait_for_pods_to_be_running,
-    )
-
-    nb_core_pod = get_noobaa_core_pod()
-    new_interval = new_interval * 60 * 1000
-    params = (
-        '[{"op": "add", "path": "/spec/template/spec/containers/0/env/-", '
-        f'"value": {{ "name": "CONFIG_JS_LIFECYCLE_INTERVAL", "value": "{new_interval}" }}}}]'
-    )
-    OCP(kind="statefulset", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE).patch(
-        resource_name=constants.NOOBAA_CORE_STATEFULSET,
-        params=params,
-        format_type="json",
-    )
-    logger.info(f"Updated the expiration query interval to {new_interval} ms")
-    nb_core_pod.delete()
-    wait_for_pods_to_be_running(pod_names=[nb_core_pod.name], timeout=300)
-
-
 def expire_objects_in_bucket(bucket_name, new_expire_interval=None):
     """
@@ -1975,9 +1943,6 @@ def expire_objects_in_bucket(bucket_name, new_expire_interval=None):
         get_noobaa_db_pod,
     )
 
-    if new_expire_interval is not None and isinstance(new_expire_interval, int):
-        change_expiration_query_interval(new_expire_interval)
-
     creation_time = f"{date.today().year-1}-06-25T14:18:28.712Z"
     nb_db_pod = get_noobaa_db_pod()
     query = (
@@ -2035,3 +2000,14 @@ def sample_if_objects_expired(mcg_obj, bucket_name, prefix="", timeout=600, slee
     assert sampler.wait_for_func_status(result=True), f"{message} are not expired"
 
     logger.info(f"{message} are expired")
+
+
+def s3_put_bucket_lifecycle_config(mcg_obj, buckets, rule):
+    """
+    Apply the given lifecycle configuration to each of the provided buckets
+
+    """
+    for bucket in buckets:
+        mcg_obj.s3_client.put_bucket_lifecycle_configuration(
+            Bucket=bucket.name, LifecycleConfiguration=rule
+        )
+    logger.info("Applied lifecycle rule on all the buckets")
diff --git a/tests/conftest.py b/tests/conftest.py
index f074419bcd03..9607be6f892c 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -36,7 +36,6 @@ from ocs_ci.ocs.bucket_utils import (
     craft_s3_command,
     put_bucket_policy,
-    change_expiration_query_interval,
 )
 from ocs_ci.ocs.dr.dr_workload import BusyBox, BusyBox_AppSet
@@ -96,7 +95,7 @@
     verify_data_integrity_for_multi_pvc_objs,
     get_noobaa_pods,
     get_pod_count,
-    get_noobaa_core_pod, wait_for_pods_by_label_count,
+    wait_for_pods_by_label_count,
 )
 from ocs_ci.ocs.resources.pvc import PVC, create_restore_pvc
 from ocs_ci.ocs.version import get_ocs_version, get_ocp_version_dict, report_ocs_version
@@ -6913,25 
+6912,3 @@ def factory(interval): ) return factory - - -def change_noobaa_lifecycle_interval(request): - nb_core_pod = get_noobaa_core_pod() - env_var = "CONFIG_JS_LIFECYCLE_INTERVAL" - - def factory(interval): - change_expiration_query_interval(new_interval=interval) - - def finalizer(): - params = f'[{{"op": "remove", "path": "/spec/template/spec/containers/0/env/name:{env_var}"}}]' - OCP(kind="statefulset", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE).patch( - resource_name=constants.NOOBAA_CORE_STATEFULSET, - params=params, - format_type="json", - ) - nb_core_pod.delete() - wait_for_pods_to_be_running(pod_names=[nb_core_pod.name], timeout=300) - log.info("Switched back to default lifecycle interval") - - request.addfinalizer(finalizer) - return factory diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py index 4c1f52bd6936..5ca874c893d5 100644 --- a/tests/e2e/conftest.py +++ b/tests/e2e/conftest.py @@ -1,21 +1,11 @@ import os -import re import logging - -import botocore.exceptions as botoexeptions import boto3 import pytest - from concurrent.futures import ThreadPoolExecutor from threading import Event - -import boto3 -import pytest -import random -import copy - from ocs_ci.utility import version from ocs_ci.utility.retry import retry from ocs_ci.framework import config @@ -39,6 +29,7 @@ write_random_test_objects_to_bucket, retrieve_verification_mode, s3_list_objects_v2, + s3_put_bucket_lifecycle_config, ) from ocs_ci.ocs.benchmark_operator_fio import BenchmarkOperatorFIO @@ -49,9 +40,6 @@ from ocs_ci.ocs.resources.pod import ( Pod, get_pods_having_label, - get_rgw_pods, - get_pod_logs, - ) from ocs_ci.ocs.resources.deployment import Deployment from ocs_ci.ocs.exceptions import CommandFailed @@ -63,9 +51,6 @@ ) from ocs_ci.utility.utils import clone_notify -from ocs_ci.ocs.resources.rgw import RGW - -from ocs_ci.utility.utils import clone_notify, exec_cmd, run_cmd logger = logging.getLogger(__name__) @@ -930,10 +915,8 @@ def factory(number_of_buckets, bucket_types, cloud_providers): all_buckets = create_muliple_types_provider_obcs( number_of_buckets, type, cloud_providers, bucket_factory ) - for bucket in all_buckets: - mcg_obj.s3_client.put_bucket_lifecycle_configuration( - Bucket=bucket.name, LifecycleConfiguration=expiration_rule - ) + + s3_put_bucket_lifecycle_config(mcg_obj, all_buckets, expiration_rule) logger.info( f"Buckets created under expiration setup: {[bucket.name for bucket in all_buckets]}" @@ -1029,8 +1012,6 @@ def factory(): "kafka_topic": kafka_topic, } - validate_kafka_rgw_notifications(kafka_rgw_dict) - return kafka_rgw_dict def finalizer(): @@ -1165,90 +1146,6 @@ def factory( return event, futures_obj -def validate_kafka_rgw_notifications(kafka_rgw_dict): - - s3_client = kafka_rgw_dict["s3client"] - bucketname = kafka_rgw_dict["kafka_rgw_bucket"] - notify_cmd = kafka_rgw_dict["notify_cmd"] - data = kafka_rgw_dict["data"] - kafkadrop_host = kafka_rgw_dict["kafkadrop_host"] - kafka_topic = kafka_rgw_dict["kafka_topic"] - - # Put objects to bucket - - # @retry(botoexeptions.ClientError, tries=5, delay=5) - try: - - def put_object_to_bucket(bucket_name, key, body): - return s3_client.put_object(Bucket=bucket_name, Key=key, Body=body) - - except botoexeptions.ClientError: - logger.warning("s3 put object timedout but ignoring as of now") - - assert put_object_to_bucket(bucketname, "key-1", data), "Failed: Put object: key-1" - exec_cmd(notify_cmd) - - # Validate rgw logs notification are sent - # No errors are seen - pattern = "ERROR: failed to create push 
endpoint" - rgw_pod_obj = get_rgw_pods() - rgw_log = get_pod_logs(pod_name=rgw_pod_obj[0].name, container="rgw") - assert re.search(pattern=pattern, string=rgw_log) is None, ( - f"Error: {pattern} msg found in the rgw logs." - f"Validate {pattern} found on rgw logs and also " - f"rgw bucket notification is working correctly" - ) - assert put_object_to_bucket(bucketname, "key-2", data), "Failed: Put object: key-2" - exec_cmd(notify_cmd) - - # Validate message are received Kafka side using curl command - # A temporary way to check from Kafka side, need to check from UI - @retry(Exception, tries=5, delay=5) - def validate_kafa_for_message(): - curl_command = ( - f"curl -X GET {kafkadrop_host}/topic/{kafka_topic.name} " - "-H 'content-type: application/vnd.kafka.json.v2+json'" - ) - json_output = run_cmd(cmd=curl_command) - # logger.info("Json output:" f"{json_output}") - new_string = json_output.split() - messages = new_string[new_string.index("messages") + 1] - logger.info("Messages:" + str(messages)) - if messages.find("1") == -1: - raise Exception( - "Error: Messages are not recieved from Kafka side." - "RGW bucket notification is not working as expected." - ) - - validate_kafa_for_message() - - -@pytest.fixture() -def setup_mcg_bg_features(setup_mcg_system): - """ - This fixture helps to setup various noobaa feature buckets - * MCG bucket replication - * Noobaa caching - * NSFS bucket - * RGW kafka notification - perform basic s3 ops on the buckets - - Returns: - Dict: Dictionary representing mapping between feature and related - buckets - """ - - def factory(bucket_amount=1, object_amount=1): - mcg_sys_dict = setup_mcg_system( - bucket_amount=bucket_amount, object_amount=object_amount - ) - logger.info("NONE") - # kafka_rgw_dict = setup_kafka_rgw() - - return mcg_sys_dict, None - - return factory - @pytest.fixture() def setup_mcg_bg_features( @@ -1423,175 +1320,3 @@ def factory( return feature_setup_map return factory - - -def validate_mcg_bg_feature(verify_mcg_system_recovery): - def factory(mcg_sys_dict, kafka_rgw_dict): - verify_mcg_system_recovery(mcg_sys_dict) - # validate_kafka_rgw_notifications(kafka_rgw_dict) - - return factory - - -@pytest.fixture() -def multi_obc_setup_factory(request, bucket_factory, mcg_obj): - """ - Fixture for multi obc factory - - """ - return multi_obc_factory(bucket_factory, mcg_obj) - - -def multi_obc_factory(bucket_factory, mcg_obj): - """ - This function helps to create different types of - buckets backed by different providers - - """ - - def create_obcs(num_obcs=50, type_of_bucket=None, expiration_rule=None): - """ - This helps to create buckets in bulk, apply expiration rule if any - - Args: - num_obcs (int): number of OBCs - type_of_bucket (list): List representing type fo the buckets - can have values ['data', 'cache', 'namespace'] - expiration_rule (dict): Dictionary representing the object - expiration rule - Returns: - List: List of bucket objects - - """ - - def get_all_combinations_map(providers, bucket_types): - """ - Args: - providers (dict): dictionary representing cloud - providers and the respective config - bucket_types (dict): dictionary representing different - types of bucket and the respective config - - Returns: - List: containing all the possible combination of buckets - - """ - all_combinations = dict() - - for provider, provider_config in providers.items(): - for bucket_type, type_config in bucket_types.items(): - if provider == "pv" and bucket_type != "data": - provider = random.choice(["aws", "azure"]) - provider_config = 
providers[provider] - bucketclass = copy.deepcopy(type_config) - - if "backingstore_dict" in bucketclass.keys(): - bucketclass["backingstore_dict"][provider] = [provider_config] - elif "namespace_policy_dict" in bucketclass.keys(): - bucketclass["namespace_policy_dict"]["namespacestore_dict"][ - provider - ] = [provider_config] - all_combinations.update({f"{bucket_type}-{provider}": bucketclass}) - return all_combinations - - cloud_providers = { - "aws": (1, "eu-central-1"), - "azure": (1, None), - "pv": ( - 1, - constants.MIN_PV_BACKINGSTORE_SIZE_IN_GB, - "ocs-storagecluster-ceph-rbd", - ), - } - - bucket_types = { - "data": { - "interface": "OC", - "backingstore_dict": {}, - }, - "namespace": { - "interface": "OC", - "namespace_policy_dict": { - "type": "Single", - "namespacestore_dict": {}, - }, - }, - "cache": { - "interface": "OC", - "namespace_policy_dict": { - "type": "Cache", - "ttl": 300000, - "namespacestore_dict": {}, - }, - "placement_policy": { - "tiers": [ - {"backingStores": [constants.DEFAULT_NOOBAA_BACKINGSTORE]} - ] - }, - }, - } - to_remove = list() - if isinstance(type_of_bucket, list): - if set(type_of_bucket).issubset(set(list(bucket_types.keys()))): - for type in bucket_types.keys(): - if type not in type_of_bucket: - to_remove.append(type) - else: - logger.error( - "Invalid bucket types, only possible types are: data, cache, namespace" - ) - elif type_of_bucket is not None: - logger.error( - "Invalid argument type for 'type_of_bucket': It should be list type" - ) - - for i in range(len(to_remove)): - del bucket_types[to_remove[i]] - - all_combination_of_obcs = get_all_combinations_map( - cloud_providers, bucket_types - ) - buckets = list() - buckets_created = dict() - num_of_buckets_each = num_obcs // len(all_combination_of_obcs.keys()) - buckets_left = num_obcs % len(all_combination_of_obcs.keys()) - if num_of_buckets_each != 0: - for combo, combo_config in all_combination_of_obcs.items(): - buckets.extend( - bucket_factory( - interface="OC", - amount=num_of_buckets_each, - bucketclass=combo_config, - ) - ) - buckets_created.update({combo: num_of_buckets_each}) - - for i in range(0, buckets_left): - buckets.extend( - bucket_factory( - interface="OC", - amount=1, - bucketclass=all_combination_of_obcs[ - list(all_combination_of_obcs.keys())[i] - ], - ) - ) - buckets_created.update( - { - list(all_combination_of_obcs.keys())[i]: ( - buckets_created[list(all_combination_of_obcs.keys())[i]] - if len(buckets) >= len(all_combination_of_obcs.keys()) - else 0 - ) - + 1 - } - ) - - for bucket in buckets: - mcg_obj.s3_client.put_bucket_lifecycle_configuration( - Bucket=bucket.name, LifecycleConfiguration=expiration_rule - ) - logger.info("These are the buckets created:" f"{buckets_created}") - return buckets - - return create_obcs diff --git a/tests/e2e/system_test/test_object_expiration.py b/tests/e2e/system_test/test_object_expiration.py index 3820357ccf81..42687c1873fd 100644 --- a/tests/e2e/system_test/test_object_expiration.py +++ b/tests/e2e/system_test/test_object_expiration.py @@ -5,11 +5,7 @@ import pytest -from ocs_ci.framework.pytest_customization.marks import ( - bugzilla, - system_test, - magenta_squad, -) +from ocs_ci.helpers.e2e_helpers import create_muliple_types_provider_obcs from ocs_ci.helpers.sanity_helpers import Sanity from ocs_ci.ocs import constants @@ -21,6 +17,7 @@ upload_bulk_buckets, expire_objects_in_bucket, s3_list_objects_v2, + s3_put_bucket_lifecycle_config, ) from ocs_ci.ocs.resources.pod import ( get_noobaa_core_pod, @@ -125,19 +122,40 @@ def 
test_object_expiration(self, mcg_obj, bucket_factory):
                 False
             ), f"Test failed, object {object_key} didn't get deleted after expiration + buffer time"
 
+    def create_obcs_apply_expire_rule(
+        self,
+        number_of_buckets,
+        cloud_providers,
+        bucket_types,
+        expiration_rule,
+        mcg_obj,
+        bucket_factory,
+    ):
+        """
+        Create OBCs of the requested types and providers, then apply the
+        given expiration rule to every bucket that was created
+
+        """
+        all_buckets = create_muliple_types_provider_obcs(
+            number_of_buckets, bucket_types, cloud_providers, bucket_factory
+        )
+
+        s3_put_bucket_lifecycle_config(mcg_obj, all_buckets, expiration_rule)
+
+        return all_buckets
+
     @system_test
     def test_object_expiration_with_disruptions(
         self,
         mcg_obj,
-        multi_obc_setup_factory,
+        setup_mcg_bg_features,
+        validate_mcg_bg_features,
         awscli_pod_session,
         nodes,
         snapshot_factory,
-        setup_mcg_bg_features,
-        validate_mcg_bg_feature,
+        bucket_factory,
         noobaa_db_backup_and_recovery,
         noobaa_db_backup_and_recovery_locally,
-        change_noobaa_lifecycle_interval,
         node_drain_teardown,
     ):
 
@@ -146,7 +164,13 @@ def test_object_expiration_with_disruptions(
         like node drain, node restart, nb db recovery etc
 
         """
-        change_noobaa_lifecycle_interval(interval=2)
+        feature_setup_map = setup_mcg_bg_features(
+            num_of_buckets=5,
+            object_amount=5,
+            is_disruptive=True,
+            skip_any_features=["nsfs", "rgw kafka", "caching"],
+        )
+
         expiration_days = 1
         expire_rule = {
             "Rules": [
@@ -162,6 +186,23 @@ def test_object_expiration_with_disruptions(
             ]
         }
 
+        cloud_providers = {
+            "aws": (1, "eu-central-1"),
+            "azure": (1, None),
+            "pv": (
+                1,
+                constants.MIN_PV_BACKINGSTORE_SIZE_IN_GB,
+                "ocs-storagecluster-ceph-rbd",
+            ),
+        }
+
+        bucket_types = {
+            "data": {
+                "interface": "OC",
+                "backingstore_dict": {},
+            }
+        }
+
         expire_rule_prefix = deepcopy(expire_rule)
         number_of_buckets = 50
 
@@ -172,10 +213,14 @@ def test_object_expiration_with_disruptions(
         logger.info(
             f"Creating first set of {number_of_buckets} buckets with no-prefix expiry rule"
         )
-        buckets_without_prefix = multi_obc_setup_factory(
-            num_obcs=number_of_buckets,
+
+        buckets_without_prefix = self.create_obcs_apply_expire_rule(
+            number_of_buckets=number_of_buckets,
+            cloud_providers=cloud_providers,
+            bucket_types=bucket_types,
             expiration_rule=expire_rule,
-            type_of_bucket=["data"],
+            mcg_obj=mcg_obj,
+            bucket_factory=bucket_factory,
         )
 
         # Create another set of bulk buckets with expiry rule and prefix set
@@ -183,10 +228,13 @@ def test_object_expiration_with_disruptions(
             f"Create second set of {number_of_buckets} buckets with prefix 'others' expiry rule"
         )
         expire_rule_prefix["Rules"][0]["Filter"]["Prefix"] = "others"
-        buckets_with_prefix = multi_obc_setup_factory(
-            num_obcs=number_of_buckets,
+        buckets_with_prefix = self.create_obcs_apply_expire_rule(
+            number_of_buckets=number_of_buckets,
+            cloud_providers=cloud_providers,
+            bucket_types=bucket_types,
             expiration_rule=expire_rule_prefix,
-            type_of_bucket=["data"],
+            mcg_obj=mcg_obj,
+            bucket_factory=bucket_factory,
        )
 
         from botocore.exceptions import ClientError
@@ -358,4 +406,10 @@ def check_if_objects_expired(mcg_obj, bucket_name, prefix=""):
         sample_if_objects_expired()
 
         # validate mcg entry criteria post test
-        validate_mcg_bg_feature(mcg_sys_dict, kafka_rgw_dict)
+        validate_mcg_bg_features(
+            feature_setup_map,
+            run_in_bg=False,
+            skip_any_features=["nsfs", "rgw kafka", "caching"],
+            object_amount=5,
+        )
+        logger.info("No issues seen with the MCG bg feature validation")
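
For context on the lifecycle wiring in this patch: the test builds an expire_rule dictionary and hands it, together with the created buckets, to the new s3_put_bucket_lifecycle_config() helper, which calls boto3's put_bucket_lifecycle_configuration once per bucket. The sketch below shows the shape such a rule takes when applied directly with boto3; the endpoint, credentials, bucket name, rule ID and day count are illustrative placeholders, not values taken from this patch.

# Illustrative sketch only: applying a one-day expiration lifecycle rule with boto3,
# the same API that s3_put_bucket_lifecycle_config() wraps. All endpoint, credential
# and bucket values below are placeholders.
import boto3

expire_rule = {
    "Rules": [
        {
            "ID": "expire-after-one-day",   # example rule id
            "Status": "Enabled",
            "Filter": {"Prefix": ""},       # empty prefix -> rule matches every object
            "Expiration": {"Days": 1},
        }
    ]
}

s3 = boto3.client(
    "s3",
    endpoint_url="https://s3.example.com",  # placeholder S3/NooBaa endpoint
    aws_access_key_id="EXAMPLE_ACCESS_KEY",
    aws_secret_access_key="EXAMPLE_SECRET_KEY",
)
s3.put_bucket_lifecycle_configuration(
    Bucket="example-bucket", LifecycleConfiguration=expire_rule
)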
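The disruption test verifies the outcome through sample_if_objects_expired(), whose tail is visible in the bucket_utils.py hunk: it repeatedly lists the bucket until the lifecycle policy has actually removed the objects. As a rough sketch of that polling pattern, assuming the ocs_ci TimeoutSampler utility and the s3_list_objects_v2 wrapper imported by this patch, it could look like the following; the helper name and the timeout/sleep values are illustrative, not the repository's exact implementation.

# Rough sketch of the expiry polling pattern, not the repository's exact helper.
# Assumes ocs_ci.utility.utils.TimeoutSampler and the s3_list_objects_v2 wrapper
# already imported by this patch; timeout and sleep values are illustrative.
from ocs_ci.ocs.bucket_utils import s3_list_objects_v2
from ocs_ci.utility.utils import TimeoutSampler


def wait_until_objects_expired(mcg_obj, bucket_name, prefix="", timeout=600, sleep=30):
    """Poll the bucket until listing under the prefix returns no objects."""

    def _bucket_is_empty():
        # KeyCount is part of the standard ListObjectsV2 response
        listed = s3_list_objects_v2(mcg_obj, bucket_name, prefix=prefix)
        return listed.get("KeyCount", 0) == 0

    sampler = TimeoutSampler(timeout=timeout, sleep=sleep, func=_bucket_is_empty)
    assert sampler.wait_for_func_status(result=True), (
        f"Objects in bucket {bucket_name} did not expire within {timeout} seconds"
    )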