
Commit

Objects should have unique identifiers and data every time validation is called

Signed-off-by: Mahesh Shetty <[email protected]>
mashetty330 committed Oct 16, 2023
1 parent 2a758e8 commit c645cd1
Showing 7 changed files with 133 additions and 163 deletions.
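The core of the change is swapping fixed object-name prefixes (such as "FirstBiDi" or "Cache-") for uuid4-based ones, so each validation pass writes objects with names and payloads it has never used before. A minimal sketch of that idea, using a hypothetical helper name that is not part of this diff:

```python
from uuid import uuid4


def unique_pattern(base):
    """Return a collision-free object-name prefix for one validation pass."""
    # Every call produces a fresh hex suffix, so repeated validation runs
    # never overwrite or accidentally re-read objects from an earlier pass.
    return f"{base}-{uuid4().hex}"


# Two passes get distinct prefixes, e.g. "FirstBiDi-3f2a..." and "FirstBiDi-9c41..."
print(unique_pattern("FirstBiDi"))
print(unique_pattern("FirstBiDi"))
```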
168 changes: 93 additions & 75 deletions ocs_ci/helpers/e2e_helpers.py
@@ -5,7 +5,7 @@
import re
import time

import botocore.exceptions as botoexceptions
from uuid import uuid4
from ocs_ci.utility.retry import retry
from ocs_ci.ocs.bucket_utils import (
random_object_round_trip_verification,
@@ -27,15 +27,35 @@
def create_muliple_types_provider_obcs(
num_of_buckets, bucket_types, cloud_providers, bucket_factory
):
"""
This function creates valid OBCs of different cloud providers
and bucket types
Args:
num_of_buckets (int): Number of buckets
bucket_types (dict): Dict representing mapping between
bucket type and relevant configuration
cloud_providers (dict): Dict representing mapping between
cloud providers and relevant configuration
bucket_factory (fixture): bucket_factory fixture method
Returns:
List: list of created buckets
"""

def get_all_combinations_map(providers, bucket_types):
"""
Create valid combination of cloud-providers and bucket-types
Args:
providers (dict): dictionary representing cloud
providers and the respective config
bucket_types (dict): dictionary representing different
types of bucket and the respective config
Returns:
List: containing all the possible combination of buckets
"""
all_combinations = dict()

@@ -69,13 +89,13 @@ def get_all_combinations_map(providers, bucket_types):
)
)

for i in range(0, buckets_left):
for index in range(0, buckets_left):
buckets.extend(
bucket_factory(
interface="OC",
amount=1,
bucketclass=all_combination_of_obcs[
list(all_combination_of_obcs.keys())[i]
list(all_combination_of_obcs.keys())[index]
],
)
)
@@ -121,11 +141,12 @@ def validate_mcg_bucket_replicaton(
upload_dir=bidi_uploaded_objs_dir_1,
download_dir=bidi_downloaded_objs_dir_1,
amount=object_amount,
pattern="FirstBiDi",
pattern=f"FirstBiDi-{uuid4().hex}",
wait_for_replication=True,
second_bucket_name=second_bucket.name,
mcg_obj=mcg_obj_session,
cleanup=True,
timeout=1200,
)

random_object_round_trip_verification(
Expand All @@ -134,11 +155,12 @@ def validate_mcg_bucket_replicaton(
upload_dir=bidi_uploaded_objs_dir_2,
download_dir=bidi_downloaded_objs_dir_2,
amount=object_amount,
pattern="SecondBiDi",
pattern=f"SecondBiDi-{uuid4().hex}",
wait_for_replication=True,
second_bucket_name=first_bucket.name,
mcg_obj=mcg_obj_session,
cleanup=True,
timeout=1200,
)
if event.is_set():
run_in_bg = False
@@ -176,73 +198,70 @@ def validate_mcg_caching(
"""
while True:
verified_buckets = list()
for bucket in cache_buckets:
try:
cache_uploaded_objs_dir = uploaded_objects_dir + "/cache"
cache_uploaded_objs_dir_2 = uploaded_objects_dir + "/cache_2"
cache_downloaded_objs_dir = downloaded_obejcts_dir + "/cache"
underlying_bucket_name = bucket.bucketclass.namespacestores[0].uls_name

# Upload a random object to the bucket
logger.info(f"Uploading to the cache bucket: {bucket.name}")
objs_written_to_cache_bucket = write_random_test_objects_to_bucket(
awscli_pod_session,
bucket.name,
cache_uploaded_objs_dir,
pattern="Cache-",
mcg_obj=mcg_obj_session,
)
wait_for_cache(
mcg_obj_session,
bucket.name,
objs_written_to_cache_bucket,
timeout=300,
)
cache_uploaded_objs_dir = uploaded_objects_dir + "/cache"
cache_uploaded_objs_dir_2 = uploaded_objects_dir + "/cache_2"
cache_downloaded_objs_dir = downloaded_obejcts_dir + "/cache"
underlying_bucket_name = bucket.bucketclass.namespacestores[0].uls_name

# Upload a random object to the bucket
logger.info(f"Uploading to the cache bucket: {bucket.name}")
obj_name = f"Cache-{uuid4().hex}"
objs_written_to_cache_bucket = write_random_test_objects_to_bucket(
awscli_pod_session,
bucket.name,
cache_uploaded_objs_dir,
pattern=obj_name,
mcg_obj=mcg_obj_session,
)
wait_for_cache(
mcg_obj_session,
bucket.name,
objs_written_to_cache_bucket,
timeout=300,
)

# Write a random, larger object directly to the underlying storage of the bucket
logger.info(
f"Uploading to the underlying bucket {underlying_bucket_name} directly"
)
write_random_test_objects_to_bucket(
awscli_pod_session,
underlying_bucket_name,
cache_uploaded_objs_dir_2,
pattern="Cache-",
s3_creds=cld_mgr.aws_client.nss_creds,
bs="2M",
)
# Write a random, larger object directly to the underlying storage of the bucket
logger.info(
f"Uploading to the underlying bucket {underlying_bucket_name} directly"
)
write_random_test_objects_to_bucket(
awscli_pod_session,
underlying_bucket_name,
cache_uploaded_objs_dir_2,
pattern=obj_name,
s3_creds=cld_mgr.aws_client.nss_creds,
bs="2M",
)

# Download the object from the cache bucket
sync_object_directory(
awscli_pod_session,
f"s3://{bucket.name}",
cache_downloaded_objs_dir,
mcg_obj_session,
)
# Download the object from the cache bucket
awscli_pod_session.exec_cmd_on_pod(f"mkdir -p {cache_downloaded_objs_dir}")
sync_object_directory(
awscli_pod_session,
f"s3://{bucket.name}",
cache_downloaded_objs_dir,
mcg_obj_session,
)

assert verify_s3_object_integrity(
original_object_path=f"{cache_uploaded_objs_dir}/Cache-0",
result_object_path=f"{cache_downloaded_objs_dir}/Cache-0",
assert verify_s3_object_integrity(
original_object_path=f"{cache_uploaded_objs_dir}/{obj_name}0",
result_object_path=f"{cache_downloaded_objs_dir}/{obj_name}0",
awscli_pod=awscli_pod_session,
), "The uploaded and downloaded cached objects have different checksums"

assert (
verify_s3_object_integrity(
original_object_path=f"{cache_uploaded_objs_dir_2}/{obj_name}0",
result_object_path=f"{cache_downloaded_objs_dir}/{obj_name}0",
awscli_pod=awscli_pod_session,
), "The uploaded and downloaded cached objects have different checksums"

assert (
verify_s3_object_integrity(
original_object_path=f"{cache_uploaded_objs_dir_2}/Cache-0",
result_object_path=f"{cache_downloaded_objs_dir}/Cache-0",
awscli_pod=awscli_pod_session,
)
is False
), "The cached object was replaced by the new one before the TTL has expired"
verified_buckets.append(bucket.name)
if event.is_set():
run_in_bg = False
break
except Exception:
logger.warning(f"verified so far: {verified_buckets}")

raise
)
is False
), "The cached object was replaced by the new one before the TTL has expired"
logger.info(f"Verified caching for bucket: {bucket.name}")

if event.is_set():
run_in_bg = False
break

if not run_in_bg:
logger.warning("Stopping noobaa caching verification")
@@ -269,14 +288,10 @@ def validate_rgw_kafka_notification(kafka_rgw_dict, event, run_in_bg=False):
kafka_topic = kafka_rgw_dict["kafka_topic"]

while True:
data = data + f"{uuid4().hex}"

try:

def put_object_to_bucket(bucket_name, key, body):
return s3_client.put_object(Bucket=bucket_name, Key=key, Body=body)

except botoexceptions.ClientError:
logger.warning("s3 put object timedout but ignoring as of now")
def put_object_to_bucket(bucket_name, key, body):
return s3_client.put_object(Bucket=bucket_name, Key=key, Body=body)

assert put_object_to_bucket(
bucketname, "key-1", data
@@ -345,7 +360,10 @@ def validate_mcg_object_expiration(

for i in range(object_amount):
s3_put_object(
mcg_obj, bucket.name, f"{prefix}/obj-key-{i}", "Some random data"
mcg_obj,
bucket.name,
f"{prefix}/obj-key-{uuid4().hex}",
"Some random data",
)
expire_objects_in_bucket(bucket.name)
sample_if_objects_expired(mcg_obj, bucket.name)
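One detail worth noting in the caching check above: the integrity comparison points at f"{cache_uploaded_objs_dir}/{obj_name}0". Assuming write_random_test_objects_to_bucket appends a zero-based index to the supplied pattern (the earlier fixed paths ended in "Cache-0", which suggests this), the uuid-based prefix still yields predictable local file names. A rough sketch of that assumption:

```python
from uuid import uuid4


def expected_object_names(pattern, amount):
    """Object names a pattern-based writer would produce, assuming it
    appends a zero-based index to the pattern (e.g. 'Cache-0', 'Cache-1')."""
    return [f"{pattern}{i}" for i in range(amount)]


obj_name = f"Cache-{uuid4().hex}"
# With a single object written, the file to compare on both the upload and
# download side is f"{obj_name}0", matching the verify_s3_object_integrity paths.
print(expected_object_names(obj_name, 1))
```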
43 changes: 7 additions & 36 deletions ocs_ci/ocs/bucket_utils.py
@@ -1333,6 +1333,7 @@ def check_cached_objects_by_name(mcg_obj, bucket_name, expected_objects_names=No
Returns:
bool: True if all the objects exist in the cache as expected, False otherwise
"""
res = mcg_obj.send_rpc_query(
"object_api",
@@ -1344,7 +1345,7 @@ def check_cached_objects_by_name(mcg_obj, bucket_name, expected_objects_names=No
list_objects_res = [name["key"] for name in res.get("reply").get("objects")]
if not expected_objects_names:
expected_objects_names = []
# if set(expected_objects_names) == set(list_objects_res):

for obj in expected_objects_names:
if obj not in list_objects_res:
logger.warning(
@@ -1774,6 +1775,7 @@ def random_object_round_trip_verification(
cleanup=False,
result_pod=None,
result_pod_path=None,
**kwargs,
):
"""
Writes random objects in a pod, uploads them to a bucket,
@@ -1817,7 +1819,7 @@
)
written_objects = io_pod.exec_cmd_on_pod(f"ls -A1 {upload_dir}").split(" ")
if wait_for_replication:
compare_bucket_object_list(mcg_obj, bucket_name, second_bucket_name)
compare_bucket_object_list(mcg_obj, bucket_name, second_bucket_name, **kwargs)
bucket_name = second_bucket_name
# Download the random objects that were uploaded to the bucket
sync_object_directory(
@@ -1917,50 +1919,19 @@ def create_aws_bs_using_cli(
)


def change_expiration_query_interval(new_interval):
"""
Change how often noobaa should check for object expiration
By default it will be 8 hours
Args:
new_interval (int): New interval in minutes
"""

from ocs_ci.ocs.resources.pod import (
get_noobaa_core_pod,
wait_for_pods_to_be_running,
)

nb_core_pod = get_noobaa_core_pod()
new_interval = new_interval * 60 * 1000
params = (
'[{"op": "add", "path": "/spec/template/spec/containers/0/env/-", '
f'"value": {{ "name": "CONFIG_JS_LIFECYCLE_INTERVAL", "value": "{new_interval}" }}}}]'
)
OCP(kind="statefulset", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE).patch(
resource_name=constants.NOOBAA_CORE_STATEFULSET,
params=params,
format_type="json",
)
logger.info(f"Updated the expiration query interval to {new_interval} ms")
nb_core_pod.delete()
wait_for_pods_to_be_running(pod_names=[nb_core_pod.name], timeout=300)


def expire_objects_in_bucket(bucket_name, new_expire_interval=None):
def expire_objects_in_bucket(bucket_name):
"""
Manually expire the objects in a bucket
Args:
bucket_name (str): Name of the bucket
new_expire_interval (int): New expiration interval
"""

from ocs_ci.ocs.resources.pod import (
get_noobaa_db_pod,
)

if new_expire_interval is not None and isinstance(new_expire_interval, int):
change_expiration_query_interval(new_expire_interval)

creation_time = f"{date.today().year-1}-06-25T14:18:28.712Z"
nb_db_pod = get_noobaa_db_pod()
query = (
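The **kwargs added to random_object_round_trip_verification is forwarded to compare_bucket_object_list, which is how the new timeout=1200 in the replication checks above reaches the bucket comparison. A simplified sketch of the forwarding pattern (reduced signatures, not the real ocs_ci helpers):

```python
import time


def compare_bucket_object_list(mcg_obj, first_bucket, second_bucket, timeout=600):
    """Poll until both buckets report the same object listing (simplified)."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if set(mcg_obj.list_objects(first_bucket)) == set(mcg_obj.list_objects(second_bucket)):
            return True
        time.sleep(30)
    raise TimeoutError(f"Buckets did not converge within {timeout}s")


def random_object_round_trip_verification(
    io_pod, mcg_obj, bucket_name, second_bucket_name=None, wait_for_replication=False, **kwargs
):
    # ... write random objects, upload them to bucket_name, and so on ...
    if wait_for_replication:
        # Any extra keyword arguments (e.g. timeout=1200) pass straight through.
        compare_bucket_object_list(mcg_obj, bucket_name, second_bucket_name, **kwargs)
```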
2 changes: 2 additions & 0 deletions ocs_ci/ocs/constants.py
@@ -116,6 +116,7 @@
CONFIG_JS_PREFIX = "CONFIG_JS_"
BUCKET_REPLICATOR_DELAY_PARAM = CONFIG_JS_PREFIX + "BUCKET_REPLICATOR_DELAY"
BUCKET_LOG_REPLICATOR_DELAY_PARAM = CONFIG_JS_PREFIX + "BUCKET_LOG_REPLICATOR_DELAY"
LIFECYCLE_INTERVAL_PARAM = "CONFIG_JS_LIFECYCLE_INTERVAL"

# Resources / Kinds
CEPHFILESYSTEM = "CephFileSystem"
@@ -1756,6 +1757,7 @@
AWS_S3_ENDPOINT = "https://s3.amazonaws.com"
NAMESPACE_FILESYSTEM = "nsfs"


# Cosbench constants
COSBENCH = "cosbench"
COSBENCH_PROJECT = "cosbench-project"
14 changes: 4 additions & 10 deletions ocs_ci/ocs/resources/pod.py
@@ -899,16 +899,10 @@ def get_noobaa_db_pod():
Returns:
Pod object: Noobaa db pod object
"""
if version.get_semantic_ocs_version_from_config() > version.VERSION_4_6:
nb_db = get_pods_having_label(
label=constants.NOOBAA_DB_LABEL_47_AND_ABOVE,
namespace=config.ENV_DATA["cluster_namespace"],
)
else:
nb_db = get_pods_having_label(
label=constants.NOOBAA_DB_LABEL_46_AND_UNDER,
namespace=config.ENV_DATA["cluster_namespace"],
)
nb_db = get_pods_having_label(
label=constants.NOOBAA_DB_LABEL_47_AND_ABOVE,
namespace=config.ENV_DATA["cluster_namespace"],
)
nb_db_pod = Pod(**nb_db[0])
return nb_db_pod
