
Commit

differentiate between platforms in measure_operation and add threading_lock where missing (#8729)

Signed-off-by: fbalak <[email protected]>
fbalak authored Oct 24, 2023
1 parent 7dad677 commit 7f16a45
Showing 1 changed file with 55 additions and 23 deletions.
78 changes: 55 additions & 23 deletions tests/manage/monitoring/conftest.py
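Every fixture touched below applies the same pattern: on managed-service platforms the measurement also collects PagerDuty incidents, and every measure_operation call now receives the shared threading_lock. A minimal sketch of that dispatch, assuming the measure_operation, get_pagerduty_service_id, config and constants names already imported in tests/manage/monitoring/conftest.py; operation_function and test_file are placeholders:

# Sketch of the conditional this commit adds to each fixture below.
# Everything except operation_function and test_file is assumed to come from
# the existing imports in tests/manage/monitoring/conftest.py.
if config.ENV_DATA["platform"].lower() in constants.MANAGED_SERVICE_PLATFORMS:
    # Managed services report alerts through PagerDuty, so the relevant
    # service id is passed and incidents are recorded with the measurement.
    measured_op = measure_operation(
        operation_function,
        test_file,
        pagerduty_service_ids=[get_pagerduty_service_id()],
        threading_lock=threading_lock,
    )
else:
    # Other platforms only gather Prometheus alerts; the lock is still passed,
    # presumably to serialize concurrent API calls.
    measured_op = measure_operation(
        operation_function, test_file, threading_lock=threading_lock
    )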
@@ -289,7 +289,7 @@ def stop_osd():


@pytest.fixture
def measure_corrupt_pg(request, measurement_dir):
def measure_corrupt_pg(request, measurement_dir, threading_lock):
"""
Create Ceph pool and corrupt Placement Group on one of OSDs, measures the
time when it was corrupted and records alerts that were triggered during
@@ -378,9 +378,12 @@ def wait_with_corrupted_pg():
test_file,
minimal_time=60 * 17,
pagerduty_service_ids=[get_pagerduty_service_id()],
threading_lock=threading_lock,
)
else:
measured_op = measure_operation(wait_with_corrupted_pg, test_file)
measured_op = measure_operation(
wait_with_corrupted_pg, test_file, threading_lock=threading_lock
)

teardown()

@@ -684,7 +687,9 @@ def workload_storageutilization_10g_cephfs(


@pytest.fixture
def measure_noobaa_exceed_bucket_quota(measurement_dir, request, mcg_obj, awscli_pod):
def measure_noobaa_exceed_bucket_quota(
measurement_dir, request, mcg_obj, awscli_pod, threading_lock
):
"""
Create NooBaa bucket, set its capacity quota to 2GB and fill it with data.
@@ -742,11 +747,17 @@ def exceed_bucket_quota():
test_file = os.path.join(
measurement_dir, "measure_noobaa_exceed__bucket_quota.json"
)
measured_op = measure_operation(
exceed_bucket_quota,
test_file,
pagerduty_service_ids=[get_pagerduty_service_id()],
)
if config.ENV_DATA["platform"].lower() in constants.MANAGED_SERVICE_PLATFORMS:
measured_op = measure_operation(
exceed_bucket_quota,
test_file,
pagerduty_service_ids=[get_pagerduty_service_id()],
threading_lock=threading_lock,
)
else:
measured_op = measure_operation(
exceed_bucket_quota, test_file, threading_lock=threading_lock
)

bucket_info = mcg_obj.get_bucket_info(bucket.name)
logger.info(f"Bucket {bucket.name} storage: {bucket_info['storage']}")
@@ -837,12 +848,17 @@ def do_nothing():
else:
logger.debug("io_in_bg not detected, good")

measured_op = measure_operation(
do_nothing,
test_file,
pagerduty_service_ids=[get_pagerduty_service_id()],
threading_lock=threading_lock,
)
if config.ENV_DATA["platform"].lower() in constants.MANAGED_SERVICE_PLATFORMS:
measured_op = measure_operation(
do_nothing,
test_file,
pagerduty_service_ids=[get_pagerduty_service_id()],
threading_lock=threading_lock,
)
else:
measured_op = measure_operation(
do_nothing, test_file, threading_lock=threading_lock
)
if restart_io_in_bg:
logger.info("reverting load_status to resume io_in_bg")
config.RUN["load_status"] = "to_be_resumed"
@@ -910,7 +926,12 @@ def stop_rgw():

@pytest.fixture
def measure_noobaa_ns_target_bucket_deleted(
measurement_dir, request, bucket_factory, namespace_store_factory, cld_mgr
measurement_dir,
request,
bucket_factory,
namespace_store_factory,
cld_mgr,
threading_lock,
):
"""
Create Namespace bucket from 2 namespace resources. Delete target bucket
@@ -958,11 +979,17 @@ def delete_target_bucket():
return ns_stores[0].uls_name

test_file = os.path.join(measurement_dir, "measure_delete_target_bucket.json")
measured_op = measure_operation(
delete_target_bucket,
test_file,
pagerduty_service_ids=[get_pagerduty_service_id()],
)
if config.ENV_DATA["platform"].lower() in constants.MANAGED_SERVICE_PLATFORMS:
measured_op = measure_operation(
delete_target_bucket,
test_file,
pagerduty_service_ids=[get_pagerduty_service_id()],
threading_lock=threading_lock,
)
else:
measured_op = measure_operation(
delete_target_bucket, test_file, threading_lock=threading_lock
)
logger.info("Delete NS bucket, bucketclass and NS store so that alert is cleared")
ns_bucket[0].delete()
ns_bucket[0].bucketclass.delete()
@@ -971,7 +998,7 @@ def delete_target_bucket():


@pytest.fixture
def measure_stop_worker_nodes(request, measurement_dir, nodes):
def measure_stop_worker_nodes(request, measurement_dir, nodes, threading_lock):
"""
Stop worker nodes that doesn't contain RGW (so that alerts are triggered
correctly), measure the time when it was stopped and monitors alerts that
@@ -1026,9 +1053,12 @@ def finalizer():
test_file,
minimal_time=60 * 8,
pagerduty_service_ids=[get_pagerduty_service_id()],
threading_lock=threading_lock,
)
else:
measured_op = measure_operation(stop_nodes, test_file)
measured_op = measure_operation(
stop_nodes, test_file, threading_lock=threading_lock
)
logger.info("Turning on nodes")
try:
nodes.start_nodes(nodes=test_nodes)
@@ -1084,7 +1114,9 @@ def teardown():
request.addfinalizer(teardown)

test_file = os.path.join(measurement_dir, "measure_rewrite_kms_endpoint.json")
measured_op = measure_operation(change_kms_endpoint, test_file)
measured_op = measure_operation(
change_kms_endpoint, test_file, threading_lock=threading_lock
)

teardown()

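For context, a monitoring test would typically just request one of these fixtures and assert on the alerts recorded between its start and stop timestamps. A hypothetical consumer; the "prometheus_alerts" key follows the usual measure_operation result layout but is an assumption, not something shown in this commit:

# Hypothetical test using the measure_stop_worker_nodes fixture updated above.
# The "prometheus_alerts" key is assumed, not taken from this diff.
def test_alerts_when_workers_stopped(measure_stop_worker_nodes):
    alerts = measure_stop_worker_nodes.get("prometheus_alerts", [])
    assert alerts, "no Prometheus alerts recorded while worker nodes were stopped"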
