Skip to content

Commit

Permalink
Merge branch 'red-hat-storage:master' into custom_sc_tc_changes
Browse files Browse the repository at this point in the history
  • Loading branch information
paraggit authored Nov 2, 2023
2 parents b96fc2e + edc61fd commit 68bfd4f
Show file tree
Hide file tree
Showing 10 changed files with 115 additions and 22 deletions.
12 changes: 10 additions & 2 deletions ocs_ci/deployment/deployment.py
Original file line number Diff line number Diff line change
Expand Up @@ -1559,6 +1559,12 @@ def deploy_ocs(self):
mcg_only_post_deployment_checks()
return

# get ODF version and set MGR count based on ODF version
ocs_version = version.get_semantic_ocs_version_from_config()
mgr_count = constants.MGR_COUNT_415
if ocs_version < version.VERSION_4_15:
mgr_count = constants.MGR_COUNT

pod = ocp.OCP(kind=constants.POD, namespace=self.namespace)
cfs = ocp.OCP(kind=constants.CEPHFILESYSTEM, namespace=self.namespace)
# Check for Ceph pods
Expand All @@ -1570,7 +1576,10 @@ def deploy_ocs(self):
timeout=mon_pod_timeout,
)
assert pod.wait_for_resource(
condition="Running", selector="app=rook-ceph-mgr", timeout=600
condition="Running",
selector="app=rook-ceph-mgr",
resource_count=mgr_count,
timeout=600,
)
assert pod.wait_for_resource(
condition="Running",
Expand All @@ -1583,7 +1592,6 @@ def deploy_ocs(self):
validate_cluster_on_pvc()

# check for odf-console
ocs_version = version.get_semantic_ocs_version_from_config()
if ocs_version >= version.VERSION_4_9:
assert pod.wait_for_resource(
condition="Running", selector="app=odf-console", timeout=600
Expand Down
4 changes: 2 additions & 2 deletions ocs_ci/framework/conf/ocs_version/ocs-4.15.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
---
DEPLOYMENT:
default_ocs_registry_image: "quay.io/rhceph-dev/ocs-registry:latest-4.15"
default_latest_tag: 'latest-4.15'
default_ocs_registry_image: "quay.io/rhceph-dev/ocs-registry:latest-stable-4.15"
default_latest_tag: 'latest-stable-4.15'
ocs_csv_channel: "stable-4.15"
default_ocp_version: '4.15'
ENV_DATA:
Expand Down
25 changes: 16 additions & 9 deletions ocs_ci/framework/pytest_customization/reports.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,25 +147,32 @@ def pytest_report_teststatus(report, config):
GV.TIMEREPORT_DICT[report.nodeid] = GV.TIMEREPORT_DICT.get(report.nodeid, {})

if report.when == "setup":
setup_duration = round(report.duration, 2)
logger.info(
f"duration reported by {report.nodeid} immediately after test execution: {round(report.duration, 2)}"
f"duration reported by {report.nodeid} immediately after test execution: {setup_duration}"
)
GV.TIMEREPORT_DICT[report.nodeid]["setup"] = round(report.duration, 2)
GV.TIMEREPORT_DICT[report.nodeid]["total"] = round(report.duration, 2)
GV.TIMEREPORT_DICT[report.nodeid]["setup"] = setup_duration
GV.TIMEREPORT_DICT[report.nodeid]["total"] = setup_duration

if "total" not in GV.TIMEREPORT_DICT[report.nodeid]:
GV.TIMEREPORT_DICT[report.nodeid]["total"] = 0

if report.when == "call":
call_duration = round(report.duration, 2)
logger.info(
f"duration reported by {report.nodeid} immediately after test execution: {round(report.duration, 2)}"
f"duration reported by {report.nodeid} immediately after test execution: {call_duration}"
)
GV.TIMEREPORT_DICT[report.nodeid]["call"] = call_duration
GV.TIMEREPORT_DICT[report.nodeid]["total"] = round(
GV.TIMEREPORT_DICT[report.nodeid]["total"] + call_duration, 2
)
GV.TIMEREPORT_DICT[report.nodeid]["call"] = round(report.duration, 2)
GV.TIMEREPORT_DICT[report.nodeid]["total"] += round(report.duration, 2)

if report.when == "teardown":
teardown_duration = round(report.duration, 2)
logger.info(
f"duration reported by {report.nodeid} immediately after test execution: {round(report.duration, 2)}"
f"duration reported by {report.nodeid} immediately after test execution: {teardown_duration}"
)
GV.TIMEREPORT_DICT[report.nodeid]["teardown"] = teardown_duration
GV.TIMEREPORT_DICT[report.nodeid]["total"] = round(
GV.TIMEREPORT_DICT[report.nodeid]["total"] + teardown_duration, 2
)
GV.TIMEREPORT_DICT[report.nodeid]["teardown"] = round(report.duration, 2)
GV.TIMEREPORT_DICT[report.nodeid]["total"] += round(report.duration, 2)
8 changes: 8 additions & 0 deletions ocs_ci/ocs/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -1219,6 +1219,10 @@
PDB_COUNT_2_MGR = 4
PDB_COUNT_ARBITER = 5

# MGR COUNT
MGR_COUNT = 1
MGR_COUNT_415 = 2

# Root Disk size
CURRENT_VM_ROOT_DISK_SIZE = "60"
VM_ROOT_DISK_SIZE = "120"
Expand Down Expand Up @@ -1609,6 +1613,10 @@
"4.14"
] = DISCON_CL_REQUIRED_PACKAGES_PER_ODF_VERSION["4.12"]

DISCON_CL_REQUIRED_PACKAGES_PER_ODF_VERSION[
"4.15"
] = DISCON_CL_REQUIRED_PACKAGES_PER_ODF_VERSION["4.12"]


# PSI-openstack constants
NOVA_CLNT_VERSION = "2.0"
Expand Down
7 changes: 6 additions & 1 deletion ocs_ci/ocs/ocs_upgrade.py
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,12 @@ def verify_image_versions(old_images, upgrade_version, version_before_upgrade):
count=3,
timeout=820,
)
verify_pods_upgraded(old_images, selector=constants.MGR_APP_LABEL)
mgr_count = constants.MGR_COUNT_415
if upgrade_version < parse_version("4.15"):
mgr_count = constants.MGR_COUNT
verify_pods_upgraded(
old_images, selector=constants.MGR_APP_LABEL, count=mgr_count
)
osd_timeout = 600 if upgrade_version >= parse_version("4.5") else 750
osd_count = get_osd_count()
# In the debugging issue:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,12 +25,12 @@
constants.CEPHFILESYSTEM: {
"type": "CephFS",
"sc": constants.CEPHFILESYSTEM_SC,
"delete_time": 2,
"delete_time": 10, # old value was 2
},
constants.CEPHBLOCKPOOL: {
"type": "RBD",
"sc": constants.CEPHBLOCKPOOL_SC,
"delete_time": 1,
"delete_time": 10, # old value was 1
},
}
Operations_Mesurment = ["create", "delete", "csi_create", "csi_delete"]
Expand Down Expand Up @@ -66,7 +66,7 @@ def teardown(self):
def create_fio_pod_yaml(self, pvc_size=1):
"""
This function create a new performance pod yaml file, which will trigger
the FIO command on starting and getting into Compleat state when finish
the FIO command on starting and getting into Complete state when finish
The FIO will fillup 70% of the PVC which will attached to the pod.
Expand Down Expand Up @@ -118,7 +118,7 @@ def create_pvcs_and_wait_for_bound(self, msg_prefix, pvcs, pvc_size, burst=True)
TimeoutExpiredError : if not all PVC(s) get into Bound state whithin 2 sec. per PVC
"""
# Creating PVC(s) for creation time mesurment and wait for bound state
timeout = pvcs * 2
timeout = pvcs * 4
start_time = self.get_time(time_format="csi")
log.info(f"{msg_prefix} Start creating new {pvcs} PVCs")
self.pvc_objs, _ = helpers.create_multiple_pvcs(
Expand Down Expand Up @@ -157,9 +157,9 @@ def run_io(self):
TimeoutExpiredError : if not all completed I/O whithin 20 Min.
"""
# wait up to 20 Min for all pod(s) to compleat running IO, this tuned for up to
# wait up to 60 Min for all pod(s) to complete running IO, this tuned for up to
# 120 PVCs of 25GiB each.
timeout = 1200
timeout = 5400 # old value 1200
pod_objs = []
# Create PODs, connect them to the PVCs and run IO on them
for pvc_obj in self.pvc_objs:
Expand All @@ -173,7 +173,7 @@ def run_io(self):
assert pod_obj, "Failed to create pod"
pod_objs.append(pod_obj)

log.info("Wait for all of the POD(s) to be created, and compleat running I/O")
log.info("Wait for all of the POD(s) to be created, and complete running I/O")
performance_lib.wait_for_resource_bulk_status(
"pod", len(pod_objs), self.namespace, constants.STATUS_COMPLETED, timeout, 5
)
Expand Down Expand Up @@ -251,7 +251,7 @@ def test_pvc_creation_deletion_measurement_performance(
if self.dev_mode:
num_of_samples = 2

accepted_creation_time = 1
accepted_creation_time = 5 # old_value=1
accepted_deletion_time = Interface_Info[self.interface]["delete_time"]
accepted_creation_deviation_percent = 50
accepted_deletion_deviation_percent = 50
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
skipif_ocp_version,
performance,
performance_b,
ignore_leftovers,
)
from ocs_ci.helpers.helpers import get_full_test_logs_path
from ocs_ci.ocs import constants, exceptions
Expand Down Expand Up @@ -47,6 +48,7 @@
@performance_b
@skipif_ocp_version("<4.6")
@skipif_ocs_version("<4.6")
@ignore_leftovers
class TestPvcMultiClonePerformance(PASTest):
def setup(self):
"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
skipif_ocp_version,
performance,
performance_b,
ignore_leftovers,
)

from ocs_ci.helpers.helpers import get_full_test_logs_path
Expand All @@ -44,6 +45,7 @@
@performance_b
@skipif_ocp_version("<4.6")
@skipif_ocs_version("<4.6")
@ignore_leftovers
class TestPvcMultiSnapshotPerformance(PASTest):
"""
Tests to measure PVC snapshots creation performance & scale
Expand Down
59 changes: 59 additions & 0 deletions tests/manage/mcg/test_bucket_policy.py
Original file line number Diff line number Diff line change
Expand Up @@ -985,3 +985,62 @@ def test_multipart_with_policy(self, mcg_obj, bucket_factory):
# Completing the Multipart Upload
logger.info(f"Completing the MP Upload with on bucket: {bucket}")
complete_multipart_upload(obc_obj, bucket, object_key, upload_id, uploaded_part)

@tier1
@pytest.mark.bugzilla("2210289")
@pytest.mark.polarion_id("OCS-5183")
def test_supported_bucket_policy_operations(self, mcg_obj, bucket_factory):
    """
    Verify that the object-lock family of s3 actions is accepted in a
    bucket policy and that every requested action is present when the
    policy is read back from the bucket.

    Args:
        mcg_obj: MCG object holding the noobaa admin credentials
        bucket_factory: factory fixture that creates OBC-backed buckets

    Raises:
        AssertionError: if the policy PUT fails, returns a non-200 status,
            or any requested action is missing from the returned policy

    """
    # Creating obc and obc object to get account details, keys etc
    obc_name = bucket_factory(amount=1, interface="OC")[0].name
    obc_obj = OBC(obc_name)

    # Symmetric Get/Put pairs of the object-lock related s3 actions.
    # NOTE(fix): the original list carried "GetObjectLegalHold" twice and
    # omitted "PutObjectLegalHold"; corrected to the intended Get/Put pair.
    actions_list = [
        "GetBucketObjectLockConfiguration",
        "GetObjectRetention",
        "GetObjectLegalHold",
        "PutBucketObjectLockConfiguration",
        "PutObjectRetention",
        "PutObjectLegalHold",
    ]
    bucket_policy_generated = gen_bucket_policy(
        user_list=obc_obj.obc_account,
        actions_list=actions_list,
        resources_list=[obc_obj.bucket_name],
    )
    bucket_policy_json = json.dumps(bucket_policy_generated)

    # Add Bucket Policy
    logger.info(f"Creating bucket policy on bucket: {obc_obj.bucket_name}")
    put_policy = put_bucket_policy(mcg_obj, obc_obj.bucket_name, bucket_policy_json)

    assert put_policy is not None, "Put policy response is None"
    response = HttpResponseParser(put_policy)
    assert (
        response.status_code == 200
    ), f"Invalid Status code: {response.status_code}"
    logger.info("Bucket policy has been created successfully")

    # Get bucket policy
    logger.info(f"Getting Bucket policy on bucket: {obc_obj.bucket_name}")
    get_policy = get_bucket_policy(mcg_obj, obc_obj.bucket_name)
    policy_str = get_policy["Policy"]
    logger.info(f"Got bucket policy: {policy_str}")
    bucket_policy = json.loads(policy_str)

    # Find the missing bucket policies.
    # NOTE(review): this assumes the returned statement stores actions
    # lower-cased with an "s3:" prefix — confirm against the MCG response.
    returned_actions = bucket_policy["statement"][0]["action"]
    returned_actions = [
        action.split("s3:", 1)[1]
        for action in returned_actions
        if action.startswith("s3:")
    ]
    requested_actions = [action.lower() for action in actions_list]
    missing_policies = [
        action for action in requested_actions if action not in returned_actions
    ]
    assert (
        not missing_policies
    ), f"Some bucket_policies are not created : {missing_policies}"
2 changes: 2 additions & 0 deletions tests/manage/rgw/test_rgw_routes.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
from ocs_ci.framework.pytest_customization.marks import (
bugzilla,
on_prem_platform_required,
red_squad,
skipif_external_mode,
tier1,
)
Expand All @@ -22,6 +23,7 @@
log = logging.getLogger(__name__)


@red_squad
@on_prem_platform_required
class TestRGWRoutes:
"""
Expand Down

0 comments on commit 68bfd4f

Please sign in to comment.