Skip to content

Commit

Permalink
Merge remote-tracking branch 'origin/custom_sc_tc_changes' into custom_sc_tc_changes
Browse files Browse the repository at this point in the history
  • Loading branch information
paraggit committed Nov 5, 2023
2 parents 93432a5 + 68bfd4f commit 95c6e10
Show file tree
Hide file tree
Showing 17 changed files with 190 additions and 29 deletions.
12 changes: 10 additions & 2 deletions ocs_ci/deployment/deployment.py
Original file line number Diff line number Diff line change
Expand Up @@ -1559,6 +1559,12 @@ def deploy_ocs(self):
mcg_only_post_deployment_checks()
return

# get ODF version and set MGR count based on ODF version
ocs_version = version.get_semantic_ocs_version_from_config()
mgr_count = constants.MGR_COUNT_415
if ocs_version < version.VERSION_4_15:
mgr_count = constants.MGR_COUNT

pod = ocp.OCP(kind=constants.POD, namespace=self.namespace)
cfs = ocp.OCP(kind=constants.CEPHFILESYSTEM, namespace=self.namespace)
# Check for Ceph pods
Expand All @@ -1570,7 +1576,10 @@ def deploy_ocs(self):
timeout=mon_pod_timeout,
)
assert pod.wait_for_resource(
condition="Running", selector="app=rook-ceph-mgr", timeout=600
condition="Running",
selector="app=rook-ceph-mgr",
resource_count=mgr_count,
timeout=600,
)
assert pod.wait_for_resource(
condition="Running",
Expand All @@ -1583,7 +1592,6 @@ def deploy_ocs(self):
validate_cluster_on_pvc()

# check for odf-console
ocs_version = version.get_semantic_ocs_version_from_config()
if ocs_version >= version.VERSION_4_9:
assert pod.wait_for_resource(
condition="Running", selector="app=odf-console", timeout=600
Expand Down
4 changes: 2 additions & 2 deletions ocs_ci/framework/conf/ocs_version/ocs-4.15.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
---
DEPLOYMENT:
default_ocs_registry_image: "quay.io/rhceph-dev/ocs-registry:latest-4.15"
default_latest_tag: 'latest-4.15'
default_ocs_registry_image: "quay.io/rhceph-dev/ocs-registry:latest-stable-4.15"
default_latest_tag: 'latest-stable-4.15'
ocs_csv_channel: "stable-4.15"
default_ocp_version: '4.15'
ENV_DATA:
Expand Down
25 changes: 16 additions & 9 deletions ocs_ci/framework/pytest_customization/reports.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,25 +147,32 @@ def pytest_report_teststatus(report, config):
GV.TIMEREPORT_DICT[report.nodeid] = GV.TIMEREPORT_DICT.get(report.nodeid, {})

if report.when == "setup":
setup_duration = round(report.duration, 2)
logger.info(
f"duration reported by {report.nodeid} immediately after test execution: {round(report.duration, 2)}"
f"duration reported by {report.nodeid} immediately after test execution: {setup_duration}"
)
GV.TIMEREPORT_DICT[report.nodeid]["setup"] = round(report.duration, 2)
GV.TIMEREPORT_DICT[report.nodeid]["total"] = round(report.duration, 2)
GV.TIMEREPORT_DICT[report.nodeid]["setup"] = setup_duration
GV.TIMEREPORT_DICT[report.nodeid]["total"] = setup_duration

if "total" not in GV.TIMEREPORT_DICT[report.nodeid]:
GV.TIMEREPORT_DICT[report.nodeid]["total"] = 0

if report.when == "call":
call_duration = round(report.duration, 2)
logger.info(
f"duration reported by {report.nodeid} immediately after test execution: {round(report.duration, 2)}"
f"duration reported by {report.nodeid} immediately after test execution: {call_duration}"
)
GV.TIMEREPORT_DICT[report.nodeid]["call"] = call_duration
GV.TIMEREPORT_DICT[report.nodeid]["total"] = round(
GV.TIMEREPORT_DICT[report.nodeid]["total"] + call_duration, 2
)
GV.TIMEREPORT_DICT[report.nodeid]["call"] = round(report.duration, 2)
GV.TIMEREPORT_DICT[report.nodeid]["total"] += round(report.duration, 2)

if report.when == "teardown":
teardown_duration = round(report.duration, 2)
logger.info(
f"duration reported by {report.nodeid} immediately after test execution: {round(report.duration, 2)}"
f"duration reported by {report.nodeid} immediately after test execution: {teardown_duration}"
)
GV.TIMEREPORT_DICT[report.nodeid]["teardown"] = teardown_duration
GV.TIMEREPORT_DICT[report.nodeid]["total"] = round(
GV.TIMEREPORT_DICT[report.nodeid]["total"] + teardown_duration, 2
)
GV.TIMEREPORT_DICT[report.nodeid]["teardown"] = round(report.duration, 2)
GV.TIMEREPORT_DICT[report.nodeid]["total"] += round(report.duration, 2)
19 changes: 18 additions & 1 deletion ocs_ci/ocs/cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -1240,9 +1240,26 @@ def validate_pdb_creation():
item_list = pdb_obj.get().get("items")
pdb_count = constants.PDB_COUNT
pdb_required = [constants.MDS_PDB, constants.MON_PDB, constants.OSD_PDB]

if version.get_semantic_ocs_version_from_config() >= version.VERSION_4_15:
pdb_count = constants.PDB_COUNT_2_MGR
pdb_required = [
constants.MDS_PDB,
constants.MON_PDB,
constants.OSD_PDB,
constants.MGR_PDB,
]

if config.DEPLOYMENT.get("arbiter_deployment"):
pdb_count = constants.PDB_COUNT_ARBITER
pdb_required.extend((constants.MGR_PDB, constants.RGW_PDB))
pdb_required = [
constants.MDS_PDB,
constants.MON_PDB,
constants.OSD_PDB,
constants.MGR_PDB,
constants.RGW_PDB,
]

if len(item_list) != pdb_count:
raise PDBNotCreatedException(
f"Not All PDB's created. Expected {pdb_count} PDB's but found {len(item_list)}"
Expand Down
9 changes: 9 additions & 0 deletions ocs_ci/ocs/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -1216,8 +1216,13 @@
MGR_PDB = "rook-ceph-mgr-pdb"
RGW_PDB = "rook-ceph-rgw-ocs-storagecluster-cephobjectstore"
PDB_COUNT = 3
PDB_COUNT_2_MGR = 4
PDB_COUNT_ARBITER = 5

# MGR COUNT
MGR_COUNT = 1
MGR_COUNT_415 = 2

# Root Disk size
CURRENT_VM_ROOT_DISK_SIZE = "60"
VM_ROOT_DISK_SIZE = "120"
Expand Down Expand Up @@ -1608,6 +1613,10 @@
"4.14"
] = DISCON_CL_REQUIRED_PACKAGES_PER_ODF_VERSION["4.12"]

DISCON_CL_REQUIRED_PACKAGES_PER_ODF_VERSION[
"4.15"
] = DISCON_CL_REQUIRED_PACKAGES_PER_ODF_VERSION["4.12"]


# PSI-openstack constants
NOVA_CLNT_VERSION = "2.0"
Expand Down
7 changes: 6 additions & 1 deletion ocs_ci/ocs/ocs_upgrade.py
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,12 @@ def verify_image_versions(old_images, upgrade_version, version_before_upgrade):
count=3,
timeout=820,
)
verify_pods_upgraded(old_images, selector=constants.MGR_APP_LABEL)
mgr_count = constants.MGR_COUNT_415
if upgrade_version < parse_version("4.15"):
mgr_count = constants.MGR_COUNT
verify_pods_upgraded(
old_images, selector=constants.MGR_APP_LABEL, count=mgr_count
)
osd_timeout = 600 if upgrade_version >= parse_version("4.5") else 750
osd_count = get_osd_count()
# In the debugging issue:
Expand Down
35 changes: 35 additions & 0 deletions ocs_ci/repos/ocp_4_15_rhel8.repo
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
[openshift]
name=RHopenshift
baseurl=https://mirror.openshift.com/enterprise/reposync/4.15/rhel-8-server-ose-rpms/
enabled=1
gpgcheck=0
username=$mirror_openshift_user
password=$mirror_openshift_password
module_hotfixes = 1

[rhel-8-baseos]
name=rhel8-baseos
baseurl=https://mirror.openshift.com/enterprise/reposync/ci-deps/rhel-8-baseos-rpms/
enabled=1
gpgcheck=0
username=$mirror_openshift_user
password=$mirror_openshift_password
module_hotfixes = 1

[rhel-8-appstream]
name=rhel8-appstream
baseurl=https://mirror.openshift.com/enterprise/reposync/ci-deps/rhel-8-appstream-rpms/
enabled=1
gpgcheck=0
username=$mirror_openshift_user
password=$mirror_openshift_password
module_hotfixes = 1

[rhel-fast]
name=RHfast
baseurl=https://mirror.openshift.com/enterprise/reposync/4.15/rhel-8-fast-datapath-rpms/
enabled=1
gpgcheck=0
username=$mirror_openshift_user
password=$mirror_openshift_password
module_hotfixes = 1
1 change: 1 addition & 0 deletions ocs_ci/utility/version.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ def get_semantic_version(version, only_major_minor=False, ignore_pre_release=Fal
VERSION_4_12 = get_semantic_version("4.12", True)
VERSION_4_13 = get_semantic_version("4.13", True)
VERSION_4_14 = get_semantic_version("4.14", True)
VERSION_4_15 = get_semantic_version("4.15", True)


def get_semantic_ocs_version_from_config(cluster_config=None):
Expand Down
12 changes: 6 additions & 6 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,21 +75,21 @@
# compatible systems. Resolves problem for m1 Mac chips
"greenlet==1.1.2",
"ovirt-engine-sdk-python==4.4.11",
"junitparser",
"junitparser==3.1.0",
"flaky==3.7.0",
"ocp-network-split",
"pyopenssl",
"pyparsing ==2.4.7",
"ocp-network-split==0.3.0",
"pyopenssl==23.3.0",
"pyparsing==2.4.7",
"mysql-connector-python==8.0.27",
"pytest-repeat",
"pytest-repeat==0.9.3",
"pexpect>=4.8.0",
# googleapis-common-protos 1.56.2 needs to have protobuf<4.0.0>=3.15.0
"protobuf==4.21.7",
"ping3>=4.0.3",
"psutil==5.9.0",
"azure-identity==1.12.0",
"azure-mgmt-storage==21.0.0",
"fauxfactory",
"fauxfactory==3.1.0",
],
entry_points={
"console_scripts": [
Expand Down
5 changes: 5 additions & 0 deletions tests/e2e/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@
validate_pv_delete,
default_storage_class,
)
from ocs_ci.utility.kms import is_kms_enabled
from ocs_ci.utility.utils import clone_notify

logger = logging.getLogger(__name__)
Expand Down Expand Up @@ -147,6 +148,10 @@ def factory(
"noobaa-server",
"noobaa-endpoints",
]
if is_kms_enabled():
secrets = [
secret for secret in secrets if secret != "noobaa-root-master-key"
]
secrets_yaml = [
ocp_secret_obj.get(resource_name=f"{secret}") for secret in secrets
]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,12 +25,12 @@
constants.CEPHFILESYSTEM: {
"type": "CephFS",
"sc": constants.CEPHFILESYSTEM_SC,
"delete_time": 2,
"delete_time": 10, # old value was 2
},
constants.CEPHBLOCKPOOL: {
"type": "RBD",
"sc": constants.CEPHBLOCKPOOL_SC,
"delete_time": 1,
"delete_time": 10, # old value was 1
},
}
Operations_Mesurment = ["create", "delete", "csi_create", "csi_delete"]
Expand Down Expand Up @@ -66,7 +66,7 @@ def teardown(self):
def create_fio_pod_yaml(self, pvc_size=1):
"""
This function create a new performance pod yaml file, which will trigger
the FIO command on starting and getting into Compleat state when finish
the FIO command on starting and getting into Complete state when finish
The FIO will fillup 70% of the PVC which will attached to the pod.
Expand Down Expand Up @@ -118,7 +118,7 @@ def create_pvcs_and_wait_for_bound(self, msg_prefix, pvcs, pvc_size, burst=True)
TimeoutExpiredError : if not all PVC(s) get into Bound state whithin 2 sec. per PVC
"""
# Creating PVC(s) for creation time mesurment and wait for bound state
timeout = pvcs * 2
timeout = pvcs * 4
start_time = self.get_time(time_format="csi")
log.info(f"{msg_prefix} Start creating new {pvcs} PVCs")
self.pvc_objs, _ = helpers.create_multiple_pvcs(
Expand Down Expand Up @@ -157,9 +157,9 @@ def run_io(self):
TimeoutExpiredError : if not all completed I/O whithin 20 Min.
"""
# wait up to 20 Min for all pod(s) to compleat running IO, this tuned for up to
# wait up to 60 Min for all pod(s) to complete running IO, this tuned for up to
# 120 PVCs of 25GiB each.
timeout = 1200
timeout = 5400 # old value 1200
pod_objs = []
# Create PODs, connect them to the PVCs and run IO on them
for pvc_obj in self.pvc_objs:
Expand All @@ -173,7 +173,7 @@ def run_io(self):
assert pod_obj, "Failed to create pod"
pod_objs.append(pod_obj)

log.info("Wait for all of the POD(s) to be created, and compleat running I/O")
log.info("Wait for all of the POD(s) to be created, and complete running I/O")
performance_lib.wait_for_resource_bulk_status(
"pod", len(pod_objs), self.namespace, constants.STATUS_COMPLETED, timeout, 5
)
Expand Down Expand Up @@ -251,7 +251,7 @@ def test_pvc_creation_deletion_measurement_performance(
if self.dev_mode:
num_of_samples = 2

accepted_creation_time = 1
accepted_creation_time = 5 # old_value=1
accepted_deletion_time = Interface_Info[self.interface]["delete_time"]
accepted_creation_deviation_percent = 50
accepted_deletion_deviation_percent = 50
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
skipif_ocp_version,
performance,
performance_b,
ignore_leftovers,
)
from ocs_ci.helpers.helpers import get_full_test_logs_path
from ocs_ci.ocs import constants, exceptions
Expand Down Expand Up @@ -47,6 +48,7 @@
@performance_b
@skipif_ocp_version("<4.6")
@skipif_ocs_version("<4.6")
@ignore_leftovers
class TestPvcMultiClonePerformance(PASTest):
def setup(self):
"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
skipif_ocp_version,
performance,
performance_b,
ignore_leftovers,
)

from ocs_ci.helpers.helpers import get_full_test_logs_path
Expand All @@ -44,6 +45,7 @@
@performance_b
@skipif_ocp_version("<4.6")
@skipif_ocs_version("<4.6")
@ignore_leftovers
class TestPvcMultiSnapshotPerformance(PASTest):
"""
Tests to measure PVC snapshots creation performance & scale
Expand Down
3 changes: 3 additions & 0 deletions tests/encryption/test_intransit_encryption_sanity.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@
log = logging.getLogger(__name__)


@pytest.mark.skip(
reason="Skip due to issue https://github.com/red-hat-storage/ocs-ci/issues/8759"
)
@green_squad
class TestInTransitEncryptionSanity:
@pytest.fixture(autouse=True)
Expand Down
6 changes: 6 additions & 0 deletions tests/manage/add_metadata_feature/test_metadata.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,9 @@ def test_create_pvc(self, pvc_factory):
), f"PVC {pvc_obj.name} is not deleted"


@pytest.mark.skip(
reason="Skip due to issue https://github.com/red-hat-storage/ocs-ci/issues/8759"
)
@tier1
@skipif_ocs_version("<4.12")
@skipif_ocp_version("<4.12")
Expand Down Expand Up @@ -226,6 +229,9 @@ def test_metadata_not_enabled_by_default(
), f"PVC {cloned_pvc_obj.name} is not deleted"


@pytest.mark.skip(
reason="Skip due to issue https://github.com/red-hat-storage/ocs-ci/issues/8759"
)
@skipif_ocs_version("<4.12")
@skipif_ocp_version("<4.12")
@skipif_managed_service
Expand Down
Loading

0 comments on commit 95c6e10

Please sign in to comment.