fetch storage ns from config (#10951)
* fetch storage ns from config

Signed-off-by: Daniel Osypenko <[email protected]>
DanielOsypenko authored Dec 5, 2024
1 parent 051625c commit e17d1e9
Showing 22 changed files with 61 additions and 48 deletions.
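
The change repeated across these files is a single substitution: hard-coded references to the default openshift-storage namespace (constants.OPENSHIFT_STORAGE_NAMESPACE or the literal string) are replaced with a lookup of the configured namespace, config.ENV_DATA["cluster_namespace"]. A minimal before/after sketch of the pattern, with the import paths and the SECRET example taken from the hunks below (hosted_cluster.py):

    # Before: namespace pinned to the default ODF install namespace
    from ocs_ci.ocs import constants, ocp

    secret_ocp_obj = ocp.OCP(
        kind=constants.SECRET, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
    )

    # After: namespace resolved from the active run configuration
    from ocs_ci.framework import config
    from ocs_ci.ocs import constants, ocp

    secret_ocp_obj = ocp.OCP(
        kind=constants.SECRET, namespace=config.ENV_DATA["cluster_namespace"]
    )

Assuming the default configuration keeps ENV_DATA["cluster_namespace"] at "openshift-storage", behaviour is unchanged for standard deployments, while clusters whose storage namespace differs (for example provider/client or hosted-cluster layouts) now pick up the configured value instead of the constant.
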
8 changes: 4 additions & 4 deletions ocs_ci/deployment/hosted_cluster.py
@@ -917,7 +917,7 @@ def get_onboarding_key(self):
str: onboarding token key
"""
secret_ocp_obj = ocp.OCP(
kind=constants.SECRET, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
kind=constants.SECRET, namespace=config.ENV_DATA["cluster_namespace"]
)

key = (
@@ -1160,7 +1160,7 @@ def get_provider_address(self):
"""
Get the provider address
"""
ocp = OCP(namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
ocp = OCP(namespace=config.ENV_DATA["cluster_namespace"])
storage_provider_endpoint = ocp.exec_oc_cmd(
(
"get storageclusters.ocs.openshift.io -o jsonpath={'.items[*].status.storageProviderEndpoint'}"
@@ -1210,7 +1210,7 @@ def storage_claim_exists_cephfs(self):
else:
ocp = OCP(
kind=constants.STORAGECLAIM,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
namespace=config.ENV_DATA["cluster_namespace"],
cluster_kubeconfig=self.cluster_kubeconfig,
)

@@ -1297,7 +1297,7 @@ def storage_claim_exists_rbd(self):
else:
ocp = OCP(
kind=constants.STORAGECLAIM,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
namespace=config.ENV_DATA["cluster_namespace"],
cluster_kubeconfig=self.cluster_kubeconfig,
)

@@ -271,7 +271,7 @@ def provider_and_native_client_installation(
if self.ocs_version >= version.VERSION_4_16:
# Validate native client is created in openshift-storage namespace
self.deployment.wait_for_csv(
self.ocs_client_operator, constants.OPENSHIFT_STORAGE_NAMESPACE
self.ocs_client_operator, config.ENV_DATA["cluster_namespace"]
)

# Verify native storageclient is created successfully
2 changes: 1 addition & 1 deletion ocs_ci/helpers/helpers.py
@@ -615,7 +615,7 @@ def create_ceph_block_pool(


def create_ceph_file_system(
cephfs_name=None, label=None, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
cephfs_name=None, label=None, namespace=config.ENV_DATA["cluster_namespace"]
):
"""
Create a Ceph file system
2 changes: 1 addition & 1 deletion ocs_ci/ocs/bucket_utils.py
@@ -1090,7 +1090,7 @@ def check_pv_backingstore_status(

def check_pv_backingstore_type(
backingstore_name=constants.DEFAULT_NOOBAA_BACKINGSTORE,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
namespace=config.ENV_DATA["cluster_namespace"],
):
"""
check if existing pv backing store is in READY state
7 changes: 4 additions & 3 deletions ocs_ci/ocs/replica_one.py
@@ -13,7 +13,6 @@
from ocs_ci.ocs.constants import (
DEFAULT_CEPHBLOCKPOOL,
DEFAULT_STORAGE_CLUSTER,
OPENSHIFT_STORAGE_NAMESPACE,
OSD_APP_LABEL,
CEPHBLOCKPOOL,
STORAGECLASS,
@@ -141,7 +140,9 @@ def scaledown_deployment(deployment_names: list[str]) -> None:
"""
log.info("Starts Scaledown deployments")
deployment_obj = OCP(kind=DEPLOYMENT, namespace=OPENSHIFT_STORAGE_NAMESPACE)
deployment_obj = OCP(
kind=DEPLOYMENT, namespace=config.ENV_DATA["cluster_namespace"]
)
for deployment in deployment_names:
deployment_obj.exec_oc_cmd(f"scale deployment {deployment} --replicas=0")
log.info(f"scaling to 0: {deployment}")
@@ -221,7 +222,7 @@ def modify_replica1_osd_count(new_osd_count):
"""
storage_cluster = OCP(kind=STORAGECLUSTER, name=DEFAULT_STORAGE_CLUSTER)
storage_cluster.exec_oc_cmd(
f"patch storagecluster {DEFAULT_STORAGE_CLUSTER} -n {OPENSHIFT_STORAGE_NAMESPACE} "
f"patch storagecluster {DEFAULT_STORAGE_CLUSTER} -n {config.ENV_DATA['cluster_namespace']} "
f'--type json --patch \'[{{"op": "replace", "path": '
f'"/spec/managedResources/cephNonResilientPools/count", "value": {new_osd_count} }}]\''
)
8 changes: 4 additions & 4 deletions ocs_ci/ocs/resources/pod.py
@@ -796,7 +796,7 @@ def get_ceph_tools_pod(
cluster_kubeconfig = config.ENV_DATA.get("provider_kubeconfig", "")

if cluster_kubeconfig:
namespace = constants.OPENSHIFT_STORAGE_NAMESPACE
namespace = config.ENV_DATA["cluster_namespace"]
else:
namespace = namespace or config.ENV_DATA["cluster_namespace"]

@@ -1574,7 +1574,7 @@ def run_io_and_verify_mount_point(pod_obj, bs="10M", count="950"):

def get_pods_having_label(
label,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
namespace=config.ENV_DATA["cluster_namespace"],
retry=0,
cluster_config=None,
statuses=None,
@@ -3727,7 +3727,7 @@ def get_mon_pod_by_pvc_name(pvc_name: str):
return Pod(**mon_pod_ocp)


def get_debug_pods(debug_nodes, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE):
def get_debug_pods(debug_nodes, namespace=config.ENV_DATA["cluster_namespace"]):
"""
Get debug pods created for the nodes in debug
@@ -3752,7 +3752,7 @@ def get_debug_pods(debug_nodes, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)


def wait_for_pods_deletion(
label, timeout=120, sleep=5, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
label, timeout=120, sleep=5, namespace=config.ENV_DATA["cluster_namespace"]
):
"""
Wait for the pods with particular label to be deleted
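
A side note on the signatures above (illustrative only, not part of this commit): Python evaluates default argument values once, when the def statement runs, so defaults such as namespace=config.ENV_DATA["cluster_namespace"] capture whatever the config holds at import time rather than at call time. A small stand-alone sketch of that behaviour, using made-up names:

    # Stand-in for the ocs-ci config object; the real one is far richer.
    class _Config:
        ENV_DATA = {"cluster_namespace": "openshift-storage"}

    config = _Config()

    def get_pods(namespace=config.ENV_DATA["cluster_namespace"]):
        # The default was captured when `def` executed, not when the call happens.
        return f"listing pods in {namespace}"

    # Changing the config afterwards does not affect the already-bound default.
    config.ENV_DATA["cluster_namespace"] = "custom-storage-ns"
    print(get_pods())                                        # listing pods in openshift-storage
    print(get_pods(config.ENV_DATA["cluster_namespace"]))    # listing pods in custom-storage-ns

This only matters if ENV_DATA is populated or changed after these modules are imported; for a run where the configuration is loaded up front, the defaults resolve to the intended cluster namespace.
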
3 changes: 2 additions & 1 deletion ocs_ci/ocs/resources/stretchcluster.py
@@ -5,6 +5,7 @@

from datetime import timedelta

from ocs_ci.framework import config
from ocs_ci.ocs.resources import pod
from ocs_ci.ocs.node import get_nodes_having_label, get_ocs_nodes, get_node_objs
from ocs_ci.ocs.resources.ocs import OCS
@@ -542,7 +543,7 @@ def reset_conn_score(self):
Reset connection scores for all the mon's
"""
mon_pods = get_mon_pods(namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)
mon_pods = get_mon_pods(namespace=config.ENV_DATA["cluster_namespace"])
for pod_obj in mon_pods:
mon_pod_id = get_mon_pod_id(pod_obj)
cmd = f"ceph daemon mon.{mon_pod_id} connection scores reset"
6 changes: 5 additions & 1 deletion ocs_ci/ocs/ui/validation_ui.py
@@ -98,7 +98,11 @@ def verify_ocs_operator_tabs(self):
)

logger.info("Verify Details tab on OCS operator")
strings_details_tab = ["Description", "Succeeded", "openshift-storage"]
strings_details_tab = [
"Description",
"Succeeded",
config.ENV_DATA["cluster_namespace"],
]
self.verify_page_contain_strings(
strings_on_page=strings_details_tab, page_name="details_tab"
)
5 changes: 3 additions & 2 deletions ocs_ci/utility/tests/test_prometheus.py
@@ -2,6 +2,7 @@

import pytest

from ocs_ci.framework import config
from ocs_ci.utility.prometheus import check_query_range_result_enum


@@ -22,7 +23,7 @@ def query_range_result_ok():
"endpoint": "http-metrics",
"instance": "10.131.0.36:9283",
"job": "rook-ceph-mgr",
"namespace": "openshift-storage",
"namespace": config.ENV_DATA["cluster_namespace"],
"pod": "rook-ceph-mgr-a-66df496d9d-snssn",
"service": "rook-ceph-mgr",
},
@@ -52,7 +53,7 @@ def query_range_result_ok():
"endpoint": "http-metrics",
"instance": "10.131.0.36:9283",
"job": "rook-ceph-mgr",
"namespace": "openshift-storage",
"namespace": config.ENV_DATA["cluster_namespace"],
"pod": "rook-ceph-mgr-a-66df496d9d-snssn",
"service": "rook-ceph-mgr",
},
8 changes: 4 additions & 4 deletions tests/conftest.py
@@ -24,7 +24,7 @@
from ocs_ci.deployment import factory as dep_factory
from ocs_ci.deployment.helpers.hypershift_base import HyperShiftBase
from ocs_ci.deployment.hosted_cluster import HostedClients
from ocs_ci.framework import config as ocsci_config, Config
from ocs_ci.framework import config as ocsci_config, Config, config
import ocs_ci.framework.pytest_customization.marks
from ocs_ci.framework.pytest_customization.marks import (
deployment,
@@ -8029,7 +8029,7 @@ def factory(min_ep_count=3, max_ep_count=3, cpu=6, memory="10Gi"):
storagecluster_obj = OCP(
kind=constants.STORAGECLUSTER,
resource_name=constants.DEFAULT_STORAGE_CLUSTER,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
namespace=config.ENV_DATA["cluster_namespace"],
)

scale_endpoint_pods_param = (
@@ -8346,7 +8346,7 @@ def factory(pv_size="50"):
get_pods_having_label(
label=label,
retry=5,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
namespace=config.ENV_DATA["cluster_namespace"],
)
)

@@ -8371,7 +8371,7 @@ def finalizer():
get_pods_having_label(
label=label,
retry=5,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
namespace=config.ENV_DATA["cluster_namespace"],
)
)

4 changes: 1 addition & 3 deletions tests/cross_functional/ui/test_odf_topology.py
@@ -123,9 +123,7 @@ def test_validate_topology_configuration(
interface=constants.CEPHBLOCKPOOL,
access_mode=constants.ACCESS_MODE_RWO,
status=constants.STATUS_BOUND,
project=OCP(
kind="Project", namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
),
project=OCP(kind="Project", namespace=config.ENV_DATA["cluster_namespace"]),
)
pod_obj = helpers.create_pod(
interface_type=constants.CEPHBLOCKPOOL,
@@ -64,7 +64,7 @@ def test_nfs_not_enabled_by_default(self):
"""
storage_cluster_obj = ocp.OCP(
kind="Storagecluster", namespace="openshift-storage"
kind="Storagecluster", namespace=config.ENV_DATA["cluster_namespace"]
)
# Checks cephnfs resources not available by default
cephnfs_resource = storage_cluster_obj.exec_oc_cmd("get cephnfs")
@@ -125,7 +125,7 @@ def setup_teardown(self, request):
"""
self = request.node.cls
log.info("-----Setup-----")
self.namespace = "openshift-storage"
self.namespace = config.ENV_DATA["cluster_namespace"]
self.storage_cluster_obj = ocp.OCP(
kind="Storagecluster", namespace=self.namespace
)
@@ -2,6 +2,7 @@
import boto3
import logging

from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
tier2,
bugzilla,
@@ -15,7 +16,6 @@
from ocs_ci.ocs.ocp import OCP
import botocore.exceptions as boto3exception
from ocs_ci.ocs.constants import (
OPENSHIFT_STORAGE_NAMESPACE,
SECRET,
)
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
@@ -41,7 +41,7 @@ def test_bucket_delete_using_obc_creds(mcg_obj, bucket_factory):
logger.info("Creating OBC")
bucket = bucket_factory(amount=1, interface="OC")[0].name
# Fetch OBC credentials
secret_ocp_obj = OCP(kind=SECRET, namespace=OPENSHIFT_STORAGE_NAMESPACE)
secret_ocp_obj = OCP(kind=SECRET, namespace=config.ENV_DATA["cluster_namespace"])
obc_secret_obj = secret_ocp_obj.get(bucket)
obc_access_key = base64.b64decode(
obc_secret_obj.get("data").get("AWS_ACCESS_KEY_ID")
3 changes: 2 additions & 1 deletion tests/functional/object/mcg/test_multi_region.py
@@ -2,6 +2,7 @@

import pytest

from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
tier1,
tier4a,
@@ -200,7 +201,7 @@ def test_multiregion_spread_to_mirror(
bucket = bucket_factory(1, "OC", bucketclass=bucket_class)[0]
bucketclass_obj = ocp.OCP(
kind=constants.BUCKETCLASS,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
namespace=config.ENV_DATA["cluster_namespace"],
resource_name=bucket.bucketclass.name,
)
# Patch bucket class to update placement from "Spread" to "Mirror"
5 changes: 3 additions & 2 deletions tests/functional/object/mcg/test_noobaa_db_pg_expansion.py
@@ -1,5 +1,6 @@
import logging

from ocs_ci.framework import config
from ocs_ci.utility import utils
from ocs_ci.framework.pytest_customization.marks import (
vsphere_platform_required,
@@ -40,7 +41,7 @@ def test_noobaa_db_pg_expansion(self, scale_noobaa_db_pod_pv_size):

try:
ceph_toolbox = get_ceph_tools_pod(
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
namespace=config.ENV_DATA["cluster_namespace"]
)
except (AssertionError, CephToolBoxNotFoundException) as ex:
raise CommandFailed(ex)
@@ -74,7 +75,7 @@ def test_noobaa_db_pg_expansion(self, scale_noobaa_db_pod_pv_size):

# Verify default backingstore is in ready state or not
default_bs = OCP(
kind=constants.BACKINGSTORE, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE
kind=constants.BACKINGSTORE, namespace=config.ENV_DATA["cluster_namespace"]
).get(resource_name=constants.DEFAULT_NOOBAA_BACKINGSTORE)
assert (
default_bs["status"]["phase"] == constants.STATUS_READY
2 changes: 1 addition & 1 deletion tests/functional/object/mcg/test_pv_pool.py
@@ -373,7 +373,7 @@ def test_pvpool_bs_in_fips(self, backingstore_factory):
# the backingstore has reached Rejected state
pv_bs_obj = OCP(
kind=constants.BACKINGSTORE,
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
namespace=config.ENV_DATA["cluster_namespace"],
resource_name=pv_backingstore.name,
)
assert pv_bs_obj.wait_for_resource(
6 changes: 3 additions & 3 deletions tests/functional/object/mcg/test_s3_regenerate_creds.py
@@ -1,5 +1,6 @@
import logging

from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
tier2,
bugzilla,
@@ -9,7 +10,6 @@
mcg,
)
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs import constants

logger = logging.getLogger(__name__)

@@ -37,14 +37,14 @@ def test_s3_regenerate_creds(mcg_obj, project_factory):
logger.info(f"Creating OBC {obc_name}")
mcg_obj.exec_mcg_cmd(
cmd=f"obc create {obc_name} --app-namespace {proj_name}",
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
namespace=config.ENV_DATA["cluster_namespace"],
)
ocp_obj.get(resource_name=obc_name)

# regenerate credential
mcg_obj.exec_mcg_cmd(
cmd=f"obc regenerate {obc_name} --app-namespace {proj_name}",
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
namespace=config.ENV_DATA["cluster_namespace"],
use_yes=True,
)
logger.info("Successfully regenerated s3 credentials")
5 changes: 2 additions & 3 deletions tests/functional/object/mcg/test_virtual_hosted_buckets.py
@@ -1,9 +1,8 @@
import logging


from ocs_ci.framework import config
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs import constants
from ocs_ci.ocs.bucket_utils import (
verify_s3_object_integrity,
write_random_objects_in_pod,
@@ -45,7 +44,7 @@ def test_virtual_hosted_bucket(
# create a route for the bucket create above
s3_route_data = OCP(
kind="route",
namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,
namespace=config.ENV_DATA["cluster_namespace"],
resource_name="s3",
).get()
host_base = f'{s3_route_data["spec"]["host"]}'
4 changes: 2 additions & 2 deletions tests/functional/pod_and_daemons/test_csi_logs_rotation.py
@@ -2,9 +2,9 @@
import logging
import pytest

from ocs_ci.framework import config
from ocs_ci.framework.testlib import BaseTest
from ocs_ci.ocs.resources import pod
from ocs_ci.ocs.constants import OPENSHIFT_STORAGE_NAMESPACE
from ocs_ci.framework.pytest_customization.marks import (
brown_squad,
tier2,
@@ -161,7 +161,7 @@ def test_pods_csi_log_rotation(
"""
csi_interface_plugin_pod_objs = pod.get_all_pods(
namespace=OPENSHIFT_STORAGE_NAMESPACE, selector=[pod_selector]
namespace=config.ENV_DATA["cluster_namespace"], selector=[pod_selector]
)

# check on the first pod
(remaining 3 changed files not loaded in this view)