Skip to content

Commit

Permalink
Resolve conflicts
Browse files Browse the repository at this point in the history
Signed-off-by: Shrivaibavi Raghaventhiran <[email protected]>
  • Loading branch information
Shrivaibavi committed Jun 18, 2024
1 parent 8d09501 commit f24539a
Show file tree
Hide file tree
Showing 3 changed files with 90 additions and 62 deletions.
46 changes: 42 additions & 4 deletions ocs_ci/ocs/dr/dr_workload.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,13 +180,45 @@ def __init__(self, **kwargs):
self.workload_subscription_dir, self.workload_name, "placementrule.yaml"
)

def deploy_workload(self):
    """
    Deploy the busybox workload via its subscription resources.

    Runs the prerequisite setup, patches the DRPC and channel YAML
    templates with the current cluster/policy details, applies the
    kustomize directories on the ACM hub cluster, and finally verifies
    that the workload came up.
    """
    self._deploy_prereqs()
    self.workload_namespace = self._get_workload_namespace()

    # Patch drpc.yaml with the preferred primary cluster and DR policy name
    drpc_data = templating.load_yaml(self.drpc_yaml_file)
    drpc_spec = drpc_data["spec"]
    drpc_spec["preferredCluster"] = self.preferred_primary_cluster
    drpc_spec["drPolicyRef"]["name"] = self.dr_policy_name
    templating.dump_data_to_temp_yaml(drpc_data, self.drpc_yaml_file)

    # TODO: drpc_yaml_file needs to be committed back to the repo,
    # because ACM would refetch it from the repo directly

    # Patch channel.yaml so it points at the workload repository
    channel_data = templating.load_yaml(self.channel_yaml_file)
    channel_data["spec"]["pathname"] = self.workload_repo_url
    templating.dump_data_to_temp_yaml(channel_data, self.channel_yaml_file)

    # Apply the subscription resources on the hub cluster
    config.switch_acm_ctx()
    run_cmd(f"oc create -k {self.workload_subscription_dir}")
    run_cmd(f"oc create -k {self.workload_subscription_dir}/{self.workload_name}")

    self.verify_workload_deployment()

def deploy_workloads_on_managed_clusters(
self, primary_cluster=True, secondary_cluster=None
):
"""
Deployment specific to busybox workload on both primary and secondary clusters
Args:
bool: True if apps needs to be deployed on primary cluster
bool: True if apps needs to be deployed on secondary cluster
primary_cluster(bool) : True if apps needs to be deployed on primary cluster
secondary_cluster(bool) : True if apps needs to be deployed on secondary cluster
"""
self._deploy_prereqs()
Expand Down Expand Up @@ -334,14 +366,20 @@ def _get_workload_namespace(self):
return namespace_yaml_data["metadata"]["name"]

def _get_ramen_namespace(self):
    """
    Return the namespace name declared in the ramen git repo
    namespace YAML file.
    """
    namespace_data = templating.load_yaml(self.git_repo_namespace_yaml_file)
    metadata = namespace_data["metadata"]
    return metadata["name"]

def verify_workload_deployment(self, cluster=None):
"""
Verify busybox workload
Args:
cluster : Cluster to verify if workload is running on it
"""
self.workload_namespace = self._get_workload_namespace()
if cluster is None:
Expand Down
6 changes: 3 additions & 3 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -6687,7 +6687,7 @@ def factory(
)
primary_cluster_instances.append(workload)
total_pvc_count += workload_details["pvc_count"]
workload.deploy_workload(
workload.deploy_workloads_on_managed_clusters(
primary_cluster=primary_cluster, secondary_cluster=None
)

Expand All @@ -6702,7 +6702,7 @@ def factory(
)
secondary_cluster_instances.append(workload)
total_pvc_count += workload_details["pvc_count"]
workload.deploy_workload(
workload.deploy_workloads_on_managed_clusters(
primary_cluster=None, secondary_cluster=secondary_cluster
)

Expand All @@ -6716,7 +6716,7 @@ def teardown():
for workload in instance:
try:
dr_helpers_ui.delete_application_ui(
acm_obj, workload_to_delete=workload.name
acm_obj, workload_to_delete=workload.workload_namespace
)
except ResourceNotDeleted:
failed_to_delete = True
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,28 +4,22 @@
from ocs_ci.framework.pytest_customization.marks import tier1
from ocs_ci.framework import config

# from ocs_ci.ocs.acm.acm import AcmAddClusters
from ocs_ci.ocs.acm.acm import AcmAddClusters
from ocs_ci.ocs import constants
from ocs_ci.helpers.dr_helpers import (
enable_fence,
enable_unfence,
get_fence_state,
# failover,
# relocate,
set_current_primary_cluster_context,
# set_current_secondary_cluster_context,
get_current_primary_cluster_name,
get_current_secondary_cluster_name,
# wait_for_all_resources_creation,
# wait_for_all_resources_deletion,
wait_for_all_resources_creation,
gracefully_reboot_ocp_nodes,
)

# from ocs_ci.helpers.dr_helpers_ui import (
# check_cluster_status_on_acm_console,
# failover_relocate_ui,
# verify_failover_relocate_status_ui,
# )
from ocs_ci.helpers.dr_helpers_ui import (
failover_relocate_ui,
verify_failover_relocate_status_ui,
)
from ocs_ci.framework.pytest_customization.marks import turquoise_squad
from ocs_ci.utility import version

Expand Down Expand Up @@ -83,7 +77,7 @@ def test_application_failover_and_relocate(
)
raise NotImplementedError

# acm_obj = AcmAddClusters()
acm_obj = AcmAddClusters()
primary_instances = []
secondary_instances = []

Expand All @@ -109,48 +103,44 @@ def test_application_failover_and_relocate(
)
logger.info(f"The secondary cluster is {secondary_cluster_name}")

logger.info(f"Primary instances {primary_instances}")
logger.info(f"Secondary instances {secondary_instances}")

# Fence the primary managed cluster
enable_fence(drcluster_name=self.primary_cluster_name)

# # Application Failover to Secondary managed cluster
# for instance in primary_instances:
# if (
# config.RUN.get("mdr_failover_via_ui")
# and workload_type == constants.SUBSCRIPTION
# ):
# logger.info(
# "Start the process of Failover of subscription based app from ACM UI"
# )
# config.switch_acm_ctx()
# failover_relocate_ui(
# acm_obj,
# workload_to_move=f"{instance.workload_namespace}-1",
# policy_name=workload.dr_policy_name,
# failover_or_preferred_cluster=secondary_cluster_name,
# )
# if workload_type == constants.APPLICATION_SET:
# # TODO: Failover appset based apps via UI
# # TODO: Failover of multiple apps to
# failover(
# failover_cluster=secondary_cluster_name,
# namespace=workload.workload_namespace,
# workload_type=workload_type,
# workload_placement_name=workload.appset_placement_name,
# )
#
# # Verify application are running in other managedcluster
# # And not in previous cluster
# set_current_primary_cluster_context(
# primary_instances[0].workload_namespace, workload_type
# )
# for instance in primary_instances:
# wait_for_all_resources_creation(
# workload.workload_pvc_count,
# workload.workload_pod_count,
# instance.workload_namespace,
# )
#
# # Verify the failover status from UI
# if config.RUN.get("mdr_failover_via_ui"):
# config.switch_acm_ctx()
# verify_failover_relocate_status_ui(acm_obj)
# Application Failover to Secondary managed cluster
config.switch_acm_ctx()
for instance in primary_instances:
if (
config.RUN.get("mdr_failover_via_ui")
and workload_type == constants.SUBSCRIPTION
):
logger.info(
"Start the process of Failover of subscription based app from ACM UI"
)
failover_relocate_ui(
acm_obj,
workload_to_move=f"{instance.workload_namespace}-1",
policy_name=instance.dr_policy_name,
failover_or_preferred_cluster=secondary_cluster_name,
)

# Verify application are running in other managedcluster
# And not in previous cluster
set_current_primary_cluster_context(
primary_instances[0].workload_namespace, workload_type
)
for instance in primary_instances:
wait_for_all_resources_creation(
instance.workload_pvc_count,
instance.workload_pod_count,
instance.workload_namespace,
)

# Verify the failover status from UI
if config.RUN.get("mdr_failover_via_ui"):
config.switch_acm_ctx()
verify_failover_relocate_status_ui(acm_obj)

# TODO Relocate of sub apps

0 comments on commit f24539a

Please sign in to comment.