From 035ad5ceb3a74c20f6fa756df6071ec960955f5d Mon Sep 17 00:00:00 2001
From: Umanga Chapagain
Date: Wed, 29 Nov 2023 15:22:59 +0530
Subject: [PATCH 1/2] validate if cluster FSID is empty

When generating replicationID in CreateUniqueReplicationId(), ensure
that at least 2 non-empty cluster FSID strings are available. Also,
refactor to move the sorting logic into the helper to ensure
consistent hashing.

Signed-off-by: Umanga Chapagain
---
 .../agent_mirrorpeer_controller.go      | 11 ++-
 controllers/drpolicy_controller.go      | 12 +---
 controllers/drpolicy_controller_test.go | 32 ++++++++-
 controllers/utils/hash.go               | 18 ++++-
 controllers/utils/hash_test.go          | 69 ++++++++++++++++++-
 5 files changed, 121 insertions(+), 21 deletions(-)

diff --git a/addons/token-exchange/agent_mirrorpeer_controller.go b/addons/token-exchange/agent_mirrorpeer_controller.go
index 9b30025a..0061c2ea 100644
--- a/addons/token-exchange/agent_mirrorpeer_controller.go
+++ b/addons/token-exchange/agent_mirrorpeer_controller.go
@@ -21,7 +21,6 @@ import (
 	"crypto/sha1"
 	"encoding/hex"
 	"fmt"
-	"sort"
 	"strconv"
 	"time"
 
@@ -597,14 +596,12 @@ func (r *MirrorPeerReconciler) labelCephClusters(ctx context.Context, scr *multi
 	if found.Labels == nil {
 		found.Labels = make(map[string]string)
 	}
-	var fsids []string
-	for _, v := range clusterFSIDs {
-		fsids = append(fsids, v)
+	replicationId, err := utils.CreateUniqueReplicationId(clusterFSIDs)
+	if err != nil {
+		return err
 	}
 
-	// To ensure reliability of hash generation
-	sort.Strings(fsids)
-	replicationId := utils.CreateUniqueReplicationId(fsids)
+
 	if found.Labels[utils.CephClusterReplicationIdLabel] != replicationId {
 		klog.Infof("adding label %s/%s to cephcluster %s", utils.CephClusterReplicationIdLabel, replicationId, found.Name)
 		found.Labels[utils.CephClusterReplicationIdLabel] = replicationId
diff --git a/controllers/drpolicy_controller.go b/controllers/drpolicy_controller.go
index 1eb4486d..becb1eaa 100644
--- a/controllers/drpolicy_controller.go
+++ b/controllers/drpolicy_controller.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"sort"
 	"time"
 
 	"github.com/red-hat-storage/odf-multicluster-orchestrator/addons/setup"
@@ -166,16 +165,11 @@ func (r *DRPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
 
 func (r *DRPolicyReconciler) createOrUpdateManifestWorkForVRC(ctx context.Context, mp *multiclusterv1alpha1.MirrorPeer, dp *ramenv1alpha1.DRPolicy, clusterFSIDs map[string]string) error {
-	var fsids []string
-	for _, v := range clusterFSIDs {
-		fsids = append(fsids, v)
+	replicationId, err := utils.CreateUniqueReplicationId(clusterFSIDs)
+	if err != nil {
+		return err
 	}
 
-	// To ensure reliability of hash generation
-	sort.Strings(fsids)
-
-	replicationId := utils.CreateUniqueReplicationId(fsids)
-
 	for _, pr := range mp.Spec.Items {
 		manifestWorkName := fmt.Sprintf("vrc-%v", utils.FnvHash(dp.Name)) // Two ManifestWork per DRPolicy
 		found := &workv1.ManifestWork{
diff --git a/controllers/drpolicy_controller_test.go b/controllers/drpolicy_controller_test.go
index e0f88303..6df0daec 100644
--- a/controllers/drpolicy_controller_test.go
+++ b/controllers/drpolicy_controller_test.go
@@ -3,16 +3,18 @@ package controllers
 import (
 	"context"
 	"fmt"
+	"testing"
+
 	ramenv1alpha1 "github.com/ramendr/ramen/api/v1alpha1"
 	multiclusterv1alpha1 "github.com/red-hat-storage/odf-multicluster-orchestrator/api/v1alpha1"
 	"github.com/red-hat-storage/odf-multicluster-orchestrator/controllers/utils"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	clusterv1 "open-cluster-management.io/api/cluster/v1"
 	workv1 "open-cluster-management.io/api/work/v1"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-	"testing"
 )
 
 const (
@@ -101,7 +103,33 @@ func getFakeDRPolicyReconciler(drpolicy *ramenv1alpha1.DRPolicy, mp *multicluste
 			Name: cName2,
 		},
 	}
-	fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(drpolicy, mp, ns1, ns2).Build()
+	mc1 := &clusterv1.ManagedCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: cName1,
+		},
+		Status: clusterv1.ManagedClusterStatus{
+			ClusterClaims: []clusterv1.ManagedClusterClaim{
+				{
+					Name:  "cephfsid.odf.openshift.io",
+					Value: "db47dafb-1459-44ca-8a7a-b55ba2ec2d7c",
+				},
+			},
+		},
+	}
+	mc2 := &clusterv1.ManagedCluster{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: cName2,
+		},
+		Status: clusterv1.ManagedClusterStatus{
+			ClusterClaims: []clusterv1.ManagedClusterClaim{
+				{
+					Name:  "cephfsid.odf.openshift.io",
+					Value: "5b544f43-3ff9-4296-bc9f-051e60dcecdf",
+				},
+			},
+		},
+	}
+	fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(drpolicy, mp, ns1, ns2, mc1, mc2).Build()
 
 	r := DRPolicyReconciler{
 		HubClient: fakeClient,
diff --git a/controllers/utils/hash.go b/controllers/utils/hash.go
index 50b01d96..fd9b88b8 100644
--- a/controllers/utils/hash.go
+++ b/controllers/utils/hash.go
@@ -4,6 +4,7 @@ import (
 	"crypto/sha512"
 	"fmt"
 	"hash/fnv"
+	"sort"
 	"strings"
 )
 
@@ -34,6 +35,19 @@ func CreateUniqueSecretName(managedCluster, storageClusterNamespace, storageClus
 	return CreateUniqueName(managedCluster, storageClusterNamespace, storageClusterName)[0:39]
 }
 
-func CreateUniqueReplicationId(fsids []string) string {
-	return CreateUniqueName(fsids...)[0:39]
+func CreateUniqueReplicationId(clusterFSIDs map[string]string) (string, error) {
+	var fsids []string
+	for _, v := range clusterFSIDs {
+		if v != "" {
+			fsids = append(fsids, v)
+		}
+	}
+
+	if len(fsids) < 2 {
+		return "", fmt.Errorf("replicationID can not be generated due to missing cluster FSID")
+	}
+
+	// To ensure reliability of hash generation
+	sort.Strings(fsids)
+	return CreateUniqueName(fsids...)[0:39], nil
 }
diff --git a/controllers/utils/hash_test.go b/controllers/utils/hash_test.go
index d13ed083..545c87de 100644
--- a/controllers/utils/hash_test.go
+++ b/controllers/utils/hash_test.go
@@ -1,6 +1,8 @@
 package utils
 
-import "testing"
+import (
+	"testing"
+)
 
 func TestFnvHashTest(t *testing.T) {
 	testCases := []struct {
@@ -19,3 +21,68 @@ func TestFnvHashTest(t *testing.T) {
 		}
 	}
 }
+
+func TestCreateUniqueReplicationId(t *testing.T) {
+	type args struct {
+		fsids map[string]string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    string
+		wantErr bool
+	}{
+		{
+			name: "Case 1: Both FSID available",
+			args: args{
+				fsids: map[string]string{
+					"c1": "7e252ee3-abd9-4c54-a4ff-a2fdce8931a0",
+					"c2": "aacfbd7e-5ced-42a5-bdc2-483fcbe5a29d",
+				},
+			},
+			want:    "a99df9fc6c52c7ef44222ab38657a0b15628a14",
+			wantErr: false,
+		},
+		{
+			name: "Case 2: FSID for C1 is available",
+			args: args{
+				fsids: map[string]string{
+					"c1": "7e252ee3-abd9-4c54-a4ff-a2fdce8931a0",
+					"c2": "",
+				},
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "Case 3: FSID for C2 is available",
+			args: args{
+				fsids: map[string]string{
+					"c1": "",
+					"c2": "aacfbd7e-5ced-42a5-bdc2-483fcbe5a29d",
+				},
+			},
+			want:    "",
+			wantErr: true,
+		},
+		{
+			name: "Case 4: Both FSID unavailable",
+			args: args{
+				fsids: map[string]string{
+					"c1": "",
+					"c2": "",
+				},
+			},
+			want:    "",
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := CreateUniqueReplicationId(tt.args.fsids)
+			if ((err != nil) != tt.wantErr) || got != tt.want {
+				t.Errorf("CreateUniqueReplicationId() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

From 5d1c0299534f7d0d21db19e7cdf79ae4ddd72122 Mon Sep 17 00:00:00 2001
From: Umanga Chapagain
Date: Wed, 29 Nov 2023 19:02:28 +0530
Subject: [PATCH 2/2] update ManifestWork for VRC when FSID is changed

In some cases, the cluster FSID can be empty at first and only be
populated later. For such cases, we need to ensure that the
ManifestWork for the VRC is updated with the newly generated
replication ID.

Signed-off-by: Umanga Chapagain
---
 controllers/drpolicy_controller.go | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/controllers/drpolicy_controller.go b/controllers/drpolicy_controller.go
index becb1eaa..322fb235 100644
--- a/controllers/drpolicy_controller.go
+++ b/controllers/drpolicy_controller.go
@@ -233,13 +233,14 @@ func (r *DRPolicyReconciler) createOrUpdateManifestWorkForVRC(ctx context.Contex
 				},
 			},
 		},
-		Spec: workv1.ManifestWorkSpec{
-			Workload: workv1.ManifestsTemplate{Manifests: []workv1.Manifest{
-				manifest,
-			}},
-		},
 	}
 
 	_, err = controllerutil.CreateOrUpdate(ctx, r.HubClient, &mw, func() error {
+		mw.Spec = workv1.ManifestWorkSpec{
+			Workload: workv1.ManifestsTemplate{
+				Manifests: []workv1.Manifest{
+					manifest,
+				}},
+		}
 		return nil
 	})
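
A minimal usage sketch (not part of the patch set) of the new map-based
CreateUniqueReplicationId() signature introduced in PATCH 1/2. The cluster
names are placeholders and the FSID values are reused from the test cases
above; the behaviour shown follows directly from the new guard and sort in
controllers/utils/hash.go.

package main

import (
	"fmt"

	"github.com/red-hat-storage/odf-multicluster-orchestrator/controllers/utils"
)

func main() {
	// Hypothetical cluster-name -> Ceph FSID map; the second FSID is still
	// empty, e.g. because its ClusterClaim has not been populated yet.
	clusterFSIDs := map[string]string{
		"cluster-east": "7e252ee3-abd9-4c54-a4ff-a2fdce8931a0",
		"cluster-west": "",
	}

	// Fewer than two non-empty FSIDs: the helper now returns an error
	// instead of hashing incomplete input.
	if _, err := utils.CreateUniqueReplicationId(clusterFSIDs); err != nil {
		fmt.Println("replicationId not generated:", err)
	}

	// Once both FSIDs are known, the result is a stable 39-character hash;
	// map iteration order does not matter because the values are sorted
	// inside the helper before hashing.
	clusterFSIDs["cluster-west"] = "aacfbd7e-5ced-42a5-bdc2-483fcbe5a29d"
	replicationId, err := utils.CreateUniqueReplicationId(clusterFSIDs)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("replicationId:", replicationId)
}

Because the helper now sorts the non-empty FSIDs itself, callers such as
labelCephClusters() and createOrUpdateManifestWorkForVRC() no longer need to
build and sort an intermediate slice before hashing.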
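
For PATCH 2/2, a self-contained sketch of the controllerutil.CreateOrUpdate()
behaviour the change relies on: fields assigned inside the mutate function are
applied on both the create and the update path, so an existing ManifestWork
picks up a VRC manifest that carries a newly generated replication ID. The
scheme registration, object names, and printed results below are assumptions
for illustration only, not code from the patches.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	workv1 "open-cluster-management.io/api/work/v1"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func main() {
	// Build a fake hub client that knows about ManifestWork (assumed to
	// mirror how the operator's test scheme is set up).
	scheme := runtime.NewScheme()
	_ = workv1.AddToScheme(scheme)
	hubClient := fake.NewClientBuilder().WithScheme(scheme).Build()

	mw := workv1.ManifestWork{
		ObjectMeta: metav1.ObjectMeta{Name: "vrc-demo", Namespace: "cluster-east"},
	}
	apply := func(manifests []workv1.Manifest) (controllerutil.OperationResult, error) {
		return controllerutil.CreateOrUpdate(context.TODO(), hubClient, &mw, func() error {
			// Assigning the Spec inside the mutate function means it is
			// (re)applied on update, not only on create.
			mw.Spec = workv1.ManifestWorkSpec{
				Workload: workv1.ManifestsTemplate{Manifests: manifests},
			}
			return nil
		})
	}

	// First reconcile: the ManifestWork does not exist yet, so it is created.
	op, err := apply(nil)
	fmt.Println(op, err) // expected: created <nil>

	// Later reconcile with a different workload (e.g. a VRC rendered with a
	// freshly generated replicationId): the existing object is updated.
	op, err = apply([]workv1.Manifest{{}})
	fmt.Println(op, err) // expected: updated <nil>
}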