From 319272e4e79818cd7c2265f90af98c949357ab77 Mon Sep 17 00:00:00 2001 From: Rewant Soni Date: Tue, 12 Nov 2024 12:34:26 +0530 Subject: [PATCH] controllers: add a new controller to setup mirroring Signed-off-by: Rewant Soni --- config/rbac/role.yaml | 6 + controllers/mirroring/mirroring_controller.go | 414 ++++++++++++++++++ controllers/storagecluster/cephblockpools.go | 4 + .../storageclusterpeer_controller.go | 23 +- .../storagerequest_controller.go | 5 +- controllers/util/k8sutil.go | 3 + deploy/csv-templates/ocs-operator.csv.yaml.in | 6 + .../ocs-operator.clusterserviceversion.yaml | 6 + main.go | 8 + .../v4/controllers/util/k8sutil.go | 3 + 10 files changed, 473 insertions(+), 5 deletions(-) create mode 100644 controllers/mirroring/mirroring_controller.go diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 08ee59544a..b2f47aecc2 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -122,6 +122,12 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - configmaps/finalizers + verbs: + - update - apiGroups: - "" resources: diff --git a/controllers/mirroring/mirroring_controller.go b/controllers/mirroring/mirroring_controller.go new file mode 100644 index 0000000000..44c622c936 --- /dev/null +++ b/controllers/mirroring/mirroring_controller.go @@ -0,0 +1,414 @@ +/* +Copyright 2020 Red Hat OpenShift Container Storage. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mirroring + +import ( + "context" + "fmt" + "slices" + "time" + + ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1" + ocsv1alpha1 "github.com/red-hat-storage/ocs-operator/api/v4/v1alpha1" + providerClient "github.com/red-hat-storage/ocs-operator/services/provider/api/v4/client" + "github.com/red-hat-storage/ocs-operator/v4/controllers/storageclusterpeer" + controllers "github.com/red-hat-storage/ocs-operator/v4/controllers/storageconsumer" + "github.com/red-hat-storage/ocs-operator/v4/controllers/util" + + "github.com/go-logr/logr" + rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +const ( + rBDMirrorDaemonName = "rbd-mirror" + // internalKey is a special key for client-mapping-config to establish mirroring between blockPools for internal mode + internalKey = "internal" + mirroringFinalizer = "mirroring.ocs.openshift.io" +) + +// MirroringReconciler reconciles a Mirroring fields for Ceph Object(s) +// nolint:revive +type MirroringReconciler struct { + client.Client + Scheme *runtime.Scheme + + log logr.Logger + ctx context.Context + clientMappingConfig *corev1.ConfigMap + storageClusterPeer *ocsv1.StorageClusterPeer + mapClientIDToConsumerObj map[string]*ocsv1alpha1.StorageConsumer +} + +// SetupWithManager sets up the controller with the Manager. +func (r *MirroringReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). 
+ For(&corev1.ConfigMap{}, builder.WithPredicates(util.NamePredicate(storageclusterpeer.StorageClientMappingConfigName))). + Watches(&ocsv1.StorageClusterPeer{}, &handler.EnqueueRequestForObject{}). + Watches(&ocsv1alpha1.StorageConsumer{}, &handler.EnqueueRequestForObject{}). + Complete(r) +} + +//+kubebuilder:rbac:groups=ocs.openshift.io,resources=storageclusterpeers;storageconsumers,verbs=get;list;watch +//+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch +//+kubebuilder:rbac:groups=core,resources=configmaps/finalizers,verbs=update +//+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;delete + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *MirroringReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { + var err error + r.ctx = ctx + r.log = log.FromContext(ctx, "Mirroring Controller", request) + r.log.Info("Reconciling Mirroring Controller.") + + r.clientMappingConfig = &corev1.ConfigMap{} + r.clientMappingConfig.Name = request.Name + r.clientMappingConfig.Namespace = request.Namespace + + if err = r.get(r.clientMappingConfig); err != nil { + if k8serrors.IsNotFound(err) { + r.log.Info("ClientMappingConfig %s not found. 
Ignoring since object must be deleted.", "ConfigMap", r.clientMappingConfig.Name) + return ctrl.Result{}, nil + } + r.log.Error(err, "Failed to get ConfigMap.", "ConfigMap", r.clientMappingConfig.Name) + return ctrl.Result{}, err + } + + // marked for deletion + if !r.clientMappingConfig.GetDeletionTimestamp().IsZero() { + if res, err := r.disableBlockPoolMirroring(); err != nil || !res.IsZero() { + return res, err + } + + if controllerutil.RemoveFinalizer(r.clientMappingConfig, mirroringFinalizer) { + r.log.Info("removing finalizer from ClientMappingConfig.", "ClientMappingConfig", r.clientMappingConfig.Name) + if err := r.update(r.clientMappingConfig); err != nil { + r.log.Info("Failed to remove finalizer from ClientMappingConfig", "ClientMappingConfig", r.clientMappingConfig.Name) + return ctrl.Result{}, fmt.Errorf("failed to remove finalizer from ClientMappingConfig: %v", err) + } + } + } + + if len(r.clientMappingConfig.Data) < 1 { + return ctrl.Result{}, nil + } + + if controllerutil.AddFinalizer(r.clientMappingConfig, mirroringFinalizer) { + r.log.Info("Finalizer not found for ClientMappingConfig. Adding finalizer.", "ClientMappingConfig", r.clientMappingConfig.Name) + if err := r.update(r.clientMappingConfig); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update ClientMappingConfig: %v", err) + } + } + + // Find the StorageClusterPeer from OwnerRef + owner := util.FindOwnerRefByKind(r.clientMappingConfig, "StorageClusterPeer") + + if owner == nil { + return ctrl.Result{}, fmt.Errorf("failed to find StorageClusterPeer owning the ClientMappingConfig") + } + + // Fetch the StorageClusterPeer instance + r.storageClusterPeer = &ocsv1.StorageClusterPeer{} + r.storageClusterPeer.Name = owner.Name + r.storageClusterPeer.Namespace = request.Namespace + + if err = r.get(r.storageClusterPeer); err != nil { + if k8serrors.IsNotFound(err) { + r.log.Info("StorageClusterPeer resource not found. 
Ignoring since object must be deleted.") + return ctrl.Result{}, nil + } + r.log.Error(err, "Failed to get StorageClusterPeer.") + return ctrl.Result{}, err + } + + if r.storageClusterPeer.Status.State != ocsv1.StorageClusterPeerStatePeered { + return ctrl.Result{}, fmt.Errorf("waiting for StorageClusterPeer %s to be in Peered state", r.storageClusterPeer.Name) + } + + r.mapClientIDToConsumerObj = map[string]*ocsv1alpha1.StorageConsumer{} + + storageConsumerList := &ocsv1alpha1.StorageConsumerList{} + err = r.list(storageConsumerList, client.InNamespace(r.storageClusterPeer.Namespace)) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to list StorageConsumer objects: %w", err) + } + for i := range storageConsumerList.Items { + r.mapClientIDToConsumerObj[storageConsumerList.Items[i].Status.Client.ID] = &storageConsumerList.Items[i] + } + + return r.reconcilePhases() +} + +func (r *MirroringReconciler) reconcilePhases() (ctrl.Result, error) { + ocsClient, err := providerClient.NewProviderClient(r.ctx, r.storageClusterPeer.Spec.ApiEndpoint, time.Second*10) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create a new provider client: %v", err) + } + defer ocsClient.Close() + + err = r.reconcileRBDMirrorDaemon() + if err != nil { + return ctrl.Result{}, err + } + + if res, err := r.reconcileBlockPoolMirroring(ocsClient); err != nil || !res.IsZero() { + return res, err + } + + if res, err := r.reconcileRadosNamespaceMirroring(ocsClient); err != nil || !res.IsZero() { + return res, err + } + + return ctrl.Result{}, nil +} + +func (r *MirroringReconciler) disableBlockPoolMirroring() (ctrl.Result, error) { + + cephBlockPoolsList, err := r.listCephBlockPools() + if err != nil { + return ctrl.Result{}, err + } + + for i := range cephBlockPoolsList.Items { + cephBlockPool := &cephBlockPoolsList.Items[i] + + cephBlockPool.Spec.Mirroring = rookCephv1.MirroringSpec{} + err := r.update(cephBlockPool) + if err != nil { + return ctrl.Result{}, 
fmt.Errorf("failed to disable mirroring for CephBlockPool %q: %v", cephBlockPool.Name, err) + } + } + return ctrl.Result{}, nil +} + +func (r *MirroringReconciler) reconcileRBDMirrorDaemon() error { + rbdMirror := &rookCephv1.CephRBDMirror{} + rbdMirror.Name = rBDMirrorDaemonName + rbdMirror.Namespace = r.clientMappingConfig.Namespace + + enableMaintenanceMode := false + for _, consumer := range r.mapClientIDToConsumerObj { + if _, ok := consumer.GetAnnotations()[util.RequestMaintenanceModeAnnotation]; ok { + enableMaintenanceMode = true + } + } + if enableMaintenanceMode { + if err := r.delete(rbdMirror); err != nil { + return fmt.Errorf("failed to delete CephRBDMirror: %v", err) + } + } + _, err := ctrl.CreateOrUpdate(r.ctx, r.Client, rbdMirror, func() error { + if err := r.own(rbdMirror); err != nil { + return err + } + rbdMirror.Spec.Count = 1 + return nil + }) + if err != nil { + r.log.Error(err, "Failed to create/update the CephRBDMirror", "CephRBDMirror", rbdMirror) + return err + } + + return nil +} + +func (r *MirroringReconciler) reconcileBlockPoolMirroring(ocsClient *providerClient.OCSProviderClient) (ctrl.Result, error) { + + cephBlockPoolsList, err := r.listCephBlockPools() + if err != nil { + return ctrl.Result{}, err + } + + var blockPoolsList []string + mapBlockPoolNameToObj := map[string]*rookCephv1.CephBlockPool{} + + //enable mirroring for blockpools + for i := range cephBlockPoolsList.Items { + blockPoolsList = append(blockPoolsList, cephBlockPoolsList.Items[i].Name) + mapBlockPoolNameToObj[cephBlockPoolsList.Items[i].Name] = &cephBlockPoolsList.Items[i] + + cephBlockPool := cephBlockPoolsList.Items[i] + cephBlockPool.Spec.Mirroring.Enabled = true + cephBlockPool.Spec.Mirroring.Mode = "image" + err = r.update(&cephBlockPool) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to enable mirroring for CephBlockPool %v: %w", cephBlockPool.Name, err) + } + } + + // fetch BlockPoolsInfo + response, err := ocsClient.GetBlockPoolsInfo(r.ctx, 
r.storageClusterPeer.Status.PeerInfo.StorageClusterUid, blockPoolsList) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to get CephBlockPool(s) info from Peer: %w", err) + } + + for i := range response.BlockPoolsInfo { + blockPoolName := response.BlockPoolsInfo[i].BlockPoolName + + if len(response.BlockPoolsInfo[i].MirroringToken) == 0 { + return ctrl.Result{}, fmt.Errorf("failed to fetch mirroring token for the blockPool") + } + + secretName := fmt.Sprintf("%s-%s", "peer", blockPoolName) + err = r.updateMirroringSecretForBlockPool(blockPoolName, secretName, response.BlockPoolsInfo[i].MirroringToken) + if err != nil { + return ctrl.Result{}, err + } + + cephBlockPool := mapBlockPoolNameToObj[blockPoolName] + + util.AddAnnotation(cephBlockPool, util.BlockPoolMirroringTargetIDAnnotation, response.BlockPoolsInfo[i].BlockPoolID) + + if cephBlockPool.Spec.Mirroring.Peers == nil { + cephBlockPool.Spec.Mirroring.Peers = &rookCephv1.MirroringPeerSpec{SecretNames: []string{}} + } + if !slices.Contains(cephBlockPool.Spec.Mirroring.Peers.SecretNames, secretName) { + cephBlockPool.Spec.Mirroring.Peers.SecretNames = append(cephBlockPool.Spec.Mirroring.Peers.SecretNames, secretName) + } + err = r.update(cephBlockPool) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update bootstrap secret ref on CephBlockPool %q: %v", cephBlockPool.Name, err) + } + } + + return ctrl.Result{}, nil +} + +func (r *MirroringReconciler) reconcileRadosNamespaceMirroring(ocsClient *providerClient.OCSProviderClient) (ctrl.Result, error) { + peerClientIDs := []string{} + + for localClientID, peerClientID := range r.clientMappingConfig.Data { + // for internal mode, we need only blockPool mirroring, hence skipping this for the special key "internal" + if localClientID == internalKey { + continue + } + // Check if the storageConsumer with the ClientID exists + if r.mapClientIDToConsumerObj[localClientID] == nil { + return ctrl.Result{}, fmt.Errorf("failed to find 
StorageConsumer %s", localClientID) + } + peerClientIDs = append(peerClientIDs, peerClientID) + } + + response, err := ocsClient.GetStorageClientsInfo(r.ctx, r.storageClusterPeer.Status.PeerInfo.StorageClusterUid, peerClientIDs) + if err != nil { + return ctrl.Result{}, err + } + + type ClientInfo struct { + radosNamespace string + } + + mapPeerClientIDToRadosNamespace := map[string]ClientInfo{} + for i := range response.ClientsInfo { + client := response.ClientsInfo[i] + mapPeerClientIDToRadosNamespace[client.ClientID] = ClientInfo{client.RadosNamespace} + } + + for localClientID, peerClientID := range r.clientMappingConfig.Data { + if localClientID == internalKey { + continue + } + radosNamespaceList := &rookCephv1.CephBlockPoolRadosNamespaceList{} + err = r.list( + radosNamespaceList, + client.InNamespace(r.storageClusterPeer.Namespace), + client.MatchingLabels{controllers.StorageConsumerNameLabel: r.mapClientIDToConsumerObj[localClientID].Name}, + ) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to list radosnamespace(s) for StorageConsumer %s", localClientID) + } + for i := range radosNamespaceList.Items { + rns := &radosNamespaceList.Items[i] + rns.Spec.Mirroring = &rookCephv1.RadosNamespaceMirroring{ + RemoteNamespace: ptr.To(mapPeerClientIDToRadosNamespace[peerClientID].radosNamespace), + Mode: "image", + } + err := r.update(rns) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update radosnamespace %s", rns.Name) + } + } + } + return ctrl.Result{}, nil +} + +func (r *MirroringReconciler) get(obj client.Object) error { + return r.Client.Get(r.ctx, client.ObjectKeyFromObject(obj), obj) +} + +func (r *MirroringReconciler) list(obj client.ObjectList, listOptions ...client.ListOption) error { + return r.Client.List(r.ctx, obj, listOptions...) +} + +func (r *MirroringReconciler) update(obj client.Object, opts ...client.UpdateOption) error { + return r.Client.Update(r.ctx, obj, opts...) 
+} + +func (r *MirroringReconciler) delete(obj client.Object, opts ...client.DeleteOption) error { + return r.Client.Delete(r.ctx, obj, opts...) +} + +func (r *MirroringReconciler) own(obj client.Object) error { + return controllerutil.SetControllerReference(r.storageClusterPeer, obj, r.Scheme) +} + +func (r *MirroringReconciler) listCephBlockPools() (*rookCephv1.CephBlockPoolList, error) { + selector := labels.NewSelector() + blockPoolLabelSelectorRequirement, err := labels.NewRequirement(util.ForbidMirroringLabel, selection.NotEquals, []string{"true"}) + if err != nil { + return nil, err + } + selector = selector.Add(*blockPoolLabelSelectorRequirement) + + cephBlockPoolsList := &rookCephv1.CephBlockPoolList{} + err = r.list(cephBlockPoolsList, client.InNamespace(r.storageClusterPeer.Namespace), client.MatchingLabelsSelector{Selector: selector}) + if err != nil { + return nil, fmt.Errorf("failed to list CephBlockPools: %w", err) + } + return cephBlockPoolsList, nil +} + +func (r *MirroringReconciler) updateMirroringSecretForBlockPool(blockPoolName, secretName, mirroringToken string) error { + mirroringSecret := &corev1.Secret{} + mirroringSecret.Name = secretName + mirroringSecret.Namespace = r.clientMappingConfig.Namespace + var err error + + _, err = ctrl.CreateOrUpdate(r.ctx, r.Client, mirroringSecret, func() error { + if err = r.own(mirroringSecret); err != nil { + return err + } + mirroringSecret.Data = map[string][]byte{ + "pool": []byte(blockPoolName), + "token": []byte(mirroringToken), + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to create/update bootstrap secret: %w", err) + } + return nil +} diff --git a/controllers/storagecluster/cephblockpools.go b/controllers/storagecluster/cephblockpools.go index 361ea33238..6cd0ea92ef 100644 --- a/controllers/storagecluster/cephblockpools.go +++ b/controllers/storagecluster/cephblockpools.go @@ -4,6 +4,7 @@ import ( "fmt" ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1" + 
"github.com/red-hat-storage/ocs-operator/v4/controllers/util" cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -149,6 +150,7 @@ func (o *ocsCephBlockPools) reconcileMgrCephBlockPool(r *StorageClusterReconcile cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster) cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "metadata") + util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true") return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme) }) @@ -197,6 +199,8 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconcile cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster) cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data") cephBlockPool.Spec.PoolSpec.EnableRBDStats = true + util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true") + return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme) }) if err != nil { diff --git a/controllers/storageclusterpeer/storageclusterpeer_controller.go b/controllers/storageclusterpeer/storageclusterpeer_controller.go index 2da99f7d31..d85efba286 100644 --- a/controllers/storageclusterpeer/storageclusterpeer_controller.go +++ b/controllers/storageclusterpeer/storageclusterpeer_controller.go @@ -21,7 +21,6 @@ import ( "encoding/base64" "encoding/json" "fmt" - "google.golang.org/grpc/codes" "strings" "time" @@ -31,17 +30,24 @@ import ( "github.com/red-hat-storage/ocs-operator/v4/services" "github.com/go-logr/logr" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" + 
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" ) +const ( + StorageClientMappingConfigName = "storage-client-mapping" +) + // StorageClusterPeerReconciler reconciles a StorageClusterPeer object // nolint:revive type StorageClusterPeerReconciler struct { @@ -57,6 +63,7 @@ func (r *StorageClusterPeerReconciler) SetupWithManager(mgr ctrl.Manager) error return ctrl.NewControllerManagedBy(mgr). For(&ocsv1.StorageClusterPeer{}). Watches(&ocsv1.StorageCluster{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Watches(&corev1.ConfigMap{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(util.NamePredicate(StorageClientMappingConfigName))). Complete(r) } @@ -64,6 +71,7 @@ func (r *StorageClusterPeerReconciler) SetupWithManager(mgr ctrl.Manager) error //+kubebuilder:rbac:groups=ocs.openshift.io,resources=storageclusterpeers/status,verbs=get;update;patch //+kubebuilder:rbac:groups=ocs.openshift.io,resources=storageclusterpeers/finalizers,verbs=update //+kubebuilder:rbac:groups=ocs.openshift.io,resources=storageclusters,verbs=get;list;watch +// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;delete;patch // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
@@ -88,6 +96,15 @@ func (r *StorageClusterPeerReconciler) Reconcile(ctx context.Context, request ct } if storageClusterPeer.Status.State == ocsv1.StorageClusterPeerStatePeered { + clientConfigMap := &corev1.ConfigMap{} + clientConfigMap.Name = StorageClientMappingConfigName + clientConfigMap.Namespace = storageClusterPeer.Namespace + _, err = controllerutil.CreateOrUpdate(r.ctx, r.Client, clientConfigMap, func() error { + return r.own(storageClusterPeer, clientConfigMap) + }) + if err != nil { + return ctrl.Result{}, err + } return ctrl.Result{}, nil } @@ -192,3 +209,7 @@ func (r *StorageClusterPeerReconciler) get(obj client.Object) error { key := client.ObjectKeyFromObject(obj) return r.Client.Get(r.ctx, key, obj) } + +func (r *StorageClusterPeerReconciler) own(storageClusterPeer *ocsv1.StorageClusterPeer, obj client.Object) error { + return controllerutil.SetControllerReference(storageClusterPeer, obj, r.Scheme) +} diff --git a/controllers/storagerequest/storagerequest_controller.go b/controllers/storagerequest/storagerequest_controller.go index c2c057ff3b..964107f0d6 100644 --- a/controllers/storagerequest/storagerequest_controller.go +++ b/controllers/storagerequest/storagerequest_controller.go @@ -370,10 +370,7 @@ func (r *StorageRequestReconciler) reconcileRadosNamespace() error { } // add a blockpool name in the label so UI can watch for the rados namespace // that belongs to the particular blockpool - addLabel(r.cephRadosNamespace, blockPoolNameLabel, blockPoolName) - r.cephRadosNamespace.Spec = rookCephv1.CephBlockPoolRadosNamespaceSpec{ - BlockPoolName: blockPoolName, - } + r.cephRadosNamespace.Spec.BlockPoolName = blockPoolName return nil }) diff --git a/controllers/util/k8sutil.go b/controllers/util/k8sutil.go index 1d93c28510..6746fd9a9a 100644 --- a/controllers/util/k8sutil.go +++ b/controllers/util/k8sutil.go @@ -53,7 +53,10 @@ const ( OdfInfoNamespacedNameClaimName = "odfinfo.odf.openshift.io" ExitCodeThatShouldRestartTheProcess = 42 + 
//ForbidMirroringLabel is used to forbid mirroring for ceph resources such as CephBlockPool + ForbidMirroringLabel = "ocs.openshift.io/forbid-mirroring" BlockPoolMirroringTargetIDAnnotation = "ocs.openshift.io/mirroring-target-id" + RequestMaintenanceModeAnnotation = "ocs.openshift.io/request-maintenance-mode" ) var podNamespace = os.Getenv(PodNamespaceEnvVar) diff --git a/deploy/csv-templates/ocs-operator.csv.yaml.in b/deploy/csv-templates/ocs-operator.csv.yaml.in index ffa68ff072..71dde54fe0 100644 --- a/deploy/csv-templates/ocs-operator.csv.yaml.in +++ b/deploy/csv-templates/ocs-operator.csv.yaml.in @@ -293,6 +293,12 @@ spec: - patch - update - watch + - apiGroups: + - "" + resources: + - configmaps/finalizers + verbs: + - update - apiGroups: - "" resources: diff --git a/deploy/ocs-operator/manifests/ocs-operator.clusterserviceversion.yaml b/deploy/ocs-operator/manifests/ocs-operator.clusterserviceversion.yaml index 30a1d7cab1..438e2abda6 100644 --- a/deploy/ocs-operator/manifests/ocs-operator.clusterserviceversion.yaml +++ b/deploy/ocs-operator/manifests/ocs-operator.clusterserviceversion.yaml @@ -302,6 +302,12 @@ spec: - patch - update - watch + - apiGroups: + - "" + resources: + - configmaps/finalizers + verbs: + - update - apiGroups: - "" resources: diff --git a/main.go b/main.go index 37ea4d8d9e..19ff7324e6 100644 --- a/main.go +++ b/main.go @@ -58,6 +58,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" metrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "github.com/red-hat-storage/ocs-operator/v4/controllers/mirroring" "github.com/red-hat-storage/ocs-operator/v4/controllers/ocsinitialization" "github.com/red-hat-storage/ocs-operator/v4/controllers/platform" "github.com/red-hat-storage/ocs-operator/v4/controllers/storagecluster" @@ -234,6 +235,13 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "StorageClusterPeer") os.Exit(1) } + if err = (&mirroring.MirroringReconciler{ + Client: mgr.GetClient(), + 
Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Mirroring") + os.Exit(1) + } // +kubebuilder:scaffold:builder // Create OCSInitialization CR if it's not present diff --git a/metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go b/metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go index 1d93c28510..6746fd9a9a 100644 --- a/metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go +++ b/metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go @@ -53,7 +53,10 @@ const ( OdfInfoNamespacedNameClaimName = "odfinfo.odf.openshift.io" ExitCodeThatShouldRestartTheProcess = 42 + //ForbidMirroringLabel is used to forbid mirroring for ceph resources such as CephBlockPool + ForbidMirroringLabel = "ocs.openshift.io/forbid-mirroring" BlockPoolMirroringTargetIDAnnotation = "ocs.openshift.io/mirroring-target-id" + RequestMaintenanceModeAnnotation = "ocs.openshift.io/request-maintenance-mode" ) var podNamespace = os.Getenv(PodNamespaceEnvVar)