diff --git a/api/v1alpha1/storageclient_types.go b/api/v1alpha1/storageclient_types.go
index 6ca4d7c8..b89d71ac 100644
--- a/api/v1alpha1/storageclient_types.go
+++ b/api/v1alpha1/storageclient_types.go
@@ -50,6 +50,8 @@ type StorageClientSpec struct {
 type StorageClientStatus struct {
     Phase storageClientPhase `json:"phase,omitempty"`
 
+    InMaintenanceMode bool `json:"inMaintenanceMode"`
+
     // ConsumerID will hold the identity of this cluster inside the attached provider cluster
     ConsumerID string `json:"id,omitempty"`
 }
diff --git a/bundle/manifests/ocs-client-operator.clusterserviceversion.yaml b/bundle/manifests/ocs-client-operator.clusterserviceversion.yaml
index ad1dcf4f..cb09d8e5 100644
--- a/bundle/manifests/ocs-client-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/ocs-client-operator.clusterserviceversion.yaml
@@ -7,7 +7,7 @@ metadata:
     categories: Storage
     console.openshift.io/plugins: '["odf-client-console"]'
     containerImage: quay.io/ocs-dev/ocs-client-operator:latest
-    createdAt: "2024-11-22T04:24:54Z"
+    createdAt: "2024-11-22T06:22:20Z"
     description: OpenShift Data Foundation client operator enables consumption
      of storage services from a remote centralized OpenShift Data Foundation
      provider cluster.
@@ -241,6 +241,15 @@ spec:
        - patch
        - update
        - watch
+      - apiGroups:
+        - ocs.openshift.io
+        resources:
+        - storageclaims
+        - storageclients
+        verbs:
+        - get
+        - list
+        - watch
       - apiGroups:
        - ocs.openshift.io
        resources:
@@ -315,6 +324,25 @@ spec:
        - list
        - update
        - watch
+      - apiGroups:
+        - ramendr.openshift.io
+        resources:
+        - maintenancemodes
+        verbs:
+        - create
+        - delete
+        - get
+        - list
+        - update
+        - watch
+      - apiGroups:
+        - ramendr.openshift.io
+        resources:
+        - maintenancemodes/status
+        verbs:
+        - get
+        - patch
+        - update
       - apiGroups:
        - replication.storage.openshift.io
        resources:
diff --git a/bundle/manifests/ocs.openshift.io_storageclients.yaml b/bundle/manifests/ocs.openshift.io_storageclients.yaml
index ef513ed9..fbb3fece 100644
--- a/bundle/manifests/ocs.openshift.io_storageclients.yaml
+++ b/bundle/manifests/ocs.openshift.io_storageclients.yaml
@@ -65,8 +65,12 @@ spec:
               description: ConsumerID will hold the identity of this cluster inside
                 the attached provider cluster
               type: string
+              inMaintenanceMode:
+                type: boolean
               phase:
                 type: string
+            required:
+            - inMaintenanceMode
             type: object
           type: object
     served: true
diff --git a/cmd/main.go b/cmd/main.go
index 9c2432fc..9dcb5e20 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -154,7 +154,7 @@ func main() {
         os.Exit(1)
     }
 
-    _, err = getAvailableCRDNames(context.Background(), apiClient)
+    availCrds, err := getAvailableCRDNames(context.Background(), apiClient)
     if err != nil {
         setupLog.Error(err, "Unable get a list of available CRD names")
         os.Exit(1)
@@ -204,11 +204,22 @@ func main() {
         Scheme:            mgr.GetScheme(),
         OperatorNamespace: utils.GetOperatorNamespace(),
         ConsolePort:       int32(consolePort),
+        AvailableCrds:     availCrds,
     }).SetupWithManager(mgr); err != nil {
         setupLog.Error(err, "unable to create controller", "controller", "OperatorConfigMapReconciler")
         os.Exit(1)
     }
 
+    if availCrds[controller.MaintenanceModeCRDName] {
+        if err = (&controller.MaintenanceModeReconciler{
+            Client: mgr.GetClient(),
+            Scheme: mgr.GetScheme(),
+        }).SetupWithManager(mgr); err != nil {
+            setupLog.Error(err, "unable to create controller", "controller", "MaintenanceMode")
+            os.Exit(1)
+        }
+    }
+
     setupLog.Info("starting manager")
     if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
         setupLog.Error(err, "problem running manager")
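Note: main.go previously discarded the result of getAvailableCRDNames; it is now kept so the MaintenanceMode controller is registered only when the CRD is installed at startup. The helper itself is outside this diff; the sketch below shows an assumed shape (the repo's actual implementation is authoritative), using a metadata-only list so only CRD names are decoded:

```go
// Sketch only: assumed implementation of getAvailableCRDNames, the helper
// main.go consumes above; not part of this diff.
package main

import (
	"context"

	extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func getAvailableCRDNames(ctx context.Context, c client.Client) (map[string]bool, error) {
	availCrds := map[string]bool{}
	crdList := &metav1.PartialObjectMetadataList{}
	crdList.SetGroupVersionKind(extv1.SchemeGroupVersion.WithKind("CustomResourceDefinitionList"))
	// A metadata-only list avoids decoding full CRD schemas; names suffice here.
	if err := c.List(ctx, crdList); err != nil {
		return nil, err
	}
	for i := range crdList.Items {
		availCrds[crdList.Items[i].Name] = true
	}
	return availCrds, nil
}
```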
diff --git a/config/crd/bases/ocs.openshift.io_storageclients.yaml b/config/crd/bases/ocs.openshift.io_storageclients.yaml
index 90b12081..7802959b 100644
--- a/config/crd/bases/ocs.openshift.io_storageclients.yaml
+++ b/config/crd/bases/ocs.openshift.io_storageclients.yaml
@@ -65,8 +65,12 @@ spec:
               description: ConsumerID will hold the identity of this cluster inside
                 the attached provider cluster
               type: string
+              inMaintenanceMode:
+                type: boolean
               phase:
                 type: string
+            required:
+            - inMaintenanceMode
             type: object
           type: object
     served: true
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 02c3c174..042fa8a8 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -198,6 +198,15 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - ocs.openshift.io
+  resources:
+  - storageclaims
+  - storageclients
+  verbs:
+  - get
+  - list
+  - watch
 - apiGroups:
   - ocs.openshift.io
   resources:
@@ -272,6 +281,25 @@ rules:
   - list
   - update
   - watch
+- apiGroups:
+  - ramendr.openshift.io
+  resources:
+  - maintenancemodes
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - update
+  - watch
+- apiGroups:
+  - ramendr.openshift.io
+  resources:
+  - maintenancemodes/status
+  verbs:
+  - get
+  - patch
+  - update
 - apiGroups:
   - replication.storage.openshift.io
   resources:
diff --git a/internal/controller/maintenancemode_controller.go b/internal/controller/maintenancemode_controller.go
new file mode 100644
index 00000000..d444920f
--- /dev/null
+++ b/internal/controller/maintenancemode_controller.go
@@ -0,0 +1,187 @@
+package controller
+
+import (
+    "context"
+    "fmt"
+    "time"
+
+    "github.com/red-hat-storage/ocs-client-operator/api/v1alpha1"
+
+    "github.com/go-logr/logr"
+    ramenv1alpha1 "github.com/ramendr/ramen/api/v1alpha1"
+    providerclient "github.com/red-hat-storage/ocs-operator/services/provider/api/v4/client"
+    "k8s.io/apimachinery/pkg/api/meta"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    ctrl "sigs.k8s.io/controller-runtime"
+    "sigs.k8s.io/controller-runtime/pkg/builder"
+    "sigs.k8s.io/controller-runtime/pkg/client"
+    "sigs.k8s.io/controller-runtime/pkg/event"
+    "sigs.k8s.io/controller-runtime/pkg/handler"
+    "sigs.k8s.io/controller-runtime/pkg/log"
+    "sigs.k8s.io/controller-runtime/pkg/predicate"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+const (
+    MaintenanceModeCRDName = "maintenancemodes.ramendr.openshift.io"
+)
+
+// MaintenanceModeReconciler reconciles Ramen MaintenanceMode objects
+type MaintenanceModeReconciler struct {
+    client.Client
+    Scheme *runtime.Scheme
+
+    log logr.Logger
+    ctx context.Context
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *MaintenanceModeReconciler) SetupWithManager(mgr ctrl.Manager) error {
+    generationChangePredicate := predicate.GenerationChangedPredicate{}
+    storageClientStatusPredicate := predicate.Funcs{
+        UpdateFunc: func(e event.UpdateEvent) bool {
+            if e.ObjectOld == nil || e.ObjectNew == nil {
+                return false
+            }
+            oldObj := e.ObjectOld.(*v1alpha1.StorageClient)
+            newObj := e.ObjectNew.(*v1alpha1.StorageClient)
+            return oldObj.Status.InMaintenanceMode != newObj.Status.InMaintenanceMode
+        },
+    }
+    return ctrl.NewControllerManagedBy(mgr).Named("Maintenance Mode").
+        Watches(
+            &ramenv1alpha1.MaintenanceMode{},
+            &handler.EnqueueRequestForObject{},
+            builder.WithPredicates(generationChangePredicate),
+        ).
+        Watches(
+            &v1alpha1.StorageClaim{},
+            &handler.EnqueueRequestForObject{},
+            builder.WithPredicates(generationChangePredicate),
+        ).
+        Watches(
+            &v1alpha1.StorageClient{},
+            &handler.EnqueueRequestForObject{},
+            builder.WithPredicates(generationChangePredicate, storageClientStatusPredicate),
+        ).
+        Complete(r)
+}
+
+//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=maintenancemodes,verbs=get;list;update;create;watch;delete
+//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=maintenancemodes/status,verbs=get;update;patch
+//+kubebuilder:rbac:groups=ocs.openshift.io,resources=storageclients;storageclaims,verbs=get;list;watch
+
+func (r *MaintenanceModeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+    r.ctx = ctx
+    r.log = log.FromContext(ctx, "MaintenanceMode", req)
+    r.log.Info("Reconciling MaintenanceMode")
+
+    mapClientNameToObj := map[string]*v1alpha1.StorageClient{}
+
+    maintenanceModes := &ramenv1alpha1.MaintenanceModeList{}
+    if err := r.list(maintenanceModes); err != nil {
+        r.log.Error(err, "failed to list the Maintenance Modes")
+        return reconcile.Result{}, err
+    }
+
+    for i := range maintenanceModes.Items {
+        mm := &maintenanceModes.Items[i]
+        sc := &v1alpha1.StorageClaim{}
+        // The MaintenanceMode's TargetID is the replication ID, which in our case is the StorageClaim name
+        sc.Name = mm.Spec.TargetID
+        err := r.get(sc)
+        if err != nil {
+            return ctrl.Result{}, err
+        }
+        clientName := sc.Spec.StorageClient
+        if mapClientNameToObj[clientName] == nil {
+            storageClient := &v1alpha1.StorageClient{}
+            storageClient.Name = clientName
+            err := r.get(storageClient)
+            if err != nil {
+                return ctrl.Result{}, err
+            }
+            mapClientNameToObj[clientName] = storageClient
+        }
+        if mapClientNameToObj[clientName].Status.InMaintenanceMode {
+            err := r.updateStatusCompletedForMM(mm)
+            if err != nil {
+                return ctrl.Result{}, err
+            }
+        }
+    }
+
+    storageClients := &v1alpha1.StorageClientList{}
+    if err := r.list(storageClients); err != nil {
+        r.log.Error(err, "failed to list the Storage Clients")
+        return reconcile.Result{}, err
+    }
+
+    for i := range storageClients.Items {
+        storageClient := &storageClients.Items[i]
+        if mapClientNameToObj[storageClient.Name] != nil && !storageClient.Status.InMaintenanceMode {
+            if err := r.sendRequest(storageClient, true); err != nil {
+                return ctrl.Result{}, err
+            }
+        } else if mapClientNameToObj[storageClient.Name] == nil && storageClient.Status.InMaintenanceMode {
+            if err := r.sendRequest(storageClient, false); err != nil {
+                return ctrl.Result{}, err
+            }
+        }
+    }
+
+    return ctrl.Result{}, nil
+}
+
+func (r *MaintenanceModeReconciler) sendRequest(storageClient *v1alpha1.StorageClient, enable bool) error {
+    providerClient, err := providerclient.NewProviderClient(
+        r.ctx,
+        storageClient.Spec.StorageProviderEndpoint,
+        10*time.Second,
+    )
+    if err != nil {
+        return fmt.Errorf(
+            "failed to create provider client with endpoint %v: %v",
+            storageClient.Spec.StorageProviderEndpoint,
+            err,
+        )
+    }
+    // Close client-side connections.
+    defer providerClient.Close()
+
+    _, err = providerClient.RequestMaintenanceMode(r.ctx, storageClient.Status.ConsumerID, enable)
+    if err != nil {
+        return fmt.Errorf("failed to request maintenance mode: %v", err)
+    }
+    return nil
+}
+
+func (r *MaintenanceModeReconciler) updateStatusCompletedForMM(maintenanceMode *ramenv1alpha1.MaintenanceMode) error {
+    // Ramen reads the State and Conditions in order to determine that the MaintenanceMode is Completed
+    maintenanceMode.Status.State = ramenv1alpha1.MModeStateCompleted
+    maintenanceMode.Status.ObservedGeneration = maintenanceMode.Generation
+    meta.SetStatusCondition(&maintenanceMode.Status.Conditions,
+        metav1.Condition{
+            Type:               string(ramenv1alpha1.MModeConditionFailoverActivated),
+            ObservedGeneration: maintenanceMode.Generation,
+            Reason:             string(ramenv1alpha1.MModeStateCompleted),
+            Status:             metav1.ConditionTrue,
+        },
+    )
+
+    statusErr := r.Client.Status().Update(r.ctx, maintenanceMode)
+    if statusErr != nil {
+        r.log.Error(statusErr, "Failed to update MaintenanceMode status", "MaintenanceMode", maintenanceMode.Name)
+        return statusErr
+    }
+    return nil
+}
+
+func (r *MaintenanceModeReconciler) list(obj client.ObjectList, opts ...client.ListOption) error {
+    return r.List(r.ctx, obj, opts...)
+}
+
+func (r *MaintenanceModeReconciler) get(obj client.Object, opts ...client.GetOption) error {
+    return r.Get(r.ctx, client.ObjectKeyFromObject(obj), obj, opts...)
+}
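Note: the reconciler above resolves each Ramen MaintenanceMode to a StorageClient via the StorageClaim named by Spec.TargetID, asks the provider to enter (or exit) maintenance mode, and marks the MaintenanceMode Completed once StorageClient.Status.InMaintenanceMode reports true. For orientation, a hedged illustration of the driving object follows; every name in it is made up, and the field set assumes Ramen's v1alpha1 API as vendored in this repo:

```go
// Illustration only, not from this PR: a Ramen MaintenanceMode of the shape
// the reconciler handles during failover.
package main

import (
	ramenv1alpha1 "github.com/ramendr/ramen/api/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleMaintenanceMode() *ramenv1alpha1.MaintenanceMode {
	return &ramenv1alpha1.MaintenanceMode{
		ObjectMeta: metav1.ObjectMeta{Name: "mmode-sample"}, // hypothetical name
		Spec: ramenv1alpha1.MaintenanceModeSpec{
			// Provisioner of the volumes being failed over (hypothetical value).
			StorageProvisioner: "openshift-storage.rbd.csi.ceph.com",
			// Replication ID; this operator resolves it as a StorageClaim name.
			TargetID: "ocs-storagecluster-ceph-rbd",
			Modes:    []ramenv1alpha1.MMode{ramenv1alpha1.MModeFailover},
		},
	}
}
```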
diff --git a/internal/controller/operatorconfigmap_controller.go b/internal/controller/operatorconfigmap_controller.go
index 1a57278d..854cf9f4 100644
--- a/internal/controller/operatorconfigmap_controller.go
+++ b/internal/controller/operatorconfigmap_controller.go
@@ -80,6 +80,7 @@ type OperatorConfigMapReconciler struct {
     OperatorNamespace string
     ConsolePort       int32
     Scheme            *runtime.Scheme
+    AvailableCrds     map[string]bool
 
     log logr.Logger
     ctx context.Context
@@ -157,7 +158,15 @@ func (c *OperatorConfigMapReconciler) SetupWithManager(mgr ctrl.Manager) error {
         Owns(&csiopv1a1.OperatorConfig{}, builder.WithPredicates(generationChangePredicate)).
         Owns(&csiopv1a1.Driver{}, builder.WithPredicates(generationChangePredicate)).
         Watches(&configv1.ClusterVersion{}, enqueueConfigMapRequest, clusterVersionPredicates).
-        Watches(&extv1.CustomResourceDefinition{}, enqueueConfigMapRequest, builder.OnlyMetadata).
+        Watches(
+            &extv1.CustomResourceDefinition{},
+            &handler.EnqueueRequestForObject{},
+            builder.WithPredicates(
+                utils.NamePredicate(MaintenanceModeCRDName),
+                utils.CrdCreateAndDeletePredicate(&c.log, MaintenanceModeCRDName, c.AvailableCrds[MaintenanceModeCRDName]),
+            ),
+            builder.OnlyMetadata,
+        ).
         Watches(&opv1a1.Subscription{}, enqueueConfigMapRequest, subscriptionPredicates).
         Watches(&admrv1.ValidatingWebhookConfiguration{}, enqueueConfigMapRequest, webhookPredicates).
         Watches(&v1alpha1.StorageClient{}, enqueueConfigMapRequest, builder.WithPredicates(predicate.AnnotationChangedPredicate{}))
@@ -188,6 +197,15 @@ func (c *OperatorConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Re
     c.log = log.FromContext(ctx, "OperatorConfigMap", req)
     c.log.Info("Reconciling OperatorConfigMap")
 
+    crd := &metav1.PartialObjectMetadata{}
+    crd.SetGroupVersionKind(extv1.SchemeGroupVersion.WithKind("CustomResourceDefinition"))
+    crd.Name = MaintenanceModeCRDName
+    if err := c.Client.Get(ctx, client.ObjectKeyFromObject(crd), crd); client.IgnoreNotFound(err) != nil {
+        c.log.Error(err, "Failed to get CRD", "CRD", crd.Name)
+        return reconcile.Result{}, err
+    }
+    utils.AssertEqual(c.AvailableCrds[crd.Name], crd.UID != "", utils.ExitCodeThatShouldRestartTheProcess)
+
     c.operatorConfigMap = &corev1.ConfigMap{}
     c.operatorConfigMap.Name = req.Name
     c.operatorConfigMap.Namespace = req.Namespace
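Note: the hunk above leans on utils helpers that are not part of this diff (NamePredicate, CrdCreateAndDeletePredicate, AssertEqual, ExitCodeThatShouldRestartTheProcess); the repo's utils package is authoritative. The intent is that the CRD watch fires only when the MaintenanceMode CRD is created or deleted relative to what the process saw at startup, and AssertEqual then exits the process so the restarted manager re-evaluates CRD availability with a fresh cache and controller set. A sketch of the assumed shapes:

```go
// Sketch only: assumed shapes for the utils helpers referenced above.
package utils

import (
	"os"

	"github.com/go-logr/logr"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// Exit code value is assumed, not taken from this diff.
const ExitCodeThatShouldRestartTheProcess = 42

// NamePredicate filters events down to objects with the given name.
func NamePredicate(name string) predicate.Predicate {
	return predicate.NewPredicateFuncs(func(obj client.Object) bool {
		return obj.GetName() == name
	})
}

// CrdCreateAndDeletePredicate reacts only to create/delete events that
// contradict the availability observed at process start.
func CrdCreateAndDeletePredicate(log *logr.Logger, crdName string, crdExists bool) predicate.Predicate {
	return predicate.Funcs{
		CreateFunc: func(event.CreateEvent) bool {
			if !crdExists {
				log.Info("CRD created after startup", "crd", crdName)
			}
			return !crdExists
		},
		DeleteFunc: func(event.DeleteEvent) bool {
			if crdExists {
				log.Info("CRD deleted after startup", "crd", crdName)
			}
			return crdExists
		},
		UpdateFunc:  func(event.UpdateEvent) bool { return false },
		GenericFunc: func(event.GenericEvent) bool { return false },
	}
}

// AssertEqual exits the process when the two values differ, relying on the
// kubelet restart policy to bring the operator back up in a consistent state.
func AssertEqual[T comparable](actual, expected T, exitCode int) {
	if actual != expected {
		os.Exit(exitCode)
	}
}
```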
diff --git a/internal/controller/storageclient_controller.go b/internal/controller/storageclient_controller.go
index 35754e99..25ca7e98 100644
--- a/internal/controller/storageclient_controller.go
+++ b/internal/controller/storageclient_controller.go
@@ -207,6 +207,8 @@ func (r *StorageClientReconciler) reconcilePhases() (ctrl.Result, error) {
         return reconcile.Result{}, fmt.Errorf("failed to get StorageConfig: %v", err)
     }
 
+    r.storageClient.Status.InMaintenanceMode = storageClientResponse.SystemAttributes.SystemInMaintenanceMode
+
     if res, err := r.reconcileClientStatusReporterJob(); err != nil {
         return res, err
     }
diff --git a/vendor/github.com/red-hat-storage/ocs-client-operator/api/v1alpha1/storageclient_types.go b/vendor/github.com/red-hat-storage/ocs-client-operator/api/v1alpha1/storageclient_types.go
index 6ca4d7c8..b89d71ac 100644
--- a/vendor/github.com/red-hat-storage/ocs-client-operator/api/v1alpha1/storageclient_types.go
+++ b/vendor/github.com/red-hat-storage/ocs-client-operator/api/v1alpha1/storageclient_types.go
@@ -50,6 +50,8 @@ type StorageClientSpec struct {
 type StorageClientStatus struct {
     Phase storageClientPhase `json:"phase,omitempty"`
 
+    InMaintenanceMode bool `json:"inMaintenanceMode"`
+
     // ConsumerID will hold the identity of this cluster inside the attached provider cluster
     ConsumerID string `json:"id,omitempty"`
 }