diff --git a/controllers/clusterversion_controller.go b/controllers/operatorconfigmap_controller.go similarity index 63% rename from controllers/clusterversion_controller.go rename to controllers/operatorconfigmap_controller.go index f873e039e..b27dbf43c 100644 --- a/controllers/clusterversion_controller.go +++ b/controllers/operatorconfigmap_controller.go @@ -16,7 +16,9 @@ package controllers import ( "bytes" "context" + "fmt" "strings" + "time" // The embed package is required for the prometheus rule files _ "embed" @@ -31,6 +33,7 @@ import ( monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -51,19 +54,20 @@ import ( var pvcPrometheusRules string const ( - operatorConfigMapName = "ocs-client-operator-config" + operatorConfigMapName = "ocs-client-operator-manager-config" // ClusterVersionName is the name of the ClusterVersion object in the // openshift cluster. clusterVersionName = "version" + + operatorConfigMapFinalizer = "ocs-client-operator.ocs.openshift.io/finalizer" ) -// ClusterVersionReconciler reconciles a ClusterVersion object -type ClusterVersionReconciler struct { +// OperatorConfigMapReconciler reconciles the operator ConfigMap object +type OperatorConfigMapReconciler struct { client.Client - OperatorDeployment *appsv1.Deployment - OperatorNamespace string - ConsolePort int32 - Scheme *runtime.Scheme + OperatorNamespace string + ConsolePort int32 + Scheme *runtime.Scheme log logr.Logger ctx context.Context @@ -76,7 +80,7 @@ type ClusterVersionReconciler struct { } // SetupWithManager sets up the controller with the Manager. 
-func (c *ClusterVersionReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (c *OperatorConfigMapReconciler) SetupWithManager(mgr ctrl.Manager) error { clusterVersionPredicates := builder.WithPredicates( predicate.GenerationChangedPredicate{}, ) @@ -91,19 +95,20 @@ func (c *ClusterVersionReconciler) SetupWithManager(mgr ctrl.Manager) error { ), ) // Reconcile the ClusterVersion object when the operator config map is updated - enqueueClusterVersionRequest := handler.EnqueueRequestsFromMapFunc( + enqueueConfigMapRequest := handler.EnqueueRequestsFromMapFunc( func(_ context.Context, client client.Object) []reconcile.Request { return []reconcile.Request{{ NamespacedName: types.NamespacedName{ - Name: clusterVersionName, + Name: operatorConfigMapName, + Namespace: c.OperatorNamespace, }, }} }, ) return ctrl.NewControllerManagedBy(mgr). - For(&configv1.ClusterVersion{}, clusterVersionPredicates). - Watches(&corev1.ConfigMap{}, enqueueClusterVersionRequest, configMapPredicates). + For(&corev1.ConfigMap{}, configMapPredicates). + Watches(&configv1.ClusterVersion{}, enqueueConfigMapRequest, clusterVersionPredicates). 
Complete(c) } @@ -123,23 +128,56 @@ func (c *ClusterVersionReconciler) SetupWithManager(mgr ctrl.Manager) error { // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.8.3/pkg/reconcile -func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (c *OperatorConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { var err error c.ctx = ctx - c.log = log.FromContext(ctx, "ClusterVersion", req) - c.log.Info("Reconciling ClusterVersion") - + c.log = log.FromContext(ctx, "OperatorConfigMap", req) + c.log.Info("Reconciling OperatorConfigMap") + + operatorConfigMap := &corev1.ConfigMap{} + if err := c.Client.Get(c.ctx, types.NamespacedName{Name: operatorConfigMapName, Namespace: c.OperatorNamespace}, operatorConfigMap); err != nil { + c.log.Error(err, "failed to get the operator's configMap") + return reconcile.Result{}, err + } + // TODO: remove debug logging before merge + c.log.Info("printing operator configmap:", "configmap", operatorConfigMap) + + // deletion phase + if !operatorConfigMap.GetDeletionTimestamp().IsZero() { + result, err := c.deletionPhase() + if err != nil { + return ctrl.Result{}, err + } + if !result.Requeue { + //remove finalizer + controllerutil.RemoveFinalizer(operatorConfigMap, operatorConfigMapFinalizer) + if err := c.Client.Update(c.ctx, operatorConfigMap); err != nil { + return ctrl.Result{}, err + } + c.log.Info("finalizer removed successfully") + } + return result, nil + } + //ensure finalizer + if !controllerutil.ContainsFinalizer(operatorConfigMap, operatorConfigMapFinalizer) { + c.log.Info("finalizer missing on the OperatorConfigMap resource, adding...") + controllerutil.AddFinalizer(operatorConfigMap, operatorConfigMapFinalizer) + if err := c.Client.Update(c.ctx, operatorConfigMap); err != nil { + return ctrl.Result{}, err + } + } if err := c.ensureConsolePlugin(); err != nil { c.log.Error(err, "unable to deploy 
client console") return ctrl.Result{}, err } - instance := configv1.ClusterVersion{} - if err = c.Client.Get(context.TODO(), req.NamespacedName, &instance); err != nil { - return ctrl.Result{}, err + clusterVersion := &configv1.ClusterVersion{} + if err := c.Client.Get(c.ctx, types.NamespacedName{Name: clusterVersionName}, clusterVersion); err != nil { + c.log.Error(err, "failed to get the clusterVersion of the OCP cluster") + return reconcile.Result{}, err } - if err := csi.InitializeSidecars(c.log, instance.Status.Desired.Version); err != nil { + if err := csi.InitializeSidecars(c.log, clusterVersion.Status.Desired.Version); err != nil { c.log.Error(err, "unable to initialize sidecars") return ctrl.Result{}, err } @@ -173,11 +211,10 @@ func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Reque "config.json": "[]", }, } - if err := c.own(monConfigMap); err != nil { + if err := c.own(monConfigMap, operatorConfigMap); err != nil { return ctrl.Result{}, err } - err = c.create(monConfigMap) - if err != nil && !k8serrors.IsAlreadyExists(err) { + if err := c.create(monConfigMap); err != nil && !k8serrors.IsAlreadyExists(err) { c.log.Error(err, "failed to create monitor configmap", "name", monConfigMap.Name) return ctrl.Result{}, err } @@ -194,26 +231,19 @@ func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Reque "config.json": "[]", }, } - if err := c.own(monConfigMap); err != nil { + if err := c.own(encConfigMap, operatorConfigMap); err != nil { return ctrl.Result{}, err } - err = c.create(encConfigMap) - if err != nil && !k8serrors.IsAlreadyExists(err) { + if err := c.create(encConfigMap); err != nil && !k8serrors.IsAlreadyExists(err) { c.log.Error(err, "failed to create monitor configmap", "name", encConfigMap.Name) return ctrl.Result{}, err } - c.cephFSDeployment = &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: csi.CephFSDeploymentName, - Namespace: c.OperatorNamespace, - }, - } err = 
c.createOrUpdate(c.cephFSDeployment, func() error { - if err := c.own(c.cephFSDeployment); err != nil { + c.cephFSDeployment = csi.GetCephFSDeployment(c.OperatorNamespace) + if err := c.own(c.cephFSDeployment, operatorConfigMap); err != nil { return err } - csi.GetCephFSDeployment(c.OperatorNamespace).DeepCopyInto(c.cephFSDeployment) return nil }) if err != nil { @@ -221,17 +251,11 @@ func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, err } - c.cephFSDaemonSet = &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: csi.CephFSDamonSetName, - Namespace: c.OperatorNamespace, - }, - } err = c.createOrUpdate(c.cephFSDaemonSet, func() error { - if err := c.own(c.cephFSDaemonSet); err != nil { + c.cephFSDaemonSet = csi.GetCephFSDaemonSet(c.OperatorNamespace) + if err := c.own(c.cephFSDaemonSet, operatorConfigMap); err != nil { return err } - csi.GetCephFSDaemonSet(c.OperatorNamespace).DeepCopyInto(c.cephFSDaemonSet) return nil }) if err != nil { @@ -239,17 +263,11 @@ func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, err } - c.rbdDeployment = &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: csi.RBDDeploymentName, - Namespace: c.OperatorNamespace, - }, - } err = c.createOrUpdate(c.rbdDeployment, func() error { - if err := c.own(c.rbdDeployment); err != nil { + c.rbdDeployment = csi.GetRBDDeployment(c.OperatorNamespace) + if err := c.own(c.rbdDeployment, operatorConfigMap); err != nil { return err } - csi.GetRBDDeployment(c.OperatorNamespace).DeepCopyInto(c.rbdDeployment) return nil }) if err != nil { @@ -257,17 +275,11 @@ func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, err } - c.rbdDaemonSet = &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: csi.RBDDaemonSetName, - Namespace: c.OperatorNamespace, - }, - } err = c.createOrUpdate(c.rbdDaemonSet, func() error { - if err := 
c.own(c.rbdDaemonSet); err != nil { + c.rbdDaemonSet = csi.GetRBDDaemonSet(c.OperatorNamespace) + if err := c.own(c.rbdDaemonSet, operatorConfigMap); err != nil { return err } - csi.GetRBDDaemonSet(c.OperatorNamespace).DeepCopyInto(c.rbdDaemonSet) return nil }) if err != nil { @@ -279,36 +291,29 @@ func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Reque // ownerReference on it as its cluster scoped resource cephfsCSIDriver := templates.CephFSCSIDriver.DeepCopy() cephfsCSIDriver.ObjectMeta.Name = csi.GetCephFSDriverName() - err = csi.CreateCSIDriver(c.ctx, c.Client, cephfsCSIDriver) - if err != nil { + if err := csi.CreateCSIDriver(c.ctx, c.Client, cephfsCSIDriver); err != nil { c.log.Error(err, "unable to create cephfs CSIDriver") return ctrl.Result{}, err } rbdCSIDriver := templates.RbdCSIDriver.DeepCopy() rbdCSIDriver.ObjectMeta.Name = csi.GetRBDDriverName() - err = csi.CreateCSIDriver(c.ctx, c.Client, rbdCSIDriver) - if err != nil { + if err := csi.CreateCSIDriver(c.ctx, c.Client, rbdCSIDriver); err != nil { c.log.Error(err, "unable to create rbd CSIDriver") return ctrl.Result{}, err } prometheusRule := &monitoringv1.PrometheusRule{} - err = k8sYAML.NewYAMLOrJSONDecoder(bytes.NewBufferString(string(pvcPrometheusRules)), 1000).Decode(prometheusRule) - if err != nil { + if err := k8sYAML.NewYAMLOrJSONDecoder(bytes.NewBufferString(string(pvcPrometheusRules)), 1000).Decode(prometheusRule); err != nil { c.log.Error(err, "Unable to retrieve prometheus rules.", "prometheusRule", klog.KRef(prometheusRule.Namespace, prometheusRule.Name)) return ctrl.Result{}, err } - operatorConfig, err := c.getOperatorConfig() - if err != nil { - return ctrl.Result{}, err - } prometheusRule.SetNamespace(c.OperatorNamespace) err = c.createOrUpdate(prometheusRule, func() error { - applyLabels(operatorConfig.Data["OCS_METRICS_LABELS"], &prometheusRule.ObjectMeta) - return c.own(prometheusRule) + applyLabels(operatorConfigMap.Data["OCS_METRICS_LABELS"], 
&prometheusRule.ObjectMeta) + return c.own(prometheusRule, operatorConfigMap) }) if err != nil { c.log.Error(err, "failed to create/update prometheus rules") @@ -320,7 +325,64 @@ func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, nil } -func (c *ClusterVersionReconciler) createOrUpdate(obj client.Object, f controllerutil.MutateFn) error { +func (c *OperatorConfigMapReconciler) deletionPhase() (reconcile.Result, error) { + ocsPvsPresent, err := c.hasOCSVolumes() + if err != nil { + c.log.Error(err, "unable to verify PVs presence prior deletion of ceph resources") + return ctrl.Result{}, err + } + if ocsPvsPresent { + c.log.Info("unable to delete ceph resources, PVs consuming client resources are present") + return ctrl.Result{Requeue: true, RequeueAfter: 10 * time.Second}, nil + } + if err := csi.DeleteCSIDriver(c.ctx, c.Client, csi.GetCephFSDriverName()); err != nil && !k8serrors.IsNotFound(err) { + c.log.Error(err, "unable to delete cephfs CSIDriver") + return ctrl.Result{}, err + } + if err := csi.DeleteCSIDriver(c.ctx, c.Client, csi.GetRBDDriverName()); err != nil && !k8serrors.IsNotFound(err) { + c.log.Error(err, "unable to delete rbd CSIDriver") + return ctrl.Result{}, err + } + if err := c.Client.Delete(c.ctx, c.scc); err != nil && !k8serrors.IsNotFound(err) { + c.log.Error(err, "unable to delete SCC") + return ctrl.Result{}, err + } + return ctrl.Result{}, nil +} + +func (c *OperatorConfigMapReconciler) hasOCSVolumes() (bool, error) { + // get all the storage class + storageClassList := storagev1.StorageClassList{} + if err := c.Client.List(c.ctx, &storageClassList); err != nil { + return false, fmt.Errorf("unable to list storage classes: %v", err) + } + + // create a set of storage class names who are using the client's provisioners + ocsStorageClass := make(map[string]bool) + for i := range storageClassList.Items { + storageClass := &storageClassList.Items[i] + if (storageClassList.Items[i].Provisioner == 
csi.GetCephFSDriverName()) || (storageClassList.Items[i].Provisioner == csi.GetRBDDriverName()) { + ocsStorageClass[storageClass.Name] = true + } + } + + // get all the PVs + pvList := &corev1.PersistentVolumeList{} + if err := c.Client.List(c.ctx, pvList); err != nil { + return false, fmt.Errorf("unable to list persistent volumes: %v", err) + } + + // check if there are any PVs using client's storage classes + for i := range pvList.Items { + scName := pvList.Items[i].Spec.StorageClassName + if ocsStorageClass[scName] { + return true, nil + } + } + return false, nil +} + +func (c *OperatorConfigMapReconciler) createOrUpdate(obj client.Object, f controllerutil.MutateFn) error { result, err := controllerutil.CreateOrUpdate(c.ctx, c.Client, obj, f) if err != nil { return err @@ -329,11 +391,14 @@ func (c *ClusterVersionReconciler) createOrUpdate(obj client.Object, f controlle return nil } -func (c *ClusterVersionReconciler) own(obj client.Object) error { - return controllerutil.SetControllerReference(c.OperatorDeployment, obj, c.Client.Scheme()) +func (c *OperatorConfigMapReconciler) own(obj client.Object, operatorConfigMap *corev1.ConfigMap) error { + //debugging + c.log.Info("printing operator configmap:", "configmap", operatorConfigMap) + c.log.Info("printing owned resource:", "resource", obj) + return controllerutil.SetControllerReference(operatorConfigMap, obj, c.Client.Scheme()) } -func (c *ClusterVersionReconciler) create(obj client.Object) error { +func (c *OperatorConfigMapReconciler) create(obj client.Object) error { return c.Client.Create(c.ctx, obj) } @@ -357,16 +422,7 @@ func applyLabels(label string, t *metav1.ObjectMeta) { t.Labels = promLabel } -func (c *ClusterVersionReconciler) getOperatorConfig() (*corev1.ConfigMap, error) { - cm := &corev1.ConfigMap{} - err := c.Client.Get(c.ctx, types.NamespacedName{Name: operatorConfigMapName, Namespace: c.OperatorNamespace}, cm) - if err != nil && !k8serrors.IsNotFound(err) { - return nil, err - } - return cm, 
nil -} - -func (c *ClusterVersionReconciler) ensureConsolePlugin() error { +func (c *OperatorConfigMapReconciler) ensureConsolePlugin() error { c.consoleDeployment = &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: console.DeploymentName, diff --git a/main.go b/main.go index a00c5aa31..92381289a 100644 --- a/main.go +++ b/main.go @@ -17,7 +17,6 @@ limitations under the License. package main import ( - "context" "flag" "os" @@ -39,7 +38,6 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" ctrl "sigs.k8s.io/controller-runtime" - apiclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" metrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -95,12 +93,6 @@ func main() { os.Exit(1) } - // apiclient.New() returns a client without cache. - // cache is not initialized before mgr.Start() - // we need this because we need to interact with OperatorCondition - apiClient, err := apiclient.New(mgr.GetConfig(), apiclient.Options{ - Scheme: mgr.GetScheme(), - }) if err != nil { setupLog.Error(err, "Unable to get Client") os.Exit(1) @@ -146,20 +138,13 @@ func main() { os.Exit(1) } - operatorDeployment, err := utils.GetOperatorDeployment(context.TODO(), apiClient) - if err != nil { - setupLog.Error(err, "unable to get operator deployment") - os.Exit(1) - } - - if err = (&controllers.ClusterVersionReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - OperatorDeployment: operatorDeployment, - OperatorNamespace: utils.GetOperatorNamespace(), - ConsolePort: int32(consolePort), + if err = (&controllers.OperatorConfigMapReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + OperatorNamespace: utils.GetOperatorNamespace(), + ConsolePort: int32(consolePort), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ClusterVersionReconciler") + 
setupLog.Error(err, "unable to create controller", "controller", "OperatorConfigMapReconciler") os.Exit(1) } diff --git a/pkg/csi/cephfsdaemonset.go b/pkg/csi/cephfsdaemonset.go index 4d9ace2d0..3bb86f293 100644 --- a/pkg/csi/cephfsdaemonset.go +++ b/pkg/csi/cephfsdaemonset.go @@ -51,7 +51,7 @@ func GetCephFSDaemonSet(namespace string) *appsv1.DaemonSet { templates.DefaultKubeletDirPath, GetCephFSDriverName()), ) - return &appsv1.DaemonSet{ + daemonset := &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: CephFSDamonSetName, Namespace: namespace, @@ -270,4 +270,7 @@ func GetCephFSDaemonSet(namespace string) *appsv1.DaemonSet { }, }, } + cephfsDaemonset := &appsv1.DaemonSet{} + daemonset.DeepCopyInto(cephfsDaemonset) + return cephfsDaemonset } diff --git a/pkg/csi/cephfsdeployment.go b/pkg/csi/cephfsdeployment.go index 74e8196ee..5ecca56ac 100644 --- a/pkg/csi/cephfsdeployment.go +++ b/pkg/csi/cephfsdeployment.go @@ -61,7 +61,7 @@ func GetCephFSDeployment(namespace string) *appsv1.Deployment { ) snapshotter.Image = sidecarImages.ContainerImages.SnapshotterImageURL - return &appsv1.Deployment{ + deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: CephFSDeploymentName, Namespace: namespace, @@ -215,4 +215,7 @@ func GetCephFSDeployment(namespace string) *appsv1.Deployment { }, }, } + cephfsDeployment := &appsv1.Deployment{} + deployment.DeepCopyInto(cephfsDeployment) + return cephfsDeployment } diff --git a/pkg/csi/rbddaemonset.go b/pkg/csi/rbddaemonset.go index b4fdc3147..e8514f2e3 100644 --- a/pkg/csi/rbddaemonset.go +++ b/pkg/csi/rbddaemonset.go @@ -63,7 +63,7 @@ func GetRBDDaemonSet(namespace string) *appsv1.DaemonSet { templates.DefaultKubeletDirPath, GetRBDDriverName()), ) - return &appsv1.DaemonSet{ + daemonset := &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: RBDDaemonSetName, Namespace: namespace, @@ -398,4 +398,7 @@ func GetRBDDaemonSet(namespace string) *appsv1.DaemonSet { }, }, } + rbdDaemonset := &appsv1.DaemonSet{} + 
daemonset.DeepCopyInto(rbdDaemonset) + return rbdDaemonset } diff --git a/pkg/csi/rbddeployment.go b/pkg/csi/rbddeployment.go index 4cdd64bc5..7cfa12d4d 100644 --- a/pkg/csi/rbddeployment.go +++ b/pkg/csi/rbddeployment.go @@ -60,7 +60,7 @@ func GetRBDDeployment(namespace string) *appsv1.Deployment { ) snapshotter.Image = sidecarImages.ContainerImages.SnapshotterImageURL - return &appsv1.Deployment{ + deployment := &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: RBDDeploymentName, Namespace: namespace, @@ -251,4 +251,7 @@ func GetRBDDeployment(namespace string) *appsv1.Deployment { }, }, } + rbdDeployment := &appsv1.Deployment{} + deployment.DeepCopyInto(rbdDeployment) + return rbdDeployment } diff --git a/pkg/utils/deployment.go b/pkg/utils/deployment.go deleted file mode 100644 index 37008eeca..000000000 --- a/pkg/utils/deployment.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2022 Red Hat, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package utils - -import ( - "context" - "os" - "strings" - - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// GetOperatorDeployment returns the operator deployment object -func GetOperatorDeployment(ctx context.Context, c client.Client) (*appsv1.Deployment, error) { - deployment := &appsv1.Deployment{} - podNameStrings := strings.Split(os.Getenv(OperatorPodNameEnvVar), "-") - deploymentName := strings.Join(podNameStrings[:len(podNameStrings)-2], "-") - err := c.Get(ctx, types.NamespacedName{Name: deploymentName, Namespace: GetOperatorNamespace()}, deployment) - if err != nil { - return nil, err - } - - return deployment, nil -}