controllers: pr issue fix
Fixed ownership of resources: they are now owned by the operator ConfigMap, and the finalizer is removed during deletion once cleanup completes.

Signed-off-by: Amit Berner <[email protected]>
bernerhat committed Feb 12, 2024
1 parent 18a6a73 commit 45d644a
Showing 7 changed files with 162 additions and 149 deletions.
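
For context, the controller now keys everything off the operator ConfigMap: it carries the finalizer and is set as the controller owner of the resources the operator creates. The sketch below is a minimal illustration of that pattern using controller-runtime's standard helpers, not the commit's actual code; the sketchReconciler type, the cleanupClusterScopedResources helper, and the owned ConfigMap name csi-mon-config are made-up names.

```go
package controllers

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const configMapFinalizer = "ocs-client-operator.ocs.openshift.io/finalizer"

// sketchReconciler stands in for the real OperatorConfigMapReconciler.
type sketchReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

func (r *sketchReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	cm := &corev1.ConfigMap{}
	if err := r.Get(ctx, req.NamespacedName, cm); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Deletion: clean up cluster-scoped resources first, then drop the
	// finalizer so the ConfigMap (and everything it owns) can go away.
	if !cm.GetDeletionTimestamp().IsZero() {
		if err := r.cleanupClusterScopedResources(ctx); err != nil { // illustrative helper
			return ctrl.Result{}, err
		}
		controllerutil.RemoveFinalizer(cm, configMapFinalizer)
		return ctrl.Result{}, r.Update(ctx, cm)
	}

	// Normal path: make sure the finalizer is present before creating anything.
	if !controllerutil.ContainsFinalizer(cm, configMapFinalizer) {
		controllerutil.AddFinalizer(cm, configMapFinalizer)
		if err := r.Update(ctx, cm); err != nil {
			return ctrl.Result{}, err
		}
	}

	// Namespaced resources are owned by the ConfigMap instead of the operator
	// Deployment, so they are garbage-collected together with it.
	owned := &corev1.ConfigMap{ // illustrative owned object
		ObjectMeta: metav1.ObjectMeta{Name: "csi-mon-config", Namespace: cm.Namespace},
	}
	if err := controllerutil.SetControllerReference(cm, owned, r.Scheme); err != nil {
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}

func (r *sketchReconciler) cleanupClusterScopedResources(_ context.Context) error {
	// In the real controller this deletes the CSIDrivers and the SCC once no
	// PVs provisioned by the client drivers remain.
	return nil
}
```

Owning namespaced resources through the ConfigMap lets Kubernetes garbage-collect them when it is deleted, while the finalizer holds that deletion back until the cluster-scoped CSIDrivers and the SCC, which cannot point an owner reference at a namespaced object, have been cleaned up. The actual diff for controllers/operatorconfigmap_controller.go follows.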
@@ -16,7 +16,9 @@ package controllers
import (
"bytes"
"context"
"fmt"
"strings"
"time"

// The embed package is required for the prometheus rule files
_ "embed"
@@ -31,6 +33,7 @@ import (
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -51,19 +54,20 @@ import (
var pvcPrometheusRules string

const (
operatorConfigMapName = "ocs-client-operator-config"
operatorConfigMapName = "ocs-client-operator-manager-config"
// ClusterVersionName is the name of the ClusterVersion object in the
// openshift cluster.
clusterVersionName = "version"

operatorConfigMapFinalizer = "ocs-client-operator.ocs.openshift.io/finalizer"
)

// ClusterVersionReconciler reconciles a ClusterVersion object
type ClusterVersionReconciler struct {
// OperatorConfigMapReconciler reconciles the operator ConfigMap object
type OperatorConfigMapReconciler struct {
client.Client
OperatorDeployment *appsv1.Deployment
OperatorNamespace string
ConsolePort int32
Scheme *runtime.Scheme
OperatorNamespace string
ConsolePort int32
Scheme *runtime.Scheme

log logr.Logger
ctx context.Context
@@ -76,7 +80,7 @@ type ClusterVersionReconciler struct {
}

// SetupWithManager sets up the controller with the Manager.
func (c *ClusterVersionReconciler) SetupWithManager(mgr ctrl.Manager) error {
func (c *OperatorConfigMapReconciler) SetupWithManager(mgr ctrl.Manager) error {
clusterVersionPredicates := builder.WithPredicates(
predicate.GenerationChangedPredicate{},
)
@@ -91,19 +95,20 @@ func (c *ClusterVersionReconciler) SetupWithManager(mgr ctrl.Manager) error {
),
)
// Reconcile the operator config map when the ClusterVersion object is updated
enqueueClusterVersionRequest := handler.EnqueueRequestsFromMapFunc(
enqueueConfigMapRequest := handler.EnqueueRequestsFromMapFunc(
func(_ context.Context, client client.Object) []reconcile.Request {

[GitHub Actions check failure — golangci-lint (1.20, 1.21), controllers/operatorconfigmap_controller.go line 99: unused-parameter: parameter 'client' seems to be unused, consider removing or renaming it as _ (revive)]
return []reconcile.Request{{
NamespacedName: types.NamespacedName{
Name: clusterVersionName,
Name: operatorConfigMapName,
Namespace: c.OperatorNamespace,
},
}}
},
)

return ctrl.NewControllerManagedBy(mgr).
For(&configv1.ClusterVersion{}, clusterVersionPredicates).
Watches(&corev1.ConfigMap{}, enqueueClusterVersionRequest, configMapPredicates).
For(&corev1.ConfigMap{}, configMapPredicates).
Watches(&configv1.ClusterVersion{}, enqueueConfigMapRequest, clusterVersionPredicates).
Complete(c)
}

@@ -123,23 +128,56 @@ func (c *ClusterVersionReconciler) SetupWithManager(mgr ctrl.Manager) error {

// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/[email protected]/pkg/reconcile
func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
func (c *OperatorConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
var err error
c.ctx = ctx
c.log = log.FromContext(ctx, "ClusterVersion", req)
c.log.Info("Reconciling ClusterVersion")

c.log = log.FromContext(ctx, "OperatorConfigMap", req)
c.log.Info("Reconciling OperatorConfigMap")

operatorConfigMap := &corev1.ConfigMap{}
if err := c.Client.Get(c.ctx, types.NamespacedName{Name: operatorConfigMapName, Namespace: c.OperatorNamespace}, operatorConfigMap); err != nil {
c.log.Error(err, "failed to get the operator's configMap")
return reconcile.Result{}, err
}
//debugging
c.log.Info("printing operator configmap:", "configmap", operatorConfigMap)

// deletion phase
if !operatorConfigMap.GetDeletionTimestamp().IsZero() {
result, err := c.deletionPhase()
if err != nil {
return ctrl.Result{}, err
}
if !result.Requeue {
//remove finalizer
controllerutil.RemoveFinalizer(operatorConfigMap, operatorConfigMapFinalizer)
if err := c.Client.Update(c.ctx, operatorConfigMap); err != nil {
return ctrl.Result{}, err
}
c.log.Info("finallizer removed successfully")
}
return result, nil
}
//ensure finalizer
if !controllerutil.ContainsFinalizer(operatorConfigMap, operatorConfigMapFinalizer) {
c.log.Info("finalizer missing on the OperatorConfigMap resource, adding...")
controllerutil.AddFinalizer(operatorConfigMap, operatorConfigMapFinalizer)
if err := c.Client.Update(c.ctx, operatorConfigMap); err != nil {
return ctrl.Result{}, err
}
}
if err := c.ensureConsolePlugin(); err != nil {
c.log.Error(err, "unable to deploy client console")
return ctrl.Result{}, err
}

instance := configv1.ClusterVersion{}
if err = c.Client.Get(context.TODO(), req.NamespacedName, &instance); err != nil {
return ctrl.Result{}, err
clusterVersion := &configv1.ClusterVersion{}
if err := c.Client.Get(c.ctx, types.NamespacedName{Name: clusterVersionName}, clusterVersion); err != nil {
c.log.Error(err, "failed to get the clusterVersion version of the OCP cluster")
return reconcile.Result{}, err
}

if err := csi.InitializeSidecars(c.log, instance.Status.Desired.Version); err != nil {
if err := csi.InitializeSidecars(c.log, clusterVersion.Status.Desired.Version); err != nil {
c.log.Error(err, "unable to initialize sidecars")
return ctrl.Result{}, err
}
@@ -173,11 +211,10 @@ func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
"config.json": "[]",
},
}
if err := c.own(monConfigMap); err != nil {
if err := c.own(monConfigMap, operatorConfigMap); err != nil {
return ctrl.Result{}, err
}
err = c.create(monConfigMap)
if err != nil && !k8serrors.IsAlreadyExists(err) {
if err := c.create(monConfigMap); err != nil && !k8serrors.IsAlreadyExists(err) {
c.log.Error(err, "failed to create monitor configmap", "name", monConfigMap.Name)
return ctrl.Result{}, err
}
@@ -194,80 +231,55 @@ func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
"config.json": "[]",
},
}
if err := c.own(monConfigMap); err != nil {
if err := c.own(encConfigMap, operatorConfigMap); err != nil {
return ctrl.Result{}, err
}
err = c.create(encConfigMap)
if err != nil && !k8serrors.IsAlreadyExists(err) {
if err := c.create(encConfigMap); err != nil && !k8serrors.IsAlreadyExists(err) {
c.log.Error(err, "failed to create monitor configmap", "name", encConfigMap.Name)
return ctrl.Result{}, err
}

c.cephFSDeployment = &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: csi.CephFSDeploymentName,
Namespace: c.OperatorNamespace,
},
}
err = c.createOrUpdate(c.cephFSDeployment, func() error {
if err := c.own(c.cephFSDeployment); err != nil {
c.cephFSDeployment = csi.GetCephFSDeployment(c.OperatorNamespace)
if err := c.own(c.cephFSDeployment, operatorConfigMap); err != nil {
return err
}
csi.GetCephFSDeployment(c.OperatorNamespace).DeepCopyInto(c.cephFSDeployment)
return nil
})
if err != nil {
c.log.Error(err, "failed to create/update cephfs deployment")
return ctrl.Result{}, err
}

c.cephFSDaemonSet = &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: csi.CephFSDamonSetName,
Namespace: c.OperatorNamespace,
},
}
err = c.createOrUpdate(c.cephFSDaemonSet, func() error {
if err := c.own(c.cephFSDaemonSet); err != nil {
c.cephFSDaemonSet = csi.GetCephFSDaemonSet(c.OperatorNamespace)
if err := c.own(c.cephFSDaemonSet, operatorConfigMap); err != nil {
return err
}
csi.GetCephFSDaemonSet(c.OperatorNamespace).DeepCopyInto(c.cephFSDaemonSet)
return nil
})
if err != nil {
c.log.Error(err, "failed to create/update cephfs daemonset")
return ctrl.Result{}, err
}

c.rbdDeployment = &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: csi.RBDDeploymentName,
Namespace: c.OperatorNamespace,
},
}
err = c.createOrUpdate(c.rbdDeployment, func() error {
if err := c.own(c.rbdDeployment); err != nil {
c.rbdDeployment = csi.GetRBDDeployment(c.OperatorNamespace)
if err := c.own(c.rbdDeployment, operatorConfigMap); err != nil {
return err
}
csi.GetRBDDeployment(c.OperatorNamespace).DeepCopyInto(c.rbdDeployment)
return nil
})
if err != nil {
c.log.Error(err, "failed to create/update rbd deployment")
return ctrl.Result{}, err
}

c.rbdDaemonSet = &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: csi.RBDDaemonSetName,
Namespace: c.OperatorNamespace,
},
}
err = c.createOrUpdate(c.rbdDaemonSet, func() error {
if err := c.own(c.rbdDaemonSet); err != nil {
c.rbdDaemonSet = csi.GetRBDDaemonSet(c.OperatorNamespace)
if err := c.own(c.rbdDaemonSet, operatorConfigMap); err != nil {
return err
}
csi.GetRBDDaemonSet(c.OperatorNamespace).DeepCopyInto(c.rbdDaemonSet)
return nil
})
if err != nil {
@@ -279,36 +291,29 @@ func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
// ownerReference on it as its cluster scoped resource
cephfsCSIDriver := templates.CephFSCSIDriver.DeepCopy()
cephfsCSIDriver.ObjectMeta.Name = csi.GetCephFSDriverName()
err = csi.CreateCSIDriver(c.ctx, c.Client, cephfsCSIDriver)
if err != nil {
if err := csi.CreateCSIDriver(c.ctx, c.Client, cephfsCSIDriver); err != nil {
c.log.Error(err, "unable to create cephfs CSIDriver")
return ctrl.Result{}, err
}

rbdCSIDriver := templates.RbdCSIDriver.DeepCopy()
rbdCSIDriver.ObjectMeta.Name = csi.GetRBDDriverName()
err = csi.CreateCSIDriver(c.ctx, c.Client, rbdCSIDriver)
if err != nil {
if err := csi.CreateCSIDriver(c.ctx, c.Client, rbdCSIDriver); err != nil {
c.log.Error(err, "unable to create rbd CSIDriver")
return ctrl.Result{}, err
}

prometheusRule := &monitoringv1.PrometheusRule{}
err = k8sYAML.NewYAMLOrJSONDecoder(bytes.NewBufferString(string(pvcPrometheusRules)), 1000).Decode(prometheusRule)
if err != nil {
if err := k8sYAML.NewYAMLOrJSONDecoder(bytes.NewBufferString(string(pvcPrometheusRules)), 1000).Decode(prometheusRule); err != nil {
c.log.Error(err, "Unable to retrieve prometheus rules.", "prometheusRule", klog.KRef(prometheusRule.Namespace, prometheusRule.Name))
return ctrl.Result{}, err
}

operatorConfig, err := c.getOperatorConfig()
if err != nil {
return ctrl.Result{}, err
}
prometheusRule.SetNamespace(c.OperatorNamespace)

err = c.createOrUpdate(prometheusRule, func() error {
applyLabels(operatorConfig.Data["OCS_METRICS_LABELS"], &prometheusRule.ObjectMeta)
return c.own(prometheusRule)
applyLabels(operatorConfigMap.Data["OCS_METRICS_LABELS"], &prometheusRule.ObjectMeta)
return c.own(prometheusRule, operatorConfigMap)
})
if err != nil {
c.log.Error(err, "failed to create/update prometheus rules")
@@ -320,7 +325,64 @@ func (c *ClusterVersionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, nil
}

func (c *ClusterVersionReconciler) createOrUpdate(obj client.Object, f controllerutil.MutateFn) error {
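// deletionPhase deletes the cluster-scoped resources (the CephFS and RBD CSIDrivers and the SCC) once no PVs provisioned by the client drivers remain; while such PVs exist it requeues instead of deleting anything.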
func (c *OperatorConfigMapReconciler) deletionPhase() (reconcile.Result, error) {
ocsPvsPresent, err := c.hasOCSVolumes()
if err != nil {
c.log.Error(err, "unable to verify PVs presence prior deletion of ceph resources")
return ctrl.Result{}, err
}
if ocsPvsPresent {
c.log.Info("unable to delete ceph resources, PVs consuming client resources are present")
return ctrl.Result{Requeue: true, RequeueAfter: 10 * time.Second}, nil
}
if err := csi.DeleteCSIDriver(c.ctx, c.Client, csi.GetCephFSDriverName()); err != nil && !k8serrors.IsNotFound(err) {
c.log.Error(err, "unable to delete cephfs CSIDriver")
return ctrl.Result{}, err
}
if err := csi.DeleteCSIDriver(c.ctx, c.Client, csi.GetRBDDriverName()); err != nil && !k8serrors.IsNotFound(err) {
c.log.Error(err, "unable to delete rbd CSIDriver")
return ctrl.Result{}, err
}
if err := c.Client.Delete(c.ctx, c.scc); err != nil && !k8serrors.IsNotFound(err) {
c.log.Error(err, "unable to delete SCC")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}

// hasOCSVolumes reports whether any PersistentVolume in the cluster was provisioned through a storage class that uses the client's CephFS or RBD driver.
func (c *OperatorConfigMapReconciler) hasOCSVolumes() (bool, error) {
// get all the storage classes
storageClassList := storagev1.StorageClassList{}
if err := c.Client.List(c.ctx, &storageClassList); err != nil {
return false, fmt.Errorf("unable to list storage classes: %v", err)
}

// collect the names of the storage classes that use the client's provisioners
ocsStorageClass := make(map[string]bool)
for i := range storageClassList.Items {
storageClass := &storageClassList.Items[i]
if storageClass.Provisioner == csi.GetCephFSDriverName() || storageClass.Provisioner == csi.GetRBDDriverName() {
ocsStorageClass[storageClass.Name] = true
}
}

// get all the PVs
pvList := &corev1.PersistentVolumeList{}
if err := c.Client.List(c.ctx, pvList); err != nil {
return false, fmt.Errorf("unable to list persistent volumes: %v", err)
}

// check if there are any PVs using client's storage classes
for i := range pvList.Items {
scName := pvList.Items[i].Spec.StorageClassName
if ocsStorageClass[scName] {
return true, nil
}
}
return false, nil
}

func (c *OperatorConfigMapReconciler) createOrUpdate(obj client.Object, f controllerutil.MutateFn) error {
result, err := controllerutil.CreateOrUpdate(c.ctx, c.Client, obj, f)
if err != nil {
return err
@@ -329,11 +391,14 @@ func (c *ClusterVersionReconciler) createOrUpdate(obj client.Object, f controlle
return nil
}

func (c *ClusterVersionReconciler) own(obj client.Object) error {
return controllerutil.SetControllerReference(c.OperatorDeployment, obj, c.Client.Scheme())
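// own sets the operator ConfigMap as the controller owner of obj so that obj is garbage-collected when the ConfigMap is deleted.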
func (c *OperatorConfigMapReconciler) own(obj client.Object, operatorConfigMap *corev1.ConfigMap) error {
//debugging
c.log.Info("printing operator configmap:", "configmap", operatorConfigMap)
c.log.Info("printing owned resource:", "resource", obj)
return controllerutil.SetControllerReference(operatorConfigMap, obj, c.Client.Scheme())
}

func (c *ClusterVersionReconciler) create(obj client.Object) error {
func (c *OperatorConfigMapReconciler) create(obj client.Object) error {
return c.Client.Create(c.ctx, obj)
}

@@ -357,16 +422,7 @@ func applyLabels(label string, t *metav1.ObjectMeta) {
t.Labels = promLabel
}

func (c *ClusterVersionReconciler) getOperatorConfig() (*corev1.ConfigMap, error) {
cm := &corev1.ConfigMap{}
err := c.Client.Get(c.ctx, types.NamespacedName{Name: operatorConfigMapName, Namespace: c.OperatorNamespace}, cm)
if err != nil && !k8serrors.IsNotFound(err) {
return nil, err
}
return cm, nil
}

func (c *ClusterVersionReconciler) ensureConsolePlugin() error {
func (c *OperatorConfigMapReconciler) ensureConsolePlugin() error {
c.consoleDeployment = &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: console.DeploymentName,
[remainder of this file's diff and the other changed files not loaded]
