diff --git a/bundle/manifests/odf-multicluster-orchestrator.clusterserviceversion.yaml b/bundle/manifests/odf-multicluster-orchestrator.clusterserviceversion.yaml index 8e5746e2..5181eb92 100644 --- a/bundle/manifests/odf-multicluster-orchestrator.clusterserviceversion.yaml +++ b/bundle/manifests/odf-multicluster-orchestrator.clusterserviceversion.yaml @@ -36,7 +36,7 @@ metadata: ] capabilities: Basic Install console.openshift.io/plugins: '["odf-multicluster-console"]' - createdAt: "2024-07-12T13:14:27Z" + createdAt: "2024-07-16T05:37:17Z" olm.skipRange: "" operators.openshift.io/infrastructure-features: '["disconnected"]' operators.operatorframework.io/builder: operator-sdk-v1.34.1 diff --git a/controllers/managedcluster_controller.go b/controllers/managedcluster_controller.go index d7589f16..2bc85045 100644 --- a/controllers/managedcluster_controller.go +++ b/controllers/managedcluster_controller.go @@ -28,7 +28,6 @@ const ( OdfInfoClusterClaimNamespacedName = "odfinfo.odf.openshift.io" ) -// +kubebuilder:rbac:groups=view.open-cluster-management.io,resources=managedclusterviews,verbs=get;list;watch;create;update func (r *ManagedClusterReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { logger := r.Logger.With("ManagedCluster", req.NamespacedName) logger.Info("Reconciling ManagedCluster") diff --git a/controllers/managedclusterview_controller.go b/controllers/managedclusterview_controller.go new file mode 100644 index 00000000..b01d7c6f --- /dev/null +++ b/controllers/managedclusterview_controller.go @@ -0,0 +1,186 @@ +package controllers + +import ( + "context" + "encoding/json" + "fmt" + "log/slog" + "os" + "strings" + + viewv1beta1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/view/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + 
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type ManagedClusterViewReconciler struct { + Client client.Client + Logger *slog.Logger +} + +const ( + ODFInfoConfigMapName = "odf-info" + ConfigMapResourceType = "ConfigMap" + ClientInfoConfigMapName = "odf-client-info" +) + +// StorageClusterConfig represents the structure of the storage cluster configuration. +type StorageClusterConfig struct { + Clients []string `json:"Clients,omitempty"` + DeploymentType string `json:"DeploymentType"` + StorageCluster struct { + CephClusterFSID string `json:"CephClusterFSID"` + NamespacedName struct { + Name string `json:"Name"` + Namespace string `json:"Namespace"` + } `json:"NamespacedName"` + StorageProviderEndpoint string `json:"StorageProviderEndpoint"` + } `json:"StorageCluster"` + StorageSystemName string `json:"StorageSystemName"` + Version string `json:"Version"` + ProviderClusterName string `json:"ProviderClusterName,omitempty"` +} + +func (r *ManagedClusterViewReconciler) SetupWithManager(mgr ctrl.Manager) error { + r.Logger.Info("Setting up ManagedClusterViewReconciler with manager") + managedClusterViewPredicate := predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + obj, ok := e.ObjectNew.(*viewv1beta1.ManagedClusterView) + if !ok { + return false + } + return hasODFInfoInScope(obj) + }, + CreateFunc: func(e event.CreateEvent) bool { + obj, ok := e.Object.(*viewv1beta1.ManagedClusterView) + if !ok { + return false + } + return hasODFInfoInScope(obj) + }, + + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + } + + return ctrl.NewControllerManagedBy(mgr). 
+		For(&viewv1beta1.ManagedClusterView{}, builder.WithPredicates(managedClusterViewPredicate, predicate.ResourceVersionChangedPredicate{})).
+		Owns(&corev1.ConfigMap{}).
+		Complete(r)
+}
+
+func hasODFInfoInScope(mc *viewv1beta1.ManagedClusterView) bool {
+	if mc.Spec.Scope.Name == ODFInfoConfigMapName && mc.Spec.Scope.Kind == ConfigMapResourceType {
+		return true
+	}
+	return false
+}
+
+// +kubebuilder:rbac:groups=view.open-cluster-management.io,resources=managedclusterviews,verbs=get;list;watch;create;update
+func (r *ManagedClusterViewReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+	logger := r.Logger.With("ManagedClusterView", req.NamespacedName)
+	logger.Info("Reconciling ManagedClusterView")
+
+	var managedClusterView viewv1beta1.ManagedClusterView
+	if err := r.Client.Get(ctx, req.NamespacedName, &managedClusterView); err != nil {
+		if client.IgnoreNotFound(err) != nil {
+			logger.Error("Failed to get ManagedClusterView", "error", err)
+		}
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	if err := createOrUpdateConfigMap(ctx, r.Client, managedClusterView, r.Logger); err != nil {
+		logger.Error("Failed to create or update ConfigMap for ManagedClusterView", "error", err)
+		return ctrl.Result{}, err
+	}
+
+	logger.Info("Successfully reconciled ManagedClusterView", "name", managedClusterView.Name)
+
+	return ctrl.Result{}, nil
+}
+
+func createOrUpdateConfigMap(ctx context.Context, c client.Client, managedClusterView viewv1beta1.ManagedClusterView, logger *slog.Logger) error {
+	logger = logger.With("ManagedClusterView", managedClusterView.Name, "Namespace", managedClusterView.Namespace)
+
+	var resultData map[string]string
+	err := json.Unmarshal(managedClusterView.Status.Result.Raw, &resultData)
+	if err != nil {
+		return fmt.Errorf("failed to unmarshal result data: %v", err)
+	}
+
+	reverseLookup := make(map[string]string)
+
+	for key, value := range resultData {
+		if strings.HasSuffix(key, ".config.yaml") {
+			var config StorageClusterConfig
+			err := json.Unmarshal([]byte(value), &config)
+			if err != nil {
+				return fmt.Errorf("failed to unmarshal config data for key %s: %v", key, err)
+			}
+
+			providerInfo := config
+			providerInfo.Clients = nil
+			providerInfo.ProviderClusterName = managedClusterView.Namespace
+
+			providerInfoJSON, err := json.Marshal(providerInfo)
+			if err != nil {
+				return fmt.Errorf("failed to marshal provider info: %v", err)
+			}
+
+			for _, clientName := range config.Clients {
+				reverseLookup[clientName] = string(providerInfoJSON)
+			}
+		}
+	}
+
+	operatorNamespace := os.Getenv("POD_NAMESPACE")
+	configMap := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      ClientInfoConfigMapName,
+			Namespace: operatorNamespace,
+		},
+		Data: reverseLookup,
+	}
+
+	op, err := controllerutil.CreateOrUpdate(ctx, c, configMap, func() error {
+		// Merge rather than overwrite, so entries written for other managed clusters survive.
+		if configMap.Data == nil {
+			configMap.Data = make(map[string]string, len(reverseLookup))
+		}
+		for clientName, info := range reverseLookup {
+			configMap.Data[clientName] = info
+		}
+
+		ownerExists := false
+		for _, ownerRef := range configMap.OwnerReferences {
+			if ownerRef.UID == managedClusterView.UID {
+				ownerExists = true
+				break
+			}
+		}
+		// NOTE(review): the MCV owner lives in another namespace and a second controller ref is rejected by API-server validation -- confirm this ownership scheme.
+		if !ownerExists {
+			ownerRef := *metav1.NewControllerRef(&managedClusterView, viewv1beta1.GroupVersion.WithKind("ManagedClusterView"))
+			logger.Info("OwnerRef added", "UID", string(ownerRef.UID))
+			configMap.OwnerReferences = append(configMap.OwnerReferences, ownerRef)
+		}
+		return nil
+	})
+
+	if err != nil {
+		return fmt.Errorf("failed to create or update ConfigMap: %v", err)
+	}
+	logger.Info(fmt.Sprintf("ConfigMap %s in namespace %s has been %s", ClientInfoConfigMapName, operatorNamespace, op))
+	return nil
+}
diff --git a/controllers/managedclusterview_controller_test.go b/controllers/managedclusterview_controller_test.go
new file mode 100644
index 00000000..877f6926
--- /dev/null
+++ b/controllers/managedclusterview_controller_test.go
@@ -0,0 +1,133 @@
+//go:build unit
+// +build unit
+
+package controllers
+
+import (
+	"context"
+	"encoding/json"
+	"os"
+	"testing"
+
+	
"github.com/google/uuid" + "github.com/red-hat-storage/odf-multicluster-orchestrator/controllers/utils" + viewv1beta1 "github.com/stolostron/multicloud-operators-foundation/pkg/apis/view/v1beta1" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestCreateOrUpdateConfigMap(t *testing.T) { + + s := scheme.Scheme + _ = viewv1beta1.AddToScheme(s) + _ = corev1.AddToScheme(s) + + c := fake.NewClientBuilder().WithScheme(s).Build() + os.Setenv("POD_NAMESPACE", "openshift-operators") + logger := utils.GetLogger(utils.GetZapLogger(true)) + + createManagedClusterView := func(name, namespace string, data map[string]string) *viewv1beta1.ManagedClusterView { + raw, _ := json.Marshal(data) + return &viewv1beta1.ManagedClusterView{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + UID: types.UID(uuid.New().String()), + }, + Status: viewv1beta1.ViewStatus{ + Result: runtime.RawExtension{Raw: raw}, + }, + } + } + + t.Run("Create ConfigMap", func(t *testing.T) { + data := map[string]string{ + "openshift-storage_ocs-storagecluster.config.yaml": ` + { + "Clients": ["client1", "client2"], + "DeploymentType": "internal", + "StorageCluster": { + "CephClusterFSID": "7a3d6b81-a55d-44fe-84d0-46c67cd395ca", + "NamespacedName": { + "Name": "ocs-storagecluster", + "Namespace": "openshift-storage" + }, + "StorageProviderEndpoint": "" + }, + "StorageSystemName": "ocs-storagecluster-storagesystem", + "Version": "4.Y.Z" + } + `, + } + mcv := createManagedClusterView("test-view", "local-cluster", data) + + ctx := context.TODO() + err := c.Create(ctx, mcv) + assert.NoError(t, err) + + err = createOrUpdateConfigMap(ctx, c, *mcv, logger) + assert.NoError(t, err) + + cm := &corev1.ConfigMap{} + err = c.Get(ctx, types.NamespacedName{Name: 
ClientInfoConfigMapName, Namespace: os.Getenv("POD_NAMESPACE")}, cm) + assert.NoError(t, err) + assert.NotNil(t, cm) + + expectedData := map[string]string{ + "client1": `{"DeploymentType":"internal","StorageCluster":{"CephClusterFSID":"7a3d6b81-a55d-44fe-84d0-46c67cd395ca","NamespacedName":{"Name":"ocs-storagecluster","Namespace":"openshift-storage"},"StorageProviderEndpoint":""},"StorageSystemName":"ocs-storagecluster-storagesystem","Version":"4.Y.Z","ProviderClusterName":"local-cluster"}`, + "client2": `{"DeploymentType":"internal","StorageCluster":{"CephClusterFSID":"7a3d6b81-a55d-44fe-84d0-46c67cd395ca","NamespacedName":{"Name":"ocs-storagecluster","Namespace":"openshift-storage"},"StorageProviderEndpoint":""},"StorageSystemName":"ocs-storagecluster-storagesystem","Version":"4.Y.Z","ProviderClusterName":"local-cluster"}`, + } + assert.Equal(t, expectedData, cm.Data) + assert.Equal(t, 1, len(cm.OwnerReferences)) + assert.Equal(t, mcv.Name, cm.OwnerReferences[0].Name) + assert.Equal(t, "ManagedClusterView", cm.OwnerReferences[0].Kind) + assert.Equal(t, viewv1beta1.GroupVersion.String(), cm.OwnerReferences[0].APIVersion) + + }) + + t.Run("Update ConfigMap with additional owner reference", func(t *testing.T) { + ctx := context.TODO() + data := map[string]string{ + "openshift-storage_ocs-storagecluster.config.yaml": ` + { + "Clients": ["client1", "client2"], + "DeploymentType": "internal", + "StorageCluster": { + "CephClusterFSID": "7a3d6b81-a55d-44fe-84d0-46c67cd395ca", + "NamespacedName": { + "Name": "ocs-storagecluster", + "Namespace": "openshift-storage" + }, + "StorageProviderEndpoint": "" + }, + "StorageSystemName": "ocs-storagecluster-storagesystem", + "Version": "4.Y.Z" + } + `, + } + mcv := createManagedClusterView("new-view", "local-cluster", data) + + err := c.Create(ctx, mcv) + assert.NoError(t, err) + + err = createOrUpdateConfigMap(ctx, c, *mcv, logger) + assert.NoError(t, err) + + cm := &corev1.ConfigMap{} + err = c.Get(ctx, types.NamespacedName{Name: 
ClientInfoConfigMapName, Namespace: os.Getenv("POD_NAMESPACE")}, cm) + assert.NoError(t, err) + assert.NotNil(t, cm) + + expectedData := map[string]string{ + "client1": `{"DeploymentType":"internal","StorageCluster":{"CephClusterFSID":"7a3d6b81-a55d-44fe-84d0-46c67cd395ca","NamespacedName":{"Name":"ocs-storagecluster","Namespace":"openshift-storage"},"StorageProviderEndpoint":""},"StorageSystemName":"ocs-storagecluster-storagesystem","Version":"4.Y.Z","ProviderClusterName":"local-cluster"}`, + "client2": `{"DeploymentType":"internal","StorageCluster":{"CephClusterFSID":"7a3d6b81-a55d-44fe-84d0-46c67cd395ca","NamespacedName":{"Name":"ocs-storagecluster","Namespace":"openshift-storage"},"StorageProviderEndpoint":""},"StorageSystemName":"ocs-storagecluster-storagesystem","Version":"4.Y.Z","ProviderClusterName":"local-cluster"}`, + } + assert.Equal(t, expectedData, cm.Data) + assert.Equal(t, 2, len(cm.OwnerReferences)) + }) +} diff --git a/controllers/manager.go b/controllers/manager.go index 455f48aa..6dcf7a5a 100644 --- a/controllers/manager.go +++ b/controllers/manager.go @@ -150,6 +150,14 @@ func (o *ManagerOptions) runManager() { os.Exit(1) } + if err = (&ManagedClusterViewReconciler{ + Client: mgr.GetClient(), + Logger: logger.With("controller", "ManagedClusterViewReconciler"), + }).SetupWithManager(mgr); err != nil { + logger.Error("Failed to create ManagedClusterView controller", "error", err) + os.Exit(1) + } + if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { err = console.InitConsole(ctx, mgr.GetClient(), o.MulticlusterConsolePort, namespace) if err != nil { diff --git a/go.mod b/go.mod index 78d999df..883b4679 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.21 require ( github.com/csi-addons/kubernetes-csi-addons v0.8.0 github.com/go-logr/zapr v1.3.0 + github.com/google/uuid v1.6.0 github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221122204822-d1a8c34382f1 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega 
v1.32.0 @@ -54,7 +55,6 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect