Expands OnboardConsumer and creates odf-info
Signed-off-by: raaizik <[email protected]>
raaizik committed Feb 20, 2024
1 parent 35ac7ee commit 0cbbe5a
Showing 8 changed files with 488 additions and 218 deletions.
67 changes: 3 additions & 64 deletions controllers/storagecluster/clusterclaims.go
@@ -3,14 +3,9 @@ package storagecluster
import (
"context"
"fmt"
"strconv"
"strings"

"github.com/go-logr/logr"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
corev1 "k8s.io/api/core/v1"
extensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
@@ -22,23 +17,19 @@ import (
clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"strconv"
)

const (
RookCephMonSecretName = "rook-ceph-mon"
FsidKey = "fsid"
OdfOperatorNamePrefix = "odf-operator"
ClusterClaimCRDName = "clusterclaims.cluster.open-cluster-management.io"
ClusterClaimCRDName = "clusterclaims.cluster.open-cluster-management.io"
)

var (
ClusterClaimGroup = "odf"
OdfVersion = fmt.Sprintf("version.%s.openshift.io", ClusterClaimGroup)
StorageSystemName = fmt.Sprintf("storagesystemname.%s.openshift.io", ClusterClaimGroup)
StorageClusterName = fmt.Sprintf("storageclustername.%s.openshift.io", ClusterClaimGroup)
StorageClusterCount = fmt.Sprintf("count.storageclusters.%s.openshift.io", ClusterClaimGroup)
StorageClusterDROptimized = fmt.Sprintf("droptimized.%s.openshift.io", ClusterClaimGroup)
CephFsid = fmt.Sprintf("cephfsid.%s.openshift.io", ClusterClaimGroup)
)

type ocsClusterClaim struct{}
@@ -83,20 +74,8 @@ func (obj *ocsClusterClaim) ensureCreated(r *StorageClusterReconciler, instance
StorageCluster: instance,
}

odfVersion, err := creator.getOdfVersion()
if err != nil {
r.Log.Error(err, "failed to get odf version for operator. retrying again")
return reconcile.Result{}, err
}

storageClusterCount := len(r.clusters.GetStorageClusters())

cephFsid, err := creator.getCephFsid()
if err != nil {
r.Log.Error(err, "failed to get ceph fsid from secret. retrying again")
return reconcile.Result{}, err
}

storageSystemName, err := creator.getStorageSystemName()
if err != nil {
r.Log.Error(err, "failed to get storagesystem name. retrying again")
@@ -117,8 +96,6 @@ func (obj *ocsClusterClaim) ensureCreated(r *StorageClusterReconciler, instance
err = creator.setStorageClusterCount(strconv.Itoa(storageClusterCount)).
setStorageSystemName(storageSystemName).
setStorageClusterName(instance.Name).
setOdfVersion(odfVersion).
setCephFsid(cephFsid).
setDROptimized(isDROptimized).
create()

@@ -179,34 +156,6 @@ func (c *ClusterClaimCreator) create() error {

return nil
}
func (c *ClusterClaimCreator) getOdfVersion() (string, error) {
var csvs operatorsv1alpha1.ClusterServiceVersionList
err := c.Client.List(c.Context, &csvs, &client.ListOptions{Namespace: c.StorageCluster.Namespace})
if err != nil {
return "", err
}

for _, csv := range csvs.Items {
if strings.HasPrefix(csv.Name, OdfOperatorNamePrefix) {
return csv.Spec.Version.String(), nil
}
}

return "", fmt.Errorf("failed to find csv with prefix %q", OdfOperatorNamePrefix)
}

func (c *ClusterClaimCreator) getCephFsid() (string, error) {
var rookCephMonSecret corev1.Secret
err := c.Client.Get(c.Context, types.NamespacedName{Name: RookCephMonSecretName, Namespace: c.StorageCluster.Namespace}, &rookCephMonSecret)
if err != nil {
return "", err
}
if val, ok := rookCephMonSecret.Data[FsidKey]; ok {
return string(val), nil
}

return "", fmt.Errorf("failed to fetch ceph fsid from %q secret", RookCephMonSecretName)
}

func (c *ClusterClaimCreator) getIsDROptimized(serverVersion *version.Info) (string, error) {
var cephCluster rookCephv1.CephCluster
@@ -238,21 +187,11 @@ func (c *ClusterClaimCreator) setStorageSystemName(name string) *ClusterClaimCre
return c
}

func (c *ClusterClaimCreator) setOdfVersion(version string) *ClusterClaimCreator {
c.Values[OdfVersion] = version
return c
}

func (c *ClusterClaimCreator) setStorageClusterName(name string) *ClusterClaimCreator {
c.Values[StorageClusterName] = fmt.Sprintf("%s/%s", name, c.StorageCluster.GetNamespace())
return c
}

func (c *ClusterClaimCreator) setCephFsid(fsid string) *ClusterClaimCreator {
c.Values[CephFsid] = fsid
return c
}

func (c *ClusterClaimCreator) setDROptimized(optimized string) *ClusterClaimCreator {
c.Values[StorageClusterDROptimized] = optimized
return c
@@ -277,7 +216,7 @@ func (obj *ocsClusterClaim) ensureDeleted(r *StorageClusterReconciler, _ *ocsv1.
}
return reconcile.Result{}, nil
}
names := []string{OdfVersion, StorageSystemName, StorageClusterName, CephFsid}
names := []string{StorageSystemName, StorageClusterName}
for _, name := range names {
cc := clusterv1alpha1.ClusterClaim{
ObjectMeta: metav1.ObjectMeta{
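Note (not part of the diff): with the odf-version and cephfsid ClusterClaims dropped here, those values now travel in the odf-info ConfigMap introduced in odfinfoconfig.go below, while the storagesystemname and storageclustername claims are still created and read as before. A minimal consumer-side sketch, assumed to live in the storagecluster package so StorageClusterName and the controller-runtime imports above are in scope (the helper name is illustrative):

// readStorageClusterNameClaim is an illustrative helper (not in this commit)
// that reads the remaining "storageclustername.odf.openshift.io" claim.
func readStorageClusterNameClaim(ctx context.Context, c client.Client) (string, error) {
	// ClusterClaims are cluster-scoped, so only the claim name is needed.
	var claim clusterv1alpha1.ClusterClaim
	if err := c.Get(ctx, types.NamespacedName{Name: StorageClusterName}, &claim); err != nil {
		return "", fmt.Errorf("failed to get cluster claim %q: %w", StorageClusterName, err)
	}
	// Spec.Value holds "<storagecluster-name>/<namespace>", as set by setStorageClusterName above.
	return claim.Spec.Value, nil
}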
24 changes: 23 additions & 1 deletion controllers/storagecluster/initialization_reconciler_test.go
@@ -2,6 +2,11 @@ package storagecluster

import (
"context"
"fmt"
"github.com/blang/semver/v4"
version2 "github.com/operator-framework/api/pkg/lib/version"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
version3 "github.com/red-hat-storage/ocs-operator/v4/version"
"os"
"testing"

@@ -346,6 +351,23 @@ func createFakeInitializationStorageClusterReconciler(t *testing.T, obj ...runti
Phase: cephv1.ConditionType(util.PhaseReady),
},
}
verOdf, _ := semver.Make(getSemVer(version3.Version, 1, true))
csv := &operatorsv1alpha1.ClusterServiceVersion{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("odf-operator-%s", sc.Name),
Namespace: sc.Namespace,
},
Spec: operatorsv1alpha1.ClusterServiceVersionSpec{
Version: version2.OperatorVersion{Version: verOdf},
},
}

rookCephMonSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "rook-ceph-mon", Namespace: sc.Namespace},
Data: map[string][]byte{
"fsid": []byte("b88c2d78-9de9-4227-9313-a63f62f78743"),
},
}

statusSubresourceObjs := []client.Object{sc}
var runtimeObjects []runtime.Object
@@ -360,7 +382,7 @@ func createFakeInitializationStorageClusterReconciler(t *testing.T, obj ...runti
}
}

runtimeObjects = append(runtimeObjects, mockNodeList.DeepCopy(), cbp, cfs, cnfs, cnfsbp, cnfssvc, infrastructure, networkConfig)
runtimeObjects = append(runtimeObjects, mockNodeList.DeepCopy(), cbp, cfs, cnfs, cnfsbp, cnfssvc, infrastructure, networkConfig, rookCephMonSecret, csv)
client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(runtimeObjects...).WithStatusSubresource(statusSubresourceObjs...).Build()

return StorageClusterReconciler{
151 changes: 151 additions & 0 deletions controllers/storagecluster/odfinfoconfig.go
@@ -0,0 +1,151 @@
package storagecluster

import (
"context"
"encoding/json"
"fmt"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"reflect"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"strings"
)

type ConnectedClient struct {
Metadata types.NamespacedName `json:"metadata"`
ClusterID string `json:"clusterId"`
}
type InfoStorageCluster struct {
Metadata types.NamespacedName `json:"metadata"`
StorageProviderEndpoint string `json:"storageProviderEndpoint"`
CephClusterFSID string `json:"cephClusterFSID"`
}

type OdfInfoData struct {
OdfVersion string `json:"odfVersion"`
OdfDeploymentType string `json:"odfDeploymentType"`
Clients []ConnectedClient `json:"clients"`
StorageCluster InfoStorageCluster `json:"storageCluster"`
}

const (
RDROdfInfoKeyName = "config.yaml"
RDROdfDeploymentTypeExternal = "external"
RDROdfDeploymentTypeInternal = "internal"
RookCephMonSecretName = "rook-ceph-mon"
FsidKey = "fsid"
OdfOperatorNamePrefix = "odf-operator"
RDROdfInfoConfigMapName = "odf-info"
RDROdfInfoMapKind = "ConfigMap"
)

type odfInfoConfig struct{}

// ensureCreated ensures that a ConfigMap resource exists with its Spec in
// the desired state.
func (obj *odfInfoConfig) ensureCreated(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) (reconcile.Result, error) {
odfInfo, configErr := getOdfInfoConfig(r, sc)
if configErr != nil {
return reconcile.Result{}, fmt.Errorf("failed to get ODF info config data: %w", configErr)
}
odfInfoConfigOverrideData := map[string]string{
RDROdfInfoKeyName: odfInfo,
}
odfInfoConfigOverrideCM := &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: RDROdfInfoMapKind,
APIVersion: sc.APIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: RDROdfInfoConfigMapName + "-" + sc.Name,
Namespace: sc.Namespace,
},
Data: odfInfoConfigOverrideData,
}

_, err := ctrl.CreateOrUpdate(context.Background(), r.Client, odfInfoConfigOverrideCM, func() error {
if !reflect.DeepEqual(odfInfoConfigOverrideCM.Data, odfInfoConfigOverrideData) {
r.Log.Info("updating ODF info config override configmap", RDROdfInfoMapKind, klog.KRef(sc.Namespace, RDROdfInfoConfigMapName))
odfInfoConfigOverrideCM.Data = odfInfoConfigOverrideData
}
return ctrl.SetControllerReference(sc, odfInfoConfigOverrideCM, r.Scheme)
})
if err != nil {
r.Log.Error(err, "failed to create or update ODF info config override", RDROdfInfoMapKind, klog.KRef(sc.Namespace, RDROdfInfoConfigMapName))
return reconcile.Result{}, fmt.Errorf("failed to create or update ODF info config override: %w", err)
}
return reconcile.Result{}, nil
}

// ensureDeleted is dummy func for the odfInfoConfig
func (obj *odfInfoConfig) ensureDeleted(_ *StorageClusterReconciler, _ *ocsv1.StorageCluster) (reconcile.Result, error) {
return reconcile.Result{}, nil
}

func getOdfInfoConfig(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) (string, error) {
var odfVersion, cephFSId string
var err error
if odfVersion, err = getOdfVersion(r, sc); err != nil {
return odfVersion, err
}
if cephFSId, err = getCephFsid(r, sc); err != nil {
return cephFSId, err
}
var odfDeploymentType string
if sc.Spec.ExternalStorage.Enable {
odfDeploymentType = RDROdfDeploymentTypeExternal
} else {
odfDeploymentType = RDROdfDeploymentTypeInternal
}
var data = OdfInfoData{
OdfVersion: odfVersion,
OdfDeploymentType: odfDeploymentType,
// Clients array is populated with the onboarding request's fields via server.go
Clients: []ConnectedClient{},
StorageCluster: InfoStorageCluster{
Metadata: types.NamespacedName{Name: sc.Name, Namespace: sc.Namespace},
StorageProviderEndpoint: sc.Status.StorageProviderEndpoint,
CephClusterFSID: cephFSId,
},
}
yamlData, err := json.Marshal(data)
if err != nil {
return "", err
}
return string(yamlData), nil

}

func getOdfVersion(r *StorageClusterReconciler, storageCluster *ocsv1.StorageCluster) (string, error) {
var csvs operatorsv1alpha1.ClusterServiceVersionList
err := r.Client.List(context.TODO(), &csvs, &client.ListOptions{Namespace: storageCluster.Namespace})
if err != nil {
return "", err
}
for _, csv := range csvs.Items {
if strings.HasPrefix(csv.Name, OdfOperatorNamePrefix) {
return csv.Spec.Version.String(), nil
}
}

return "", fmt.Errorf("failed to find csv with prefix %q", OdfOperatorNamePrefix)
}

func getCephFsid(r *StorageClusterReconciler, storageCluster *ocsv1.StorageCluster) (string, error) {
var rookCephMonSecret corev1.Secret
err := r.Client.Get(context.TODO(), types.NamespacedName{Name: RookCephMonSecretName, Namespace: storageCluster.Namespace}, &rookCephMonSecret)
if err != nil {
return "", err
}
if val, ok := rookCephMonSecret.Data[FsidKey]; ok {
return string(val), nil
}

return "", fmt.Errorf("failed to fetch ceph fsid from %q secret", RookCephMonSecretName)
}
2 changes: 2 additions & 0 deletions controllers/storagecluster/reconcile.go
@@ -409,6 +409,7 @@ func (r *StorageClusterReconciler) reconcilePhases(
&ocsJobTemplates{},
&ocsCephRbdMirrors{},
&ocsClusterClaim{},
&odfInfoConfig{},
}
} else {
// noobaa-only ensure functions
@@ -427,6 +428,7 @@ func (r *StorageClusterReconciler) reconcilePhases(
&ocsSnapshotClass{},
&ocsNoobaaSystem{},
&ocsClusterClaim{},
&odfInfoConfig{},
}
}

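Context (not part of the diff): reconcilePhases iterates a list of per-resource managers, calling ensureCreated on each during reconcile and ensureDeleted during teardown, so appending &odfInfoConfig{} here wires the new ConfigMap into both the full and the NooBaa-only paths. The contract those entries satisfy looks roughly like the sketch below; the interface name is an assumption inferred from the ensureCreated/ensureDeleted methods visible in this diff:

// Sketch of the contract each registered object fulfils (interface name assumed):
type resourceManager interface {
	// ensureCreated brings the owned resource to its desired state.
	ensureCreated(*StorageClusterReconciler, *ocsv1.StorageCluster) (reconcile.Result, error)
	// ensureDeleted cleans the resource up when the StorageCluster is removed.
	ensureDeleted(*StorageClusterReconciler, *ocsv1.StorageCluster) (reconcile.Result, error)
}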
25 changes: 23 additions & 2 deletions controllers/storagecluster/storagecluster_controller_test.go
@@ -3,6 +3,8 @@ package storagecluster
import (
"context"
"fmt"
version2 "github.com/operator-framework/api/pkg/lib/version"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
"net"
"os"
"regexp"
@@ -1121,7 +1123,23 @@ func createFakeStorageClusterReconciler(t *testing.T, obj ...runtime.Object) Sto
Phase: cephv1.ConditionType(util.PhaseReady),
},
}
obj = append(obj, cbp, cfs)
verOdf, _ := semver.Make(getSemVer(version.Version, 1, true))
csv := &operatorsv1alpha1.ClusterServiceVersion{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("odf-operator-%s", sc.Name),
Namespace: namespace,
},
Spec: operatorsv1alpha1.ClusterServiceVersionSpec{
Version: version2.OperatorVersion{Version: verOdf},
},
}
rookCephMonSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "rook-ceph-mon", Namespace: namespace},
Data: map[string][]byte{
"fsid": []byte("b88c2d78-9de9-4227-9313-a63f62f78743"),
},
}
obj = append(obj, cbp, cfs, rookCephMonSecret, csv)
client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(obj...).WithStatusSubresource(sc).Build()

clusters, err := util.GetClusters(context.TODO(), client)
@@ -1212,7 +1230,10 @@ func createFakeScheme(t *testing.T) *runtime.Scheme {
if err != nil {
assert.Fail(t, "failed to add ocsv1alpha1 scheme")
}

err = operatorsv1alpha1.AddToScheme(scheme)
if err != nil {
assert.Fail(t, "unable to add operatorsv1alpha1 to scheme")
}
return scheme
}
