diff --git a/controllers/storagecluster/generate.go b/controllers/storagecluster/generate.go
index 4694f421d4..e037b91069 100644
--- a/controllers/storagecluster/generate.go
+++ b/controllers/storagecluster/generate.go
@@ -100,10 +100,21 @@ func generateNameForSnapshotClass(initData *ocsv1.StorageCluster, snapshotType S
 	return fmt.Sprintf("%s-%splugin-snapclass", initData.Name, snapshotType)
 }
 
+func generateNameForGroupSnapshotClass(initData *ocsv1.StorageCluster, groupSnapshotType GroupSnapshotterType) string {
+	return fmt.Sprintf("%s-%splugin-groupsnapclass", initData.Name, groupSnapshotType)
+}
+
 func generateNameForSnapshotClassDriver(snapshotType SnapshotterType) string {
 	return fmt.Sprintf("%s.%s.csi.ceph.com", storageclassDriverNamePrefix, snapshotType)
 }
 
+func setParameterBasedOnSnapshotterType(instance *ocsv1.StorageCluster, groupSnapshotterType GroupSnapshotterType) (string, string) {
+	if groupSnapshotterType == rbdGroupSnapshotter {
+		return "pool", generateNameForCephBlockPool(instance)
+	}
+	return "fsName", generateNameForCephFilesystem(instance)
+}
+
 func generateNameForSnapshotClassSecret(instance *ocsv1.StorageCluster, snapshotType SnapshotterType) string {
 	// nfs uses the same cephfs secrets
 	if snapshotType == "nfs" {
diff --git a/controllers/storagecluster/reconcile.go b/controllers/storagecluster/reconcile.go
index 87e34b88da..89874b6712 100644
--- a/controllers/storagecluster/reconcile.go
+++ b/controllers/storagecluster/reconcile.go
@@ -76,6 +76,8 @@ const (
 
 	VirtualMachineCrdName = "virtualmachines.kubevirt.io"
 	StorageClientCrdName  = "storageclients.ocs.openshift.io"
+
+	VolumeGroupSnapshotClassCrdName = "volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io"
 )
 
 var storageClusterFinalizer = "storagecluster.ocs.openshift.io"
@@ -122,6 +124,7 @@ var validTopologyLabelKeys = []string{
 // +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors;prometheusrules,verbs=get;list;watch;create;update;delete
 // +kubebuilder:rbac:groups=template.openshift.io,resources=templates,verbs=get;list;watch;create;update;delete
 // +kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotclasses,verbs=get;watch;create;update;delete
+// +kubebuilder:rbac:groups=groupsnapshot.storage.k8s.io,resources=volumegroupsnapshotclasses,verbs=get;watch;create;update;delete;list
 // +kubebuilder:rbac:groups=config.openshift.io,resources=infrastructures;networks,verbs=get;list;watch
 // +kubebuilder:rbac:groups=config.openshift.io,resources=clusterversions;networks,verbs=get;list;watch
 // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch;create;update
@@ -403,6 +406,15 @@ func (r *StorageClusterReconciler) reconcilePhases(
 		return reconcile.Result{}, nil
 	}
 
+	// check whether the VolumeGroupSnapshotClass CRD is available on the cluster;
+	// skip reconciling group snapshot classes when it is not
+	vgsc := true
+	crd := &metav1.PartialObjectMetadata{}
+	crd.SetGroupVersionKind(extv1.SchemeGroupVersion.WithKind("CustomResourceDefinition"))
+	crd.Name = VolumeGroupSnapshotClassCrdName
+	if err := r.Client.Get(ctx, client.ObjectKeyFromObject(crd), crd); err != nil {
+		vgsc = false
+	}
+
 	// in-memory conditions should start off empty. It will only ever hold
 	// negative conditions (!Available, Degraded, Progressing)
 	r.conditions = nil
@@ -435,6 +447,9 @@ func (r *StorageClusterReconciler) reconcilePhases(
 			&ocsCephRbdMirrors{},
 			&odfInfoConfig{},
 		}
+		if vgsc {
+			objs = append(objs, &ocsGroupSnapshotClass{})
+		}
 	} else {
 		// noobaa-only ensure functions
 		objs = []resourceManager{
@@ -453,6 +468,9 @@ func (r *StorageClusterReconciler) reconcilePhases(
 			&ocsNoobaaSystem{},
 			&odfInfoConfig{},
 		}
+		if vgsc {
+			objs = append(objs, &ocsGroupSnapshotClass{})
+		}
 	}
 
 	for _, obj := range objs {
diff --git a/controllers/storagecluster/storagecluster_controller.go b/controllers/storagecluster/storagecluster_controller.go
index 9e7589fe7d..b9da51ddc1 100644
--- a/controllers/storagecluster/storagecluster_controller.go
+++ b/controllers/storagecluster/storagecluster_controller.go
@@ -8,6 +8,7 @@ import (
 
 	"github.com/go-logr/logr"
 	"github.com/google/go-cmp/cmp"
+	volumegroupsnapshotv1a1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1"
 	volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
 	nbv1 "github.com/noobaa/noobaa-operator/v5/pkg/apis/noobaa/v1alpha1"
 	routev1 "github.com/openshift/api/route/v1"
@@ -244,6 +245,7 @@ func (r *StorageClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		).
 		Watches(&storagev1.StorageClass{}, enqueueStorageClusterRequest).
 		Watches(&volumesnapshotv1.VolumeSnapshotClass{}, enqueueStorageClusterRequest).
+		Watches(&volumegroupsnapshotv1a1.VolumeGroupSnapshotClass{}, enqueueStorageClusterRequest).
 		Watches(&ocsv1.StorageProfile{}, enqueueStorageClusterRequest).
 		Watches(&ocsv1alpha1.StorageConsumer{}, enqueueStorageClusterRequest,
 			builder.WithPredicates(storageConsumerStatusPredicate))
diff --git a/controllers/storagecluster/storagecluster_controller_test.go b/controllers/storagecluster/storagecluster_controller_test.go
index 0a8283a1e6..b8a5e13a5b 100644
--- a/controllers/storagecluster/storagecluster_controller_test.go
+++ b/controllers/storagecluster/storagecluster_controller_test.go
@@ -14,6 +14,7 @@ import (
 	ocsversion "github.com/red-hat-storage/ocs-operator/v4/version"
 
 	"github.com/blang/semver/v4"
+	groupsnapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1"
 	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
 	nbv1 "github.com/noobaa/noobaa-operator/v5/pkg/apis/noobaa/v1alpha1"
 	configv1 "github.com/openshift/api/config/v1"
@@ -1253,6 +1254,10 @@ func createFakeScheme(t *testing.T) *runtime.Scheme {
 	if err != nil {
 		assert.Fail(t, "failed to add volume-snapshot scheme")
 	}
+	err = groupsnapapi.AddToScheme(scheme)
+	if err != nil {
+		assert.Fail(t, "failed to add volume-group-snapshot scheme")
+	}
 	err = monitoringv1.AddToScheme(scheme)
 	if err != nil {
 		assert.Fail(t, "failed to add monitoringv1 scheme")
diff --git a/controllers/storagecluster/uninstall_reconciler.go b/controllers/storagecluster/uninstall_reconciler.go
index ffaecae525..e7c34f08bb 100644
--- a/controllers/storagecluster/uninstall_reconciler.go
+++ b/controllers/storagecluster/uninstall_reconciler.go
@@ -331,6 +331,7 @@ func (r *StorageClusterReconciler) deleteResources(sc *ocsv1.StorageCluster) (re
 		&ocsCephFilesystems{},
 		&ocsCephBlockPools{},
 		&ocsSnapshotClass{},
+		&ocsGroupSnapshotClass{},
 		&ocsStorageQuota{},
 		&ocsStorageClass{},
 		&ocsCephCluster{},
diff --git a/controllers/storagecluster/volumegroupsnapshotterclasses.go b/controllers/storagecluster/volumegroupsnapshotterclasses.go
new file mode 100644
index 0000000000..78b86523b1
--- /dev/null
+++ b/controllers/storagecluster/volumegroupsnapshotterclasses.go
@@ -0,0 +1,167 @@
+package storagecluster
+
+import (
+	"fmt"
+	"reflect"
+
+	groupsnapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1"
+	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+type GroupSnapshotterType string
+
+type ocsGroupSnapshotClass struct{}
+
+const (
+	rbdGroupSnapshotter    GroupSnapshotterType = "rbd"
+	cephfsGroupSnapshotter GroupSnapshotterType = "cephfs"
+)
+
+const (
+	groupSnapshotterSecretName      = "csi.storage.k8s.io/group-snapshotter-secret-name"
+	groupSnapshotterSecretNamespace = "csi.storage.k8s.io/group-snapshotter-secret-namespace"
+)
+
+type GroupSnapshotClassConfiguration struct {
+	groupSnapshotClass *groupsnapapi.VolumeGroupSnapshotClass
+	reconcileStrategy  ReconcileStrategy
+	disable            bool
+}
+
+func newVolumeGroupSnapshotClass(instance *ocsv1.StorageCluster, groupSnapshotterType GroupSnapshotterType) *groupsnapapi.VolumeGroupSnapshotClass {
+	driverName, driverValue := setParameterBasedOnSnapshotterType(instance, groupSnapshotterType)
+	groupSnapClass := &groupsnapapi.VolumeGroupSnapshotClass{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: generateNameForGroupSnapshotClass(instance, groupSnapshotterType),
+		},
+		Driver: generateNameForSnapshotClassDriver(SnapshotterType(groupSnapshotterType)),
+		Parameters: map[string]string{
+			"clusterID":                     instance.Namespace,
+			driverName:                      driverValue,
+			groupSnapshotterSecretName:      generateNameForSnapshotClassSecret(instance, SnapshotterType(groupSnapshotterType)),
+			groupSnapshotterSecretNamespace: instance.Namespace,
+		},
+		DeletionPolicy: snapapi.VolumeSnapshotContentDelete,
+	}
+	return groupSnapClass
+}
+
+func newCephFilesystemGroupSnapshotClassConfiguration(instance *ocsv1.StorageCluster) GroupSnapshotClassConfiguration {
+	return GroupSnapshotClassConfiguration{
+		groupSnapshotClass: newVolumeGroupSnapshotClass(instance, cephfsGroupSnapshotter),
+		reconcileStrategy:  ReconcileStrategy(instance.Spec.ManagedResources.CephFilesystems.ReconcileStrategy),
+	}
+}
+
+func newCephBlockPoolGroupSnapshotClassConfiguration(instance *ocsv1.StorageCluster) GroupSnapshotClassConfiguration {
+	return GroupSnapshotClassConfiguration{
+		groupSnapshotClass: newVolumeGroupSnapshotClass(instance, rbdGroupSnapshotter),
+		reconcileStrategy:  ReconcileStrategy(instance.Spec.ManagedResources.CephBlockPools.ReconcileStrategy),
+	}
+}
+
+func newGroupSnapshotClassConfigurations(instance *ocsv1.StorageCluster) []GroupSnapshotClassConfiguration {
+	vsccs := []GroupSnapshotClassConfiguration{
+		newCephFilesystemGroupSnapshotClassConfiguration(instance),
+		newCephBlockPoolGroupSnapshotClassConfiguration(instance),
+	}
+	return vsccs
+}
+
+func (r *StorageClusterReconciler) createGroupSnapshotClasses(vsccs []GroupSnapshotClassConfiguration) error {
+
+	for _, vscc := range vsccs {
+		if vscc.reconcileStrategy == ReconcileStrategyIgnore || vscc.disable {
+			continue
+		}
+
+		vsc := vscc.groupSnapshotClass
+		existing := &groupsnapapi.VolumeGroupSnapshotClass{}
+		err := r.Client.Get(r.ctx, types.NamespacedName{Name: vsc.Name, Namespace: vsc.Namespace}, existing)
+		if err != nil {
+			if errors.IsNotFound(err) {
+				// Since the GroupSnapshotClass is not found, we will create a new one
+				r.Log.Info("Creating GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", vsc.Name))
+				err = r.Client.Create(r.ctx, vsc)
+				if err != nil {
+					r.Log.Error(err, "Failed to create GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", vsc.Name))
+					return err
+				}
+				// no error, continue with the next iteration
+				continue
+			}
+
+			r.Log.Error(err, "Failed to 'Get' GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", vsc.Name))
+			return err
+		}
+		if vscc.reconcileStrategy == ReconcileStrategyInit {
+			return nil
+		}
+		if existing.DeletionTimestamp != nil {
+			return fmt.Errorf("failed to restore GroupSnapshotClass %q because it is marked for deletion", existing.Name)
+		}
+		// if there is a mismatch in the parameters of existing vs created resources,
+		if !reflect.DeepEqual(vsc.Parameters, existing.Parameters) {
+			// we have to update the existing GroupSnapshotClass
+			r.Log.Info("GroupSnapshotClass needs to be updated", "GroupSnapshotClass", klog.KRef("", existing.Name))
+			existing.ObjectMeta.OwnerReferences = vsc.ObjectMeta.OwnerReferences
+			vsc.ObjectMeta = existing.ObjectMeta
+			if err := r.Client.Update(r.ctx, vsc); err != nil {
+				r.Log.Error(err, "GroupSnapshotClass update failed.", "GroupSnapshotClass", klog.KRef("", existing.Name))
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (obj *ocsGroupSnapshotClass) ensureCreated(r *StorageClusterReconciler, instance *ocsv1.StorageCluster) (reconcile.Result, error) {
+
+	vgsc := newGroupSnapshotClassConfigurations(instance)
+
+	err := r.createGroupSnapshotClasses(vgsc)
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+
+	return reconcile.Result{}, nil
+}
+
+func (obj *ocsGroupSnapshotClass) ensureDeleted(r *StorageClusterReconciler, instance *ocsv1.StorageCluster) (reconcile.Result, error) {
+
+	vgscs := newGroupSnapshotClassConfigurations(instance)
+	for _, vgsc := range vgscs {
+		sc := vgsc.groupSnapshotClass
+		existing := groupsnapapi.VolumeGroupSnapshotClass{}
+		err := r.Client.Get(r.ctx, types.NamespacedName{Name: sc.Name, Namespace: sc.Namespace}, &existing)
+
+		switch {
+		case err == nil:
+			if existing.DeletionTimestamp != nil {
+				r.Log.Info("Uninstall: GroupSnapshotClass is already marked for deletion.", "GroupSnapshotClass", klog.KRef("", existing.Name))
+				break
+			}
+
+			r.Log.Info("Uninstall: Deleting GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", existing.Name))
+			existing.ObjectMeta.OwnerReferences = sc.ObjectMeta.OwnerReferences
+			sc.ObjectMeta = existing.ObjectMeta
+
+			err = r.Client.Delete(r.ctx, sc)
+			if err != nil && !errors.IsNotFound(err) {
+				r.Log.Error(err, "Uninstall: Error deleting the GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", existing.Name))
+				return reconcile.Result{}, err
+			}
+		case errors.IsNotFound(err):
+			r.Log.Info("Uninstall: GroupSnapshotClass not found, nothing to do.", "GroupSnapshotClass", klog.KRef("", sc.Name))
+		default:
+			r.Log.Error(err, "Uninstall: Error while getting GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", sc.Name))
+		}
+	}
+	return reconcile.Result{}, nil
+}
diff --git a/controllers/storagecluster/volumegroupsnapshotterclasses_test.go b/controllers/storagecluster/volumegroupsnapshotterclasses_test.go
new file mode 100644
index 0000000000..eb9f853198
--- /dev/null
+++ b/controllers/storagecluster/volumegroupsnapshotterclasses_test.go
@@ -0,0 +1,33 @@
+package storagecluster
+
+import (
+	"context"
+	"testing"
+
+	groupsnapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1"
+	"github.com/stretchr/testify/assert"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+func TestVolumeGroupSnapshotterClasses(t *testing.T) {
+	t, reconciler, _, request := initStorageClusterResourceCreateUpdateTest(t, nil, nil)
+	assertVolumeGroupSnapshotterClasses(t, reconciler, request)
+}
+
+func assertVolumeGroupSnapshotterClasses(t *testing.T, reconciler StorageClusterReconciler,
+	request reconcile.Request) {
+	rbdVSCName := "ocsinit-rbdplugin-groupsnapclass"
+	cephfsVSCName := "ocsinit-cephfsplugin-groupsnapclass"
+	vscNames := []string{cephfsVSCName, rbdVSCName}
+	for _, eachVSCName := range vscNames {
+		actualVSC := &groupsnapapi.VolumeGroupSnapshotClass{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: eachVSCName,
+			},
+		}
+		request.Name = eachVSCName
+		err := reconciler.Client.Get(context.TODO(), request.NamespacedName, actualVSC)
+		assert.NoError(t, err)
+	}
+}
diff --git a/main.go b/main.go
index 37ea4d8d9e..dab9fba166 100644
--- a/main.go
+++ b/main.go
@@ -24,6 +24,7 @@ import (
 	"runtime"
 
 	nadscheme "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/scheme"
+	groupsnapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1"
 	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
 	nbapis "github.com/noobaa/noobaa-operator/v5/pkg/apis"
 	openshiftConfigv1 "github.com/openshift/api/config/v1"
@@ -86,6 +87,7 @@ func init() {
 	utilruntime.Must(corev1.AddToScheme(scheme))
 	utilruntime.Must(openshiftv1.AddToScheme(scheme))
 	utilruntime.Must(snapapi.AddToScheme(scheme))
+	utilruntime.Must(groupsnapapi.AddToScheme(scheme))
 	utilruntime.Must(openshiftConfigv1.AddToScheme(scheme))
 	utilruntime.Must(extv1.AddToScheme(scheme))
 	utilruntime.Must(routev1.AddToScheme(scheme))
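
Reviewer note (not part of the patch): for reference, the sketch below shows roughly what the RBD-flavoured VolumeGroupSnapshotClass produced by the new ocsGroupSnapshotClass manager is expected to look like, assuming a StorageCluster named "ocsinit" in the "openshift-storage" namespace. Only the class name and parameter keys are confirmed by this diff (see newVolumeGroupSnapshotClass and the new unit test); the driver string, pool name, and secret name come from helpers not shown here (generateNameForSnapshotClassDriver, generateNameForCephBlockPool, generateNameForSnapshotClassSecret) and are assumptions.

package main

import (
	"fmt"

	groupsnapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1"
	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Mirrors newVolumeGroupSnapshotClass for the rbdGroupSnapshotter case.
	class := groupsnapapi.VolumeGroupSnapshotClass{
		ObjectMeta: metav1.ObjectMeta{
			// <storagecluster-name>-<plugin>plugin-groupsnapclass, as asserted in the new unit test
			Name: "ocsinit-rbdplugin-groupsnapclass",
		},
		// assumed CSI driver name (namespace-prefixed RBD driver)
		Driver: "openshift-storage.rbd.csi.ceph.com",
		Parameters: map[string]string{
			"clusterID": "openshift-storage",
			// "pool" is the key chosen by setParameterBasedOnSnapshotterType for RBD;
			// the CephBlockPool name is an assumed value
			"pool": "ocsinit-cephblockpool",
			// group-snapshotter secret keys added by this change; the secret name is assumed
			"csi.storage.k8s.io/group-snapshotter-secret-name":      "rook-csi-rbd-provisioner",
			"csi.storage.k8s.io/group-snapshotter-secret-namespace": "openshift-storage",
		},
		DeletionPolicy: snapapi.VolumeSnapshotContentDelete,
	}
	fmt.Printf("%s -> %+v\n", class.Name, class.Parameters)
}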