diff --git a/controllers/storagecluster/generate.go b/controllers/storagecluster/generate.go
index d273768618..3cf3c033be 100644
--- a/controllers/storagecluster/generate.go
+++ b/controllers/storagecluster/generate.go
@@ -102,10 +102,21 @@ func generateNameForSnapshotClass(initData *ocsv1.StorageCluster, snapshotType S
 	return fmt.Sprintf("%s-%splugin-snapclass", initData.Name, snapshotType)
 }
 
+func generateNameForGroupSnapshotClass(initData *ocsv1.StorageCluster, groupSnapshotType GroupSnapshotterType) string {
+	return fmt.Sprintf("%s-%splugin-groupsnapclass", initData.Name, groupSnapshotType)
+}
+
 func generateNameForSnapshotClassDriver(snapshotType SnapshotterType) string {
 	return fmt.Sprintf("%s.%s.csi.ceph.com", util.StorageClassDriverNamePrefix, snapshotType)
 }
 
+func setParameterBasedOnSnapshotterType(instance *ocsv1.StorageCluster, groupSnapshotterType GroupSnapshotterType) (string, string) {
+	if groupSnapshotterType == rbdGroupSnapshotter {
+		return "pool", generateNameForCephBlockPool(instance)
+	}
+	return "fsName", generateNameForCephFilesystem(instance)
+}
+
 func generateNameForSnapshotClassSecret(instance *ocsv1.StorageCluster, snapshotType SnapshotterType) string {
 	// nfs uses the same cephfs secrets
 	if snapshotType == "nfs" {
diff --git a/controllers/storagecluster/initialization_reconciler_test.go b/controllers/storagecluster/initialization_reconciler_test.go
index b91d822218..1d10f5e738 100644
--- a/controllers/storagecluster/initialization_reconciler_test.go
+++ b/controllers/storagecluster/initialization_reconciler_test.go
@@ -411,12 +411,14 @@ func createFakeInitializationStorageClusterReconciler(t *testing.T, obj ...runti
 		ocsProviderService,
 		createVirtualMachineCRD(),
 		createStorageClientCRD(),
+		createVolumeGroupSnapshotClassCRD(),
 	)
 	client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(runtimeObjects...).WithStatusSubresource(statusSubresourceObjs...).Build()
 
 	availCrds := map[string]bool{
-		VirtualMachineCrdName: true,
-		StorageClientCrdName:  true,
+		VirtualMachineCrdName:           true,
+		StorageClientCrdName:            true,
+		VolumeGroupSnapshotClassCrdName: true,
 	}
 
 	return StorageClusterReconciler{
diff --git a/controllers/storagecluster/reconcile.go b/controllers/storagecluster/reconcile.go
index 06d04a6b40..58df77dc0e 100644
--- a/controllers/storagecluster/reconcile.go
+++ b/controllers/storagecluster/reconcile.go
@@ -77,6 +77,8 @@ const (
 
 	VirtualMachineCrdName = "virtualmachines.kubevirt.io"
 	StorageClientCrdName  = "storageclients.ocs.openshift.io"
+
+	VolumeGroupSnapshotClassCrdName = "volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io"
 )
 
 var storageClusterFinalizer = "storagecluster.ocs.openshift.io"
@@ -123,6 +125,7 @@ var validTopologyLabelKeys = []string{
 // +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors;prometheusrules,verbs=get;list;watch;create;update;delete
 // +kubebuilder:rbac:groups=template.openshift.io,resources=templates,verbs=get;list;watch;create;update;delete
 // +kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotclasses,verbs=get;watch;create;update;delete
+// +kubebuilder:rbac:groups=groupsnapshot.storage.k8s.io,resources=volumegroupsnapshotclasses,verbs=get;list;watch;create;update;delete
 // +kubebuilder:rbac:groups=config.openshift.io,resources=infrastructures;networks,verbs=get;list;watch
 // +kubebuilder:rbac:groups=config.openshift.io,resources=clusterversions;networks,verbs=get;list;watch
 // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch;create;update
@@ -150,7 +153,7 @@ func (r *StorageClusterReconciler) Reconcile(ctx context.Context, request reconc
 	r.Log = r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
 	r.ctx = ctrllog.IntoContext(ctx, r.Log)
 
-	for _, crdName := range []string{VirtualMachineCrdName, StorageClientCrdName} {
+	for _, crdName := range []string{VirtualMachineCrdName, StorageClientCrdName, VolumeGroupSnapshotClassCrdName} {
 		crd := &metav1.PartialObjectMetadata{}
 		crd.SetGroupVersionKind(extv1.SchemeGroupVersion.WithKind("CustomResourceDefinition"))
 		crd.Name = crdName
@@ -440,6 +443,7 @@ func (r *StorageClusterReconciler) reconcilePhases(
 			&ocsStorageClass{},
 			&ocsNoobaaSystem{},
 			&ocsSnapshotClass{},
+			&ocsGroupSnapshotClass{},
 			&ocsJobTemplates{},
 			&ocsCephRbdMirrors{},
 			&odfInfoConfig{},
@@ -459,6 +463,7 @@ func (r *StorageClusterReconciler) reconcilePhases(
 			&ocsStorageQuota{},
 			&ocsCephCluster{},
 			&ocsSnapshotClass{},
+			&ocsGroupSnapshotClass{},
 			&ocsNoobaaSystem{},
 			&odfInfoConfig{},
 		}
diff --git a/controllers/storagecluster/storageclasses_test.go b/controllers/storagecluster/storageclasses_test.go
index 44e24a5254..95ddcdcb58 100644
--- a/controllers/storagecluster/storageclasses_test.go
+++ b/controllers/storagecluster/storageclasses_test.go
@@ -107,6 +107,33 @@ var (
 			},
 		}
 	}
+	createVolumeGroupSnapshotClassCRD = func() *extv1.CustomResourceDefinition {
+		pluralName := "volumegroupsnapshotclasses"
+		return &extv1.CustomResourceDefinition{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "CustomResourceDefinition",
+				APIVersion: extv1.SchemeGroupVersion.String(),
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name: pluralName + "." + "groupsnapshot.storage.k8s.io",
+				UID:  "uid",
+			},
+			Spec: extv1.CustomResourceDefinitionSpec{
+				Group: "groupsnapshot.storage.k8s.io",
+				Scope: extv1.ClusterScoped,
+				Names: extv1.CustomResourceDefinitionNames{
+					Plural: pluralName,
+					Kind:   "VolumeGroupSnapshotClass",
+				},
+				Versions: []extv1.CustomResourceDefinitionVersion{
+					{
+						Name:   "v1alpha1",
+						Served: true,
+					},
+				},
+			},
+		}
+	}
 )
 
 func TestDefaultStorageClasses(t *testing.T) {
diff --git a/controllers/storagecluster/storagecluster_controller.go b/controllers/storagecluster/storagecluster_controller.go
index 788af03c44..18e97a7b70 100644
--- a/controllers/storagecluster/storagecluster_controller.go
+++ b/controllers/storagecluster/storagecluster_controller.go
@@ -246,6 +246,10 @@ func (r *StorageClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
 				false,
 			),
 		),
+		builder.WithPredicates(
+			util.NamePredicate(VolumeGroupSnapshotClassCrdName),
+			util.CrdCreateAndDeletePredicate(&r.Log, VolumeGroupSnapshotClassCrdName, r.AvailableCrds[VolumeGroupSnapshotClassCrdName]),
+		),
 		builder.OnlyMetadata,
 	).
 	Watches(&storagev1.StorageClass{}, enqueueStorageClusterRequest).
diff --git a/controllers/storagecluster/storagecluster_controller_test.go b/controllers/storagecluster/storagecluster_controller_test.go
index 0a8283a1e6..b8a5e13a5b 100644
--- a/controllers/storagecluster/storagecluster_controller_test.go
+++ b/controllers/storagecluster/storagecluster_controller_test.go
@@ -14,6 +14,7 @@ import (
 	ocsversion "github.com/red-hat-storage/ocs-operator/v4/version"
 
 	"github.com/blang/semver/v4"
+	groupsnapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1"
 	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
 	nbv1 "github.com/noobaa/noobaa-operator/v5/pkg/apis/noobaa/v1alpha1"
 	configv1 "github.com/openshift/api/config/v1"
@@ -1253,6 +1254,10 @@ func createFakeScheme(t *testing.T) *runtime.Scheme {
 	if err != nil {
 		assert.Fail(t, "failed to add volume-snapshot scheme")
 	}
+	err = groupsnapapi.AddToScheme(scheme)
+	if err != nil {
+		assert.Fail(t, "failed to add volume-group-snapshot scheme")
+	}
 	err = monitoringv1.AddToScheme(scheme)
 	if err != nil {
 		assert.Fail(t, "failed to add monitoringv1 scheme")
diff --git a/controllers/storagecluster/uninstall_reconciler.go b/controllers/storagecluster/uninstall_reconciler.go
index ffaecae525..e7c34f08bb 100644
--- a/controllers/storagecluster/uninstall_reconciler.go
+++ b/controllers/storagecluster/uninstall_reconciler.go
@@ -331,6 +331,7 @@ func (r *StorageClusterReconciler) deleteResources(sc *ocsv1.StorageCluster) (re
 		&ocsCephFilesystems{},
 		&ocsCephBlockPools{},
 		&ocsSnapshotClass{},
+		&ocsGroupSnapshotClass{},
 		&ocsStorageQuota{},
 		&ocsStorageClass{},
 		&ocsCephCluster{},
diff --git a/controllers/storagecluster/volumegroupsnapshotterclasses.go b/controllers/storagecluster/volumegroupsnapshotterclasses.go
new file mode 100644
index 0000000000..93c8599f7b
--- /dev/null
+++ b/controllers/storagecluster/volumegroupsnapshotterclasses.go
@@ -0,0 +1,175 @@
+package storagecluster
+
+import (
+	"fmt"
+	"reflect"
+
+	groupsnapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1"
+	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
+	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+type GroupSnapshotterType string
+
+type ocsGroupSnapshotClass struct{}
+
+const (
+	rbdGroupSnapshotter    GroupSnapshotterType = "rbd"
+	cephfsGroupSnapshotter GroupSnapshotterType = "cephfs"
+)
+
+const (
+	groupSnapshotterSecretName      = "csi.storage.k8s.io/group-snapshotter-secret-name"
+	groupSnapshotterSecretNamespace = "csi.storage.k8s.io/group-snapshotter-secret-namespace"
+)
+
+type GroupSnapshotClassConfiguration struct {
+	groupSnapshotClass *groupsnapapi.VolumeGroupSnapshotClass
+	reconcileStrategy  ReconcileStrategy
+	disable            bool
+}
+
+func newVolumeGroupSnapshotClass(instance *ocsv1.StorageCluster, groupSnapshotterType GroupSnapshotterType) *groupsnapapi.VolumeGroupSnapshotClass {
+	driverName, driverValue := setParameterBasedOnSnapshotterType(instance, groupSnapshotterType)
+	groupSnapClass := &groupsnapapi.VolumeGroupSnapshotClass{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: generateNameForGroupSnapshotClass(instance, groupSnapshotterType),
+		},
+		Driver: generateNameForSnapshotClassDriver(SnapshotterType(groupSnapshotterType)),
+		Parameters: map[string]string{
+			"clusterID":                     instance.Namespace,
+			driverName:                      driverValue,
+			groupSnapshotterSecretName:      generateNameForSnapshotClassSecret(instance, SnapshotterType(groupSnapshotterType)),
+			groupSnapshotterSecretNamespace: instance.Namespace,
+		},
+		DeletionPolicy: snapapi.VolumeSnapshotContentDelete,
+	}
+	return groupSnapClass
+}
+
+func newCephFilesystemGroupSnapshotClassConfiguration(instance *ocsv1.StorageCluster) GroupSnapshotClassConfiguration {
+	return GroupSnapshotClassConfiguration{
+		groupSnapshotClass: newVolumeGroupSnapshotClass(instance, cephfsGroupSnapshotter),
+		reconcileStrategy:  ReconcileStrategy(instance.Spec.ManagedResources.CephFilesystems.ReconcileStrategy),
+	}
+}
+
+func newCephBlockPoolGroupSnapshotClassConfiguration(instance *ocsv1.StorageCluster) GroupSnapshotClassConfiguration {
+	return GroupSnapshotClassConfiguration{
+		groupSnapshotClass: newVolumeGroupSnapshotClass(instance, rbdGroupSnapshotter),
+		reconcileStrategy:  ReconcileStrategy(instance.Spec.ManagedResources.CephBlockPools.ReconcileStrategy),
+	}
+}
+
+func newGroupSnapshotClassConfigurations(instance *ocsv1.StorageCluster) []GroupSnapshotClassConfiguration {
+	vsccs := []GroupSnapshotClassConfiguration{
+		newCephFilesystemGroupSnapshotClassConfiguration(instance),
+		newCephBlockPoolGroupSnapshotClassConfiguration(instance),
+	}
+	return vsccs
+}
+
+func (r *StorageClusterReconciler) createGroupSnapshotClasses(vsccs []GroupSnapshotClassConfiguration) error {
+
+	for _, vscc := range vsccs {
+		if vscc.reconcileStrategy == ReconcileStrategyIgnore || vscc.disable {
+			continue
+		}
+
+		vsc := vscc.groupSnapshotClass
+		existing := &groupsnapapi.VolumeGroupSnapshotClass{}
+		err := r.Client.Get(r.ctx, types.NamespacedName{Name: vsc.Name, Namespace: vsc.Namespace}, existing)
+		if err != nil {
+			if errors.IsNotFound(err) {
+				// Since the GroupSnapshotClass is not found, we will create a new one
+				r.Log.Info("Creating GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", vsc.Name))
+				err = r.Client.Create(r.ctx, vsc)
+				if err != nil {
+					r.Log.Error(err, "Failed to create GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", vsc.Name))
+					return err
+				}
+				// no error, continue with the next iteration
+				continue
+			}
+
+			r.Log.Error(err, "Failed to 'Get' GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", vsc.Name))
+			return err
+		}
+		if vscc.reconcileStrategy == ReconcileStrategyInit {
+			return nil
+		}
+		if existing.DeletionTimestamp != nil {
+			return fmt.Errorf("failed to restore GroupSnapshotClass %q because it is marked for deletion", existing.Name)
+		}
+		// if there is a mismatch in the parameters of existing vs created resources,
+		// we have to update the existing GroupSnapshotClass
+		if !reflect.DeepEqual(vsc.Parameters, existing.Parameters) {
+			r.Log.Info("GroupSnapshotClass needs to be updated", "GroupSnapshotClass", klog.KRef("", existing.Name))
+			existing.ObjectMeta.OwnerReferences = vsc.ObjectMeta.OwnerReferences
+			vsc.ObjectMeta = existing.ObjectMeta
+			if err := r.Client.Update(r.ctx, vsc); err != nil {
+				r.Log.Error(err, "Failed to update GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", existing.Name))
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (obj *ocsGroupSnapshotClass) ensureCreated(r *StorageClusterReconciler, instance *ocsv1.StorageCluster) (reconcile.Result, error) {
+	if !r.AvailableCrds[VolumeGroupSnapshotClassCrdName] {
+		r.Log.Info("VolumeGroupSnapshotClass CRD is not available")
+		return reconcile.Result{}, nil
+	}
+
+	vgsc := newGroupSnapshotClassConfigurations(instance)
+
+	err := r.createGroupSnapshotClasses(vgsc)
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+
+	return reconcile.Result{}, nil
+}
+
+func (obj *ocsGroupSnapshotClass) ensureDeleted(r *StorageClusterReconciler, instance *ocsv1.StorageCluster) (reconcile.Result, error) {
+	if !r.AvailableCrds[VolumeGroupSnapshotClassCrdName] {
+		r.Log.Info("VolumeGroupSnapshotClass CRD doesn't exist")
+		return reconcile.Result{}, nil
+	}
+
+	vgscs := newGroupSnapshotClassConfigurations(instance)
+	for _, vgsc := range vgscs {
+		sc := vgsc.groupSnapshotClass
+		existing := groupsnapapi.VolumeGroupSnapshotClass{}
+		err := r.Client.Get(r.ctx, types.NamespacedName{Name: sc.Name, Namespace: sc.Namespace}, &existing)
+
+		switch {
+		case err == nil:
+			if existing.DeletionTimestamp != nil {
+				r.Log.Info("Uninstall: GroupSnapshotClass is already marked for deletion.", "GroupSnapshotClass", klog.KRef("", existing.Name))
+				break
+			}
+
+			r.Log.Info("Uninstall: Deleting GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", existing.Name))
+			existing.ObjectMeta.OwnerReferences = sc.ObjectMeta.OwnerReferences
+			sc.ObjectMeta = existing.ObjectMeta
+
+			err = r.Client.Delete(r.ctx, sc)
+			if err != nil && !errors.IsNotFound(err) {
+				r.Log.Error(err, "Uninstall: Error deleting the GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", existing.Name))
+				return reconcile.Result{}, err
+			}
+		case errors.IsNotFound(err):
+			r.Log.Info("Uninstall: GroupSnapshotClass not found, nothing to do.", "GroupSnapshotClass", klog.KRef("", sc.Name))
+		default:
+			r.Log.Error(err, "Uninstall: Error while getting GroupSnapshotClass.", "GroupSnapshotClass", klog.KRef("", sc.Name))
+		}
+	}
+	return reconcile.Result{}, nil
+}
diff --git a/controllers/storagecluster/volumegroupsnapshotterclasses_test.go b/controllers/storagecluster/volumegroupsnapshotterclasses_test.go
new file mode 100644
index 0000000000..eb9f853198
--- /dev/null
+++ b/controllers/storagecluster/volumegroupsnapshotterclasses_test.go
@@ -0,0 +1,33 @@
+package storagecluster
+
+import (
+	"context"
+	"testing"
+
+	groupsnapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1"
+	"github.com/stretchr/testify/assert"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+func TestVolumeGroupSnapshotterClasses(t *testing.T) {
+	t, reconciler, _, request := initStorageClusterResourceCreateUpdateTest(t, nil, nil)
+	assertVolumeGroupSnapshotterClasses(t, reconciler, request)
+}
+
+func assertVolumeGroupSnapshotterClasses(t *testing.T, reconciler StorageClusterReconciler,
+	request reconcile.Request) {
+	rbdVSCName := "ocsinit-rbdplugin-groupsnapclass"
+	cephfsVSCName := "ocsinit-cephfsplugin-groupsnapclass"
+	vscNames := []string{cephfsVSCName, rbdVSCName}
+	for _, eachVSCName := range vscNames {
+		actualVSC := &groupsnapapi.VolumeGroupSnapshotClass{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: eachVSCName,
+			},
+		}
+		request.Name = eachVSCName
+		err := reconciler.Client.Get(context.TODO(), request.NamespacedName, actualVSC)
+		assert.NoError(t, err)
+	}
+}
diff --git a/main.go b/main.go
index 19ff7324e6..e197cb4e83 100644
--- a/main.go
+++ b/main.go
@@ -24,6 +24,7 @@ import (
 	"runtime"
 
 	nadscheme "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/scheme"
+	groupsnapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumegroupsnapshot/v1alpha1"
 	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1"
nbapis "github.com/noobaa/noobaa-operator/v5/pkg/apis" openshiftConfigv1 "github.com/openshift/api/config/v1" @@ -87,6 +88,7 @@ func init() { utilruntime.Must(corev1.AddToScheme(scheme)) utilruntime.Must(openshiftv1.AddToScheme(scheme)) utilruntime.Must(snapapi.AddToScheme(scheme)) + utilruntime.Must(groupsnapapi.AddToScheme(scheme)) utilruntime.Must(openshiftConfigv1.AddToScheme(scheme)) utilruntime.Must(extv1.AddToScheme(scheme)) utilruntime.Must(routev1.AddToScheme(scheme))