diff --git a/controllers/storagecluster/cephcluster.go b/controllers/storagecluster/cephcluster.go
index d416a0fddb..ff580ee415 100644
--- a/controllers/storagecluster/cephcluster.go
+++ b/controllers/storagecluster/cephcluster.go
@@ -505,7 +505,10 @@ func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, serverVersion *v
 	// If the `monPVCTemplate` is provided, the mons will provisioned on the
 	// provided `monPVCTemplate`.
 	if monPVCTemplate != nil {
-		cephCluster.Spec.Mon.VolumeClaimTemplate = monPVCTemplate
+		cephCluster.Spec.Mon.VolumeClaimTemplate = &rookCephv1.VolumeClaimTemplate{
+			ObjectMeta: monPVCTemplate.ObjectMeta,
+			Spec:       monPVCTemplate.Spec,
+		}
 		// If the `monDataDirHostPath` is provided without the `monPVCTemplate`,
 		// the mons will be provisioned on the provided `monDataDirHostPath`.
 	} else if len(monDataDirHostPath) > 0 {
@@ -514,10 +517,10 @@ func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, serverVersion *v
 		// be provisioned using the PVC template of first StorageDeviceSets if present.
 	} else if len(sc.Spec.StorageDeviceSets) > 0 {
 		ds := sc.Spec.StorageDeviceSets[0]
-		cephCluster.Spec.Mon.VolumeClaimTemplate = &corev1.PersistentVolumeClaim{
+		cephCluster.Spec.Mon.VolumeClaimTemplate = &rookCephv1.VolumeClaimTemplate{
 			Spec: corev1.PersistentVolumeClaimSpec{
 				StorageClassName: ds.DataPVCTemplate.Spec.StorageClassName,
-				Resources: corev1.ResourceRequirements{
+				Resources: corev1.VolumeResourceRequirements{
 					Requests: corev1.ResourceList{
 						corev1.ResourceStorage: resource.MustParse("50Gi"),
 					},
@@ -877,26 +880,35 @@ func newStorageClassDeviceSets(sc *ocsv1.StorageCluster, serverVersion *version.
 		ds.DataPVCTemplate.Annotations = annotations
 
 		set := rookCephv1.StorageClassDeviceSet{
-			Name:                 fmt.Sprintf("%s-%d", ds.Name, i),
-			Count:                count,
-			Resources:            resources,
-			Placement:            placement,
-			PreparePlacement:     &preparePlacement,
-			Config:               ds.Config.ToMap(),
-			VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ds.DataPVCTemplate},
-			Portable:             portable,
-			TuneSlowDeviceClass:  ds.Config.TuneSlowDeviceClass,
-			TuneFastDeviceClass:  ds.Config.TuneFastDeviceClass,
-			Encrypted:            sc.Spec.Encryption.Enable || sc.Spec.Encryption.ClusterWide,
+			Name:             fmt.Sprintf("%s-%d", ds.Name, i),
+			Count:            count,
+			Resources:        resources,
+			Placement:        placement,
+			PreparePlacement: &preparePlacement,
+			Config:           ds.Config.ToMap(),
+			VolumeClaimTemplates: []rookCephv1.VolumeClaimTemplate{{
+				ObjectMeta: ds.DataPVCTemplate.ObjectMeta,
+				Spec:       ds.DataPVCTemplate.Spec,
+			}},
+			Portable:            portable,
+			TuneSlowDeviceClass: ds.Config.TuneSlowDeviceClass,
+			TuneFastDeviceClass: ds.Config.TuneFastDeviceClass,
+			Encrypted:           sc.Spec.Encryption.Enable || sc.Spec.Encryption.ClusterWide,
 		}
 
 		if ds.MetadataPVCTemplate != nil {
 			ds.MetadataPVCTemplate.ObjectMeta.Name = metadataPVCName
-			set.VolumeClaimTemplates = append(set.VolumeClaimTemplates, *ds.MetadataPVCTemplate)
+			set.VolumeClaimTemplates = append(set.VolumeClaimTemplates, rookCephv1.VolumeClaimTemplate{
+				ObjectMeta: ds.MetadataPVCTemplate.ObjectMeta,
+				Spec:       ds.MetadataPVCTemplate.Spec,
+			})
 		}
 		if ds.WalPVCTemplate != nil {
 			ds.WalPVCTemplate.ObjectMeta.Name = walPVCName
-			set.VolumeClaimTemplates = append(set.VolumeClaimTemplates, *ds.WalPVCTemplate)
+			set.VolumeClaimTemplates = append(set.VolumeClaimTemplates, rookCephv1.VolumeClaimTemplate{
+				ObjectMeta: ds.WalPVCTemplate.ObjectMeta,
+				Spec:       ds.WalPVCTemplate.Spec,
+			})
 		}
 
 		storageClassDeviceSets = append(storageClassDeviceSets, set)
@@ -920,10 +932,16 @@ func newStorageClassDeviceSets(sc *ocsv1.StorageCluster, serverVersion *version.
 			"crushDeviceClass": failureDomainValue,
 		}
 		if !reflect.DeepEqual(sc.Spec.ManagedResources.CephNonResilientPools.VolumeClaimTemplate, corev1.PersistentVolumeClaim{}) {
-			ds.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{sc.Spec.ManagedResources.CephNonResilientPools.VolumeClaimTemplate}
+			ds.VolumeClaimTemplates = []rookCephv1.VolumeClaimTemplate{{
+				ObjectMeta: sc.Spec.ManagedResources.CephNonResilientPools.VolumeClaimTemplate.ObjectMeta,
+				Spec:       sc.Spec.ManagedResources.CephNonResilientPools.VolumeClaimTemplate.Spec,
+			}}
 		} else {
 			// If not defined use the spec for volumeclaimtemplate from existing devicesets
-			ds.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{sc.Spec.StorageDeviceSets[0].DataPVCTemplate}
+			ds.VolumeClaimTemplates = []rookCephv1.VolumeClaimTemplate{{
+				ObjectMeta: sc.Spec.StorageDeviceSets[0].DataPVCTemplate.ObjectMeta,
+				Spec:       sc.Spec.StorageDeviceSets[0].DataPVCTemplate.Spec,
+			}}
 		}
 		ds.VolumeClaimTemplates[0].Annotations = annotations
 		ds.Portable = sc.Status.FailureDomain != "host"
@@ -1066,7 +1084,10 @@ func generateStretchClusterSpec(sc *ocsv1.StorageCluster) *rookCephv1.StretchClu
 			Arbiter: true,
 		}
 		if sc.Spec.Arbiter.ArbiterMonPVCTemplate != nil {
-			arbiterZoneSpec.VolumeClaimTemplate = sc.Spec.Arbiter.ArbiterMonPVCTemplate
+			arbiterZoneSpec.VolumeClaimTemplate = &rookCephv1.VolumeClaimTemplate{
+				ObjectMeta: sc.Spec.Arbiter.ArbiterMonPVCTemplate.ObjectMeta,
+				Spec:       sc.Spec.Arbiter.ArbiterMonPVCTemplate.Spec,
+			}
 		}
 
 		stretchClusterSpec.Zones = append(stretchClusterSpec.Zones, arbiterZoneSpec)
diff --git a/controllers/storagecluster/cephcluster_test.go b/controllers/storagecluster/cephcluster_test.go
index d2ca035cfb..b1a2955ae4 100644
--- a/controllers/storagecluster/cephcluster_test.go
+++ b/controllers/storagecluster/cephcluster_test.go
@@ -283,10 +283,11 @@ func TestNewCephClusterMonData(t *testing.T) {
 		assert.Equal(t, c.expectedMonDataPath, actual.Spec.DataDirHostPath)
 
 		if c.monPVCTemplate != nil {
-			assert.DeepEqual(t, actual.Spec.Mon.VolumeClaimTemplate, c.sc.Spec.MonPVCTemplate)
+			assert.DeepEqual(t, actual.Spec.Mon.VolumeClaimTemplate.ObjectMeta, c.sc.Spec.MonPVCTemplate.ObjectMeta)
+			assert.DeepEqual(t, actual.Spec.Mon.VolumeClaimTemplate.Spec, c.sc.Spec.MonPVCTemplate.Spec)
 		} else {
 			if c.monDataPath != "" {
-				var emptyPVCSpec *corev1.PersistentVolumeClaim
+				var emptyPVCSpec *rookCephv1.VolumeClaimTemplate
 				assert.DeepEqual(t, emptyPVCSpec, actual.Spec.Mon.VolumeClaimTemplate)
 			} else {
 				pvcSpec := actual.Spec.Mon.VolumeClaimTemplate.Spec
@@ -581,7 +582,8 @@ func TestStorageClassDeviceSetCreation(t *testing.T) {
 			assert.Equal(t, fmt.Sprintf("%s-%d", deviceSet.Name, i), scds.Name)
 			assert.Equal(t, deviceSet.Count/3, scds.Count)
 			assert.DeepEqual(t, defaults.GetProfileDaemonResources("osd", c.sc), scds.Resources)
-			assert.DeepEqual(t, deviceSet.DataPVCTemplate, scds.VolumeClaimTemplates[0])
+			assert.DeepEqual(t, deviceSet.DataPVCTemplate.ObjectMeta, scds.VolumeClaimTemplates[0].ObjectMeta)
+			assert.DeepEqual(t, deviceSet.DataPVCTemplate.Spec, scds.VolumeClaimTemplates[0].Spec)
 			assert.Equal(t, true, scds.Portable)
 			assert.Equal(t, c.sc.Spec.Encryption.ClusterWide, scds.Encrypted)
 
@@ -647,7 +649,8 @@ func TestStorageClassDeviceSetCreation(t *testing.T) {
 			assert.Equal(t, fmt.Sprintf("%s-%d", deviceSet.Name, i), scds.Name)
 			assert.Equal(t, deviceSet.Count/3, scds.Count)
 			assert.DeepEqual(t, defaults.GetProfileDaemonResources("osd", c.sc), scds.Resources)
-			assert.DeepEqual(t, deviceSet.DataPVCTemplate, scds.VolumeClaimTemplates[0])
+			assert.DeepEqual(t, deviceSet.DataPVCTemplate.ObjectMeta, scds.VolumeClaimTemplates[0].ObjectMeta)
+			assert.DeepEqual(t, deviceSet.DataPVCTemplate.Spec, scds.VolumeClaimTemplates[0].Spec)
 			assert.Equal(t, true, scds.Portable)
 			assert.Equal(t, c.sc.Spec.Encryption.ClusterWide, scds.Encrypted)
 			if scds.Portable && c.topologyKey == "rack" {
@@ -888,7 +891,8 @@ func TestStorageClassDeviceSetCreationForArbiter(t *testing.T) {
 			assert.Equal(t, fmt.Sprintf("%s-%d", deviceSet.Name, i), scds.Name)
 			assert.Equal(t, deviceSet.Count, scds.Count)
 			assert.DeepEqual(t, defaults.GetProfileDaemonResources("osd", c.sc), scds.Resources)
-			assert.DeepEqual(t, deviceSet.DataPVCTemplate, scds.VolumeClaimTemplates[0])
+			assert.DeepEqual(t, deviceSet.DataPVCTemplate.ObjectMeta, scds.VolumeClaimTemplates[0].ObjectMeta)
+			assert.DeepEqual(t, deviceSet.DataPVCTemplate.Spec, scds.VolumeClaimTemplates[0].Spec)
 			assert.Equal(t, true, scds.Portable)
 			assert.Equal(t, c.sc.Spec.Encryption.ClusterWide, scds.Encrypted)
 			assert.DeepEqual(t, getPlacement(c.sc, "osd-tsc"), scds.Placement)
diff --git a/controllers/storagecluster/storagecluster_controller_test.go b/controllers/storagecluster/storagecluster_controller_test.go
index 2748e7b5ab..a4f4df7c6a 100644
--- a/controllers/storagecluster/storagecluster_controller_test.go
+++ b/controllers/storagecluster/storagecluster_controller_test.go
@@ -130,7 +130,7 @@ var mockDataPVCTemplate = corev1.PersistentVolumeClaim{
 	},
 	Spec: corev1.PersistentVolumeClaimSpec{
 		AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
-		Resources: corev1.ResourceRequirements{
+		Resources: corev1.VolumeResourceRequirements{
 			Requests: corev1.ResourceList{
 				corev1.ResourceStorage: resource.MustParse("1Ti"),
 			},
@@ -143,7 +143,7 @@ var mockDataPVCTemplate = corev1.PersistentVolumeClaim{
 var mockMetaDataPVCTemplate = &corev1.PersistentVolumeClaim{
 	Spec: corev1.PersistentVolumeClaimSpec{
 		AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
-		Resources: corev1.ResourceRequirements{
+		Resources: corev1.VolumeResourceRequirements{
 			Requests: corev1.ResourceList{
 				corev1.ResourceStorage: resource.MustParse("1Ti"),
 			},
@@ -156,7 +156,7 @@ var mockMetaDataPVCTemplate = &corev1.PersistentVolumeClaim{
 var mockWalPVCTemplate = &corev1.PersistentVolumeClaim{
 	Spec: corev1.PersistentVolumeClaimSpec{
 		AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
-		Resources: corev1.ResourceRequirements{
+		Resources: corev1.VolumeResourceRequirements{
 			Requests: corev1.ResourceList{
 				corev1.ResourceStorage: resource.MustParse("1Ti"),
 			},
diff --git a/controllers/storagecluster/storagequota_test.go b/controllers/storagecluster/storagequota_test.go
index 42198e7148..976b3553e0 100644
--- a/controllers/storagecluster/storagequota_test.go
+++ b/controllers/storagecluster/storagequota_test.go
@@ -27,7 +27,7 @@ var mockStorageDeviceSets = []api.StorageDeviceSet{
 		DataPVCTemplate: corev1.PersistentVolumeClaim{
 			Spec: corev1.PersistentVolumeClaimSpec{
 				StorageClassName: &mockStorageClassName,
-				Resources: corev1.ResourceRequirements{
+				Resources: corev1.VolumeResourceRequirements{
 					Requests: corev1.ResourceList{
 						corev1.ResourceStorage: mockQuantity1T,
 					},
diff --git a/functests/common.go b/functests/common.go
index bfd37b61c9..8625b0cbf1 100644
--- a/functests/common.go
+++ b/functests/common.go
@@ -51,7 +51,7 @@ func GetRandomPVC(storageClass string, quantity string) *k8sv1.PersistentVolumeC
 		Spec: k8sv1.PersistentVolumeClaimSpec{
 			StorageClassName: &storageClass,
 			AccessModes: []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteOnce},
-			Resources: k8sv1.ResourceRequirements{
+			Resources: k8sv1.VolumeResourceRequirements{
 				Requests: k8sv1.ResourceList{
 					"storage": storageQuantity,
 				},
diff --git a/pkg/deploy-manager/storagecluster.go b/pkg/deploy-manager/storagecluster.go
index c2e88b45a1..f922e3485b 100644
--- a/pkg/deploy-manager/storagecluster.go
+++ b/pkg/deploy-manager/storagecluster.go
@@ -88,7 +88,7 @@ func (t *DeployManager) DefaultStorageCluster() (*ocsv1.StorageCluster, error) {
 		Spec: k8sv1.PersistentVolumeClaimSpec{
 			StorageClassName: &storageClassName,
 			AccessModes: []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteOnce},
-			Resources: k8sv1.ResourceRequirements{
+			Resources: k8sv1.VolumeResourceRequirements{
 				Requests: k8sv1.ResourceList{
 					"storage": monQuantity,
 				},
@@ -155,7 +155,7 @@ func (t *DeployManager) DefaultStorageCluster() (*ocsv1.StorageCluster, error) {
 		Spec: k8sv1.PersistentVolumeClaimSpec{
 			AccessModes: []k8sv1.PersistentVolumeAccessMode{k8sv1.ReadWriteOnce},
 			VolumeMode: &blockVolumeMode,
-			Resources: k8sv1.ResourceRequirements{
+			Resources: k8sv1.VolumeResourceRequirements{
 				Requests: k8sv1.ResourceList{
 					"storage": dataQuantity,
 				},
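
Review note, not part of the patch: every call site above repeats the same two-field copy from a corev1.PersistentVolumeClaim into a rookCephv1.VolumeClaimTemplate. A small helper could factor that out. The sketch below assumes only what the diff itself shows (a VolumeClaimTemplate with ObjectMeta and Spec fields); toVolumeClaimTemplate is a hypothetical name and does not exist in the repo or in Rook.

// Sketch only, not part of this patch.
package storagecluster

import (
	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	corev1 "k8s.io/api/core/v1"
)

// toVolumeClaimTemplate copies the metadata and spec of a core
// PersistentVolumeClaim into the Rook VolumeClaimTemplate shape used by
// CephCluster mons and StorageClassDeviceSets in the hunks above.
func toVolumeClaimTemplate(pvc corev1.PersistentVolumeClaim) rookCephv1.VolumeClaimTemplate {
	return rookCephv1.VolumeClaimTemplate{
		ObjectMeta: pvc.ObjectMeta,
		Spec:       pvc.Spec,
	}
}

With such a helper, the three-line literals would collapse to calls like set.VolumeClaimTemplates = append(set.VolumeClaimTemplates, toVolumeClaimTemplate(*ds.MetadataPVCTemplate)); whether that is worth the extra indirection is a style call for this PR.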