From bf8120c182478f78de9d437aac083dea9ae24322 Mon Sep 17 00:00:00 2001
From: Malay Kumar Parida
Date: Fri, 12 Jul 2024 11:47:13 +0530
Subject: [PATCH] Fix pg_autoscaler won't scale due to overlapping roots

There was no CephBlockPool defined for the .mgr pool, so it was using
the default crush rule (with no deviceClass specified). Likewise, the
CephObjectStore & CephFilesystem metadataPool did not define a
deviceClass (it is only set in the dataPools). A pool without a
deviceClass maps to the unclassified default CRUSH root, which overlaps
the device-class shadow roots used by every other pool, and the
pg_autoscaler refuses to scale pools whose CRUSH roots overlap.

To fix this, create a CephBlockPool CR for the .mgr pool with the same
poolSpec as the default CephBlockPool, and set the deviceClass for the
metadataPool in CephObjectStore and CephFilesystem.

Signed-off-by: Malay Kumar Parida
---
 controllers/storagecluster/cephblockpools.go      | 16 ++++++++++++++++
 .../storagecluster/cephblockpools_test.go         |  6 +++---
 controllers/storagecluster/cephfilesystem.go      |  3 +++
 controllers/storagecluster/cephobjectstores.go    |  1 +
 4 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/controllers/storagecluster/cephblockpools.go b/controllers/storagecluster/cephblockpools.go
index 269429469a..c67e7f8b13 100644
--- a/controllers/storagecluster/cephblockpools.go
+++ b/controllers/storagecluster/cephblockpools.go
@@ -44,6 +44,8 @@ func (r *StorageClusterReconciler) addPeerSecretsToCephBlockPool(initData *ocsv1
 func (r *StorageClusterReconciler) newCephBlockPoolInstances(initData *ocsv1.StorageCluster) ([]*cephv1.CephBlockPool, error) {
 	var mirroringSpec cephv1.MirroringSpec
 	poolName := generateNameForCephBlockPool(initData)
+	// This name is used by the UI to hide this pool from the list of CephBlockPools
+	builtinMgrPoolName := "builtin-mgr"
 	poolNamespace := initData.Namespace
 
 	if initData.Spec.Mirroring.Enabled {
@@ -69,6 +71,20 @@ func (r *StorageClusterReconciler) newCephBlockPoolInstances(initData *ocsv1.Sto
 				},
 			},
 		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      builtinMgrPoolName,
+				Namespace: poolNamespace,
+			},
+			Spec: cephv1.NamedBlockPoolSpec{
+				Name: ".mgr",
+				PoolSpec: cephv1.PoolSpec{
+					DeviceClass:   initData.Status.DefaultCephDeviceClass,
+					FailureDomain: getFailureDomain(initData),
+					Replicated:    generateCephReplicatedSpec(initData, "metadata"),
+				},
+			},
+		},
 	}
 
 	// Create Non-Resilient CephBlockPools if enabled
diff --git a/controllers/storagecluster/cephblockpools_test.go b/controllers/storagecluster/cephblockpools_test.go
index 31f45a6fe5..727ebea556 100644
--- a/controllers/storagecluster/cephblockpools_test.go
+++ b/controllers/storagecluster/cephblockpools_test.go
@@ -171,8 +171,8 @@ func assertCephNFSBlockPool(t *testing.T, reconciler StorageClusterReconciler, c
 
 	expectedAf, err := reconciler.newCephBlockPoolInstances(cr)
 	assert.NoError(t, err)
-	assert.Equal(t, len(expectedAf[1].OwnerReferences), 1)
+	assert.Equal(t, len(expectedAf[2].OwnerReferences), 1)
 
-	assert.Equal(t, expectedAf[1].ObjectMeta.Name, actualNFSBlockPool.ObjectMeta.Name)
-	assert.Equal(t, expectedAf[1].Spec, actualNFSBlockPool.Spec)
+	assert.Equal(t, expectedAf[2].ObjectMeta.Name, actualNFSBlockPool.ObjectMeta.Name)
+	assert.Equal(t, expectedAf[2].Spec, actualNFSBlockPool.Spec)
 }
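
The index bump in the test above follows from the new slice layout:
newCephBlockPoolInstances now returns the builtin-mgr pool at index 1,
directly after the default pool, which pushes the NFS pool from index 1
to index 2. A companion assertion for the new entry could look like the
sketch below; it is a hypothetical addition to cephblockpools_test.go,
and the fixture helper names are assumptions standing in for whatever
setup the surrounding tests use, not part of this patch:

// Hypothetical test sketch: verify the builtin-mgr entry this patch
// adds at index 1 of the returned slice. The two fixture helpers are
// assumed names, mirroring the setup of the existing tests.
func TestNewCephBlockPoolInstancesBuiltinMgr(t *testing.T) {
	cr := createDefaultStorageCluster()                 // assumed fixture
	reconciler := createFakeStorageClusterReconciler(t) // assumed fixture

	pools, err := reconciler.newCephBlockPoolInstances(cr)
	assert.NoError(t, err)

	mgrPool := pools[1] // default pool is index 0, NFS pool moved to index 2
	assert.Equal(t, "builtin-mgr", mgrPool.ObjectMeta.Name)
	assert.Equal(t, ".mgr", mgrPool.Spec.Name)
	// The actual fix: .mgr now pins the default device class instead of
	// falling back to the unclassified default crush rule.
	assert.Equal(t, cr.Status.DefaultCephDeviceClass, mgrPool.Spec.PoolSpec.DeviceClass)
}
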
diff --git a/controllers/storagecluster/cephfilesystem.go b/controllers/storagecluster/cephfilesystem.go
index 470d50366d..77190578ba 100644
--- a/controllers/storagecluster/cephfilesystem.go
+++ b/controllers/storagecluster/cephfilesystem.go
@@ -79,6 +79,9 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
 		}
 	}
 
+	// set device class for metadata pool from the default data pool
+	ret.Spec.MetadataPool.DeviceClass = ret.Spec.DataPools[0].PoolSpec.DeviceClass
+
 	err := controllerutil.SetControllerReference(initStorageCluster, ret, r.Scheme)
 	if err != nil {
 		r.Log.Error(err, "Unable to set Controller Reference for CephFileSystem.", "CephFileSystem", klog.KRef(ret.Namespace, ret.Name))
diff --git a/controllers/storagecluster/cephobjectstores.go b/controllers/storagecluster/cephobjectstores.go
index a36ebd2980..add5a8f1e9 100644
--- a/controllers/storagecluster/cephobjectstores.go
+++ b/controllers/storagecluster/cephobjectstores.go
@@ -173,6 +173,7 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
 				Replicated: generateCephReplicatedSpec(initData, "data"),
 			},
 			MetadataPool: cephv1.PoolSpec{
+				DeviceClass:   initData.Status.DefaultCephDeviceClass,
 				FailureDomain: initData.Status.FailureDomain,
 				Replicated:    generateCephReplicatedSpec(initData, "metadata"),
 			},
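
For context on the failure mode named in the subject line: Ceph's
pg_autoscaler groups pools by the CRUSH subtree ("root") their rules
target, and when two distinct roots share OSDs it cannot attribute raw
capacity unambiguously, so it warns that the pools won't scale due to
overlapping roots and stops adjusting their PG counts. A pool with no
deviceClass resolves to the unclassified default root, which shares
every OSD with the device-class shadow roots (default~ssd, default~hdd)
used by the classified pools. The self-contained Go toy below is only an
illustration of that set-intersection view, not Ceph's implementation:

package main

import "fmt"

// overlaps reports whether two CRUSH roots reach any common OSD. This is
// a toy model of the pg_autoscaler overlap check, not Ceph code.
func overlaps(a, b map[int]bool) bool {
	for osd := range a {
		if b[osd] {
			return true
		}
	}
	return false
}

func main() {
	// A 3-OSD cluster where every OSD is an SSD.
	defaultRoot := map[int]bool{0: true, 1: true, 2: true} // unclassified root: all OSDs
	ssdRoot := map[int]bool{0: true, 1: true, 2: true}     // default~ssd shadow root

	// Before this patch: .mgr (and the metadata pools) used the
	// unclassified root while the data pools used the ssd shadow root --
	// two distinct roots sharing OSDs, so the autoscaler stopped scaling.
	fmt.Println("roots overlap:", overlaps(defaultRoot, ssdRoot)) // true

	// After this patch every pool pins a deviceClass, so all pools map to
	// the single ssd root and no pair of distinct roots is left to conflict.
}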