From d1fe7112c169d070f30a635f248bff39528f8f24 Mon Sep 17 00:00:00 2001
From: Malay Kumar Parida
Date: Tue, 28 Jan 2025 22:47:08 +0530
Subject: [PATCH] Enable specifying target size ratio for block, file and object pools

Configuring the target size ratio enables Ceph to adjust PGs based on the
anticipated usage of each pool. Currently all the data pools (RBD/CephFS/
object) have a target_size_ratio of 0.49. Using the same ratio for every
data pool under-allocates PGs for some pools and over-allocates them for
others. This change allows the target size ratio to be set per pool,
according to the expected usage of that pool.

Signed-off-by: Malay Kumar Parida
---
 controllers/storagecluster/cephblockpools.go  |  6 ++---
 .../storagecluster/cephblockpools_test.go     |  4 +--
 controllers/storagecluster/cephfilesystem.go  |  4 +--
 .../storagecluster/cephobjectstores.go        |  4 +--
 controllers/storagecluster/generate.go        | 26 ++++++++++++++++---
 5 files changed, 31 insertions(+), 13 deletions(-)

diff --git a/controllers/storagecluster/cephblockpools.go b/controllers/storagecluster/cephblockpools.go
index 066e79684f..d5c52b14bf 100644
--- a/controllers/storagecluster/cephblockpools.go
+++ b/controllers/storagecluster/cephblockpools.go
@@ -91,7 +91,7 @@ func (o *ocsCephBlockPools) reconcileCephBlockPool(r *StorageClusterReconciler,
 	cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
 	cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
 	cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-	cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
+	cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data", "block")
 	cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
 
 	// Since provider mode handles mirroring, we only need to handle for internal mode
@@ -151,7 +151,7 @@ func (o *ocsCephBlockPools) reconcileMgrCephBlockPool(r *StorageClusterReconcile
 	cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
 	cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
 	cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-	cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "metadata")
+	cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "metadata", "block")
 	util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")
 
 	return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme)
@@ -199,7 +199,7 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconcile
 	cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
 	cephBlockPool.Spec.EnableCrushUpdates = true
 	cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-	cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
+	cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data", "nfs-block")
 	cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
 	util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")
 
diff --git a/controllers/storagecluster/cephblockpools_test.go b/controllers/storagecluster/cephblockpools_test.go
index b24d2908a0..d67f68ce48 100644
--- a/controllers/storagecluster/cephblockpools_test.go
+++ b/controllers/storagecluster/cephblockpools_test.go
@@ -157,7 +157,7 @@ func assertCephBlockPools(t *testing.T, reconciler StorageClusterReconciler, cr
 				DeviceClass:        cr.Status.DefaultCephDeviceClass,
 				EnableCrushUpdates: true,
 				FailureDomain:      getFailureDomain(cr),
-				Replicated:         generateCephReplicatedSpec(cr, "data"),
+				Replicated:         generateCephReplicatedSpec(cr, "data", "block"),
 				EnableRBDStats:     true,
 			},
 		},
@@ -204,7 +204,7 @@ func assertCephNFSBlockPool(t *testing.T, reconciler StorageClusterReconciler, c
 			DeviceClass:        cr.Status.DefaultCephDeviceClass,
 			EnableCrushUpdates: true,
 			FailureDomain:      getFailureDomain(cr),
-			Replicated:         generateCephReplicatedSpec(cr, "data"),
+			Replicated:         generateCephReplicatedSpec(cr, "data", "nfs-block"),
 			EnableRBDStats:     true,
 		},
 		Name: ".nfs",
diff --git a/controllers/storagecluster/cephfilesystem.go b/controllers/storagecluster/cephfilesystem.go
index 5c451c47b0..fae96d030f 100644
--- a/controllers/storagecluster/cephfilesystem.go
+++ b/controllers/storagecluster/cephfilesystem.go
@@ -31,7 +31,7 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
 		Spec: cephv1.FilesystemSpec{
 			MetadataPool: cephv1.NamedPoolSpec{
 				PoolSpec: cephv1.PoolSpec{
-					Replicated:    generateCephReplicatedSpec(initStorageCluster, "metadata"),
+					Replicated:    generateCephReplicatedSpec(initStorageCluster, "metadata", "file"),
 					FailureDomain: initStorageCluster.Status.FailureDomain,
 				}},
 			MetadataServer: cephv1.MetadataServerSpec{
@@ -288,7 +288,7 @@ func generateDefaultPoolSpec(sc *ocsv1.StorageCluster) cephv1.PoolSpec {
 	return cephv1.PoolSpec{
 		DeviceClass:        sc.Status.DefaultCephDeviceClass,
 		EnableCrushUpdates: true,
-		Replicated:         generateCephReplicatedSpec(sc, "data"),
+		Replicated:         generateCephReplicatedSpec(sc, "data", "file"),
 		FailureDomain:      sc.Status.FailureDomain,
 	}
 }
diff --git a/controllers/storagecluster/cephobjectstores.go b/controllers/storagecluster/cephobjectstores.go
index b63b24a296..9ec6b6a159 100644
--- a/controllers/storagecluster/cephobjectstores.go
+++ b/controllers/storagecluster/cephobjectstores.go
@@ -172,13 +172,13 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
 				DeviceClass:        initData.Status.DefaultCephDeviceClass,
 				EnableCrushUpdates: true,
 				FailureDomain:      initData.Status.FailureDomain,
-				Replicated:         generateCephReplicatedSpec(initData, "data"),
+				Replicated:         generateCephReplicatedSpec(initData, "data", "object"),
 			},
 			MetadataPool: cephv1.PoolSpec{
 				DeviceClass:        initData.Status.DefaultCephDeviceClass,
 				EnableCrushUpdates: true,
 				FailureDomain:      initData.Status.FailureDomain,
-				Replicated:         generateCephReplicatedSpec(initData, "metadata"),
+				Replicated:         generateCephReplicatedSpec(initData, "metadata", "object"),
 			},
 			Gateway: cephv1.GatewaySpec{
 				Port: 80,
diff --git a/controllers/storagecluster/generate.go b/controllers/storagecluster/generate.go
index d273768618..878358d6ad 100644
--- a/controllers/storagecluster/generate.go
+++ b/controllers/storagecluster/generate.go
@@ -136,14 +136,32 @@ func generateNameForCephRbdMirror(initData *ocsv1.StorageCluster) string {
 
 // generateCephReplicatedSpec returns the ReplicatedSpec for the cephCluster
 // based on the StorageCluster configuration
-func generateCephReplicatedSpec(initData *ocsv1.StorageCluster, poolType string) cephv1.ReplicatedSpec {
+func generateCephReplicatedSpec(initData *ocsv1.StorageCluster, poolType string, storageType string) cephv1.ReplicatedSpec {
 	crs := cephv1.ReplicatedSpec{}
 
 	crs.Size = getCephPoolReplicatedSize(initData)
 	crs.ReplicasPerFailureDomain = uint(getReplicasPerFailureDomain(initData))
-	//lint:ignore ST1017 required to compare it directly
-	if "data" == poolType {
-		crs.TargetSizeRatio = .49
+	if poolType == "data" {
"data" { + defaultTargetSizeRatio := 0.49 + var definedSize float64 + + switch storageType { + case "block": + definedSize = initData.Spec.ManagedResources.CephBlockPools.PoolSpec.Replicated.TargetSizeRatio + case "file": + definedSize = initData.Spec.ManagedResources.CephFilesystems.DataPoolSpec.Replicated.TargetSizeRatio + case "object": + definedSize = initData.Spec.ManagedResources.CephObjectStores.DataPoolSpec.Replicated.TargetSizeRatio + default: + // Handle unexpected storageType + definedSize = defaultTargetSizeRatio + } + + if definedSize != 0.0 { + crs.TargetSizeRatio = definedSize + } else { + crs.TargetSizeRatio = defaultTargetSizeRatio + } } return crs