Enable specifying target size ratio for block, file and object pools
Configuring the target size ratio enables Ceph to adjust PG counts
based on the anticipated usage of the pools. Currently all the data
pools (RBD/CephFS/object) have a target_size_ratio of 0.49. Using the
same ratio for every data pool under-allocates PGs for some pools and
over-allocates them for others. With this change, the target size
ratio can be set per pool according to its expected usage.

Signed-off-by: Malay Kumar Parida <mparida@redhat.com>
malayparida2000 committed Jan 29, 2025
1 parent 74dd59c commit d1fe711
Showing 5 changed files with 31 additions and 13 deletions.
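
For reference, the per-pool ratios that this change starts honoring are read from the StorageCluster spec. The sketch below is illustrative only and not part of the commit: the field paths are the ones dereferenced in generate.go further down, while the import path and example values are assumptions. The ratios express the relative anticipated usage of the data pools, so a block-heavy cluster can weight the RBD pool above the CephFS and object pools.

package main

import (
	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1" // assumed import path for the StorageCluster API
)

func main() {
	sc := &ocsv1.StorageCluster{}

	// Relative anticipated usage of the three data pools (example values only).
	sc.Spec.ManagedResources.CephBlockPools.PoolSpec.Replicated.TargetSizeRatio = 0.7       // RBD block pool
	sc.Spec.ManagedResources.CephFilesystems.DataPoolSpec.Replicated.TargetSizeRatio = 0.2  // CephFS data pool
	sc.Spec.ManagedResources.CephObjectStores.DataPoolSpec.Replicated.TargetSizeRatio = 0.1 // RGW data pool

	_ = sc // in practice these fields are set on the StorageCluster CR rather than built in Go
}
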
controllers/storagecluster/cephblockpools.go (3 additions & 3 deletions)
@@ -91,7 +91,7 @@ func (o *ocsCephBlockPools) reconcileCephBlockPool(r *StorageClusterReconciler,
cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
+cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data", "block")
cephBlockPool.Spec.PoolSpec.EnableRBDStats = true

// Since provider mode handles mirroring, we only need to handle for internal mode
@@ -151,7 +151,7 @@ func (o *ocsCephBlockPools) reconcileMgrCephBlockPool(r *StorageClusterReconcile
cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "metadata")
+cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "metadata", "block")
util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")

return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme)
@@ -199,7 +199,7 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconcile
cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
cephBlockPool.Spec.EnableCrushUpdates = true
cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
+cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data", "nfs-block")
cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")

controllers/storagecluster/cephblockpools_test.go (2 additions & 2 deletions)
@@ -157,7 +157,7 @@ func assertCephBlockPools(t *testing.T, reconciler StorageClusterReconciler, cr
DeviceClass: cr.Status.DefaultCephDeviceClass,
EnableCrushUpdates: true,
FailureDomain: getFailureDomain(cr),
-Replicated: generateCephReplicatedSpec(cr, "data"),
+Replicated: generateCephReplicatedSpec(cr, "data", "block"),
EnableRBDStats: true,
},
},
@@ -204,7 +204,7 @@ func assertCephNFSBlockPool(t *testing.T, reconciler StorageClusterReconciler, c
DeviceClass: cr.Status.DefaultCephDeviceClass,
EnableCrushUpdates: true,
FailureDomain: getFailureDomain(cr),
-Replicated: generateCephReplicatedSpec(cr, "data"),
+Replicated: generateCephReplicatedSpec(cr, "data", "nfs-block"),
EnableRBDStats: true,
},
Name: ".nfs",
controllers/storagecluster/cephfilesystem.go (2 additions & 2 deletions)
@@ -31,7 +31,7 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
Spec: cephv1.FilesystemSpec{
MetadataPool: cephv1.NamedPoolSpec{
PoolSpec: cephv1.PoolSpec{
-Replicated: generateCephReplicatedSpec(initStorageCluster, "metadata"),
+Replicated: generateCephReplicatedSpec(initStorageCluster, "metadata", "file"),
FailureDomain: initStorageCluster.Status.FailureDomain,
}},
MetadataServer: cephv1.MetadataServerSpec{
@@ -288,7 +288,7 @@ func generateDefaultPoolSpec(sc *ocsv1.StorageCluster) cephv1.PoolSpec {
return cephv1.PoolSpec{
DeviceClass: sc.Status.DefaultCephDeviceClass,
EnableCrushUpdates: true,
-Replicated: generateCephReplicatedSpec(sc, "data"),
+Replicated: generateCephReplicatedSpec(sc, "data", "file"),
FailureDomain: sc.Status.FailureDomain,
}
}
controllers/storagecluster/cephobjectstores.go (2 additions & 2 deletions)
@@ -172,13 +172,13 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
DeviceClass: initData.Status.DefaultCephDeviceClass,
EnableCrushUpdates: true,
FailureDomain: initData.Status.FailureDomain,
-Replicated: generateCephReplicatedSpec(initData, "data"),
+Replicated: generateCephReplicatedSpec(initData, "data", "object"),
},
MetadataPool: cephv1.PoolSpec{
DeviceClass: initData.Status.DefaultCephDeviceClass,
EnableCrushUpdates: true,
FailureDomain: initData.Status.FailureDomain,
-Replicated: generateCephReplicatedSpec(initData, "metadata"),
+Replicated: generateCephReplicatedSpec(initData, "metadata", "object"),
},
Gateway: cephv1.GatewaySpec{
Port: 80,
controllers/storagecluster/generate.go (22 additions & 4 deletions)
@@ -136,14 +136,32 @@ func generateNameForCephRbdMirror(initData *ocsv1.StorageCluster) string {

// generateCephReplicatedSpec returns the ReplicatedSpec for the cephCluster
// based on the StorageCluster configuration
-func generateCephReplicatedSpec(initData *ocsv1.StorageCluster, poolType string) cephv1.ReplicatedSpec {
+func generateCephReplicatedSpec(initData *ocsv1.StorageCluster, poolType string, storageType string) cephv1.ReplicatedSpec {
crs := cephv1.ReplicatedSpec{}

crs.Size = getCephPoolReplicatedSize(initData)
crs.ReplicasPerFailureDomain = uint(getReplicasPerFailureDomain(initData))
-//lint:ignore ST1017 required to compare it directly
-if "data" == poolType {
-crs.TargetSizeRatio = .49
+if poolType == "data" {
+defaultTargetSizeRatio := 0.49
+var definedSize float64
+
+switch storageType {
+case "block":
+definedSize = initData.Spec.ManagedResources.CephBlockPools.PoolSpec.Replicated.TargetSizeRatio
+case "file":
+definedSize = initData.Spec.ManagedResources.CephFilesystems.DataPoolSpec.Replicated.TargetSizeRatio
+case "object":
+definedSize = initData.Spec.ManagedResources.CephObjectStores.DataPoolSpec.Replicated.TargetSizeRatio
+default:
+// Handle unexpected storageType
+definedSize = defaultTargetSizeRatio
+}
+
+if definedSize != 0.0 {
+crs.TargetSizeRatio = definedSize
+} else {
+crs.TargetSizeRatio = defaultTargetSizeRatio
+}
}

return crs
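
A hypothetical test sketch of the new defaulting behavior (not part of this commit): when only the block ratio is configured, that value is used for the RBD data pool, the unset file and object ratios fall back to 0.49, and metadata pools still get no target size ratio. The ocsv1 import path is an assumption, and the sketch relies on the package's existing size/replica helpers tolerating a zero-value StorageCluster.

// controllers/storagecluster/generate_test.go (sketch)
package storagecluster

import (
	"testing"

	"github.com/stretchr/testify/assert"

	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1" // assumed import path
)

func TestGenerateCephReplicatedSpecTargetSizeRatio(t *testing.T) {
	sc := &ocsv1.StorageCluster{}
	// Only the RBD data pool ratio is configured; CephFS and RGW are left unset (0).
	sc.Spec.ManagedResources.CephBlockPools.PoolSpec.Replicated.TargetSizeRatio = 0.7

	assert.Equal(t, 0.7, generateCephReplicatedSpec(sc, "data", "block").TargetSizeRatio)     // configured value wins
	assert.Equal(t, 0.49, generateCephReplicatedSpec(sc, "data", "file").TargetSizeRatio)     // unset, falls back to the default
	assert.Equal(t, 0.49, generateCephReplicatedSpec(sc, "data", "object").TargetSizeRatio)   // unset, falls back to the default
	assert.Equal(t, 0.0, generateCephReplicatedSpec(sc, "metadata", "block").TargetSizeRatio) // metadata pools get no ratio
}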
