Use bulk flag for all ODF data pools for performance gain
The bulk flag makes the PG autoscaler start each pool with the maximum
number of PGs and then decrease the PG count only if PG usage starts to
skew too much. This could improve performance for ODF users because of
the greater amount of read/write parallelism on large clusters.

Signed-off-by: Malay Kumar Parida <[email protected]>
malayparida2000 committed Jan 28, 2025
1 parent edb31a8 commit b521570
Showing 3 changed files with 5 additions and 0 deletions.
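
All three files below apply the same one-line change: the data pool spec's Parameters map gains a "bulk": "true" entry. As a standalone illustration only (not code from this commit), a minimal sketch assuming Rook's cephv1 API types could look like the following; the helper name newBulkDataPoolSpec is hypothetical.

package main

import (
	"fmt"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
)

// newBulkDataPoolSpec (hypothetical helper, not part of the commit) builds a
// pool spec carrying the "bulk" parameter, mirroring the pattern the three
// files below apply to their data pools.
func newBulkDataPoolSpec(failureDomain string, replicated cephv1.ReplicatedSpec) cephv1.PoolSpec {
	return cephv1.PoolSpec{
		FailureDomain:  failureDomain,
		Replicated:     replicated,
		EnableRBDStats: true,
		// The pg_autoscaler treats a pool with this flag as "bulk": it starts
		// at the maximum PG count and only scales down if usage skews.
		Parameters: map[string]string{"bulk": "true"},
	}
}

func main() {
	spec := newBulkDataPoolSpec("host", cephv1.ReplicatedSpec{Size: 3})
	fmt.Println(spec.Parameters["bulk"]) // prints: true
}

On a running cluster, the effect of the flag should be visible in the BULK column of ceph osd pool autoscale-status.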
3 changes: 3 additions & 0 deletions controllers/storagecluster/cephblockpools.go
@@ -93,6 +93,7 @@ func (o *ocsCephBlockPools) reconcileCephBlockPool(r *StorageClusterReconciler,
cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
+cephBlockPool.Spec.PoolSpec.Parameters = map[string]string{"bulk": "true"}

// Since provider mode handles mirroring, we only need to handle for internal mode
if storageCluster.Annotations["ocs.openshift.io/deployment-mode"] != "provider" {
@@ -201,6 +202,7 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconcile
cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
+cephBlockPool.Spec.PoolSpec.Parameters = map[string]string{"bulk": "true"}
util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")

return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme)
@@ -252,6 +254,7 @@ func (o *ocsCephBlockPools) reconcileNonResilientCephBlockPool(r *StorageCluster
cephBlockPool.Spec.PoolSpec.Parameters = map[string]string{
"pg_num": "16",
"pgp_num": "16",
"bulk": "true",
}
cephBlockPool.Spec.PoolSpec.Replicated = cephv1.ReplicatedSpec{
Size: 1,
1 change: 1 addition & 0 deletions controllers/storagecluster/cephfilesystem.go
@@ -290,5 +290,6 @@ func generateDefaultPoolSpec(sc *ocsv1.StorageCluster) cephv1.PoolSpec {
EnableCrushUpdates: true,
Replicated: generateCephReplicatedSpec(sc, "data"),
FailureDomain: sc.Status.FailureDomain,
+Parameters: map[string]string{"bulk": "true"},
}
}
1 change: 1 addition & 0 deletions controllers/storagecluster/cephobjectstores.go
@@ -173,6 +173,7 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
EnableCrushUpdates: true,
FailureDomain: initData.Status.FailureDomain,
Replicated: generateCephReplicatedSpec(initData, "data"),
+Parameters: map[string]string{"bulk": "true"},
},
MetadataPool: cephv1.PoolSpec{
DeviceClass: initData.Status.DefaultCephDeviceClass,
