Use bulk flag for all odf data pools for performance gain
The bulk flag makes the PG autoscaler start with the maximum number of PGs,
and only decrease the PG count later if PG usage starts to skew too much.
This could improve performance for ODF users thanks to the greater amount of
read/write parallelism on large clusters.

Signed-off-by: Malay Kumar Parida <mparida@redhat.com>
malayparida2000 committed Jan 28, 2025
1 parent edb31a8 commit bd84812
Showing 5 changed files with 18 additions and 0 deletions.
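
For context, a minimal sketch (not part of this commit) of how the flag travels: the operator sets "bulk" in cephv1.PoolSpec.Parameters, and Rook applies each entry of that map to the underlying Ceph pool, which is equivalent to running `ceph osd pool set <pool> bulk true` by hand. The pool values below are illustrative only.

package main

import (
	"fmt"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
)

func main() {
	// Data pools carry the bulk flag so the PG autoscaler starts at the
	// maximum PG count instead of growing it gradually.
	dataPool := cephv1.PoolSpec{
		FailureDomain: "host",
		Parameters:    map[string]string{"bulk": "true"},
	}

	// Metadata pools are left without the flag; they stay small and do not
	// need a large initial PG count.
	metadataPool := cephv1.PoolSpec{
		FailureDomain: "host",
	}

	fmt.Println("data pool parameters:", dataPool.Parameters)
	fmt.Println("metadata pool parameters:", metadataPool.Parameters)
}
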
3 changes: 3 additions & 0 deletions controllers/storagecluster/cephblockpools.go
@@ -93,6 +93,7 @@ func (o *ocsCephBlockPools) reconcileCephBlockPool(r *StorageClusterReconciler,
cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
cephBlockPool.Spec.PoolSpec.Parameters = map[string]string{"bulk": "true"}

// Since provider mode handles mirroring, we only need to handle for internal mode
if storageCluster.Annotations["ocs.openshift.io/deployment-mode"] != "provider" {
@@ -201,6 +202,7 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconcile
cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
cephBlockPool.Spec.PoolSpec.Parameters = map[string]string{"bulk": "true"}
util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")

return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme)
@@ -252,6 +254,7 @@ func (o *ocsCephBlockPools) reconcileNonResilientCephBlockPool(r *StorageCluster
cephBlockPool.Spec.PoolSpec.Parameters = map[string]string{
"pg_num": "16",
"pgp_num": "16",
"bulk": "true",
}
cephBlockPool.Spec.PoolSpec.Replicated = cephv1.ReplicatedSpec{
Size: 1,
2 changes: 2 additions & 0 deletions controllers/storagecluster/cephblockpools_test.go
@@ -159,6 +159,7 @@ func assertCephBlockPools(t *testing.T, reconciler StorageClusterReconciler, cr
FailureDomain: getFailureDomain(cr),
Replicated: generateCephReplicatedSpec(cr, "data"),
EnableRBDStats: true,
Parameters: map[string]string{"bulk": "true"},
},
},
}
@@ -206,6 +207,7 @@ func assertCephNFSBlockPool(t *testing.T, reconciler StorageClusterReconciler, c
FailureDomain: getFailureDomain(cr),
Replicated: generateCephReplicatedSpec(cr, "data"),
EnableRBDStats: true,
Parameters: map[string]string{"bulk": "true"},
},
Name: ".nfs",
},
5 changes: 5 additions & 0 deletions controllers/storagecluster/cephfilesystem.go
@@ -80,6 +80,10 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
if pool.PoolSpec.FailureDomain == "" {
pool.PoolSpec.FailureDomain = defaultPoolSpec.FailureDomain
}
// Set default parameters if not specified
if pool.PoolSpec.Parameters == nil {
pool.PoolSpec.Parameters = defaultPoolSpec.Parameters
}
}

// set device class for metadata pool from the default data pool
@@ -290,5 +294,6 @@ func generateDefaultPoolSpec(sc *ocsv1.StorageCluster) cephv1.PoolSpec {
EnableCrushUpdates: true,
Replicated: generateCephReplicatedSpec(sc, "data"),
FailureDomain: sc.Status.FailureDomain,
Parameters: map[string]string{"bulk": "true"},
}
}
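
A minimal sketch (not part of this commit) of the defaulting pattern in the filesystem hunk above: the default Parameters map, which carries "bulk": "true", is applied only when a data pool does not already define its own Parameters, so explicit per-pool settings are left untouched.

package main

import "fmt"

// applyDefaultParameters mirrors the nil check above: the defaults (including
// "bulk": "true") are used only when the pool defines no Parameters of its own.
func applyDefaultParameters(poolParams, defaults map[string]string) map[string]string {
	if poolParams == nil {
		return defaults
	}
	return poolParams
}

func main() {
	defaults := map[string]string{"bulk": "true"}
	fmt.Println(applyDefaultParameters(nil, defaults))                                // map[bulk:true]
	fmt.Println(applyDefaultParameters(map[string]string{"bulk": "false"}, defaults)) // map[bulk:false]
}
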
7 changes: 7 additions & 0 deletions controllers/storagecluster/cephfilesystem_test.go
@@ -182,6 +182,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
},
FailureDomain: defaultPoolSpec.FailureDomain,
Parameters: defaultPoolSpec.Parameters,
},
},
},
@@ -222,6 +223,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
},
FailureDomain: defaultPoolSpec.FailureDomain,
Parameters: defaultPoolSpec.Parameters,
},
},
},
@@ -261,6 +263,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
EnableCrushUpdates: true,
Replicated: defaultPoolSpec.Replicated,
FailureDomain: defaultPoolSpec.FailureDomain,
Parameters: defaultPoolSpec.Parameters,
},
},
{
@@ -270,6 +273,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
EnableCrushUpdates: true,
Replicated: defaultPoolSpec.Replicated,
FailureDomain: defaultPoolSpec.FailureDomain,
Parameters: defaultPoolSpec.Parameters,
},
},
},
@@ -323,6 +327,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
},
FailureDomain: defaultPoolSpec.FailureDomain,
Parameters: defaultPoolSpec.Parameters,
},
},
{
@@ -336,6 +341,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
},
FailureDomain: defaultPoolSpec.FailureDomain,
Parameters: defaultPoolSpec.Parameters,
},
},
{
@@ -349,6 +355,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
},
FailureDomain: defaultPoolSpec.FailureDomain,
Parameters: defaultPoolSpec.Parameters,
},
},
},
1 change: 1 addition & 0 deletions controllers/storagecluster/cephobjectstores.go
@@ -173,6 +173,7 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
EnableCrushUpdates: true,
FailureDomain: initData.Status.FailureDomain,
Replicated: generateCephReplicatedSpec(initData, "data"),
Parameters: map[string]string{"bulk": "true"},
},
MetadataPool: cephv1.PoolSpec{
DeviceClass: initData.Status.DefaultCephDeviceClass,
