From bd8481287e65b5c2c0bdca8e79fbe5d6802a1d13 Mon Sep 17 00:00:00 2001
From: Malay Kumar Parida
Date: Tue, 28 Jan 2025 23:09:12 +0530
Subject: [PATCH] Use bulk flag for all ODF data pools for performance gain

The bulk flag makes the pg_autoscaler start the pool with the maximum
number of PGs; the autoscaler then decreases the PG count only if PG
usage starts to skew too much. This can improve performance for ODF
users, as the higher PG count allows a greater degree of parallelism
during reads and writes on large clusters.

Signed-off-by: Malay Kumar Parida
---
 controllers/storagecluster/cephblockpools.go      | 3 +++
 controllers/storagecluster/cephblockpools_test.go | 2 ++
 controllers/storagecluster/cephfilesystem.go      | 5 +++++
 controllers/storagecluster/cephfilesystem_test.go | 7 +++++++
 controllers/storagecluster/cephobjectstores.go    | 1 +
 5 files changed, 18 insertions(+)

diff --git a/controllers/storagecluster/cephblockpools.go b/controllers/storagecluster/cephblockpools.go
index 066e79684f..82135d647d 100644
--- a/controllers/storagecluster/cephblockpools.go
+++ b/controllers/storagecluster/cephblockpools.go
@@ -93,6 +93,7 @@ func (o *ocsCephBlockPools) reconcileCephBlockPool(r *StorageClusterReconciler,
 		cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
 		cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
 		cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
+		cephBlockPool.Spec.PoolSpec.Parameters = map[string]string{"bulk": "true"}
 
 		// Since provider mode handles mirroring, we only need to handle for internal mode
 		if storageCluster.Annotations["ocs.openshift.io/deployment-mode"] != "provider" {
@@ -201,6 +202,7 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconcile
 		cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
 		cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
 		cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
+		cephBlockPool.Spec.PoolSpec.Parameters = map[string]string{"bulk": "true"}
 
 		util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")
 		return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme)
@@ -252,6 +254,7 @@ func (o *ocsCephBlockPools) reconcileNonResilientCephBlockPool(r *StorageCluster
 		cephBlockPool.Spec.PoolSpec.Parameters = map[string]string{
 			"pg_num":  "16",
 			"pgp_num": "16",
+			"bulk":    "true",
 		}
 		cephBlockPool.Spec.PoolSpec.Replicated = cephv1.ReplicatedSpec{
 			Size: 1,
diff --git a/controllers/storagecluster/cephblockpools_test.go b/controllers/storagecluster/cephblockpools_test.go
index b24d2908a0..89bf0929a0 100644
--- a/controllers/storagecluster/cephblockpools_test.go
+++ b/controllers/storagecluster/cephblockpools_test.go
@@ -159,6 +159,7 @@ func assertCephBlockPools(t *testing.T, reconciler StorageClusterReconciler, cr
 				FailureDomain:  getFailureDomain(cr),
 				Replicated:     generateCephReplicatedSpec(cr, "data"),
 				EnableRBDStats: true,
+				Parameters:     map[string]string{"bulk": "true"},
 			},
 		},
 	}
@@ -206,6 +207,7 @@ func assertCephNFSBlockPool(t *testing.T, reconciler StorageClusterReconciler, c
 				FailureDomain:  getFailureDomain(cr),
 				Replicated:     generateCephReplicatedSpec(cr, "data"),
 				EnableRBDStats: true,
+				Parameters:     map[string]string{"bulk": "true"},
 			},
 			Name: ".nfs",
 		},
diff --git a/controllers/storagecluster/cephfilesystem.go b/controllers/storagecluster/cephfilesystem.go
index 5c451c47b0..d3a9f11da0 100644
--- a/controllers/storagecluster/cephfilesystem.go
+++ b/controllers/storagecluster/cephfilesystem.go
@@ -80,6 +80,10 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
 		if pool.PoolSpec.FailureDomain == "" {
 			pool.PoolSpec.FailureDomain = defaultPoolSpec.FailureDomain
 		}
+		// Set default parameters if not specified
+		if pool.PoolSpec.Parameters == nil {
+			pool.PoolSpec.Parameters = defaultPoolSpec.Parameters
+		}
 	}
 
 	// set device class for metadata pool from the default data pool
@@ -290,5 +294,6 @@ func generateDefaultPoolSpec(sc *ocsv1.StorageCluster) cephv1.PoolSpec {
 		EnableCrushUpdates: true,
 		Replicated:         generateCephReplicatedSpec(sc, "data"),
 		FailureDomain:      sc.Status.FailureDomain,
+		Parameters:         map[string]string{"bulk": "true"},
 	}
 }
diff --git a/controllers/storagecluster/cephfilesystem_test.go b/controllers/storagecluster/cephfilesystem_test.go
index 41b48e96dc..a8dba684d8 100644
--- a/controllers/storagecluster/cephfilesystem_test.go
+++ b/controllers/storagecluster/cephfilesystem_test.go
@@ -182,6 +182,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
 						ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
 					},
 					FailureDomain: defaultPoolSpec.FailureDomain,
+					Parameters:    defaultPoolSpec.Parameters,
 				},
 			},
 		},
@@ -222,6 +223,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
 						ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
 					},
 					FailureDomain: defaultPoolSpec.FailureDomain,
+					Parameters:    defaultPoolSpec.Parameters,
 				},
 			},
 		},
@@ -261,6 +263,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
 					EnableCrushUpdates: true,
 					Replicated:         defaultPoolSpec.Replicated,
 					FailureDomain:      defaultPoolSpec.FailureDomain,
+					Parameters:         defaultPoolSpec.Parameters,
 				},
 			},
 			{
@@ -270,6 +273,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
 					EnableCrushUpdates: true,
 					Replicated:         defaultPoolSpec.Replicated,
 					FailureDomain:      defaultPoolSpec.FailureDomain,
+					Parameters:         defaultPoolSpec.Parameters,
 				},
 			},
 		},
@@ -323,6 +327,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
 						ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
 					},
 					FailureDomain: defaultPoolSpec.FailureDomain,
+					Parameters:    defaultPoolSpec.Parameters,
 				},
 			},
 			{
@@ -336,6 +341,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
 						ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
 					},
 					FailureDomain: defaultPoolSpec.FailureDomain,
+					Parameters:    defaultPoolSpec.Parameters,
 				},
 			},
 			{
@@ -349,6 +355,7 @@ func TestCephFileSystemDataPools(t *testing.T) {
 						ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
 					},
 					FailureDomain: defaultPoolSpec.FailureDomain,
+					Parameters:    defaultPoolSpec.Parameters,
 				},
 			},
 		},
diff --git a/controllers/storagecluster/cephobjectstores.go b/controllers/storagecluster/cephobjectstores.go
index b63b24a296..2551efdcc0 100644
--- a/controllers/storagecluster/cephobjectstores.go
+++ b/controllers/storagecluster/cephobjectstores.go
@@ -173,6 +173,7 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
 				EnableCrushUpdates: true,
 				FailureDomain:      initData.Status.FailureDomain,
 				Replicated:         generateCephReplicatedSpec(initData, "data"),
+				Parameters:         map[string]string{"bulk": "true"},
 			},
 			MetadataPool: cephv1.PoolSpec{
 				DeviceClass: initData.Status.DefaultCephDeviceClass,
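
Reviewer note (not part of the patch): to make the effect of the change concrete, the snippet below is a minimal, standalone sketch of the kind of spec the reconcilers now produce. The pool name and the main wrapper are hypothetical illustration only; the cephv1 types are the rook API these controllers already import.

package main

import (
	"fmt"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical pool, mirroring what reconcileCephBlockPool sets
	// after this patch.
	pool := cephv1.CephBlockPool{
		ObjectMeta: metav1.ObjectMeta{Name: "example-blockpool"},
		Spec: cephv1.NamedBlockPoolSpec{
			PoolSpec: cephv1.PoolSpec{
				EnableRBDStats: true,
				// "bulk" makes Ceph's pg_autoscaler start the pool at the
				// maximum PG count and scale down only if usage skews.
				Parameters: map[string]string{"bulk": "true"},
			},
		},
	}
	fmt.Printf("pool %s parameters: %v\n", pool.Name, pool.Spec.PoolSpec.Parameters)
}

One design consequence of the CephFilesystem defaulting above is worth noting: newCephFilesystemInstances copies defaultPoolSpec.Parameters only when pool.PoolSpec.Parameters is nil, so a user who sets any Parameters map on a data pool, even one that does not mention bulk, opts that pool out of the bulk default entirely.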