Enable specifying target size ratio for block, file and object pools
Signed-off-by: Malay Kumar Parida <[email protected]>
malayparida2000 committed Jan 28, 2025
1 parent bc1743a commit 926cd1f
Showing 5 changed files with 31 additions and 13 deletions.
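
The change wires three user-facing knobs from the StorageCluster spec into the Ceph pool reconcilers. Below is a minimal sketch of setting them, assuming the ocsv1 import path (not shown in this diff) and that the ManagedResources fields are plain value structs, as the unguarded reads in generate.go below suggest:

package main

import (
	"fmt"

	// Import path is an assumption; adjust to the ocs-operator API module in use.
	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
)

func main() {
	sc := &ocsv1.StorageCluster{}

	// Reserve roughly 60% of cluster capacity for the RBD block data pool
	// instead of the 0.49 default applied by generateCephReplicatedSpec.
	sc.Spec.ManagedResources.CephBlockPools.PoolSpec.Replicated.TargetSizeRatio = 0.6

	// The CephFS and RGW data pools get analogous fields.
	sc.Spec.ManagedResources.CephFilesystems.DataPoolSpec.Replicated.TargetSizeRatio = 0.2
	sc.Spec.ManagedResources.CephObjectStores.DataPoolSpec.Replicated.TargetSizeRatio = 0.1

	fmt.Printf("block data pool target size ratio: %v\n",
		sc.Spec.ManagedResources.CephBlockPools.PoolSpec.Replicated.TargetSizeRatio)
}

Leaving a field at its zero value keeps the previous hard-coded behavior, as the fallback logic in generate.go shows.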
6 changes: 3 additions & 3 deletions controllers/storagecluster/cephblockpools.go
@@ -91,7 +91,7 @@ func (o *ocsCephBlockPools) reconcileCephBlockPool(r *StorageClusterReconciler,
 	cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
 	cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
 	cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-	cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
+	cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data", "block")
 	cephBlockPool.Spec.PoolSpec.EnableRBDStats = true

 	// Since provider mode handles mirroring, we only need to handle for internal mode
@@ -151,7 +151,7 @@ func (o *ocsCephBlockPools) reconcileMgrCephBlockPool(r *StorageClusterReconciler,
 	cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
 	cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
 	cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-	cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "metadata")
+	cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "metadata", "block")
 	util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")

 	return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme)
@@ -199,7 +199,7 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconciler,
 	cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
 	cephBlockPool.Spec.EnableCrushUpdates = true
 	cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
-	cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
+	cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data", "nfs-block")
 	cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
 	util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")

4 changes: 2 additions & 2 deletions controllers/storagecluster/cephblockpools_test.go
@@ -157,7 +157,7 @@ func assertCephBlockPools(t *testing.T, reconciler StorageClusterReconciler, cr
 				DeviceClass:        cr.Status.DefaultCephDeviceClass,
 				EnableCrushUpdates: true,
 				FailureDomain:      getFailureDomain(cr),
-				Replicated:         generateCephReplicatedSpec(cr, "data"),
+				Replicated:         generateCephReplicatedSpec(cr, "data", "block"),
 				EnableRBDStats:     true,
 			},
 		},
@@ -204,7 +204,7 @@ func assertCephNFSBlockPool(t *testing.T, reconciler StorageClusterReconciler, c
 			DeviceClass:        cr.Status.DefaultCephDeviceClass,
 			EnableCrushUpdates: true,
 			FailureDomain:      getFailureDomain(cr),
-			Replicated:         generateCephReplicatedSpec(cr, "data"),
+			Replicated:         generateCephReplicatedSpec(cr, "data", "nfs-block"),
 			EnableRBDStats:     true,
 		},
 		Name: ".nfs",
4 changes: 2 additions & 2 deletions controllers/storagecluster/cephfilesystem.go
@@ -31,7 +31,7 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
 		Spec: cephv1.FilesystemSpec{
 			MetadataPool: cephv1.NamedPoolSpec{
 				PoolSpec: cephv1.PoolSpec{
-					Replicated:    generateCephReplicatedSpec(initStorageCluster, "metadata"),
+					Replicated:    generateCephReplicatedSpec(initStorageCluster, "metadata", "file"),
 					FailureDomain: initStorageCluster.Status.FailureDomain,
 				}},
 			MetadataServer: cephv1.MetadataServerSpec{
@@ -288,7 +288,7 @@ func generateDefaultPoolSpec(sc *ocsv1.StorageCluster) cephv1.PoolSpec {
 	return cephv1.PoolSpec{
 		DeviceClass:        sc.Status.DefaultCephDeviceClass,
 		EnableCrushUpdates: true,
-		Replicated:         generateCephReplicatedSpec(sc, "data"),
+		Replicated:         generateCephReplicatedSpec(sc, "data", "file"),
 		FailureDomain:      sc.Status.FailureDomain,
 	}
 }
4 changes: 2 additions & 2 deletions controllers/storagecluster/cephobjectstores.go
@@ -172,13 +172,13 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.StorageCluster
 			DeviceClass:        initData.Status.DefaultCephDeviceClass,
 			EnableCrushUpdates: true,
 			FailureDomain:      initData.Status.FailureDomain,
-			Replicated:         generateCephReplicatedSpec(initData, "data"),
+			Replicated:         generateCephReplicatedSpec(initData, "data", "object"),
 		},
 		MetadataPool: cephv1.PoolSpec{
 			DeviceClass:        initData.Status.DefaultCephDeviceClass,
 			EnableCrushUpdates: true,
 			FailureDomain:      initData.Status.FailureDomain,
-			Replicated:         generateCephReplicatedSpec(initData, "metadata"),
+			Replicated:         generateCephReplicatedSpec(initData, "metadata", "object"),
 		},
 		Gateway: cephv1.GatewaySpec{
 			Port: 80,
26 changes: 22 additions & 4 deletions controllers/storagecluster/generate.go
@@ -136,14 +136,32 @@ func generateNameForCephRbdMirror(initData *ocsv1.StorageCluster) string {

 // generateCephReplicatedSpec returns the ReplicatedSpec for the cephCluster
 // based on the StorageCluster configuration
-func generateCephReplicatedSpec(initData *ocsv1.StorageCluster, poolType string) cephv1.ReplicatedSpec {
+func generateCephReplicatedSpec(initData *ocsv1.StorageCluster, poolType string, storageType string) cephv1.ReplicatedSpec {
 	crs := cephv1.ReplicatedSpec{}

 	crs.Size = getCephPoolReplicatedSize(initData)
 	crs.ReplicasPerFailureDomain = uint(getReplicasPerFailureDomain(initData))
-	//lint:ignore ST1017 required to compare it directly
-	if "data" == poolType {
-		crs.TargetSizeRatio = .49
+	if poolType == "data" {
+		defaultTargetSizeRatio := 0.49
+		var definedSize float64
+
+		switch storageType {
+		case "block":
+			definedSize = initData.Spec.ManagedResources.CephBlockPools.PoolSpec.Replicated.TargetSizeRatio
+		case "file":
+			definedSize = initData.Spec.ManagedResources.CephFilesystems.DataPoolSpec.Replicated.TargetSizeRatio
+		case "object":
+			definedSize = initData.Spec.ManagedResources.CephObjectStores.DataPoolSpec.Replicated.TargetSizeRatio
+		default:
+			// Handle unexpected storageType
+			definedSize = defaultTargetSizeRatio
+		}
+
+		if definedSize != 0.0 {
+			crs.TargetSizeRatio = definedSize
+		} else {
+			crs.TargetSizeRatio = defaultTargetSizeRatio
+		}
 	}

 	return crs
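
Taken together, the new logic only affects pools created with poolType "data": a non-zero user-specified ratio wins, the "nfs-block" storage type matches no case and always falls back to 0.49, and metadata pools still receive no target size ratio at all. A test-style sketch of the resolution, assuming it compiles alongside generate.go in package storagecluster and that the replica-size helpers tolerate an otherwise-empty StorageCluster:

func ExampleTargetSizeRatio() {
	sc := &ocsv1.StorageCluster{}

	// Nothing set: data pools fall back to the 0.49 default.
	fmt.Println(generateCephReplicatedSpec(sc, "data", "block").TargetSizeRatio) // 0.49

	// A user-specified, non-zero ratio wins for the matching storage type.
	sc.Spec.ManagedResources.CephBlockPools.PoolSpec.Replicated.TargetSizeRatio = 0.8
	fmt.Println(generateCephReplicatedSpec(sc, "data", "block").TargetSizeRatio) // 0.8

	// "nfs-block" matches no case, so the NFS pool always uses the default.
	fmt.Println(generateCephReplicatedSpec(sc, "data", "nfs-block").TargetSizeRatio) // 0.49

	// Metadata pools never receive a target size ratio.
	fmt.Println(generateCephReplicatedSpec(sc, "metadata", "block").TargetSizeRatio) // 0
}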
