Commit e5f8d92
Merge pull request #2979 from malayparida2000/target_size_ratio
Allow specifying poolSpec for default block, file, object pools from storageCluster
openshift-merge-bot[bot] authored Feb 14, 2025
2 parents 9edaa53 + 427385d commit e5f8d92
Showing 16 changed files with 1,348 additions and 55 deletions.
4 changes: 4 additions & 0 deletions api/v1/storagecluster_types.go
@@ -239,6 +239,8 @@ type ManageCephBlockPools struct {
// +kubebuilder:validation:MaxLength=253
// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
VirtualizationStorageClassName string `json:"virtualizationStorageClassName,omitempty"`
// PoolSpec specifies the pool specification for the default cephBlockPool
PoolSpec rookCephv1.PoolSpec `json:"poolSpec,omitempty"`
}

// ManageCephNonResilientPools defines how to reconcile ceph non-resilient pools
@@ -287,6 +289,8 @@ type ManageCephObjectStores struct {
// +kubebuilder:validation:MaxLength=253
// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
StorageClassName string `json:"storageClassName,omitempty"`
// DataPoolSpec specifies the pool specification for the default cephObjectStore data pool
DataPoolSpec rookCephv1.PoolSpec `json:"dataPoolSpec,omitempty"`
}

// ManageCephObjectStoreUsers defines how to reconcile CephObjectStoreUsers
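
For orientation, here is a minimal Go sketch of what the two new API fields let a caller express on the StorageCluster CR. Only the field names (ManagedResources.CephBlockPools.PoolSpec and ManagedResources.CephObjectStores.DataPoolSpec) come from the diff above; the import paths, aliases, and concrete values are illustrative assumptions, and any field left empty is filled in later by the operator's defaulting shown further down in this commit.

package main

import (
	"fmt"

	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"

	// Assumed module path for this repo's API package.
	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
)

func main() {
	sc := &ocsv1.StorageCluster{}

	// Override the default CephBlockPool spec; empty fields (device class,
	// failure domain, target size ratio) are later defaulted by the operator.
	sc.Spec.ManagedResources.CephBlockPools.PoolSpec = rookCephv1.PoolSpec{
		Replicated: rookCephv1.ReplicatedSpec{
			Size:                     2,
			ReplicasPerFailureDomain: 1,
		},
	}

	// Likewise for the default CephObjectStore data pool.
	sc.Spec.ManagedResources.CephObjectStores.DataPoolSpec = rookCephv1.PoolSpec{
		DeviceClass: "ssd",
	}

	fmt.Println(sc.Spec.ManagedResources.CephBlockPools.PoolSpec.Replicated.Size)
}
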
4 changes: 3 additions & 1 deletion api/v1/zz_generated.deepcopy.go

Generated file; diff not rendered.

424 changes: 424 additions & 0 deletions config/crd/bases/ocs.openshift.io_storageclusters.yaml

Large diffs are not rendered by default.

14 changes: 8 additions & 6 deletions controllers/storagecluster/cephblockpools.go
@@ -88,10 +88,12 @@ func (o *ocsCephBlockPools) reconcileCephBlockPool(r *StorageClusterReconciler,
}

_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, cephBlockPool, func() error {
cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
// Pass the poolSpec from the storageCluster CR

cephBlockPool.Spec.PoolSpec = storageCluster.Spec.ManagedResources.CephBlockPools.PoolSpec

// Set default values in the poolSpec as necessary
setDefaultDataPoolSpec(&cephBlockPool.Spec.PoolSpec, storageCluster)
cephBlockPool.Spec.PoolSpec.EnableRBDStats = true

// Since provider mode handles mirroring, we only need to handle for internal mode
@@ -151,7 +153,7 @@ func (o *ocsCephBlockPools) reconcileMgrCephBlockPool(r *StorageClusterReconcile
cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
cephBlockPool.Spec.PoolSpec.EnableCrushUpdates = true
cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "metadata")
cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, poolTypeMetadata)
util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")

return controllerutil.SetControllerReference(storageCluster, cephBlockPool, r.Scheme)
@@ -199,7 +201,7 @@ func (o *ocsCephBlockPools) reconcileNFSCephBlockPool(r *StorageClusterReconcile
cephBlockPool.Spec.PoolSpec.DeviceClass = storageCluster.Status.DefaultCephDeviceClass
cephBlockPool.Spec.EnableCrushUpdates = true
cephBlockPool.Spec.PoolSpec.FailureDomain = getFailureDomain(storageCluster)
cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, "data")
cephBlockPool.Spec.PoolSpec.Replicated = generateCephReplicatedSpec(storageCluster, poolTypeMetadata)
cephBlockPool.Spec.PoolSpec.EnableRBDStats = true
util.AddLabel(cephBlockPool, util.ForbidMirroringLabel, "true")

4 changes: 2 additions & 2 deletions controllers/storagecluster/cephblockpools_test.go
@@ -157,7 +157,7 @@ func assertCephBlockPools(t *testing.T, reconciler StorageClusterReconciler, cr
DeviceClass: cr.Status.DefaultCephDeviceClass,
EnableCrushUpdates: true,
FailureDomain: getFailureDomain(cr),
Replicated: generateCephReplicatedSpec(cr, "data"),
Replicated: generateCephReplicatedSpec(cr, poolTypeData),
EnableRBDStats: true,
},
},
@@ -204,7 +204,7 @@ func assertCephNFSBlockPool(t *testing.T, reconciler StorageClusterReconciler, c
DeviceClass: cr.Status.DefaultCephDeviceClass,
EnableCrushUpdates: true,
FailureDomain: getFailureDomain(cr),
Replicated: generateCephReplicatedSpec(cr, "data"),
Replicated: generateCephReplicatedSpec(cr, poolTypeMetadata),
EnableRBDStats: true,
},
Name: ".nfs",
28 changes: 28 additions & 0 deletions controllers/storagecluster/cephcluster.go
@@ -48,6 +48,11 @@ const (
diskSpeedFast diskSpeed = "fast"
)

const (
poolTypeData = "data"
poolTypeMetadata = "metadata"
)

type knownDiskType struct {
speed diskSpeed
provisioner StorageClassProvisionerType
@@ -1422,3 +1427,26 @@ func isEncrptionSettingUpdated(clusterWideEncrytion bool, existingDeviceSet []ro
}
return false
}

// setDefaultDataPoolSpec sets the common pool spec for all data pools as necessary
func setDefaultDataPoolSpec(poolSpec *rookCephv1.PoolSpec, sc *ocsv1.StorageCluster) {
poolSpec.EnableCrushUpdates = true
if poolSpec.DeviceClass == "" {
poolSpec.DeviceClass = sc.Status.DefaultCephDeviceClass
}
if poolSpec.FailureDomain == "" {
poolSpec.FailureDomain = getFailureDomain(sc)
}
// Set default replication settings if necessary
// Always set the default Size & ReplicasPerFailureDomain in arbiter mode
defaultReplicatedSpec := generateCephReplicatedSpec(sc, poolTypeData)
if poolSpec.Replicated.Size == 0 || arbiterEnabled(sc) {
poolSpec.Replicated.Size = defaultReplicatedSpec.Size
}
if poolSpec.Replicated.ReplicasPerFailureDomain == 0 || arbiterEnabled(sc) {
poolSpec.Replicated.ReplicasPerFailureDomain = defaultReplicatedSpec.ReplicasPerFailureDomain
}
if poolSpec.Replicated.TargetSizeRatio == 0.0 {
poolSpec.Replicated.TargetSizeRatio = defaultReplicatedSpec.TargetSizeRatio
}
}
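
A test-style sketch of the defaulting behaviour introduced by setDefaultDataPoolSpec: user-provided fields are preserved, while empty ones fall back to cluster-derived defaults. This is a sketch only; it assumes it lives in the controllers/storagecluster package, uses the testify assert package already used by this repo's tests, and reuses the rookCephv1/ocsv1 aliases from the file above (import paths are assumptions).

package storagecluster

import (
	"testing"

	"github.com/stretchr/testify/assert"
	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"

	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
)

func TestSetDefaultDataPoolSpecSketch(t *testing.T) {
	sc := &ocsv1.StorageCluster{}
	sc.Status.DefaultCephDeviceClass = "ssd"
	sc.Status.FailureDomain = "zone"

	poolSpec := rookCephv1.PoolSpec{
		// User override that must survive defaulting.
		Replicated: rookCephv1.ReplicatedSpec{Size: 2},
	}
	setDefaultDataPoolSpec(&poolSpec, sc)

	assert.Equal(t, "ssd", poolSpec.DeviceClass)            // defaulted from cluster status
	assert.Equal(t, uint(2), poolSpec.Replicated.Size)      // user value preserved
	assert.True(t, poolSpec.Replicated.TargetSizeRatio > 0) // defaulted for data pools
}
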
38 changes: 4 additions & 34 deletions controllers/storagecluster/cephfilesystem.go
@@ -31,7 +31,7 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
Spec: cephv1.FilesystemSpec{
MetadataPool: cephv1.NamedPoolSpec{
PoolSpec: cephv1.PoolSpec{
Replicated: generateCephReplicatedSpec(initStorageCluster, "metadata"),
Replicated: generateCephReplicatedSpec(initStorageCluster, poolTypeMetadata),
FailureDomain: initStorageCluster.Status.FailureDomain,
}},
MetadataServer: cephv1.MetadataServerSpec{
@@ -56,30 +56,10 @@
// Append additional pools from specified additional data pools
ret.Spec.DataPools = append(ret.Spec.DataPools, initStorageCluster.Spec.ManagedResources.CephFilesystems.AdditionalDataPools...)

// Iterate over each pool and set default values if necessary
defaultPoolSpec := generateDefaultPoolSpec(initStorageCluster)
for i := range ret.Spec.DataPools {
pool := &ret.Spec.DataPools[i]
// Set default device class if not specified
if pool.PoolSpec.DeviceClass == "" {
pool.PoolSpec.DeviceClass = defaultPoolSpec.DeviceClass
}
// Set EnableCrushUpdates to always be true
pool.PoolSpec.EnableCrushUpdates = true
// Set default replication settings if not specified
if pool.PoolSpec.Replicated.Size == 0 {
pool.PoolSpec.Replicated.Size = defaultPoolSpec.Replicated.Size
}
if pool.PoolSpec.Replicated.ReplicasPerFailureDomain == 0 {
pool.PoolSpec.Replicated.ReplicasPerFailureDomain = defaultPoolSpec.Replicated.ReplicasPerFailureDomain
}
if pool.PoolSpec.Replicated.TargetSizeRatio == 0 {
pool.PoolSpec.Replicated.TargetSizeRatio = defaultPoolSpec.Replicated.TargetSizeRatio
}
// Set default failure domain if not specified
if pool.PoolSpec.FailureDomain == "" {
pool.PoolSpec.FailureDomain = defaultPoolSpec.FailureDomain
}
poolSpec := &ret.Spec.DataPools[i].PoolSpec
// Set default values in the poolSpec as necessary
setDefaultDataPoolSpec(poolSpec, initStorageCluster)
}

// set device class for metadata pool from the default data pool
@@ -282,13 +262,3 @@ func getActiveMetadataServers(sc *ocsv1.StorageCluster) int {

return defaults.CephFSActiveMetadataServers
}

// Define a function to generate default pool specifications
func generateDefaultPoolSpec(sc *ocsv1.StorageCluster) cephv1.PoolSpec {
return cephv1.PoolSpec{
DeviceClass: sc.Status.DefaultCephDeviceClass,
EnableCrushUpdates: true,
Replicated: generateCephReplicatedSpec(sc, "data"),
FailureDomain: sc.Status.FailureDomain,
}
}
8 changes: 7 additions & 1 deletion controllers/storagecluster/cephfilesystem_test.go
@@ -125,7 +125,13 @@ func TestCephFileSystemDataPools(t *testing.T) {
mocksc := &api.StorageCluster{}
mockStorageCluster.DeepCopyInto(mocksc)
mocksc.Status.FailureDomain = "zone"
defaultPoolSpec := generateDefaultPoolSpec(mocksc)
defaultPoolSpec := cephv1.PoolSpec{
EnableCrushUpdates: true,
DeviceClass: mocksc.Status.DefaultCephDeviceClass,
FailureDomain: getFailureDomain(mocksc),
Replicated: generateCephReplicatedSpec(mocksc, poolTypeData),
}

var cases = []struct {
label string
sc *api.StorageCluster
12 changes: 5 additions & 7 deletions controllers/storagecluster/cephobjectstores.go
@@ -168,17 +168,12 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
},
Spec: cephv1.ObjectStoreSpec{
PreservePoolsOnDelete: false,
DataPool: cephv1.PoolSpec{
DeviceClass: initData.Status.DefaultCephDeviceClass,
EnableCrushUpdates: true,
FailureDomain: initData.Status.FailureDomain,
Replicated: generateCephReplicatedSpec(initData, "data"),
},
DataPool: initData.Spec.ManagedResources.CephObjectStores.DataPoolSpec, // Pass the poolSpec from the storageCluster CR
MetadataPool: cephv1.PoolSpec{
DeviceClass: initData.Status.DefaultCephDeviceClass,
EnableCrushUpdates: true,
FailureDomain: initData.Status.FailureDomain,
Replicated: generateCephReplicatedSpec(initData, "metadata"),
Replicated: generateCephReplicatedSpec(initData, poolTypeMetadata),
},
Gateway: cephv1.GatewaySpec{
Port: 80,
@@ -209,6 +204,9 @@ func (r *StorageClusterReconciler) newCephObjectStoreInstances(initData *ocsv1.S
obj.Spec.Gateway.HostNetwork = initData.Spec.ManagedResources.CephObjectStores.HostNetwork
}

// Set default values in the poolSpec as necessary
setDefaultDataPoolSpec(&obj.Spec.DataPool, initData)

// if kmsConfig is not 'nil', add the KMS details to ObjectStore spec
if kmsConfigMap != nil {

3 changes: 1 addition & 2 deletions controllers/storagecluster/generate.go
@@ -186,8 +186,7 @@ func generateCephReplicatedSpec(initData *ocsv1.StorageCluster, poolType string)

crs.Size = getCephPoolReplicatedSize(initData)
crs.ReplicasPerFailureDomain = uint(getReplicasPerFailureDomain(initData))
//lint:ignore ST1017 required to compare it directly
if "data" == poolType {
if poolType == poolTypeData {
crs.TargetSizeRatio = .49
}

(Diffs for the remaining changed files were not loaded.)

0 comments on commit e5f8d92
