Set values in ocs-operator-config CM for replica-1 in external mode
In external mode the replica-1 feature is not enabled via the
storageCluster CR, so detect whether the feature is enabled by checking
if the replica-1 storageClass exists. The failure domain is also not
set in the storageCluster CR in external mode, so determine the failure
domain key from the storageClass parameters. Also add a watch for
storageClasses in the ocsinitialization controller to detect
storageClass creation, and set the values in the CM accordingly.

Signed-off-by: Malay Kumar Parida <[email protected]>
malayparida2000 committed Mar 20, 2024
1 parent 16f3414 commit bf526b4
Showing 4 changed files with 67 additions and 12 deletions.
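
For orientation before the diff, here is a minimal standalone sketch of the detection flow the commit message describes. The function name detectReplica1External and its wiring are hypothetical; the storageClass lookup, the topologyFailureDomainLabel parameter, and the label-key mapping are taken from the changes below.

package sketch

import (
	"context"

	storagev1 "k8s.io/api/storage/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// detectReplica1External is a hypothetical condensation of the change: in
// external mode the storageCluster CR carries no replica-1 flag, so the
// existence of the non-resilient storageClass is taken as the signal that
// the feature is on, and the failure domain key is read from the class
// parameters instead of from the storageCluster status.
func detectReplica1External(ctx context.Context, c client.Client, scName string) (enabled bool, domainKey string) {
	sc := &storagev1.StorageClass{}
	if err := c.Get(ctx, client.ObjectKey{Name: scName}, sc); err != nil {
		// storageClass absent (or unreadable): feature treated as disabled
		return false, ""
	}
	// Same parameter-to-label mapping the commit adds in
	// getFailureDomainKeyFromStorageClassParameter.
	switch sc.Parameters["topologyFailureDomainLabel"] {
	case "zone":
		domainKey = "topology.kubernetes.io/zone"
	case "rack":
		domainKey = "topology.rook.io/rack"
	case "hostname", "host":
		domainKey = "kubernetes.io/hostname"
	}
	return true, domainKey
}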
51 changes: 47 additions & 4 deletions controllers/ocsinitialization/ocsinitialization_controller.go
@@ -14,6 +14,7 @@ import (
 	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
 	"gopkg.in/yaml.v2"
 	corev1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -208,6 +209,18 @@ func (r *OCSInitializationReconciler) SetupWithManager(mgr ctrl.Manager) error {
 				},
 			),
 		).
+		// Watcher for storageClass required to update values related to replica-1
+		// in ocs-operator-config configmap, if storageClass changes
+		Watches(
+			&storagev1.StorageClass{},
+			handler.EnqueueRequestsFromMapFunc(
+				func(context context.Context, obj client.Object) []reconcile.Request {
+					return []reconcile.Request{{
+						NamespacedName: InitNamespacedName(),
+					}}
+				},
+			),
+		).
 		// Watcher for rook-ceph-operator-config cm
 		Watches(
 			&corev1.ConfigMap{
@@ -391,22 +404,39 @@ func (r *OCSInitializationReconciler) ensureOcsOperatorConfigExists(initialData
 
 func (r *OCSInitializationReconciler) getEnableTopologyKeyValue() string {
 
-	// return true even if one of the storagecluster has enabled it
 	for _, sc := range r.clusters.GetStorageClusters() {
-		if sc.Spec.ManagedResources.CephNonResilientPools.Enable {
+		if !sc.Spec.ExternalStorage.Enable && sc.Spec.ManagedResources.CephNonResilientPools.Enable {
+			// In internal mode return true even if one of the storageCluster has enabled it via the CR
 			return "true"
+		} else if sc.Spec.ExternalStorage.Enable {
+			// In external mode, check if the non-resilient storageClass exists
+			scName := util.GenerateNameForNonResilientCephBlockPoolSC(&sc)
+			storageClass := util.GetStorageClassWithName(r.ctx, r.Client, scName)
+			if storageClass != nil {
+				return "true"
+			}
 		}
 	}
 
 	return "false"
 }
 
+// In case of multiple storageClusters when replica-1 is enabled for both an internal and an external cluster, different failure domain keys can lead to complications.
+// To prevent this, when gathering information for the external cluster, ensure that the failure domain is specified to match that of the internal cluster (sc.Status.FailureDomain).
 func (r *OCSInitializationReconciler) getTopologyDomainLabelsKeyValue() string {
 
-	// return value from the internal storagecluster as failureDomain is set only in internal cluster
 	for _, sc := range r.clusters.GetStorageClusters() {
-		if !sc.Spec.ExternalStorage.Enable && sc.Status.FailureDomainKey != "" {
+		if !sc.Spec.ExternalStorage.Enable && sc.Spec.ManagedResources.CephNonResilientPools.Enable {
+			// In internal mode return the failure domain key directly from the storageCluster
 			return sc.Status.FailureDomainKey
+		} else if sc.Spec.ExternalStorage.Enable {
+			// In external mode, check if the non-resilient storageClass exists
+			// determine the failure domain key from the storageClass parameter
+			scName := util.GenerateNameForNonResilientCephBlockPoolSC(&sc)
+			storageClass := util.GetStorageClassWithName(r.ctx, r.Client, scName)
+			if storageClass != nil {
+				return getFailureDomainKeyFromStorageClassParameter(storageClass)
+			}
+		}
 	}
 
@@ -425,6 +455,19 @@ func (r *OCSInitializationReconciler) getEnableNFSKeyValue() string {
 	return "false"
 }
 
+func getFailureDomainKeyFromStorageClassParameter(sc *storagev1.StorageClass) string {
+	failuredomain := sc.Parameters["topologyFailureDomainLabel"]
+	if failuredomain == "zone" {
+		return "topology.kubernetes.io/zone"
+	} else if failuredomain == "rack" {
+		return "topology.rook.io/rack"
+	} else if failuredomain == "hostname" || failuredomain == "host" {
+		return "kubernetes.io/hostname"
+	} else {
+		return ""
+	}
+}
+
 func (r *OCSInitializationReconciler) reconcileUXBackendSecret(initialData *ocsv1.OCSInitialization) error {
 
 	var err error
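
To make the new mapping concrete, here is a hypothetical table-driven test (its name and placement in an ocsinitialization _test.go file are assumed, not part of this commit) exercising getFailureDomainKeyFromStorageClassParameter from the diff above.

package ocsinitialization

import (
	"testing"

	storagev1 "k8s.io/api/storage/v1"
)

// TestFailureDomainKeyMapping is illustrative only; it checks the
// parameter-to-node-label mapping added in this commit.
func TestFailureDomainKeyMapping(t *testing.T) {
	cases := map[string]string{
		"zone":     "topology.kubernetes.io/zone",
		"rack":     "topology.rook.io/rack",
		"hostname": "kubernetes.io/hostname",
		"host":     "kubernetes.io/hostname",
		"other":    "", // unknown values fall through to the empty string
	}
	for param, want := range cases {
		sc := &storagev1.StorageClass{Parameters: map[string]string{"topologyFailureDomainLabel": param}}
		if got := getFailureDomainKeyFromStorageClassParameter(sc); got != want {
			t.Errorf("param %q: got %q, want %q", param, got, want)
		}
	}
}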
7 changes: 0 additions & 7 deletions controllers/storagecluster/generate.go
@@ -88,13 +88,6 @@ func generateNameForCephBlockPoolVirtualizationSC(initData *ocsv1.StorageCluster
 	return fmt.Sprintf("%s-ceph-rbd-virtualization", initData.Name)
 }
 
-func generateNameForNonResilientCephBlockPoolSC(initData *ocsv1.StorageCluster) string {
-	if initData.Spec.ManagedResources.CephNonResilientPools.StorageClassName != "" {
-		return initData.Spec.ManagedResources.CephNonResilientPools.StorageClassName
-	}
-	return fmt.Sprintf("%s-ceph-non-resilient-rbd", initData.Name)
-}
-
 func generateNameForEncryptedCephBlockPoolSC(initData *ocsv1.StorageCluster) string {
 	if initData.Spec.Encryption.StorageClassName != "" {
 		return initData.Spec.Encryption.StorageClassName
2 changes: 1 addition & 1 deletion controllers/storagecluster/storageclasses.go
@@ -339,7 +339,7 @@ func newNonResilientCephBlockPoolStorageClassConfiguration(initData *ocsv1.Stora
 	return StorageClassConfiguration{
 		storageClass: &storagev1.StorageClass{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:        generateNameForNonResilientCephBlockPoolSC(initData),
+				Name:        util.GenerateNameForNonResilientCephBlockPoolSC(initData),
 				Annotations: map[string]string{
 					"description": "Ceph Non Resilient Pools : Provides RWO Filesystem volumes, and RWO and RWX Block volumes",
 				},
19 changes: 19 additions & 0 deletions controllers/util/k8sutil.go
@@ -8,7 +8,9 @@ import (
 
 	"github.com/go-logr/logr"
 	configv1 "github.com/openshift/api/config/v1"
+	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
 	corev1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -107,6 +109,16 @@ func GetPodsWithLabels(ctx context.Context, kubeClient client.Client, namespace
 	return podList, nil
 }
 
+// GetStorageClassWithName returns the storage class object by name
+func GetStorageClassWithName(ctx context.Context, kubeClient client.Client, name string) *storagev1.StorageClass {
+	sc := &storagev1.StorageClass{}
+	err := kubeClient.Get(ctx, types.NamespacedName{Name: name}, sc)
+	if err != nil {
+		return nil
+	}
+	return sc
+}
+
 // getCountOfRunningPods gives the count of pods in running state in a given pod list
 func GetCountOfRunningPods(podList *corev1.PodList) int {
 	count := 0
@@ -117,3 +129,10 @@ func GetCountOfRunningPods(podList *corev1.PodList) int {
 	}
 	return count
 }
+
+func GenerateNameForNonResilientCephBlockPoolSC(initData *ocsv1.StorageCluster) string {
+	if initData.Spec.ManagedResources.CephNonResilientPools.StorageClassName != "" {
+		return initData.Spec.ManagedResources.CephNonResilientPools.StorageClassName
+	}
+	return fmt.Sprintf("%s-ceph-non-resilient-rbd", initData.Name)
+}
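
A short usage sketch for the relocated helper, written as a hypothetical Go example test in package util (the cluster and class names are made up): the override from the CR wins, otherwise the name is derived from the storageCluster name.

package util

import (
	"fmt"

	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
)

// ExampleGenerateNameForNonResilientCephBlockPoolSC shows the default and
// overridden naming; values are illustrative only.
func ExampleGenerateNameForNonResilientCephBlockPoolSC() {
	sc := &ocsv1.StorageCluster{}
	sc.Name = "ocs-external-storagecluster"
	fmt.Println(GenerateNameForNonResilientCephBlockPoolSC(sc))

	sc.Spec.ManagedResources.CephNonResilientPools.StorageClassName = "custom-replica1"
	fmt.Println(GenerateNameForNonResilientCephBlockPoolSC(sc))
	// Output:
	// ocs-external-storagecluster-ceph-non-resilient-rbd
	// custom-replica1
}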
