Set values in ocs-operator-config CM for replica-1 in external mode #2516

Merged
56 changes: 52 additions & 4 deletions controllers/ocsinitialization/ocsinitialization_controller.go
@@ -14,6 +14,7 @@ import (
 	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
 	"gopkg.in/yaml.v2"
 	corev1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -208,6 +209,23 @@ func (r *OCSInitializationReconciler) SetupWithManager(mgr ctrl.Manager) error {
 				},
 			),
 		).
+		// Watcher for storageClass required to update values related to replica-1
+		// in ocs-operator-config configmap, if storageClass changes
+		Watches(
+			&storagev1.StorageClass{},
+			handler.EnqueueRequestsFromMapFunc(
+				func(context context.Context, obj client.Object) []reconcile.Request {
+					// Only reconcile if the storageClass has topologyConstrainedPools set
+					sc := obj.(*storagev1.StorageClass)
+					if sc.Parameters["topologyConstrainedPools"] != "" {
+						return []reconcile.Request{{
+							NamespacedName: InitNamespacedName(),
+						}}
+					}
+					return []reconcile.Request{}
+				},
+			),
+		).
 		// Watcher for rook-ceph-operator-config cm
 		Watches(
 			&corev1.ConfigMap{
Expand Down Expand Up @@ -391,22 +409,39 @@ func (r *OCSInitializationReconciler) ensureOcsOperatorConfigExists(initialData

func (r *OCSInitializationReconciler) getEnableTopologyKeyValue() string {

// return true even if one of the storagecluster has enabled it
for _, sc := range r.clusters.GetStorageClusters() {
if sc.Spec.ManagedResources.CephNonResilientPools.Enable {
if !sc.Spec.ExternalStorage.Enable && sc.Spec.ManagedResources.CephNonResilientPools.Enable {
// In internal mode return true even if one of the storageCluster has enabled it via the CR
return "true"
} else if sc.Spec.ExternalStorage.Enable {
// In external mode, check if the non-resilient storageClass exists
scName := util.GenerateNameForNonResilientCephBlockPoolSC(&sc)
storageClass := util.GetStorageClassWithName(r.ctx, r.Client, scName)
if storageClass != nil {
return "true"
}
}
}

return "false"
}

// In case of multiple storageClusters when replica-1 is enabled for both an internal and an external cluster, different failure domain keys can lead to complications.
// To prevent this, when gathering information for the external cluster, ensure that the failure domain is specified to match that of the internal cluster (sc.Status.FailureDomain).
func (r *OCSInitializationReconciler) getTopologyDomainLabelsKeyValue() string {

// return value from the internal storagecluster as failureDomain is set only in internal cluster
for _, sc := range r.clusters.GetStorageClusters() {
if !sc.Spec.ExternalStorage.Enable && sc.Status.FailureDomainKey != "" {
if !sc.Spec.ExternalStorage.Enable && sc.Spec.ManagedResources.CephNonResilientPools.Enable {
malayparida2000 marked this conversation as resolved.
Show resolved Hide resolved
// In internal mode return the failure domain key directly from the storageCluster
return sc.Status.FailureDomainKey
} else if sc.Spec.ExternalStorage.Enable {
// In external mode, check if the non-resilient storageClass exists
// determine the failure domain key from the storageClass parameter
scName := util.GenerateNameForNonResilientCephBlockPoolSC(&sc)
storageClass := util.GetStorageClassWithName(r.ctx, r.Client, scName)
if storageClass != nil {
return getFailureDomainKeyFromStorageClassParameter(storageClass)
}
}
}

@@ -425,6 +460,19 @@ func (r *OCSInitializationReconciler) getEnableNFSKeyValue() string {
 	return "false"
 }
 
+func getFailureDomainKeyFromStorageClassParameter(sc *storagev1.StorageClass) string {
+	failuredomain := sc.Parameters["topologyFailureDomainLabel"]
+	if failuredomain == "zone" {
+		return "topology.kubernetes.io/zone"
+	} else if failuredomain == "rack" {
+		return "topology.rook.io/rack"
+	} else if failuredomain == "hostname" || failuredomain == "host" {
+		return "kubernetes.io/hostname"
+	} else {
+		return ""
+	}
+}
+
 func (r *OCSInitializationReconciler) reconcileUXBackendSecret(initialData *ocsv1.OCSInitialization) error {
 
 	var err error
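Aside (not part of the diff): the label-to-topology-key mapping in getFailureDomainKeyFromStorageClassParameter lends itself to a small table-driven test. A minimal sketch, assuming it sits in the same ocsinitialization package and imports testing and storagev1 ("k8s.io/api/storage/v1"); the test name and cases are illustrative:

func TestGetFailureDomainKeyFromStorageClassParameter(t *testing.T) {
	cases := map[string]string{
		"zone":     "topology.kubernetes.io/zone",
		"rack":     "topology.rook.io/rack",
		"hostname": "kubernetes.io/hostname",
		"host":     "kubernetes.io/hostname",
		"other":    "", // any unrecognized label falls through to the empty string
	}
	for label, want := range cases {
		sc := &storagev1.StorageClass{
			Parameters: map[string]string{"topologyFailureDomainLabel": label},
		}
		if got := getFailureDomainKeyFromStorageClassParameter(sc); got != want {
			t.Errorf("label %q: got %q, want %q", label, got, want)
		}
	}
}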
7 changes: 0 additions & 7 deletions controllers/storagecluster/generate.go
@@ -88,13 +88,6 @@ func generateNameForCephBlockPoolVirtualizationSC(initData *ocsv1.StorageCluster
 	return fmt.Sprintf("%s-ceph-rbd-virtualization", initData.Name)
 }
 
-func generateNameForNonResilientCephBlockPoolSC(initData *ocsv1.StorageCluster) string {
-	if initData.Spec.ManagedResources.CephNonResilientPools.StorageClassName != "" {
-		return initData.Spec.ManagedResources.CephNonResilientPools.StorageClassName
-	}
-	return fmt.Sprintf("%s-ceph-non-resilient-rbd", initData.Name)
-}
-
 func generateNameForEncryptedCephBlockPoolSC(initData *ocsv1.StorageCluster) string {
 	if initData.Spec.Encryption.StorageClassName != "" {
 		return initData.Spec.Encryption.StorageClassName
2 changes: 1 addition & 1 deletion controllers/storagecluster/storageclasses.go
@@ -339,7 +339,7 @@ func newNonResilientCephBlockPoolStorageClassConfiguration(initData *ocsv1.Stora
 	return StorageClassConfiguration{
 		storageClass: &storagev1.StorageClass{
 			ObjectMeta: metav1.ObjectMeta{
-				Name: generateNameForNonResilientCephBlockPoolSC(initData),
+				Name: util.GenerateNameForNonResilientCephBlockPoolSC(initData),
 				Annotations: map[string]string{
 					"description": "Ceph Non Resilient Pools : Provides RWO Filesystem volumes, and RWO and RWX Block volumes",
 				},
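Aside (not part of the diff): the name helper referenced here was moved from the storagecluster package to controllers/util (see the last file below) and exported, so the ocsinitialization controller can look up the same StorageClass that the storagecluster controller creates. A minimal sketch of its behavior, assuming imports of testing, metav1, ocsv1, and the util package; the cluster name "ocs-storagecluster" and the override value are illustrative:

func TestGenerateNameForNonResilientCephBlockPoolSC(t *testing.T) {
	cluster := &ocsv1.StorageCluster{ObjectMeta: metav1.ObjectMeta{Name: "ocs-storagecluster"}}
	// Without an explicit override the name follows the "<cluster>-ceph-non-resilient-rbd" pattern.
	if got := util.GenerateNameForNonResilientCephBlockPoolSC(cluster); got != "ocs-storagecluster-ceph-non-resilient-rbd" {
		t.Errorf("unexpected default StorageClass name: %q", got)
	}
	// An explicit CephNonResilientPools.StorageClassName takes precedence.
	cluster.Spec.ManagedResources.CephNonResilientPools.StorageClassName = "my-replica1-rbd"
	if got := util.GenerateNameForNonResilientCephBlockPoolSC(cluster); got != "my-replica1-rbd" {
		t.Errorf("unexpected overridden StorageClass name: %q", got)
	}
}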
19 changes: 19 additions & 0 deletions controllers/util/k8sutil.go
@@ -8,7 +8,9 @@ import (
 
 	"github.com/go-logr/logr"
 	configv1 "github.com/openshift/api/config/v1"
+	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
 	corev1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -110,6 +112,16 @@ func GetPodsWithLabels(ctx context.Context, kubeClient client.Client, namespace
 	return podList, nil
 }
 
+// GetStorageClassWithName returns the storage class object by name
+func GetStorageClassWithName(ctx context.Context, kubeClient client.Client, name string) *storagev1.StorageClass {
+	sc := &storagev1.StorageClass{}
+	err := kubeClient.Get(ctx, types.NamespacedName{Name: name}, sc)
+	if err != nil {
+		return nil
+	}
+	return sc
+}
+
 // getCountOfRunningPods gives the count of pods in running state in a given pod list
 func GetCountOfRunningPods(podList *corev1.PodList) int {
 	count := 0
@@ -129,3 +141,10 @@ func OwnersIndexFieldFunc(obj client.Object) []string {
 	}
 	return owners
 }
+
+func GenerateNameForNonResilientCephBlockPoolSC(initData *ocsv1.StorageCluster) string {
+	if initData.Spec.ManagedResources.CephNonResilientPools.StorageClassName != "" {
+		return initData.Spec.ManagedResources.CephNonResilientPools.StorageClassName
+	}
+	return fmt.Sprintf("%s-ceph-non-resilient-rbd", initData.Name)
+}