From e6c698a0cbb327d3edb431f12373149010834cbd Mon Sep 17 00:00:00 2001 From: Rewant Soni Date: Mon, 10 Feb 2025 20:14:32 +0530 Subject: [PATCH 1/2] provider: update the storageID for ramen update ramen storageID to be: for rbd: combination(cephCluster_FSID,blockpool_id,rns_name) for cephfs: combination(cephCluster_FSID,filesystem_name,svg_name) Signed-off-by: Rewant Soni --- controllers/util/k8sutil.go | 15 +++++++++++ services/provider/server/server.go | 35 +++++++++++++++++++++++-- services/provider/server/server_test.go | 35 ++++++++++++++++++------- 3 files changed, 73 insertions(+), 12 deletions(-) diff --git a/controllers/util/k8sutil.go b/controllers/util/k8sutil.go index fb4f5921f4..9f001b5158 100644 --- a/controllers/util/k8sutil.go +++ b/controllers/util/k8sutil.go @@ -11,6 +11,7 @@ import ( "github.com/go-logr/logr" configv1 "github.com/openshift/api/config/v1" + rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "golang.org/x/exp/maps" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -203,6 +204,20 @@ func GetStorageClusterInNamespace(ctx context.Context, cl client.Client, namespa return &storageClusterList.Items[0], nil } +func GetCephClusterInNamespace(ctx context.Context, cl client.Client, namespace string) (*rookCephv1.CephCluster, error) { + cephClusterList := &rookCephv1.CephClusterList{} + err := cl.List(ctx, cephClusterList, client.InNamespace(namespace), client.Limit(1)) + if err != nil { + return nil, fmt.Errorf("unable to list cephCluster(s) in namespace %s: %v", namespace, err) + } + + if len(cephClusterList.Items) == 0 { + return nil, fmt.Errorf("no cephCluster found in namespace %s", namespace) + } + + return &cephClusterList.Items[0], nil +} + func NewK8sClient(scheme *runtime.Scheme) (client.Client, error) { klog.Info("Setting up k8s client") diff --git a/services/provider/server/server.go b/services/provider/server/server.go index d0c86c606b..a6c5ea3713 100644 --- a/services/provider/server/server.go +++ b/services/provider/server/server.go @@ -741,8 +741,12 @@ func (s *OCSProviderServer) GetStorageClaimConfig(ctx context.Context, req *pb.S var extR []*pb.ExternalResource storageRequestHash := getStorageRequestHash(req.StorageConsumerUUID, req.StorageClaimName) - // SID for RamenDR - storageID := storageRequestHash + + cephCluster, err := util.GetCephClusterInNamespace(ctx, s.client, s.namespace) + if err != nil { + return nil, err + } + replicationID := req.StorageClaimName for _, cephRes := range storageRequest.Status.CephResources { @@ -799,6 +803,18 @@ func (s *OCSProviderServer) GetStorageClaimConfig(ctx context.Context, req *pb.S rbdStorageClassData["encryptionKMSID"] = storageRequest.Spec.EncryptionMethod } + blockPool := &rookCephv1.CephBlockPool{} + err = s.client.Get(ctx, types.NamespacedName{Name: rns.Spec.BlockPoolName, Namespace: s.namespace}, blockPool) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to get %s CephBlockPool. 
%v", blockPool.Name, err) + } + + // SID for RamenDR + storageID := calculateCephRbdStorageId( + cephCluster.Status.CephStatus.FSID, + strconv.Itoa(blockPool.Status.PoolID), + rns.Name) + extR = append(extR, &pb.ExternalResource{ Name: "ceph-rbd", @@ -915,6 +931,13 @@ func (s *OCSProviderServer) GetStorageClaimConfig(ctx context.Context, req *pb.S kernelMountOptions[parts[0]] = parts[1] } + // SID for RamenDR + storageID := calculateCephFsStorageId( + cephCluster.Status.CephStatus.FSID, + subVolumeGroup.Spec.FilesystemName, + subVolumeGroup.Name, + ) + extR = append(extR, &pb.ExternalResource{ Name: "cephfs", @@ -1287,3 +1310,11 @@ func (s *OCSProviderServer) isConsumerMirrorEnabled(ctx context.Context, consume return clientMappingConfig.Data[consumer.Status.Client.ID] != "", nil } + +func calculateCephRbdStorageId(cephfsid, poolID, radosnamespacename string) string { + return util.CalculateMD5Hash([3]string{cephfsid, poolID, radosnamespacename}) +} + +func calculateCephFsStorageId(cephfsid, fileSystemName, subVolumeGroupName string) string { + return util.CalculateMD5Hash([3]string{cephfsid, fileSystemName, subVolumeGroupName}) +} diff --git a/services/provider/server/server_test.go b/services/provider/server/server_test.go index 5863190771..449e09cd5d 100644 --- a/services/provider/server/server_test.go +++ b/services/provider/server/server_test.go @@ -626,7 +626,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) { Name: "ceph-rbd", Kind: "StorageClass", Labels: map[string]string{ - "ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c", + "ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46", }, Data: map[string]string{ "clusterID": serverNamespace, @@ -644,8 +644,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) { Name: "ceph-rbd", Kind: "VolumeSnapshotClass", Labels: map[string]string{ - - "ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c", + "ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46", }, Data: map[string]string{ "csi.storage.k8s.io/snapshotter-secret-name": "ceph-client-provisioner-8d40b6be71600457b5dec219d2ce2d4c", @@ -655,7 +654,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) { Name: "block-pool-claim-groupsnapclass", Kind: "VolumeGroupSnapshotClass", Labels: map[string]string{ - "ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c", + "ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46", }, Data: map[string]string{ "csi.storage.k8s.io/group-snapshotter-secret-name": "ceph-client-provisioner-8d40b6be71600457b5dec219d2ce2d4c", @@ -668,7 +667,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) { Kind: "VolumeReplicationClass", Labels: map[string]string{ "ramendr.openshift.io/replicationid": "block-pool-claim", - "ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c", + "ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46", "ramendr.openshift.io/maintenancemodes": "Failover", }, Annotations: map[string]string{ @@ -690,7 +689,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) { Labels: map[string]string{ "replication.storage.openshift.io/flatten-mode": "force", "ramendr.openshift.io/replicationid": "block-pool-claim", - "ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c", + "ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46", "ramendr.openshift.io/maintenancemodes": "Failover", }, Data: &replicationv1alpha1.VolumeReplicationClassSpec{ @@ 
-736,7 +735,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) { Name: "cephfs", Kind: "StorageClass", Labels: map[string]string{ - "ramendr.openshift.io/storageid": "0e8555e6556f70d23a61675af44e880c", + "ramendr.openshift.io/storageid": "5b53ada3302d6e0d1025a7948ce45ba5", }, Data: map[string]string{ "clusterID": "8d26c7378c1b0ec9c2455d1c3601c4cd", @@ -752,7 +751,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) { Name: "cephfs", Kind: "VolumeSnapshotClass", Labels: map[string]string{ - "ramendr.openshift.io/storageid": "0e8555e6556f70d23a61675af44e880c", + "ramendr.openshift.io/storageid": "5b53ada3302d6e0d1025a7948ce45ba5", }, Data: map[string]string{ "csi.storage.k8s.io/snapshotter-secret-name": "ceph-client-provisioner-0e8555e6556f70d23a61675af44e880c", @@ -762,7 +761,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) { Name: "shared-filesystem-claim-groupsnapclass", Kind: "VolumeGroupSnapshotClass", Labels: map[string]string{ - "ramendr.openshift.io/storageid": "0e8555e6556f70d23a61675af44e880c", + "ramendr.openshift.io/storageid": "5b53ada3302d6e0d1025a7948ce45ba5", }, Data: map[string]string{ "csi.storage.k8s.io/group-snapshotter-secret-name": "ceph-client-provisioner-0e8555e6556f70d23a61675af44e880c", @@ -909,6 +908,17 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) { AllowRemoteStorageConsumers: true, }, } + cephCluster = &rookCephv1.CephCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mock-storage-cluster-cephcluster", + Namespace: serverNamespace, + }, + Status: rookCephv1.ClusterStatus{ + CephStatus: &rookCephv1.CephStatus{ + FSID: "my-fsid", + }, + }, + } ) ctx := context.TODO() @@ -920,6 +930,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) { claimResourceCreating, claimResourceFailed, storageClustersResource, + cephCluster, } // Create a fake client to mock API calls. 
@@ -1091,7 +1102,11 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) { Spec: rookCephv1.NamedBlockPoolSpec{ PoolSpec: rookCephv1.PoolSpec{ Mirroring: rookCephv1.MirroringSpec{Enabled: false}, - }}, + }, + }, + Status: &rookCephv1.CephBlockPoolStatus{ + PoolID: 1, + }, } assert.NoError(t, client.Create(ctx, cephBlockPool)) From fb0ba8cc5677e88319989c6c769bab065b31ac04 Mon Sep 17 00:00:00 2001 From: Rewant Soni Date: Mon, 10 Feb 2025 20:15:29 +0530 Subject: [PATCH 2/2] add generated changes Signed-off-by: Rewant Soni --- .../ocs-operator/v4/controllers/util/k8sutil.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go b/metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go index fb4f5921f4..9f001b5158 100644 --- a/metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go +++ b/metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go @@ -11,6 +11,7 @@ import ( "github.com/go-logr/logr" configv1 "github.com/openshift/api/config/v1" + rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" "golang.org/x/exp/maps" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -203,6 +204,20 @@ func GetStorageClusterInNamespace(ctx context.Context, cl client.Client, namespa return &storageClusterList.Items[0], nil } +func GetCephClusterInNamespace(ctx context.Context, cl client.Client, namespace string) (*rookCephv1.CephCluster, error) { + cephClusterList := &rookCephv1.CephClusterList{} + err := cl.List(ctx, cephClusterList, client.InNamespace(namespace), client.Limit(1)) + if err != nil { + return nil, fmt.Errorf("unable to list cephCluster(s) in namespace %s: %v", namespace, err) + } + + if len(cephClusterList.Items) == 0 { + return nil, fmt.Errorf("no cephCluster found in namespace %s", namespace) + } + + return &cephClusterList.Items[0], nil +} + func NewK8sClient(scheme *runtime.Scheme) (client.Client, error) { klog.Info("Setting up k8s client")
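
Note for reviewers who want to sanity-check the new label values: the storage IDs introduced by this series are MD5 hashes over a fixed triplet of identifiers, as implemented by the calculateCephRbdStorageId/calculateCephFsStorageId helpers in patch 1. The standalone Go sketch below is a minimal approximation of that derivation; it assumes util.CalculateMD5Hash JSON-encodes its argument before hashing (the real helper lives in controllers/util, and if its encoding differs the digests will too). The pool ID, RADOS namespace, filesystem, and subvolume group names used here are placeholders, not the fixture values from server_test.go.

package main

import (
	"crypto/md5"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// calculateMD5Hash approximates the assumed behaviour of util.CalculateMD5Hash:
// JSON-encode the value, then return the hex-encoded MD5 digest of the bytes.
func calculateMD5Hash(value any) string {
	data, err := json.Marshal(value)
	if err != nil {
		panic(err)
	}
	sum := md5.Sum(data)
	return hex.EncodeToString(sum[:])
}

func main() {
	// RBD storage ID: cephCluster FSID + CephBlockPool pool ID + CephBlockPoolRadosNamespace name.
	// "my-fsid" matches the CephCluster fixture added in patch 1; the pool ID and
	// namespace name here are placeholders for illustration only.
	rbdStorageID := calculateMD5Hash([3]string{"my-fsid", "1", "example-rados-namespace"})

	// CephFS storage ID: cephCluster FSID + CephFilesystem name + CephFilesystemSubVolumeGroup name.
	cephFsStorageID := calculateMD5Hash([3]string{"my-fsid", "example-fs", "example-svg"})

	fmt.Println("rbd storageID:   ", rbdStorageID)
	fmt.Println("cephfs storageID:", cephFsStorageID)
}

Running this prints two 32-character hex digests; the updated expectations in server_test.go (854666c7... for RBD, 5b53ada3... for CephFS) are presumably derived the same way from the fixture values wired up in that test.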