diff --git a/controllers/util/k8sutil.go b/controllers/util/k8sutil.go
index fb4f5921f4..9f001b5158 100644
--- a/controllers/util/k8sutil.go
+++ b/controllers/util/k8sutil.go
@@ -11,6 +11,7 @@ import (
 
 	"github.com/go-logr/logr"
 	configv1 "github.com/openshift/api/config/v1"
+	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
 	"golang.org/x/exp/maps"
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
@@ -203,6 +204,20 @@ func GetStorageClusterInNamespace(ctx context.Context, cl client.Client, namespa
 	return &storageClusterList.Items[0], nil
 }
 
+func GetCephClusterInNamespace(ctx context.Context, cl client.Client, namespace string) (*rookCephv1.CephCluster, error) {
+	cephClusterList := &rookCephv1.CephClusterList{}
+	err := cl.List(ctx, cephClusterList, client.InNamespace(namespace), client.Limit(1))
+	if err != nil {
+		return nil, fmt.Errorf("unable to list cephCluster(s) in namespace %s: %v", namespace, err)
+	}
+
+	if len(cephClusterList.Items) == 0 {
+		return nil, fmt.Errorf("no cephCluster found in namespace %s", namespace)
+	}
+
+	return &cephClusterList.Items[0], nil
+}
+
 func NewK8sClient(scheme *runtime.Scheme) (client.Client, error) {
 	klog.Info("Setting up k8s client")
diff --git a/metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go b/metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go
index fb4f5921f4..9f001b5158 100644
--- a/metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go
+++ b/metrics/vendor/github.com/red-hat-storage/ocs-operator/v4/controllers/util/k8sutil.go
@@ -11,6 +11,7 @@ import (
 
 	"github.com/go-logr/logr"
 	configv1 "github.com/openshift/api/config/v1"
+	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
 	"golang.org/x/exp/maps"
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
@@ -203,6 +204,20 @@ func GetStorageClusterInNamespace(ctx context.Context, cl client.Client, namespa
 	return &storageClusterList.Items[0], nil
 }
 
+func GetCephClusterInNamespace(ctx context.Context, cl client.Client, namespace string) (*rookCephv1.CephCluster, error) {
+	cephClusterList := &rookCephv1.CephClusterList{}
+	err := cl.List(ctx, cephClusterList, client.InNamespace(namespace), client.Limit(1))
+	if err != nil {
+		return nil, fmt.Errorf("unable to list cephCluster(s) in namespace %s: %v", namespace, err)
+	}
+
+	if len(cephClusterList.Items) == 0 {
+		return nil, fmt.Errorf("no cephCluster found in namespace %s", namespace)
+	}
+
+	return &cephClusterList.Items[0], nil
+}
+
 func NewK8sClient(scheme *runtime.Scheme) (client.Client, error) {
 	klog.Info("Setting up k8s client")
diff --git a/services/provider/server/server.go b/services/provider/server/server.go
index 7b3e9354f8..df3c628769 100644
--- a/services/provider/server/server.go
+++ b/services/provider/server/server.go
@@ -741,8 +741,12 @@ func (s *OCSProviderServer) GetStorageClaimConfig(ctx context.Context, req *pb.S
 	var extR []*pb.ExternalResource
 
 	storageRequestHash := getStorageRequestHash(req.StorageConsumerUUID, req.StorageClaimName)
-	// SID for RamenDR
-	storageID := storageRequestHash
+
+	cephCluster, err := util.GetCephClusterInNamespace(ctx, s.client, s.namespace)
+	if err != nil {
+		return nil, err
+	}
+
 	replicationID := req.StorageClaimName
 
 	for _, cephRes := range storageRequest.Status.CephResources {
@@ -799,6 +803,18 @@ func (s *OCSProviderServer) GetStorageClaimConfig(ctx context.Context, req *pb.S
 				rbdStorageClassData["encryptionKMSID"] = storageRequest.Spec.EncryptionMethod
 			}
 
+			blockPool := &rookCephv1.CephBlockPool{}
+			err = s.client.Get(ctx, types.NamespacedName{Name: rns.Spec.BlockPoolName, Namespace: s.namespace}, blockPool)
+			if err != nil {
+				return nil, status.Errorf(codes.Internal, "failed to get %s CephBlockPool. %v", blockPool.Name, err)
+			}
+
+			// SID for RamenDR
+			storageID := calculateCephRbdStorageId(
+				cephCluster.Status.CephStatus.FSID,
+				strconv.Itoa(blockPool.Status.PoolID),
+				rns.Name)
+
 			extR = append(extR,
 				&pb.ExternalResource{
 					Name: "ceph-rbd",
@@ -915,6 +931,13 @@ func (s *OCSProviderServer) GetStorageClaimConfig(ctx context.Context, req *pb.S
 				kernelMountOptions[parts[0]] = parts[1]
 			}
 
+			// SID for RamenDR
+			storageID := calculateCephFsStorageId(
+				cephCluster.Status.CephStatus.FSID,
+				subVolumeGroup.Spec.FilesystemName,
+				subVolumeGroup.Name,
+			)
+
 			extR = append(extR,
 				&pb.ExternalResource{
 					Name: "cephfs",
@@ -1308,3 +1331,11 @@ func (s *OCSProviderServer) isConsumerMirrorEnabled(ctx context.Context, consume
 
 	return clientMappingConfig.Data[consumer.Status.Client.ID] != "", nil
 }
+
+func calculateCephRbdStorageId(cephfsid, poolID, radosnamespacename string) string {
+	return util.CalculateMD5Hash([3]string{cephfsid, poolID, radosnamespacename})
+}
+
+func calculateCephFsStorageId(cephfsid, fileSystemName, subVolumeGroupName string) string {
+	return util.CalculateMD5Hash([3]string{cephfsid, fileSystemName, subVolumeGroupName})
+}
diff --git a/services/provider/server/server_test.go b/services/provider/server/server_test.go
index b13edc004f..ac08ccedc9 100644
--- a/services/provider/server/server_test.go
+++ b/services/provider/server/server_test.go
@@ -673,7 +673,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
 			Name: "ceph-rbd",
 			Kind: "StorageClass",
 			Labels: map[string]string{
-				"ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c",
+				"ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46",
 			},
 			Data: map[string]string{
 				"clusterID": serverNamespace,
@@ -691,8 +691,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
 			Name: "ceph-rbd",
 			Kind: "VolumeSnapshotClass",
 			Labels: map[string]string{
-
-				"ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c",
+				"ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46",
 			},
 			Data: map[string]string{
 				"csi.storage.k8s.io/snapshotter-secret-name": "ceph-client-provisioner-8d40b6be71600457b5dec219d2ce2d4c",
@@ -702,7 +701,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
 			Name: "block-pool-claim-groupsnapclass",
 			Kind: "VolumeGroupSnapshotClass",
 			Labels: map[string]string{
-				"ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c",
+				"ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46",
 			},
 			Data: map[string]string{
 				"csi.storage.k8s.io/group-snapshotter-secret-name": "ceph-client-provisioner-8d40b6be71600457b5dec219d2ce2d4c",
@@ -715,7 +714,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
 			Kind: "VolumeReplicationClass",
 			Labels: map[string]string{
 				"ramendr.openshift.io/replicationid": "block-pool-claim",
-				"ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c",
+				"ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46",
 				"ramendr.openshift.io/maintenancemodes": "Failover",
 			},
 			Annotations: map[string]string{
@@ -737,7 +736,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
 			Labels: map[string]string{
 				"replication.storage.openshift.io/flatten-mode": "force",
 				"ramendr.openshift.io/replicationid": "block-pool-claim",
-				"ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c",
+				"ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46",
 				"ramendr.openshift.io/maintenancemodes": "Failover",
 			},
 			Data: &replicationv1alpha1.VolumeReplicationClassSpec{
@@ -783,7 +782,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
 			Name: "cephfs",
 			Kind: "StorageClass",
 			Labels: map[string]string{
-				"ramendr.openshift.io/storageid": "0e8555e6556f70d23a61675af44e880c",
+				"ramendr.openshift.io/storageid": "5b53ada3302d6e0d1025a7948ce45ba5",
 			},
 			Data: map[string]string{
 				"clusterID": "8d26c7378c1b0ec9c2455d1c3601c4cd",
@@ -799,7 +798,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
 			Name: "cephfs",
 			Kind: "VolumeSnapshotClass",
 			Labels: map[string]string{
-				"ramendr.openshift.io/storageid": "0e8555e6556f70d23a61675af44e880c",
+				"ramendr.openshift.io/storageid": "5b53ada3302d6e0d1025a7948ce45ba5",
 			},
 			Data: map[string]string{
 				"csi.storage.k8s.io/snapshotter-secret-name": "ceph-client-provisioner-0e8555e6556f70d23a61675af44e880c",
@@ -809,7 +808,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
 			Name: "shared-filesystem-claim-groupsnapclass",
 			Kind: "VolumeGroupSnapshotClass",
 			Labels: map[string]string{
-				"ramendr.openshift.io/storageid": "0e8555e6556f70d23a61675af44e880c",
+				"ramendr.openshift.io/storageid": "5b53ada3302d6e0d1025a7948ce45ba5",
 			},
 			Data: map[string]string{
 				"csi.storage.k8s.io/group-snapshotter-secret-name": "ceph-client-provisioner-0e8555e6556f70d23a61675af44e880c",
@@ -956,6 +955,17 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
 				AllowRemoteStorageConsumers: true,
 			},
 		}
+		cephCluster = &rookCephv1.CephCluster{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "mock-storage-cluster-cephcluster",
+				Namespace: serverNamespace,
+			},
+			Status: rookCephv1.ClusterStatus{
+				CephStatus: &rookCephv1.CephStatus{
+					FSID: "my-fsid",
+				},
+			},
+		}
 	)
 
 	ctx := context.TODO()
@@ -967,6 +977,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
 		claimResourceCreating,
 		claimResourceFailed,
 		storageClustersResource,
+		cephCluster,
 	}
 
 	// Create a fake client to mock API calls.
@@ -1138,7 +1149,11 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
 		Spec: rookCephv1.NamedBlockPoolSpec{
 			PoolSpec: rookCephv1.PoolSpec{
 				Mirroring: rookCephv1.MirroringSpec{Enabled: false},
-			}},
+			},
+		},
+		Status: &rookCephv1.CephBlockPoolStatus{
+			PoolID: 1,
+		},
 	}
 	assert.NoError(t, client.Create(ctx, cephBlockPool))