Merge pull request #3008 from rewantsoni/ramenids
provider: update the storageID for ramen
openshift-merge-bot[bot] authored Feb 11, 2025
2 parents f622c19 + fb0ba8c commit 377e36d
Showing 4 changed files with 88 additions and 12 deletions.
15 changes: 15 additions & 0 deletions controllers/util/k8sutil.go
@@ -11,6 +11,7 @@ import (

"github.com/go-logr/logr"
configv1 "github.com/openshift/api/config/v1"
rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"golang.org/x/exp/maps"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
@@ -203,6 +204,20 @@ func GetStorageClusterInNamespace(ctx context.Context, cl client.Client, namespa
return &storageClusterList.Items[0], nil
}

func GetCephClusterInNamespace(ctx context.Context, cl client.Client, namespace string) (*rookCephv1.CephCluster, error) {
cephClusterList := &rookCephv1.CephClusterList{}
err := cl.List(ctx, cephClusterList, client.InNamespace(namespace), client.Limit(1))
if err != nil {
return nil, fmt.Errorf("unable to list cephCluster(s) in namespace %s: %v", namespace, err)
}

if len(cephClusterList.Items) == 0 {
return nil, fmt.Errorf("no cephCluster found in namespace %s", namespace)
}

return &cephClusterList.Items[0], nil
}

func NewK8sClient(scheme *runtime.Scheme) (client.Client, error) {
klog.Info("Setting up k8s client")

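For context, here is a minimal caller sketch for the new helper, assuming a controller-runtime client whose scheme registers the Rook Ceph types; the wrapper function, import paths, and namespace below are illustrative and not part of this PR:

package example

import (
	"context"
	"fmt"

	"github.com/red-hat-storage/ocs-operator/v4/controllers/util"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getCephFSID is an illustrative wrapper showing how GetCephClusterInNamespace
// can be used to read the cluster FSID that feeds the new RamenDR storage IDs.
func getCephFSID(ctx context.Context, cl client.Client, namespace string) (string, error) {
	cephCluster, err := util.GetCephClusterInNamespace(ctx, cl, namespace)
	if err != nil {
		return "", err
	}
	if cephCluster.Status.CephStatus == nil {
		return "", fmt.Errorf("cephCluster %s has not reported a Ceph status yet", cephCluster.Name)
	}
	return cephCluster.Status.CephStatus.FSID, nil
}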

Some generated files are not rendered by default.

35 changes: 33 additions & 2 deletions services/provider/server/server.go
@@ -741,8 +741,12 @@ func (s *OCSProviderServer) GetStorageClaimConfig(ctx context.Context, req *pb.S
var extR []*pb.ExternalResource

storageRequestHash := getStorageRequestHash(req.StorageConsumerUUID, req.StorageClaimName)
// SID for RamenDR
storageID := storageRequestHash

cephCluster, err := util.GetCephClusterInNamespace(ctx, s.client, s.namespace)
if err != nil {
return nil, err
}

replicationID := req.StorageClaimName

for _, cephRes := range storageRequest.Status.CephResources {
@@ -799,6 +803,18 @@ func (s *OCSProviderServer) GetStorageClaimConfig(ctx context.Context, req *pb.S
rbdStorageClassData["encryptionKMSID"] = storageRequest.Spec.EncryptionMethod
}

blockPool := &rookCephv1.CephBlockPool{}
err = s.client.Get(ctx, types.NamespacedName{Name: rns.Spec.BlockPoolName, Namespace: s.namespace}, blockPool)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to get %s CephBlockPool. %v", blockPool.Name, err)
}

// SID for RamenDR
storageID := calculateCephRbdStorageId(
cephCluster.Status.CephStatus.FSID,
strconv.Itoa(blockPool.Status.PoolID),
rns.Name)

extR = append(extR,
&pb.ExternalResource{
Name: "ceph-rbd",
@@ -915,6 +931,13 @@ func (s *OCSProviderServer) GetStorageClaimConfig(ctx context.Context, req *pb.S
kernelMountOptions[parts[0]] = parts[1]
}

// SID for RamenDR
storageID := calculateCephFsStorageId(
cephCluster.Status.CephStatus.FSID,
subVolumeGroup.Spec.FilesystemName,
subVolumeGroup.Name,
)

extR = append(extR,
&pb.ExternalResource{
Name: "cephfs",
@@ -1308,3 +1331,11 @@ func (s *OCSProviderServer) isConsumerMirrorEnabled(ctx context.Context, consume

return clientMappingConfig.Data[consumer.Status.Client.ID] != "", nil
}

func calculateCephRbdStorageId(cephfsid, poolID, radosnamespacename string) string {
return util.CalculateMD5Hash([3]string{cephfsid, poolID, radosnamespacename})
}

func calculateCephFsStorageId(cephfsid, fileSystemName, subVolumeGroupName string) string {
return util.CalculateMD5Hash([3]string{cephfsid, fileSystemName, subVolumeGroupName})
}
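util.CalculateMD5Hash itself is not part of this diff; below is a minimal sketch of such a helper, assuming it hashes the JSON encoding of its argument (the repository's actual implementation may encode the input differently):

package util

import (
	"crypto/md5"
	"encoding/json"
	"fmt"
)

// CalculateMD5Hash returns the hex MD5 digest of the JSON encoding of value.
// Sketch only, shown so the storage-ID derivation above is self-contained;
// MD5 is used here as a stable identifier, not for any security purpose.
func CalculateMD5Hash(value any) string {
	data, err := json.Marshal(value)
	if err != nil {
		// The callers above pass fixed-size string arrays, so marshalling should not fail.
		panic(fmt.Sprintf("failed to marshal value for hashing: %v", err))
	}
	return fmt.Sprintf("%x", md5.Sum(data))
}

With this shape, the RBD storage ID is stable per (cluster FSID, pool ID, RADOS namespace) and the CephFS storage ID per (cluster FSID, filesystem, subvolume group), instead of varying per storage claim as the old request-hash-based value did.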
35 changes: 25 additions & 10 deletions services/provider/server/server_test.go
@@ -673,7 +673,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
Name: "ceph-rbd",
Kind: "StorageClass",
Labels: map[string]string{
"ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c",
"ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46",
},
Data: map[string]string{
"clusterID": serverNamespace,
@@ -691,8 +691,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
Name: "ceph-rbd",
Kind: "VolumeSnapshotClass",
Labels: map[string]string{

"ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c",
"ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46",
},
Data: map[string]string{
"csi.storage.k8s.io/snapshotter-secret-name": "ceph-client-provisioner-8d40b6be71600457b5dec219d2ce2d4c",
@@ -702,7 +701,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
Name: "block-pool-claim-groupsnapclass",
Kind: "VolumeGroupSnapshotClass",
Labels: map[string]string{
"ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c",
"ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46",
},
Data: map[string]string{
"csi.storage.k8s.io/group-snapshotter-secret-name": "ceph-client-provisioner-8d40b6be71600457b5dec219d2ce2d4c",
@@ -715,7 +714,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
Kind: "VolumeReplicationClass",
Labels: map[string]string{
"ramendr.openshift.io/replicationid": "block-pool-claim",
"ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c",
"ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46",
"ramendr.openshift.io/maintenancemodes": "Failover",
},
Annotations: map[string]string{
@@ -737,7 +736,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
Labels: map[string]string{
"replication.storage.openshift.io/flatten-mode": "force",
"ramendr.openshift.io/replicationid": "block-pool-claim",
"ramendr.openshift.io/storageid": "8d40b6be71600457b5dec219d2ce2d4c",
"ramendr.openshift.io/storageid": "854666c7477123fb05f20bf615e69a46",
"ramendr.openshift.io/maintenancemodes": "Failover",
},
Data: &replicationv1alpha1.VolumeReplicationClassSpec{
@@ -783,7 +782,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
Name: "cephfs",
Kind: "StorageClass",
Labels: map[string]string{
"ramendr.openshift.io/storageid": "0e8555e6556f70d23a61675af44e880c",
"ramendr.openshift.io/storageid": "5b53ada3302d6e0d1025a7948ce45ba5",
},
Data: map[string]string{
"clusterID": "8d26c7378c1b0ec9c2455d1c3601c4cd",
@@ -799,7 +798,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
Name: "cephfs",
Kind: "VolumeSnapshotClass",
Labels: map[string]string{
"ramendr.openshift.io/storageid": "0e8555e6556f70d23a61675af44e880c",
"ramendr.openshift.io/storageid": "5b53ada3302d6e0d1025a7948ce45ba5",
},
Data: map[string]string{
"csi.storage.k8s.io/snapshotter-secret-name": "ceph-client-provisioner-0e8555e6556f70d23a61675af44e880c",
@@ -809,7 +808,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
Name: "shared-filesystem-claim-groupsnapclass",
Kind: "VolumeGroupSnapshotClass",
Labels: map[string]string{
"ramendr.openshift.io/storageid": "0e8555e6556f70d23a61675af44e880c",
"ramendr.openshift.io/storageid": "5b53ada3302d6e0d1025a7948ce45ba5",
},
Data: map[string]string{
"csi.storage.k8s.io/group-snapshotter-secret-name": "ceph-client-provisioner-0e8555e6556f70d23a61675af44e880c",
@@ -956,6 +955,17 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
AllowRemoteStorageConsumers: true,
},
}
cephCluster = &rookCephv1.CephCluster{
ObjectMeta: metav1.ObjectMeta{
Name: "mock-storage-cluster-cephcluster",
Namespace: serverNamespace,
},
Status: rookCephv1.ClusterStatus{
CephStatus: &rookCephv1.CephStatus{
FSID: "my-fsid",
},
},
}
)

ctx := context.TODO()
@@ -967,6 +977,7 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
claimResourceCreating,
claimResourceFailed,
storageClustersResource,
cephCluster,
}

// Create a fake client to mock API calls.
@@ -1138,7 +1149,11 @@ func TestOCSProviderServerGetStorageClaimConfig(t *testing.T) {
Spec: rookCephv1.NamedBlockPoolSpec{
PoolSpec: rookCephv1.PoolSpec{
Mirroring: rookCephv1.MirroringSpec{Enabled: false},
}},
},
},
Status: &rookCephv1.CephBlockPoolStatus{
PoolID: 1,
},
}
assert.NoError(t, client.Create(ctx, cephBlockPool))

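To exercise the updated expectations locally, a standard Go test invocation for the changed package would look something like this (package path and test name taken from the diff above):

go test ./services/provider/server/... -run TestOCSProviderServerGetStorageClaimConfig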