From 0b2a6cd9e1d9a39d1442d5001ac6bf574e4d4dd9 Mon Sep 17 00:00:00 2001 From: Praveen M Date: Thu, 3 Oct 2024 16:05:23 +0530 Subject: [PATCH] e2e: cephfs rados namespace test Signed-off-by: Praveen M --- e2e/cephfs.go | 112 +++++++++++++++++++++++++++++++++++++++++++ e2e/cephfs_helper.go | 9 ++++ e2e/configmap.go | 3 ++ e2e/utils.go | 40 +++++++--------- 4 files changed, 141 insertions(+), 23 deletions(-) diff --git a/e2e/cephfs.go b/e2e/cephfs.go index c6050441b930..a5a110407eb1 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -2491,6 +2491,118 @@ var _ = Describe(cephfsType, func() { } }) + By("verify rados objects are within a namespace", func() { + updateRadosNamespace := func(radosNamespaceName string) { + framework.Logf("updating configmap with rados namespace %s", radosNamespaceName) + radosNamespace = radosNamespaceName + err := deleteConfigMap(cephFSDirPath) + if err != nil { + framework.Failf("failed to delete configmap: %v", err) + } + err = createConfigMap(cephFSDirPath, f.ClientSet, f) + if err != nil { + framework.Failf("failed to create configmap: %v", err) + } + + // delete csi pods + err = deletePodWithLabel("app in (ceph-csi-cephfs, csi-cephfsplugin, csi-cephfsplugin-provisioner)", + cephCSINamespace, false) + if err != nil { + framework.Failf("failed to delete pods with labels: %v", err) + } + // wait for csi pods to come up + err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout) + if err != nil { + framework.Failf("timeout waiting for daemonset pods: %v", err) + } + err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout) + if err != nil { + framework.Failf("timeout waiting for deployment pods: %v", err) + } + } + + // radosNamespace is a global variable, so we need to save the old value + // and restore it after the test. 
+ oldRadosNamespace := radosNamespace + newRadosNamespace := "cephfs-ns" + + updateRadosNamespace(newRadosNamespace) + defer func() { + updateRadosNamespace(oldRadosNamespace) + }() + + err := deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + framework.Failf("failed to delete CephFS storageclass: %v", err) + } + err = createCephfsStorageClass(f.ClientSet, f, true, nil) + if err != nil { + framework.Failf("failed to create CephFS storageclass: %v", err) + } + // create a PVC and bind it to an app + pvc, pod, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) + if err != nil { + framework.Failf("failed to validate CephFS pvc and application binding: %v", err) + } + + // snapshot test + err = createCephFSSnapshotClass(f) + if err != nil { + framework.Failf("failed to create CephFS snapshot class: %v", err) + } + snap := getSnapshot(snapshotPath) + snap.Namespace = f.UniqueName + snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name + snap.Name = f.UniqueName + err = createSnapshot(&snap, deployTimeout) + if err != nil { + framework.Failf("failed to create snapshot (%s): %v", snap.Name, err) + } + + // restore pvc test + pvcClone, err := loadPVC(pvcClonePath) + if err != nil { + framework.Failf("failed to load PVC: %v", err) + } + pvcClone.Namespace = f.UniqueName + pvcClone.Spec.DataSource.Name = snap.Name + // create PVC from the snapshot + err = createPVCAndvalidatePV(f.ClientSet, pvcClone, deployTimeout) + if err != nil { + framework.Failf("failed to create pvc clone: %v", err) + } + + // validate OMAP count + validateOmapCount(f, 2, cephfsType, metadataPool, volumesType) + validateOmapCount(f, 1, cephfsType, metadataPool, snapsType) + + // delete resources + err = deletePod(pod.Name, pod.Namespace, f.ClientSet, deployTimeout) + if err != nil { + framework.Failf("failed to delete application: %v", err) + } + err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + framework.Failf("failed to delete 
PVC: %v", err) + } + err = deletePVCAndValidatePV(f.ClientSet, pvcClone, deployTimeout) + if err != nil { + framework.Failf("failed to delete pvc clone: %v", err) + } + err = deleteSnapshot(&snap, deployTimeout) + if err != nil { + framework.Failf("failed to delete snapshot (%s): %v", f.UniqueName, err) + } + err = deleteResource(cephFSExamplePath + "storageclass.yaml") + if err != nil { + framework.Failf("failed to delete CephFS storageclass: %v", err) + } + + // validate OMAP count + validateOmapCount(f, 0, cephfsType, metadataPool, volumesType) + validateOmapCount(f, 0, cephfsType, metadataPool, snapsType) + }) + // FIXME: in case NFS testing is done, prevent deletion // of the CephFS filesystem and related pool. This can // probably be addressed in a nicer way, making sure diff --git a/e2e/cephfs_helper.go b/e2e/cephfs_helper.go index ae6fb9f3901f..6deb976d10a8 100644 --- a/e2e/cephfs_helper.go +++ b/e2e/cephfs_helper.go @@ -187,6 +187,15 @@ func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeC return nil } +func cephfsOptions(pool string) string { + if radosNamespace != "" { + return "--pool=" + pool + " --namespace=" + radosNamespace + } + + // default namespace is csi + return "--pool=" + pool + " --namespace=csi" +} + type cephfsSubVolume struct { Name string `json:"name"` } diff --git a/e2e/configmap.go b/e2e/configmap.go index 6ad3978c5108..b230210503b6 100644 --- a/e2e/configmap.go +++ b/e2e/configmap.go @@ -60,6 +60,9 @@ func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Fra RBD: cephcsi.RBD{ RadosNamespace: radosNamespace, }, + CephFS: cephcsi.CephFS{ + RadosNamespace: radosNamespace, + }, ReadAffinity: cephcsi.ReadAffinity{ Enabled: true, CrushLocationLabels: []string{ diff --git a/e2e/utils.go b/e2e/utils.go index 0e29f53c1683..7e0f94d61288 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -174,13 +174,11 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str { 
volumeMode: volumesType, driverType: cephfsType, - radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool), - radosLsCmdFilter: fmt.Sprintf( - "rados ls --pool=%s --namespace csi | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.", - pool), - radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi", pool), - radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi | wc -l", - pool), + radosLsCmd: "rados ls " + cephfsOptions(pool), + radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.", + cephfsOptions(pool)), + radosLsKeysCmd: "rados listomapkeys csi.volumes.default " + cephfsOptions(pool), + radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", cephfsOptions(pool)), }, { volumeMode: volumesType, @@ -193,14 +191,12 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", rbdOptions(pool)), }, { - volumeMode: snapsType, - driverType: cephfsType, - radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool), - radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.snap.", - pool), - radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi", pool), - radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi | wc -l", - pool), + volumeMode: snapsType, + driverType: cephfsType, + radosLsCmd: "rados ls " + cephfsOptions(pool), + radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.snap.", cephfsOptions(pool)), + radosLsKeysCmd: "rados listomapkeys csi.snaps.default " + cephfsOptions(pool), + radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", cephfsOptions(pool)), }, { 
volumeMode: snapsType, @@ -211,14 +207,12 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", rbdOptions(pool)), }, { - volumeMode: groupSnapsType, - driverType: cephfsType, - radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool), - radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.volume.group.", - pool), - radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi", pool), - radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi | wc -l", - pool), + volumeMode: groupSnapsType, + driverType: cephfsType, + radosLsCmd: "rados ls " + cephfsOptions(pool), + radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.volume.group.", cephfsOptions(pool)), + radosLsKeysCmd: "rados listomapkeys csi.groups.default " + cephfsOptions(pool), + radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.groups.default %s | wc -l", cephfsOptions(pool)), }, }