diff --git a/ci/ci-test.sh b/ci/ci-test.sh
index 5e9c8b21..cae7c33d 100755
--- a/ci/ci-test.sh
+++ b/ci/ci-test.sh
@@ -81,14 +81,14 @@ cleanup() {
 
 # setup the lvm volume group to create the volume
 cleanup_lvmvg
-truncate -s 1024G /tmp/openebs_ci_disk.img
+truncate -s 100G /tmp/openebs_ci_disk.img
 disk="$(sudo losetup -f /tmp/openebs_ci_disk.img --show)"
 sudo pvcreate "${disk}"
 sudo vgcreate lvmvg "${disk}"
 
 # setup a foreign lvm to test
 cleanup_foreign_lvmvg
-truncate -s 1024G /tmp/openebs_ci_foreign_disk.img
+truncate -s 100G /tmp/openebs_ci_foreign_disk.img
 foreign_disk="$(sudo losetup -f /tmp/openebs_ci_foreign_disk.img --show)"
 sudo pvcreate "${foreign_disk}"
 sudo vgcreate foreign_lvmvg "${foreign_disk}" --config="${FOREIGN_LVM_CONFIG}"
@@ -97,6 +97,10 @@ sudo vgcreate foreign_lvmvg "${foreign_disk}" --config="${FOREIGN_LVM_CONFIG}"
 sudo modprobe dm-snapshot
 sudo modprobe dm_thin_pool
 
+# Set the configuration for thin pool autoextend in lvm.conf
+sudo sed -i '/^[^#]*thin_pool_autoextend_threshold/ s/= .*/= 50/' /etc/lvm/lvm.conf
+sudo sed -i '/^[^#]*thin_pool_autoextend_percent/ s/= .*/= 20/' /etc/lvm/lvm.conf
+
 # Prepare env for running BDD tests
 # Minikube is already running
 kubectl apply -f "${LVM_OPERATOR}"
diff --git a/pkg/collector/lv_collector.go b/pkg/collector/lv_collector.go
index 874f0289..d7fd8556 100644
--- a/pkg/collector/lv_collector.go
+++ b/pkg/collector/lv_collector.go
@@ -119,7 +119,7 @@ func (c *lvCollector) Describe(ch chan<- *prometheus.Desc) {
 }
 
 func (c *lvCollector) Collect(ch chan<- prometheus.Metric) {
-	lvList, err := lvm.ListLVMLogicalVolume()
+	lvList, err := lvm.ListLVMLogicalVolume("")
 	if err != nil {
 		klog.Errorf("error in getting the list of lvm logical volumes: %v", err)
 	} else {
diff --git a/pkg/lvm/lvm_util.go b/pkg/lvm/lvm_util.go
index ec24d434..af8e900e 100644
--- a/pkg/lvm/lvm_util.go
+++ b/pkg/lvm/lvm_util.go
@@ -53,6 +53,7 @@ const (
 	LVCreate = "lvcreate"
 	LVRemove = "lvremove"
 	LVExtend = "lvextend"
+	LVChange = "lvchange"
 
 	LVList = "lvs"
 	PVList = "pvs"
@@ -60,6 +61,7 @@ const (
 
 	YES        = "yes"
 	LVThinPool = "thin-pool"
+	SUDO       = "sudo"
 )
 
 var (
@@ -868,13 +870,49 @@ func decodeLvsJSON(raw []byte) ([]LogicalVolume, error) {
 	return lvs, nil
 }
 
-func ListLVMLogicalVolume() ([]LogicalVolume, error) {
+// ChangeLVMLogicalVolumeAttrs changes the attributes of an LV. The caller
+// provides the args list since we won't know what attrs need changing and
+// to what values.
+func ChangeLVMLogicalVolumeAttrs(lvname string, args []string) error {
+	cmd := LVChange
+
+	// prepend lvname so it precedes the attribute flags
+	args = append([]string{lvname}, args...)
+
+	// Are we running privileged or not?
+	if os.Geteuid() != 0 {
+		args = append([]string{cmd}, args...)
+		cmd = SUDO
+	}
+
+	_, _, err := RunCommandSplit(cmd, args...)
+	if err != nil {
+		klog.Errorf("lvm: error while running command %s %v: %v", LVChange, args, err)
+		return err
+	}
+
+	return nil
+}
+
+// ListLVMLogicalVolume gets all, or a particular, LVM LV. If lvname is empty, all LVs are listed, otherwise only the requested one.
+func ListLVMLogicalVolume(lvname string) ([]LogicalVolume, error) {
+	cmd := LVList
 	args := []string{
 		"--options", "lv_all,vg_name,segtype",
 		"--reportformat", "json",
 		"--units", "b",
+	}
+	// Only restrict the listing when a specific LV was requested; an empty argument would make lvs fail rather than list everything.
+	if lvname != "" {
+		args = append(args, lvname)
+	}
+
+	// Are we running privileged or not?
+	if os.Geteuid() != 0 {
+		args = append([]string{cmd}, args...)
+		cmd = SUDO
 	}
-	output, _, err := RunCommandSplit(LVList, args...)
+
+	output, _, err := RunCommandSplit(cmd, args...)
 	if err != nil {
 		klog.Errorf("lvm: error while running command %s %v: %v", LVList, args, err)
 		return nil, err
@@ -891,12 +929,20 @@ func ListLVMPhysicalVolume() ([]PhysicalVolume, error) {
 		return nil, err
 	}
 
+	cmd := PVList
 	args := []string{
 		"--options", "pv_all,vg_name",
 		"--reportformat", "json",
 		"--units", "b",
 	}
-	output, _, err := RunCommandSplit(PVList, args...)
+
+	// Are we running privileged or not?
+	if os.Geteuid() != 0 {
+		args = append([]string{cmd}, args...)
+		cmd = SUDO
+	}
+
+	output, _, err := RunCommandSplit(cmd, args...)
 	if err != nil {
 		klog.Errorf("lvm: error while running command %s %v: %v", PVList, args, err)
 		return nil, err
diff --git a/tests/provision_test.go b/tests/provision_test.go
index 56c6e728..5fa319d0 100644
--- a/tests/provision_test.go
+++ b/tests/provision_test.go
@@ -94,6 +94,20 @@ func thinVolCreationTest() {
 	By("Deleting thinProvision storage class", deleteStorageClass)
 }
 
+func thinVolCapacityTest() {
+	By("Creating thinProvision storage class", createThinStorageClass)
+	By("creating and verifying PVC bound status", createAndVerifyPVC)
+	By("enabling thinpool monitoring", enableThinpoolMonitoring)
+	By("Creating and deploying app pod", createDeployVerifyApp)
+	By("verifying thinpool auto-extended", VerifyThinpoolExtend)
+	By("verifying LVMVolume object", VerifyLVMVolume)
+	By("Deleting application deployment")
+	deleteAppDeployment(appName)
+	By("Deleting pvc")
+	deleteAndVerifyPVC(pvcName)
+	By("Deleting thinProvision storage class", deleteStorageClass)
+}
+
 func leakProtectionTest() {
 	By("Creating default storage class", createStorageClass)
 	ds := deleteNodeDaemonSet() // ensure that provisioning remains in pending state.
@@ -116,8 +130,9 @@ func leakProtectionTest() {
 }
 
 func volumeCreationTest() {
-	By("Running volume creation test", fsVolCreationTest)
+	By("Running filesystem volume creation test", fsVolCreationTest)
 	By("Running block volume creation test", blockVolCreationTest)
 	By("Running thin volume creation test", thinVolCreationTest)
+	By("Running thin volume capacity test", thinVolCapacityTest)
 	By("Running leak protection test", leakProtectionTest)
 }
diff --git a/tests/suite_test.go b/tests/suite_test.go
index bbb96f8f..2e950139 100644
--- a/tests/suite_test.go
+++ b/tests/suite_test.go
@@ -65,15 +65,16 @@ var (
 	nodeDaemonSet        = "openebs-lvm-node"
 	controllerDeployment = "openebs-lvm-controller"
 
 	nsObj            *corev1.Namespace
 	scObj            *storagev1.StorageClass
 	deployObj        *appsv1.Deployment
 	pvcObj           *corev1.PersistentVolumeClaim
 	appPod           *corev1.PodList
 	accessModes      = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}
 	capacity         = "5368709120" // 5Gi
+	expandedCapacity = "6442450944" // 6Gi
 	KubeConfigPath   string
 	OpenEBSNamespace string
 )
 
 func init() {
diff --git a/tests/utils.go b/tests/utils.go
index c7b42428..821a8593 100644
--- a/tests/utils.go
+++ b/tests/utils.go
@@ -19,6 +19,7 @@ package tests
 import (
 	"context"
 	"fmt"
+	"strconv"
 	"time"
 
 	"github.com/onsi/ginkgo"
@@ -706,3 +707,38 @@ func createNodeDaemonSet(ds *appsv1.DaemonSet) {
 	gomega.Expect(err).To(
 		gomega.BeNil(), "creating node plugin daemonset %v", nodeDaemonSet)
 }
+
+// enableThinpoolMonitoring turns on dmeventd monitoring for the thinpool
+// backing the test PVC, so lvm can auto-extend it per lvm.conf settings.
+func enableThinpoolMonitoring() {
+	lv := VOLGROUP + "/" + pvcObj.Spec.VolumeName
+	vol, err := lvm.ListLVMLogicalVolume(lv)
+
+	gomega.Expect(err).To(gomega.BeNil(), "list LVM LV")
+	gomega.Expect(len(vol)).To(gomega.Equal(1), "retrieve LVM LV")
+
+	thinpool := VOLGROUP + "/" + vol[0].PoolName
+
+	args := []string{
+		"--monitor", "y",
+	}
+
+	err = lvm.ChangeLVMLogicalVolumeAttrs(thinpool, args)
+	gomega.Expect(err).To(gomega.BeNil(), "run lvchange command")
+}
+
+// VerifyThinpoolExtend checks that the thinpool backing the test PVC was
+// auto-extended to the expected size.
+func VerifyThinpoolExtend() {
+	expectSize, _ := strconv.ParseInt(expandedCapacity, 10, 64)
+	lv := VOLGROUP + "/" + pvcObj.Spec.VolumeName
+	vol, err := lvm.ListLVMLogicalVolume(lv)
+
+	gomega.Expect(err).To(gomega.BeNil(), "list LVM LV")
+	gomega.Expect(len(vol)).To(gomega.Equal(1), "retrieve LVM LV")
+
+	thinpool := VOLGROUP + "/" + vol[0].PoolName
+	vol, err = lvm.ListLVMLogicalVolume(thinpool)
+	gomega.Expect(err).To(gomega.BeNil(), "list LVM thinpool LV")
+	gomega.Expect(len(vol)).To(gomega.Equal(1), "retrieve LVM thinpool LV")
+
+	// This expectation is a factor of the lvm.conf settings we do from ci-test.sh
+	// and the original volume size.
+	gomega.Expect(vol[0].Size).To(gomega.Equal(expectSize))
+}