From 5012cee4ebe4535ba116f8d084e95fa53860c1be Mon Sep 17 00:00:00 2001
From: Adrian Moisey
Date: Mon, 16 Dec 2024 07:23:51 +0200
Subject: [PATCH 1/3] VPA Disable v1beta2 API

---
 .../pkg/apis/autoscaling.k8s.io/v1beta2/doc.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/doc.go b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/doc.go
index 961d29e25242..235e9d31b1cf 100644
--- a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/doc.go
+++ b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/doc.go
@@ -18,5 +18,5 @@ limitations under the License.
 
 // Package v1beta2 contains definitions of Vertical Pod Autoscaler related objects.
 // +groupName=autoscaling.k8s.io
-// +kubebuilder:object:generate=true
+// +kubebuilder:skip
 package v1beta2

From 9c703de85b7c254e92909e3b8775b2b9327c5be7 Mon Sep 17 00:00:00 2001
From: Adrian Moisey
Date: Mon, 16 Dec 2024 07:27:11 +0200
Subject: [PATCH 2/3] Remove v1beta2 from e2e tests

---
 .../e2e/v1beta2/actuation.go                  | 615 -----------------
 .../e2e/v1beta2/admission_controller.go       | 573 ----------------
 .../e2e/v1beta2/autoscaling_utils.go          | 458 -------------
 vertical-pod-autoscaler/e2e/v1beta2/common.go | 621 ------------------
 vertical-pod-autoscaler/e2e/v1beta2/e2e.go    | 345 ----------
 .../e2e/v1beta2/e2e_test.go                   |  77 ---
 .../e2e/v1beta2/full_vpa.go                   | 217 ------
 .../e2e/v1beta2/recommender.go                | 417 ------------
 .../e2e/v1beta2/updater.go                    |  93 ---
 vertical-pod-autoscaler/hack/run-e2e-tests.sh |   8 +-
 10 files changed, 1 insertion(+), 3423 deletions(-)
 delete mode 100644 vertical-pod-autoscaler/e2e/v1beta2/actuation.go
 delete mode 100644 vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
 delete mode 100644 vertical-pod-autoscaler/e2e/v1beta2/autoscaling_utils.go
 delete mode 100644 vertical-pod-autoscaler/e2e/v1beta2/common.go
 delete mode 100644 vertical-pod-autoscaler/e2e/v1beta2/e2e.go
 delete mode 100644 vertical-pod-autoscaler/e2e/v1beta2/e2e_test.go
 delete mode 100644 vertical-pod-autoscaler/e2e/v1beta2/full_vpa.go
 delete mode 100644 vertical-pod-autoscaler/e2e/v1beta2/recommender.go
 delete mode 100644 vertical-pod-autoscaler/e2e/v1beta2/updater.go

diff --git a/vertical-pod-autoscaler/e2e/v1beta2/actuation.go b/vertical-pod-autoscaler/e2e/v1beta2/actuation.go
deleted file mode 100644
index f5ac6c9fbe7c..000000000000
--- a/vertical-pod-autoscaler/e2e/v1beta2/actuation.go
+++ /dev/null
@@ -1,615 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package autoscaling - -import ( - "context" - "fmt" - "time" - - appsv1 "k8s.io/api/apps/v1" - autoscaling "k8s.io/api/autoscaling/v1" - apiv1 "k8s.io/api/core/v1" - policyv1 "k8s.io/api/policy/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/autoscaler/vertical-pod-autoscaler/e2e/utils" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" - "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/annotations" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" - framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment" - framework_job "k8s.io/kubernetes/test/e2e/framework/job" - framework_rc "k8s.io/kubernetes/test/e2e/framework/rc" - framework_rs "k8s.io/kubernetes/test/e2e/framework/replicaset" - framework_ss "k8s.io/kubernetes/test/e2e/framework/statefulset" - testutils "k8s.io/kubernetes/test/utils" - podsecurity "k8s.io/pod-security-admission/api" - - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -var _ = ActuationSuiteE2eDescribe("Actuation", func() { - f := framework.NewDefaultFramework("vertical-pod-autoscaling") - f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline - - ginkgo.It("stops when pods get pending", func() { - - ginkgo.By("Setting up a hamster deployment") - d := SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas) - - ginkgo.By("Setting up a VPA CRD with ridiculous request") - SetupVPA(f, "9999", vpa_types.UpdateModeAuto, hamsterTargetRef) // Request 9999 CPUs to make POD pending - - ginkgo.By("Waiting for pods to be restarted and stuck pending") - err := assertPodsPendingForDuration(f.ClientSet, d, 1, 2*time.Minute) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - }) - - ginkgo.It("never applies recommendations when update mode is Off", func() { - ginkgo.By("Setting up a hamster deployment") - d := SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas) - cpuRequest := getCPURequest(d.Spec.Template.Spec) - podList, err := GetHamsterPods(f) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - podSet := MakePodSet(podList) - - ginkgo.By("Setting up a VPA CRD in mode Off") - SetupVPA(f, "200m", vpa_types.UpdateModeOff, hamsterTargetRef) - - ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) - CheckNoPodsEvicted(f, podSet) - ginkgo.By("Forcefully killing one pod") - killPod(f, podList) - - ginkgo.By("Checking the requests were not modified") - updatedPodList, err := GetHamsterPods(f) - for _, pod := range updatedPodList.Items { - gomega.Expect(getCPURequest(pod.Spec)).To(gomega.Equal(cpuRequest)) - } - }) - - ginkgo.It("applies recommendations only on restart when update mode is Initial", func() { - ginkgo.By("Setting up a hamster deployment") - SetupHamsterDeployment(f, "100m", "100Mi", defaultHamsterReplicas) - podList, err := GetHamsterPods(f) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - podSet := MakePodSet(podList) - - ginkgo.By("Setting up a VPA CRD in mode Initial") - SetupVPA(f, "200m", vpa_types.UpdateModeInitial, hamsterTargetRef) - updatedCPURequest := ParseQuantityOrDie("200m") - - ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) - CheckNoPodsEvicted(f, podSet) - ginkgo.By("Forcefully 
killing one pod") - killPod(f, podList) - - ginkgo.By("Checking that request was modified after forceful restart") - updatedPodList, err := GetHamsterPods(f) - foundUpdated := 0 - for _, pod := range updatedPodList.Items { - podRequest := getCPURequest(pod.Spec) - framework.Logf("podReq: %v", podRequest) - if podRequest.Cmp(updatedCPURequest) == 0 { - foundUpdated += 1 - } - } - gomega.Expect(foundUpdated).To(gomega.Equal(1)) - }) - - ginkgo.It("evicts pods in a Deployment", func() { - testEvictsPods(f, &autoscaling.CrossVersionObjectReference{ - APIVersion: "apps/v1", - Kind: "Deployment", - Name: "hamster-deployment", - }) - }) - - ginkgo.It("evicts pods in a Replication Controller", func() { - testEvictsPods(f, &autoscaling.CrossVersionObjectReference{ - APIVersion: "v1", - Kind: "ReplicationController", - Name: "hamster-rc", - }) - }) - - ginkgo.It("evicts pods in a Job", func() { - testEvictsPods(f, &autoscaling.CrossVersionObjectReference{ - APIVersion: "batch/v1", - Kind: "Job", - Name: "hamster-job", - }) - }) - - ginkgo.It("evicts pods in a CronJob", func() { - testEvictsPods(f, &autoscaling.CrossVersionObjectReference{ - APIVersion: "batch/v1", - Kind: "CronJob", - Name: "hamster-cronjob", - }) - }) - - ginkgo.It("evicts pods in a ReplicaSet", func() { - testEvictsPods(f, &autoscaling.CrossVersionObjectReference{ - APIVersion: "apps/v1", - Kind: "ReplicaSet", - Name: "hamster-rs", - }) - }) - - ginkgo.It("evicts pods in a StatefulSet", func() { - testEvictsPods(f, &autoscaling.CrossVersionObjectReference{ - APIVersion: "apps/v1", - Kind: "StatefulSet", - Name: "hamster-stateful", - }) - }) - - ginkgo.It("observes pod disruption budget", func() { - - ginkgo.By("Setting up a hamster deployment") - c := f.ClientSet - ns := f.Namespace.Name - - SetupHamsterDeployment(f, "10m", "10Mi", 10) - podList, err := GetHamsterPods(f) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - podSet := MakePodSet(podList) - - ginkgo.By("Setting up prohibitive PDB for hamster deployment") - pdb := setupPDB(f, "hamster-pdb", 0 /* maxUnavailable */) - - ginkgo.By("Setting up a VPA CRD") - SetupVPA(f, "25m", vpa_types.UpdateModeAuto, hamsterTargetRef) - - ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) - CheckNoPodsEvicted(f, podSet) - - ginkgo.By("Updating the PDB to allow for multiple pods to be evicted") - // We will check that 7 replicas are evicted in 3 minutes, which translates - // to 3 updater loops. This gives us relatively good confidence that updater - // evicts more than one pod in a loop if PDB allows it. 
- permissiveMaxUnavailable := 7 - // Creating new PDB and removing old one, since PDBs are immutable at the moment - setupPDB(f, "hamster-pdb-2", permissiveMaxUnavailable) - err = c.PolicyV1().PodDisruptionBudgets(ns).Delete(context.TODO(), pdb.Name, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, sleep for %s", VpaEvictionTimeout.String())) - time.Sleep(VpaEvictionTimeout) - ginkgo.By("Checking enough pods were evicted.") - currentPodList, err := GetHamsterPods(f) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - evictedCount := GetEvictedPodsCount(MakePodSet(currentPodList), podSet) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue()) - }) - - ginkgo.It("observes container max in LimitRange", func() { - ginkgo.By("Setting up a hamster deployment") - d := NewHamsterDeploymentWithResourcesAndLimits(f, - ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ - ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) - podList := startDeploymentPods(f, d) - - ginkgo.By("Setting up a VPA CRD") - SetupVPA(f, "200m", vpa_types.UpdateModeAuto, hamsterTargetRef) - - // Max CPU limit is 300m and ratio is 3., so max request is 100m, while - // recommendation is 200m - // Max memory limit is 1T and ratio is 2., so max request is 0.5T - InstallLimitRangeWithMax(f, "300m", "1T", apiv1.LimitTypeContainer) - - ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) - CheckNoPodsEvicted(f, MakePodSet(podList)) - }) - - ginkgo.It("observes container min in LimitRange", func() { - ginkgo.By("Setting up a hamster deployment") - d := NewHamsterDeploymentWithResourcesAndLimits(f, - ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ - ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) - podList := startDeploymentPods(f, d) - - ginkgo.By("Setting up a VPA CRD") - SetupVPA(f, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef) - - // Min CPU from limit range is 100m and ratio is 3. 
Min applies both to limit and request so min - // request is 100m request and 300m limit - // Min memory limit is 0 and ratio is 2., so min request is 0 - InstallLimitRangeWithMin(f, "100m", "0", apiv1.LimitTypeContainer) - - ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) - CheckNoPodsEvicted(f, MakePodSet(podList)) - }) - - ginkgo.It("observes pod max in LimitRange", func() { - ginkgo.By("Setting up a hamster deployment") - d := NewHamsterDeploymentWithResourcesAndLimits(f, - ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ - ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) - d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0]) - d.Spec.Template.Spec.Containers[1].Name = "hamster2" - podList := startDeploymentPods(f, d) - - ginkgo.By("Setting up a VPA CRD") - SetupVPAForNHamsters(f, 2, "200m", vpa_types.UpdateModeAuto, hamsterTargetRef) - - // Max CPU limit is 600m per pod, 300m per container and ratio is 3., so max request is 100m, - // while recommendation is 200m - // Max memory limit is 2T per pod, 1T per container and ratio is 2., so max request is 0.5T - InstallLimitRangeWithMax(f, "600m", "2T", apiv1.LimitTypePod) - - ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) - CheckNoPodsEvicted(f, MakePodSet(podList)) - }) - - ginkgo.It("observes pod min in LimitRange", func() { - ginkgo.By("Setting up a hamster deployment") - d := NewHamsterDeploymentWithResourcesAndLimits(f, - ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ - ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) - d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0]) - d.Spec.Template.Spec.Containers[1].Name = "hamster2" - podList := startDeploymentPods(f, d) - - ginkgo.By("Setting up a VPA CRD") - SetupVPAForNHamsters(f, 2, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef) - - // Min CPU from limit range is 200m per pod, 100m per container and ratio is 3. Min applies both - // to limit and request so min request is 100m request and 300m limit - // Min memory limit is 0 and ratio is 2., so min request is 0 - InstallLimitRangeWithMin(f, "200m", "0", apiv1.LimitTypePod) - - ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) - CheckNoPodsEvicted(f, MakePodSet(podList)) - }) - - ginkgo.It("does not act on injected sidecars", func() { - const ( - agnhostImage = "registry.k8s.io/e2e-test-images/agnhost:2.40" - sidecarParam = "--sidecar-image=registry.k8s.io/pause:3.1" - sidecarName = "webhook-added-sidecar" - servicePort = int32(8443) - containerPort = int32(8444) - ) - - ginkgo.By("Setting up Webhook for sidecar injection") - - client := f.ClientSet - namespaceName := f.Namespace.Name - defer utils.CleanWebhookTest(client, namespaceName) - - // Make sure the namespace created for the test is labeled to be selected by the webhooks. 
- utils.LabelNamespace(f, f.Namespace.Name) - utils.CreateWebhookConfigurationReadyNamespace(f) - - ginkgo.By("Setting up server cert") - context := utils.SetupWebhookCert(namespaceName) - utils.CreateAuthReaderRoleBinding(f, namespaceName) - - utils.DeployWebhookAndService(f, agnhostImage, context, servicePort, containerPort, sidecarParam) - - // Webhook must be placed after vpa webhook. Webhooks are registered alphabetically. - // Use name that starts with "z". - webhookCleanup := utils.RegisterMutatingWebhookForPod(f, "z-sidecar-injection-webhook", context, servicePort) - defer webhookCleanup() - - ginkgo.By("Setting up a hamster vpa") - - mode := vpa_types.UpdateModeAuto - hamsterResourceList := apiv1.ResourceList{apiv1.ResourceCPU: ParseQuantityOrDie("100m")} - sidecarResourceList := apiv1.ResourceList{apiv1.ResourceCPU: ParseQuantityOrDie("5000m")} - - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - vpaCRD.Spec.UpdatePolicy.UpdateMode = &mode - - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{ - { - ContainerName: GetHamsterContainerNameByIndex(0), - Target: hamsterResourceList, - LowerBound: hamsterResourceList, - UpperBound: hamsterResourceList, - }, - { - ContainerName: sidecarName, - Target: sidecarResourceList, - LowerBound: sidecarResourceList, - UpperBound: sidecarResourceList, - }, - }, - } - - InstallVPA(f, vpaCRD) - - ginkgo.By("Setting up a hamster deployment") - - d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m"), ParseQuantityOrDie("100Mi")) - podList := startDeploymentPods(f, d) - for _, pod := range podList.Items { - observedContainers, ok := pod.GetAnnotations()[annotations.VpaObservedContainersLabel] - gomega.Expect(ok).To(gomega.Equal(true)) - containers, err := annotations.ParseVpaObservedContainersValue(observedContainers) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(containers).To(gomega.HaveLen(1)) - gomega.Expect(pod.Spec.Containers).To(gomega.HaveLen(2)) - } - - podSet := MakePodSet(podList) - ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) - CheckNoPodsEvicted(f, podSet) - }) -}) - -func getCPURequest(podSpec apiv1.PodSpec) resource.Quantity { - return podSpec.Containers[0].Resources.Requests[apiv1.ResourceCPU] -} - -func killPod(f *framework.Framework, podList *apiv1.PodList) { - f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podList.Items[0].Name, metav1.DeleteOptions{}) - err := WaitForPodsRestarted(f, podList) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) -} - -// assertPodsPendingForDuration checks that at most pendingPodsNum pods are pending for pendingDuration -func assertPodsPendingForDuration(c clientset.Interface, deployment *appsv1.Deployment, pendingPodsNum int, pendingDuration time.Duration) error { - - pendingPods := make(map[string]time.Time) - - err := wait.PollImmediate(pollInterval, pollTimeout+pendingDuration, func() (bool, error) { - var err error - currentPodList, err := framework_deployment.GetPodsForDeployment(context.TODO(), c, deployment) - if err != nil { - return false, err - } - - missingPods := make(map[string]bool) - for podName := range pendingPods { - missingPods[podName] = true - } - - now := time.Now() - for _, pod := range currentPodList.Items { - delete(missingPods, pod.Name) - switch pod.Status.Phase { - case apiv1.PodPending: - _, ok := pendingPods[pod.Name] - if !ok { - 
pendingPods[pod.Name] = now - } - default: - delete(pendingPods, pod.Name) - } - } - - for missingPod := range missingPods { - delete(pendingPods, missingPod) - } - - if len(pendingPods) < pendingPodsNum { - return false, nil - } - - if len(pendingPods) > pendingPodsNum { - return false, fmt.Errorf("%v pending pods seen - expecting %v", len(pendingPods), pendingPodsNum) - } - - for p, t := range pendingPods { - fmt.Println("task", now, p, t, now.Sub(t), pendingDuration) - if now.Sub(t) < pendingDuration { - return false, nil - } - } - - return true, nil - }) - - if err != nil { - return fmt.Errorf("assertion failed for pending pods in %v: %v", deployment.Name, err) - } - return nil -} - -func testEvictsPods(f *framework.Framework, controller *autoscaling.CrossVersionObjectReference) { - ginkgo.By(fmt.Sprintf("Setting up a hamster %v", controller.Kind)) - setupHamsterController(f, controller.Kind, "100m", "100Mi", defaultHamsterReplicas) - podList, err := GetHamsterPods(f) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Setting up a VPA CRD") - SetupVPA(f, "200m", vpa_types.UpdateModeAuto, controller) - - ginkgo.By("Waiting for pods to be evicted") - err = WaitForPodsEvicted(f, podList) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) -} - -func setupHamsterController(f *framework.Framework, controllerKind, cpu, memory string, replicas int32) *apiv1.PodList { - switch controllerKind { - case "Deployment": - SetupHamsterDeployment(f, cpu, memory, replicas) - case "ReplicationController": - setupHamsterReplicationController(f, cpu, memory, replicas) - case "Job": - setupHamsterJob(f, cpu, memory, replicas) - case "CronJob": - SetupHamsterCronJob(f, "*/2 * * * *", cpu, memory, replicas) - case "ReplicaSet": - setupHamsterRS(f, cpu, memory, replicas) - case "StatefulSet": - setupHamsterStateful(f, cpu, memory, replicas) - default: - framework.Failf("Unknown controller kind: %v", controllerKind) - return nil - } - pods, err := GetHamsterPods(f) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - return pods -} - -func setupHamsterReplicationController(f *framework.Framework, cpu, memory string, replicas int32) { - hamsterContainer := SetupHamsterContainer(cpu, memory) - rc := framework_rc.ByNameContainer("hamster-rc", replicas, hamsterLabels, hamsterContainer, nil) - - rc.Namespace = f.Namespace.Name - err := testutils.CreateRCWithRetries(f.ClientSet, f.Namespace.Name, rc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = waitForRCPodsRunning(f, rc) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) -} - -func waitForRCPodsRunning(f *framework.Framework, rc *apiv1.ReplicationController) error { - return wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - podList, err := GetHamsterPods(f) - if err != nil { - framework.Logf("Error listing pods, retrying: %v", err) - return false, nil - } - podsRunning := int32(0) - for _, pod := range podList.Items { - if pod.Status.Phase == apiv1.PodRunning { - podsRunning += 1 - } - } - return podsRunning == *rc.Spec.Replicas, nil - }) -} - -func setupHamsterJob(f *framework.Framework, cpu, memory string, replicas int32) { - job := framework_job.NewTestJob("notTerminate", "hamster-job", apiv1.RestartPolicyOnFailure, - replicas, replicas, nil, 10) - job.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory) - for label, value := range hamsterLabels { - job.Spec.Template.Labels[label] = value - } - _, err := framework_job.CreateJob(context.TODO(), f.ClientSet, f.Namespace.Name, job) - 
gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = framework_job.WaitForJobPodsRunning(context.TODO(), f.ClientSet, f.Namespace.Name, job.Name, replicas) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) -} - -func setupHamsterRS(f *framework.Framework, cpu, memory string, replicas int32) { - rs := newReplicaSet("hamster-rs", f.Namespace.Name, replicas, hamsterLabels, "", "") - rs.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory) - err := createReplicaSetWithRetries(f.ClientSet, f.Namespace.Name, rs) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = framework_rs.WaitForReadyReplicaSet(context.TODO(), f.ClientSet, f.Namespace.Name, rs.Name) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) -} - -func setupHamsterStateful(f *framework.Framework, cpu, memory string, replicas int32) { - stateful := framework_ss.NewStatefulSet("hamster-stateful", f.Namespace.Name, - "hamster-service", replicas, nil, nil, hamsterLabels) - - stateful.Spec.Template.Spec.Containers[0] = SetupHamsterContainer(cpu, memory) - err := createStatefulSetSetWithRetries(f.ClientSet, f.Namespace.Name, stateful) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - framework_ss.WaitForRunningAndReady(context.TODO(), f.ClientSet, *stateful.Spec.Replicas, stateful) -} - -func setupPDB(f *framework.Framework, name string, maxUnavailable int) *policyv1.PodDisruptionBudget { - maxUnavailableIntstr := intstr.FromInt(maxUnavailable) - pdb := &policyv1.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: policyv1.PodDisruptionBudgetSpec{ - MaxUnavailable: &maxUnavailableIntstr, - Selector: &metav1.LabelSelector{ - MatchLabels: hamsterLabels, - }, - }, - } - _, err := f.ClientSet.PolicyV1().PodDisruptionBudgets(f.Namespace.Name).Create(context.TODO(), pdb, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - return pdb -} - -func getCurrentPodSetForDeployment(c clientset.Interface, d *appsv1.Deployment) PodSet { - podList, err := framework_deployment.GetPodsForDeployment(context.TODO(), c, d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - return MakePodSet(podList) -} - -func createReplicaSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.ReplicaSet) error { - if obj == nil { - return fmt.Errorf("object provided to create is empty") - } - createFunc := func() (bool, error) { - _, err := c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) - if err == nil || apierrs.IsAlreadyExists(err) { - return true, nil - } - return false, fmt.Errorf("failed to create object with non-retriable error: %v", err) - } - return testutils.RetryWithExponentialBackOff(createFunc) -} - -func createStatefulSetSetWithRetries(c clientset.Interface, namespace string, obj *appsv1.StatefulSet) error { - if obj == nil { - return fmt.Errorf("object provided to create is empty") - } - createFunc := func() (bool, error) { - _, err := c.AppsV1().StatefulSets(namespace).Create(context.TODO(), obj, metav1.CreateOptions{}) - if err == nil || apierrs.IsAlreadyExists(err) { - return true, nil - } - return false, fmt.Errorf("failed to create object with non-retriable error: %v", err) - } - return testutils.RetryWithExponentialBackOff(createFunc) -} - -// newReplicaSet returns a new ReplicaSet. 
-func newReplicaSet(name, namespace string, replicas int32, podLabels map[string]string, imageName, image string) *appsv1.ReplicaSet { - return &appsv1.ReplicaSet{ - TypeMeta: metav1.TypeMeta{ - Kind: "ReplicaSet", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - }, - Spec: appsv1.ReplicaSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: podLabels, - }, - Replicas: &replicas, - Template: apiv1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: podLabels, - }, - Spec: apiv1.PodSpec{ - Containers: []apiv1.Container{ - { - Name: imageName, - Image: image, - SecurityContext: &apiv1.SecurityContext{}, - }, - }, - }, - }, - }, - } -} diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go deleted file mode 100644 index 6a674a50e1a4..000000000000 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ /dev/null @@ -1,573 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscaling - -import ( - "context" - "fmt" - "time" - - appsv1 "k8s.io/api/apps/v1" - autoscalingv1 "k8s.io/api/autoscaling/v1" - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" - "k8s.io/kubernetes/test/e2e/framework" - framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment" - podsecurity "k8s.io/pod-security-admission/api" - - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { - f := framework.NewDefaultFramework("vertical-pod-autoscaling") - f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline - - ginkgo.It("starts pods with new recommended request", func() { - d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) - - ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ - ContainerName: "hamster", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("250m"), - apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), - }, - }}, - } - InstallVPA(f, vpaCRD) - - ginkgo.By("Setting up a hamster deployment") - podList := startDeploymentPods(f, d) - - // Originally Pods had 100m CPU, 100Mi of memory, but admission controller - // should change it to recommended 250m CPU and 200Mi of memory. 
- for _, pod := range podList.Items { - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m"))) - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) - } - }) - - ginkgo.It("doesn't block patches", func() { - d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) - - ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ - ContainerName: "hamster", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("250m"), - apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), - }, - }}, - } - InstallVPA(f, vpaCRD) - - ginkgo.By("Setting up a hamster deployment") - podList := startDeploymentPods(f, d) - - ginkgo.By("Verifying hamster deployment") - for i, pod := range podList.Items { - podInfo := fmt.Sprintf("pod %s at index %d", pod.Name, i) - cpuDescription := fmt.Sprintf("%s: originally Pods had 100m CPU, admission controller should change it to recommended 250m CPU", podInfo) - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")), cpuDescription) - memDescription := fmt.Sprintf("%s: originally Pods had 100Mi of memory, admission controller should change it to recommended 200Mi memory", podInfo) - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")), memDescription) - } - - ginkgo.By("Modifying recommendation.") - PatchVpaRecommendation(f, vpaCRD, &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ - ContainerName: "hamster", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("100m"), - apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), - }, - }}, - }) - - podName := podList.Items[0].Name - ginkgo.By(fmt.Sprintf("Modifying pod %v.", podName)) - AnnotatePod(f, podName, "someAnnotation", "someValue") - }) - - ginkgo.It("keeps limits equal to request", func() { - d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) - - ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ - ContainerName: "hamster", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("250m"), - apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), - }, - }}, - } - InstallVPA(f, vpaCRD) - - ginkgo.By("Setting up a hamster deployment") - podList := startDeploymentPods(f, d) - - // Originally Pods had 100m CPU, 100Mi of memory, but admission controller - // should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal. 
- for _, pod := range podList.Items { - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m"))) - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) - gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m"))) - gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) - } - }) - - ginkgo.It("keeps limits to request ratio constant", func() { - d := NewHamsterDeploymentWithResourcesAndLimits(f, - ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ - ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) - - ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ - ContainerName: "hamster", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("250m"), - apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), - }, - }}, - } - InstallVPA(f, vpaCRD) - - ginkgo.By("Setting up a hamster deployment") - podList := startDeploymentPods(f, d) - - // Originally Pods had 100m CPU, 100Mi of memory, but admission controller - // should change it to 250m CPU and 200Mi of memory. Limits to request ratio should stay unchanged. - for _, pod := range podList.Items { - gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) - gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) - gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) - } - }) - - ginkgo.It("caps request according to container max limit set in LimitRange", func() { - startCpuRequest := ParseQuantityOrDie("100m") - startCpuLimit := ParseQuantityOrDie("150m") - startMemRequest := ParseQuantityOrDie("100Mi") - startMemLimit := ParseQuantityOrDie("200Mi") - cpuRecommendation := ParseQuantityOrDie("250m") - memRecommendation := ParseQuantityOrDie("200Mi") - - d := NewHamsterDeploymentWithResourcesAndLimits(f, startCpuRequest, startMemRequest, startCpuLimit, startMemLimit) - - ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ - ContainerName: "hamster", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: cpuRecommendation, - apiv1.ResourceMemory: memRecommendation, - }, - }}, - } - InstallVPA(f, vpaCRD) - - // Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while - // recommendation is 250m - // Max memory limit is 1Gi and ratio is 2., so max request is 0.5Gi - maxCpu := ParseQuantityOrDie("300m") - InstallLimitRangeWithMax(f, maxCpu.String(), "1Gi", apiv1.LimitTypeContainer) - - ginkgo.By("Setting up a hamster deployment") - podList := startDeploymentPods(f, d) - - ginkgo.By("Verifying hamster deployment") - for i, pod 
:= range podList.Items { - podInfo := fmt.Sprintf("pod %s at index %d", pod.Name, i) - - cpuRequestMsg := fmt.Sprintf("%s: CPU request didn't increase to the recommendation capped to max limit in LimitRange", podInfo) - gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")), cpuRequestMsg) - - cpuLimitMsg := fmt.Sprintf("%s: CPU limit above max in LimitRange", podInfo) - gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", maxCpu.MilliValue()), cpuLimitMsg) - - cpuRatioMsg := fmt.Sprintf("%s: CPU limit / request ratio isn't approximately equal to the original ratio", podInfo) - cpuRatio := float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue()) - gomega.Expect(cpuRatio).To(gomega.BeNumerically("~", 1.5), cpuRatioMsg) - - memRequestMsg := fmt.Sprintf("%s: memory request didn't increase to the recommendation capped to max limit in LimitRange", podInfo) - gomega.Expect(pod.Spec.Containers[0].Resources.Requests.Memory().Value()).To(gomega.Equal(memRecommendation.Value()), memRequestMsg) - memLimitMsg := fmt.Sprintf("%s: memory limit above max limit in LimitRange", podInfo) - gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", 1024*1024*1024), memLimitMsg) - - memRatioMsg := fmt.Sprintf("%s: memory limit / request ratio isn't approximately equal to the original ratio", podInfo) - memRatio := float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value()) - gomega.Expect(memRatio).To(gomega.BeNumerically("~", 2.), memRatioMsg) - } - }) - - ginkgo.It("raises request according to container min limit set in LimitRange", func() { - d := NewHamsterDeploymentWithResourcesAndLimits(f, - ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ - ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) - - ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ - ContainerName: "hamster", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("250m"), - apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled - }, - }}, - } - InstallVPA(f, vpaCRD) - - // Min CPU from limit range is 50m and ratio is 1.5. Min applies to both limit and request so min - // request is 50m and min limit is 75 - // Min memory limit is 250Mi and it applies to both limit and request. Recommendation is 100Mi. - // It should be scaled up to 250Mi. - InstallLimitRangeWithMin(f, "50m", "250Mi", apiv1.LimitTypeContainer) - - ginkgo.By("Setting up a hamster deployment") - podList := startDeploymentPods(f, d) - - // Originally Pods had 100m CPU, 200Mi of memory, but admission controller - // should change it to 250m CPU and 125Mi of memory, since this is the lowest - // request that limitrange allows. - // Limit to request ratio should stay unchanged. 
- for _, pod := range podList.Items { - gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi"))) - gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75)) - gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024)) - gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) - gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) - } - }) - - ginkgo.It("caps request according to pod max limit set in LimitRange", func() { - d := NewHamsterDeploymentWithResourcesAndLimits(f, - ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ - ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) - d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0]) - d.Spec.Template.Spec.Containers[1].Name = "hamster2" - ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{ - { - ContainerName: "hamster", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("250m"), - apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), - }, - }, - { - ContainerName: "hamster2", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("250m"), - apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), - }, - }, - }, - } - InstallVPA(f, vpaCRD) - - // Max CPU limit is 600m for pod, 300 per container and ratio is 1.5, so max request is 200m, - // while recommendation is 250m - // Max memory limit is 1Gi and ratio is 2., so max request is 0.5Gi - InstallLimitRangeWithMax(f, "600m", "1Gi", apiv1.LimitTypePod) - - ginkgo.By("Setting up a hamster deployment") - podList := startDeploymentPods(f, d) - - // Originally Pods had 100m CPU, 100Mi of memory, but admission controller - // should change it to 200m CPU (as this is the recommendation - // capped according to max limit in LimitRange) and 200Mi of memory, - // which is uncapped. Limit to request ratio should stay unchanged. 
- for _, pod := range podList.Items { - gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) - gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300)) - gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", 1024*1024*1024)) - gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) - gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) - } - }) - - ginkgo.It("raises request according to pod min limit set in LimitRange", func() { - d := NewHamsterDeploymentWithResourcesAndLimits(f, - ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ - ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) - d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0]) - d.Spec.Template.Spec.Containers[1].Name = "hamster2" - ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{ - { - ContainerName: "hamster", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("120m"), - apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled - }, - }, - { - ContainerName: "hamster2", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("120m"), - apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled - }, - }, - }, - } - InstallVPA(f, vpaCRD) - - // Min CPU from limit range is 100m, 50m per pod and ratio is 1.5. Min applies to both limit and - // request so min request is 50m and min limit is 75 - // Min memory limit is 500Mi per pod, 250 per container and it applies to both limit and request. - // Recommendation is 100Mi it should be scaled up to 250Mi. - InstallLimitRangeWithMin(f, "100m", "500Mi", apiv1.LimitTypePod) - - ginkgo.By("Setting up a hamster deployment") - podList := startDeploymentPods(f, d) - - // Originally Pods had 100m CPU, 200Mi of memory, but admission controller - // should change it to 250m CPU and 125Mi of memory, since this is the lowest - // request that limitrange allows. - // Limit to request ratio should stay unchanged. 
- for _, pod := range podList.Items { - gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("120m"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi"))) - gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75)) - gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024)) - gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) - gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) - } - }) - - ginkgo.It("caps request to max set in VPA", func() { - d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) - - ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ - ContainerName: "hamster", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("250m"), - apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), - }, - }}, - } - vpaCRD.Spec.ResourcePolicy = &vpa_types.PodResourcePolicy{ - ContainerPolicies: []vpa_types.ContainerResourcePolicy{{ - ContainerName: "hamster", - MaxAllowed: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("233m"), - apiv1.ResourceMemory: ParseQuantityOrDie("150Mi"), - }, - }}, - } - InstallVPA(f, vpaCRD) - - ginkgo.By("Setting up a hamster deployment") - podList := startDeploymentPods(f, d) - - // Originally Pods had 100m CPU, 100Mi of memory, but admission controller - // should change it to 233m CPU and 150Mi of memory (as this is the recommendation - // capped to max specified in VPA) - for _, pod := range podList.Items { - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("233m"))) - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("150Mi"))) - } - }) - - ginkgo.It("raises request to min set in VPA", func() { - d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) - - ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ - ContainerName: "hamster", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("50m"), - apiv1.ResourceMemory: ParseQuantityOrDie("60Mi"), - }, - }}, - } - vpaCRD.Spec.ResourcePolicy = &vpa_types.PodResourcePolicy{ - ContainerPolicies: []vpa_types.ContainerResourcePolicy{{ - ContainerName: "hamster", - MinAllowed: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("90m"), - apiv1.ResourceMemory: ParseQuantityOrDie("80Mi"), - }, - }}, - } - InstallVPA(f, vpaCRD) - - ginkgo.By("Setting up a hamster deployment") - podList := startDeploymentPods(f, d) - - // Originally Pods had 100m CPU, 100Mi of memory, but admission controller - // should change it to recommended 90m CPU and 800Mi of memory (as this the - // recommendation 
raised to min specified in VPA) - for _, pod := range podList.Items { - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("90m"))) - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("80Mi"))) - } - }) - - ginkgo.It("leaves users request when no recommendation", func() { - d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) - - ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - InstallVPA(f, vpaCRD) - - ginkgo.By("Setting up a hamster deployment") - podList := startDeploymentPods(f, d) - - // VPA has no recommendation, so user's request is passed through - for _, pod := range podList.Items { - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("100m"))) - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("100Mi"))) - } - }) - - ginkgo.It("passes empty request when no recommendation and no user-specified request", func() { - d := NewHamsterDeployment(f) - - ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - InstallVPA(f, vpaCRD) - - ginkgo.By("Setting up a hamster deployment") - podList := startDeploymentPods(f, d) - - // VPA has no recommendation, deployment has no request specified - for _, pod := range podList.Items { - gomega.Expect(pod.Spec.Containers[0].Resources.Requests).To(gomega.BeEmpty()) - } - }) - - ginkgo.It("accepts valid and rejects invalid VPA object", func() { - ginkgo.By("Setting up valid VPA object") - validVPA := []byte(`{ - "kind": "VerticalPodAutoscaler", - "apiVersion": "autoscaling.k8s.io/v1beta2", - "metadata": {"name": "hamster-vpa-valid"}, - "spec": { - "targetRef": { - "apiVersion": "apps/v1", - "kind": "Deployment", - "name":"hamster" - }, - "resourcePolicy": { - "containerPolicies": [{"containerName": "*", "minAllowed":{"cpu":"50m"}}] - } - } - }`) - err := InstallRawVPA(f, validVPA) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Valid VPA object rejected") - - ginkgo.By("Setting up invalid VPA object") - // The invalid object differs by name and minAllowed - there is an invalid "requests" field. - invalidVPA := []byte(`{ - "kind": "VerticalPodAutoscaler", - "apiVersion": "autoscaling.k8s.io/v1beta2", - "metadata": {"name": "hamster-vpa-invalid"}, - "spec": { - "targetRef": { - "apiVersion": "apps/v1", - "kind": "Deployment", - "name":"hamster" - }, - "resourcePolicy": { - "containerPolicies": [{"containerName": "*", "minAllowed":{"requests":{"cpu":"50m"}}}] - } - } - }`) - err2 := InstallRawVPA(f, invalidVPA) - gomega.Expect(err2).To(gomega.HaveOccurred(), "Invalid VPA object accepted") - gomega.Expect(err2.Error()).To(gomega.MatchRegexp(`.*admission webhook .*vpa.* denied the request: .*`)) - }) - -}) - -func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) *apiv1.PodList { - // Apiserver watch can lag depending on cached object count and apiserver resource usage. - // We assume that watch can lag up to 5 seconds. - const apiserverWatchLag = 5 * time.Second - // In admission controller e2e tests a recommendation is created before deployment. - // Creating deployment with size greater than 0 would create a race between information - // about pods and information about deployment getting to the admission controller. 
- // Any pods that get processed by AC before it receives information about the deployment - // don't receive recommendation. - // To avoid this create deployment with size 0, then scale it up to the desired size. - desiredPodCount := *deployment.Spec.Replicas - zero := int32(0) - deployment.Spec.Replicas = &zero - c, ns := f.ClientSet, f.Namespace.Name - deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when creating deployment with size 0") - - err = framework_deployment.WaitForDeploymentComplete(c, deployment) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when waiting for empty deployment to create") - // If admission controller receives pod before controller it will not apply recommendation and test will fail. - // Wait after creating deployment to ensure VPA knows about it, then scale up. - // Normally watch lag is not a problem in terms of correctness: - // - Mode "Auto": created pod without assigned resources will be handled by the eviction loop. - // - Mode "Initial": calculating recommendations takes more than potential ectd lag. - // - Mode "Off": pods are not handled by the admission controller. - // In e2e admission controller tests we want to focus on scenarios without considering watch lag. - // TODO(#2631): Remove sleep when issue is fixed. - time.Sleep(apiserverWatchLag) - - scale := autoscalingv1.Scale{ - ObjectMeta: metav1.ObjectMeta{ - Name: deployment.ObjectMeta.Name, - Namespace: deployment.ObjectMeta.Namespace, - }, - Spec: autoscalingv1.ScaleSpec{ - Replicas: desiredPodCount, - }, - } - afterScale, err := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), deployment.Name, &scale, metav1.UpdateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(afterScale.Spec.Replicas).To(gomega.Equal(desiredPodCount), fmt.Sprintf("expected %d replicas after scaling", desiredPodCount)) - - // After scaling deployment we need to retrieve current version with updated replicas count. - deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when getting scaled deployment") - err = framework_deployment.WaitForDeploymentComplete(c, deployment) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when waiting for deployment to resize") - - podList, err := framework_deployment.GetPodsForDeployment(context.TODO(), c, deployment) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "when listing pods after deployment resize") - return podList -} diff --git a/vertical-pod-autoscaler/e2e/v1beta2/autoscaling_utils.go b/vertical-pod-autoscaler/e2e/v1beta2/autoscaling_utils.go deleted file mode 100644 index de3340ced25e..000000000000 --- a/vertical-pod-autoscaler/e2e/v1beta2/autoscaling_utils.go +++ /dev/null @@ -1,458 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// This is a cut down fork of k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go - -package autoscaling - -import ( - "context" - "fmt" - "strconv" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" - e2edebug "k8s.io/kubernetes/test/e2e/framework/debug" - e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" - e2erc "k8s.io/kubernetes/test/e2e/framework/rc" - "k8s.io/kubernetes/test/e2e/framework/resource" - e2eservice "k8s.io/kubernetes/test/e2e/framework/service" - testutils "k8s.io/kubernetes/test/utils" - - ginkgo "github.com/onsi/ginkgo/v2" - - scaleclient "k8s.io/client-go/scale" - imageutils "k8s.io/kubernetes/test/utils/image" -) - -const ( - dynamicConsumptionTimeInSeconds = 30 - dynamicRequestSizeInMillicores = 20 - dynamicRequestSizeInMegabytes = 100 - dynamicRequestSizeCustomMetric = 10 - port = 80 - targetPort = 8080 - timeoutRC = 120 * time.Second - startServiceTimeout = time.Minute - startServiceInterval = 5 * time.Second - rcIsNil = "ERROR: replicationController = nil" - deploymentIsNil = "ERROR: deployment = nil" - rsIsNil = "ERROR: replicaset = nil" - invalidKind = "ERROR: invalid workload kind for resource consumer" - customMetricName = "QPS" - serviceInitializationTimeout = 2 * time.Minute - serviceInitializationInterval = 15 * time.Second - stressImage = "registry.k8s.io/e2e-test-images/agnhost:2.53" -) - -var ( - resourceConsumerImage = imageutils.GetE2EImage(imageutils.ResourceConsumer) - stressCommand = []string{"/agnhost", "stress", "--mem-total", "10000000000", "--mem-alloc-size", "8000"} -) - -var ( - // KindRC is the GVK for ReplicationController - KindRC = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"} - // KindDeployment is the GVK for Deployment - KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"} - // KindReplicaSet is the GVK for ReplicaSet - KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"} -) - -/* -ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warning: memory not supported) -typical use case: -rc.ConsumeCPU(600) -// ... check your assumption here -rc.ConsumeCPU(300) -// ... 
check your assumption here -*/ -type ResourceConsumer struct { - name string - controllerName string - kind schema.GroupVersionKind - nsName string - clientSet clientset.Interface - scaleClient scaleclient.ScalesGetter - cpu chan int - mem chan int - customMetric chan int - stopCPU chan int - stopMem chan int - stopCustomMetric chan int - stopWaitGroup sync.WaitGroup - consumptionTimeInSeconds int - sleepTime time.Duration - requestSizeInMillicores int - requestSizeInMegabytes int - requestSizeCustomMetric int -} - -// NewDynamicResourceConsumer is a wrapper to create a new dynamic ResourceConsumer -func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer { - return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds, - dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil) -} - -/* -NewResourceConsumer creates new ResourceConsumer -initCPUTotal argument is in millicores -initMemoryTotal argument is in megabytes -memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod -cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod -*/ -func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores, - requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string) *ResourceConsumer { - if podAnnotations == nil { - podAnnotations = make(map[string]string) - } - if serviceAnnotations == nil { - serviceAnnotations = make(map[string]string) - } - runServiceAndWorkloadForResourceConsumer(clientset, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations) - rc := &ResourceConsumer{ - name: name, - controllerName: name + "-ctrl", - kind: kind, - nsName: nsName, - clientSet: clientset, - scaleClient: scaleClient, - cpu: make(chan int), - mem: make(chan int), - customMetric: make(chan int), - stopCPU: make(chan int), - stopMem: make(chan int), - stopCustomMetric: make(chan int), - consumptionTimeInSeconds: consumptionTimeInSeconds, - sleepTime: time.Duration(consumptionTimeInSeconds) * time.Second, - requestSizeInMillicores: requestSizeInMillicores, - requestSizeInMegabytes: requestSizeInMegabytes, - requestSizeCustomMetric: requestSizeCustomMetric, - } - - go rc.makeConsumeCPURequests() - rc.ConsumeCPU(initCPUTotal) - - go rc.makeConsumeMemRequests() - rc.ConsumeMem(initMemoryTotal) - go rc.makeConsumeCustomMetric() - rc.ConsumeCustomMetric(initCustomMetric) - return rc -} - -// ConsumeCPU consumes given number of CPU -func (rc *ResourceConsumer) ConsumeCPU(millicores int) { - framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores) - rc.cpu <- millicores -} - -// ConsumeMem consumes given number of Mem -func (rc *ResourceConsumer) ConsumeMem(megabytes int) { - framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes) - rc.mem <- megabytes -} - -// ConsumeCustomMetric consumes given number of custom metric -func 
(rc *ResourceConsumer) ConsumeCustomMetric(amount int) { - framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount) - rc.customMetric <- amount -} - -func (rc *ResourceConsumer) makeConsumeCPURequests() { - defer ginkgo.GinkgoRecover() - rc.stopWaitGroup.Add(1) - defer rc.stopWaitGroup.Done() - sleepTime := time.Duration(0) - millicores := 0 - for { - select { - case millicores = <-rc.cpu: - framework.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores) - case <-time.After(sleepTime): - framework.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores) - rc.sendConsumeCPURequest(millicores) - sleepTime = rc.sleepTime - case <-rc.stopCPU: - framework.Logf("RC %s: stopping CPU consumer", rc.name) - return - } - } -} - -func (rc *ResourceConsumer) makeConsumeMemRequests() { - defer ginkgo.GinkgoRecover() - rc.stopWaitGroup.Add(1) - defer rc.stopWaitGroup.Done() - sleepTime := time.Duration(0) - megabytes := 0 - for { - select { - case megabytes = <-rc.mem: - framework.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes) - case <-time.After(sleepTime): - framework.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes) - rc.sendConsumeMemRequest(megabytes) - sleepTime = rc.sleepTime - case <-rc.stopMem: - framework.Logf("RC %s: stopping mem consumer", rc.name) - return - } - } -} - -func (rc *ResourceConsumer) makeConsumeCustomMetric() { - defer ginkgo.GinkgoRecover() - rc.stopWaitGroup.Add(1) - defer rc.stopWaitGroup.Done() - sleepTime := time.Duration(0) - delta := 0 - for { - select { - case delta = <-rc.customMetric: - framework.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta) - case <-time.After(sleepTime): - framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName) - rc.sendConsumeCustomMetric(delta) - sleepTime = rc.sleepTime - case <-rc.stopCustomMetric: - framework.Logf("RC %s: stopping metric consumer", rc.name) - return - } - } -} - -func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) { - ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) - defer cancel() - - err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) { - proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) - framework.ExpectNoError(err) - req := proxyRequest.Namespace(rc.nsName). - Name(rc.controllerName). - Suffix("ConsumeCPU"). - Param("millicores", strconv.Itoa(millicores)). - Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)). 
- Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores)) - framework.Logf("ConsumeCPU URL: %v", *req.URL()) - _, err = req.DoRaw(ctx) - if err != nil { - framework.Logf("ConsumeCPU failure: %v", err) - return false, nil - } - return true, nil - }) - - framework.ExpectNoError(err) -} - -// sendConsumeMemRequest sends POST request for memory consumption -func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) { - ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) - defer cancel() - - err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) { - proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) - framework.ExpectNoError(err) - req := proxyRequest.Namespace(rc.nsName). - Name(rc.controllerName). - Suffix("ConsumeMem"). - Param("megabytes", strconv.Itoa(megabytes)). - Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)). - Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes)) - framework.Logf("ConsumeMem URL: %v", *req.URL()) - _, err = req.DoRaw(ctx) - if err != nil { - framework.Logf("ConsumeMem failure: %v", err) - return false, nil - } - return true, nil - }) - - framework.ExpectNoError(err) -} - -// sendConsumeCustomMetric sends POST request for custom metric consumption -func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) { - ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) - defer cancel() - - err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) { - proxyRequest, err := e2eservice.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post()) - framework.ExpectNoError(err) - req := proxyRequest.Namespace(rc.nsName). - Name(rc.controllerName). - Suffix("BumpMetric"). - Param("metric", customMetricName). - Param("delta", strconv.Itoa(delta)). - Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)). - Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric)) - framework.Logf("ConsumeCustomMetric URL: %v", *req.URL()) - _, err = req.DoRaw(ctx) - if err != nil { - framework.Logf("ConsumeCustomMetric failure: %v", err) - return false, nil - } - return true, nil - }) - framework.ExpectNoError(err) -} - -// CleanUp clean up the background goroutines responsible for consuming resources. -func (rc *ResourceConsumer) CleanUp() { - ginkgo.By(fmt.Sprintf("Removing consuming RC %s", rc.name)) - close(rc.stopCPU) - close(rc.stopMem) - close(rc.stopCustomMetric) - rc.stopWaitGroup.Wait() - // Wait some time to ensure all child goroutines are finished. 
- time.Sleep(10 * time.Second) - kind := rc.kind.GroupKind() - framework.ExpectNoError(resource.DeleteResourceAndWaitForGC(context.TODO(), rc.clientSet, kind, rc.nsName, rc.name)) - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name, metav1.DeleteOptions{})) - framework.ExpectNoError(resource.DeleteResourceAndWaitForGC(context.TODO(), rc.clientSet, schema.GroupKind{Kind: "ReplicationController"}, rc.nsName, rc.controllerName)) - framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.controllerName, metav1.DeleteOptions{})) -} - -func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuRequestMillis, memRequestMb int64, podAnnotations, serviceAnnotations map[string]string) { - ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas)) - _, err := c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Annotations: serviceAnnotations, - }, - Spec: v1.ServiceSpec{ - Ports: []v1.ServicePort{{ - Port: port, - TargetPort: intstr.FromInt(targetPort), - }}, - - Selector: map[string]string{ - "name": name, - }, - }, - }, metav1.CreateOptions{}) - framework.ExpectNoError(err) - - rcConfig := testutils.RCConfig{ - Client: c, - Image: resourceConsumerImage, - Name: name, - Namespace: ns, - Timeout: timeoutRC, - Replicas: replicas, - CpuRequest: cpuRequestMillis, - MemRequest: memRequestMb * 1024 * 1024, // MemRequest is in bytes - Annotations: podAnnotations, - } - - switch kind { - case KindRC: - framework.ExpectNoError(e2erc.RunRC(context.TODO(), rcConfig)) - case KindDeployment: - dpConfig := testutils.DeploymentConfig{ - RCConfig: rcConfig, - } - ginkgo.By(fmt.Sprintf("creating deployment %s in namespace %s", dpConfig.Name, dpConfig.Namespace)) - dpConfig.NodeDumpFunc = e2edebug.DumpNodeDebugInfo - dpConfig.ContainerDumpFunc = e2ekubectl.LogFailedContainers - framework.ExpectNoError(testutils.RunDeployment(context.TODO(), dpConfig)) - case KindReplicaSet: - rsConfig := testutils.ReplicaSetConfig{ - RCConfig: rcConfig, - } - ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace)) - framework.ExpectNoError(runReplicaSet(rsConfig)) - default: - framework.Failf(invalidKind) - } - - ginkgo.By(fmt.Sprintf("Running controller")) - controllerName := name + "-ctrl" - _, err = c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: controllerName, - }, - Spec: v1.ServiceSpec{ - Ports: []v1.ServicePort{{ - Port: port, - TargetPort: intstr.FromInt(targetPort), - }}, - - Selector: map[string]string{ - "name": controllerName, - }, - }, - }, metav1.CreateOptions{}) - framework.ExpectNoError(err) - - dnsClusterFirst := v1.DNSClusterFirst - controllerRcConfig := testutils.RCConfig{ - Client: c, - Image: imageutils.GetE2EImage(imageutils.Agnhost), - Name: controllerName, - Namespace: ns, - Timeout: timeoutRC, - Replicas: 1, - Command: []string{"/agnhost", "resource-consumer-controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"}, - DNSPolicy: &dnsClusterFirst, - } - framework.ExpectNoError(e2erc.RunRC(context.TODO(), controllerRcConfig)) - - // Wait for endpoints to propagate for the controller service. 
- framework.ExpectNoError(framework.WaitForServiceEndpointsNum( - context.TODO(), c, ns, controllerName, 1, startServiceInterval, startServiceTimeout)) -} - -// runReplicaSet launches (and verifies correctness) of a replicaset. -func runReplicaSet(config testutils.ReplicaSetConfig) error { - ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace)) - config.NodeDumpFunc = e2edebug.DumpNodeDebugInfo - config.ContainerDumpFunc = e2ekubectl.LogFailedContainers - return testutils.RunReplicaSet(context.TODO(), config) -} - -func runOomingReplicationController(c clientset.Interface, ns, name string, replicas int) { - ginkgo.By(fmt.Sprintf("Running OOMing RC %s with %v replicas", name, replicas)) - - rcConfig := testutils.RCConfig{ - Client: c, - Image: stressImage, - Command: stressCommand, - Name: name, - Namespace: ns, - Timeout: timeoutRC, - Replicas: replicas, - Annotations: make(map[string]string), - MemRequest: 1024 * 1024 * 1024, - MemLimit: 1024 * 1024 * 1024, - } - - dpConfig := testutils.DeploymentConfig{ - RCConfig: rcConfig, - } - ginkgo.By(fmt.Sprintf("Creating deployment %s in namespace %s", dpConfig.Name, dpConfig.Namespace)) - dpConfig.NodeDumpFunc = e2edebug.DumpNodeDebugInfo - dpConfig.ContainerDumpFunc = e2ekubectl.LogFailedContainers - framework.ExpectNoError(testutils.RunDeployment(context.TODO(), dpConfig)) -} diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go deleted file mode 100644 index 094c660fb271..000000000000 --- a/vertical-pod-autoscaler/e2e/v1beta2/common.go +++ /dev/null @@ -1,621 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscaling - -import ( - "context" - "encoding/json" - "fmt" - "time" - - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" - autoscaling "k8s.io/api/autoscaling/v1" - batchv1 "k8s.io/api/batch/v1" - apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" - vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" - framework_deployment "k8s.io/kubernetes/test/e2e/framework/deployment" -) - -const ( - recommenderComponent = "recommender" - updateComponent = "updater" - admissionControllerComponent = "admission-controller" - fullVpaSuite = "full-vpa" - actuationSuite = "actuation" - pollInterval = 10 * time.Second - pollTimeout = 15 * time.Minute - cronJobsWaitTimeout = 15 * time.Minute - // VpaEvictionTimeout is a timeout for VPA to restart a pod if there are no - // mechanisms blocking it (for example PDB). 
- VpaEvictionTimeout = 3 * time.Minute - - defaultHamsterReplicas = int32(3) - defaultHamsterBackoffLimit = int32(10) -) - -var hamsterTargetRef = &autoscaling.CrossVersionObjectReference{ - APIVersion: "apps/v1", - Kind: "Deployment", - Name: "hamster-deployment", -} - -var hamsterLabels = map[string]string{"app": "hamster"} - -// SIGDescribe adds sig-autoscaling tag to test description. -func SIGDescribe(text string, body func()) bool { - return ginkgo.Describe(fmt.Sprintf("[sig-autoscaling] %v", text), body) -} - -// E2eDescribe describes a VPA e2e test. -func E2eDescribe(scenario, name string, body func()) bool { - return SIGDescribe(fmt.Sprintf("[VPA] [%s] [v1beta2] %s", scenario, name), body) -} - -// RecommenderE2eDescribe describes a VPA recommender e2e test. -func RecommenderE2eDescribe(name string, body func()) bool { - return E2eDescribe(recommenderComponent, name, body) -} - -// UpdaterE2eDescribe describes a VPA updater e2e test. -func UpdaterE2eDescribe(name string, body func()) bool { - return E2eDescribe(updateComponent, name, body) -} - -// AdmissionControllerE2eDescribe describes a VPA admission controller e2e test. -func AdmissionControllerE2eDescribe(name string, body func()) bool { - return E2eDescribe(admissionControllerComponent, name, body) -} - -// FullVpaE2eDescribe describes a VPA full stack e2e test. -func FullVpaE2eDescribe(name string, body func()) bool { - return E2eDescribe(fullVpaSuite, name, body) -} - -// ActuationSuiteE2eDescribe describes a VPA actuation e2e test. -func ActuationSuiteE2eDescribe(name string, body func()) bool { - return E2eDescribe(actuationSuite, name, body) -} - -// GetHamsterContainerNameByIndex returns name of i-th hamster container. -func GetHamsterContainerNameByIndex(i int) string { - switch { - case i < 0: - panic("negative index") - case i == 0: - return "hamster" - default: - return fmt.Sprintf("hamster%d", i+1) - } -} - -// SetupHamsterDeployment creates and installs a simple hamster deployment -// for e2e test purposes, then makes sure the deployment is running. -func SetupHamsterDeployment(f *framework.Framework, cpu, memory string, replicas int32) *appsv1.Deployment { - cpuQuantity := ParseQuantityOrDie(cpu) - memoryQuantity := ParseQuantityOrDie(memory) - - d := NewHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity) - d.Spec.Replicas = &replicas - d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(context.TODO(), d, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when starting deployment creation") - err = framework_deployment.WaitForDeploymentComplete(f.ClientSet, d) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error waiting for deployment creation to finish") - return d -} - -// NewHamsterDeployment creates a simple hamster deployment for e2e test purposes. -func NewHamsterDeployment(f *framework.Framework) *appsv1.Deployment { - return NewNHamstersDeployment(f, 1) -} - -// NewNHamstersDeployment creates a simple hamster deployment with n containers -// for e2e test purposes. 
-func NewNHamstersDeployment(f *framework.Framework, n int) *appsv1.Deployment { - if n < 1 { - panic("container count should be greater than 0") - } - d := framework_deployment.NewDeployment( - "hamster-deployment", /*deploymentName*/ - defaultHamsterReplicas, /*replicas*/ - hamsterLabels, /*podLabels*/ - GetHamsterContainerNameByIndex(0), /*imageName*/ - "registry.k8s.io/ubuntu-slim:0.14", /*image*/ - appsv1.RollingUpdateDeploymentStrategyType, /*strategyType*/ - ) - d.ObjectMeta.Namespace = f.Namespace.Name - d.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh"} - d.Spec.Template.Spec.Containers[0].Args = []string{"-c", "/usr/bin/yes >/dev/null"} - for i := 1; i < n; i++ { - d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0]) - d.Spec.Template.Spec.Containers[i].Name = GetHamsterContainerNameByIndex(i) - } - return d -} - -// NewHamsterDeploymentWithResources creates a simple hamster deployment with specific -// resource requests for e2e test purposes. -func NewHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment { - d := NewHamsterDeployment(f) - d.Spec.Template.Spec.Containers[0].Resources.Requests = apiv1.ResourceList{ - apiv1.ResourceCPU: cpuQuantity, - apiv1.ResourceMemory: memoryQuantity, - } - return d -} - -// NewHamsterDeploymentWithGuaranteedResources creates a simple hamster deployment with specific -// resource requests for e2e test purposes. Since the container in the pod specifies resource limits -// but not resource requests K8s will set requests equal to limits and the pod will have guaranteed -// QoS class. -func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment { - d := NewHamsterDeployment(f) - d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{ - apiv1.ResourceCPU: cpuQuantity, - apiv1.ResourceMemory: memoryQuantity, - } - return d -} - -// NewHamsterDeploymentWithResourcesAndLimits creates a simple hamster deployment with specific -// resource requests and limits for e2e test purposes. -func NewHamsterDeploymentWithResourcesAndLimits(f *framework.Framework, cpuQuantityRequest, memoryQuantityRequest, cpuQuantityLimit, memoryQuantityLimit resource.Quantity) *appsv1.Deployment { - d := NewHamsterDeploymentWithResources(f, cpuQuantityRequest, memoryQuantityRequest) - d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{ - apiv1.ResourceCPU: cpuQuantityLimit, - apiv1.ResourceMemory: memoryQuantityLimit, - } - return d -} - -func getPodSelectorExcludingDonePodsOrDie() string { - stringSelector := "status.phase!=" + string(apiv1.PodSucceeded) + - ",status.phase!=" + string(apiv1.PodFailed) - selector := fields.ParseSelectorOrDie(stringSelector) - return selector.String() -} - -// GetHamsterPods returns running hamster pods (matched by hamsterLabels) -func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) { - label := labels.SelectorFromSet(labels.Set(hamsterLabels)) - options := metav1.ListOptions{LabelSelector: label.String(), FieldSelector: getPodSelectorExcludingDonePodsOrDie()} - return f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), options) -} - -// NewTestCronJob returns a CronJob for test purposes. 
-func NewTestCronJob(name, schedule string) *batchv1.CronJob { - replicas := defaultHamsterReplicas - backoffLimit := defaultHamsterBackoffLimit - sj := &batchv1.CronJob{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - TypeMeta: metav1.TypeMeta{ - Kind: "CronJob", - }, - Spec: batchv1.CronJobSpec{ - Schedule: schedule, - ConcurrencyPolicy: batchv1.AllowConcurrent, - JobTemplate: batchv1.JobTemplateSpec{ - Spec: batchv1.JobSpec{ - Parallelism: &replicas, - Completions: &replicas, - BackoffLimit: &backoffLimit, - Template: apiv1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"job": name}, - }, - Spec: apiv1.PodSpec{ - RestartPolicy: apiv1.RestartPolicyOnFailure, - }, - }, - }, - }, - }, - } - - return sj -} - -func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error { - return wait.Poll(framework.Poll, cronJobsWaitTimeout, func() (bool, error) { - curr, err := getCronJob(c, ns, cronJobName) - if err != nil { - return false, err - } - return len(curr.Status.Active) >= active, nil - }) -} - -func createCronJob(c clientset.Interface, ns string, cronJob *batchv1.CronJob) (*batchv1.CronJob, error) { - return c.BatchV1().CronJobs(ns).Create(context.TODO(), cronJob, metav1.CreateOptions{}) -} - -func getCronJob(c clientset.Interface, ns, name string) (*batchv1.CronJob, error) { - return c.BatchV1().CronJobs(ns).Get(context.TODO(), name, metav1.GetOptions{}) -} - -// SetupHamsterCronJob creates and sets up a new CronJob -func SetupHamsterCronJob(f *framework.Framework, schedule, cpu, memory string, replicas int32) { - cronJob := NewTestCronJob("hamster-cronjob", schedule) - cronJob.Spec.JobTemplate.Spec.Template.Spec.Containers = []apiv1.Container{SetupHamsterContainer(cpu, memory)} - for label, value := range hamsterLabels { - cronJob.Spec.JobTemplate.Spec.Template.Labels[label] = value - } - cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) -} - -// SetupHamsterContainer returns container with given amount of cpu and memory -func SetupHamsterContainer(cpu, memory string) apiv1.Container { - cpuQuantity := ParseQuantityOrDie(cpu) - memoryQuantity := ParseQuantityOrDie(memory) - - return apiv1.Container{ - Name: "hamster", - Image: "registry.k8s.io/ubuntu-slim:0.14", - Resources: apiv1.ResourceRequirements{ - Requests: apiv1.ResourceList{ - apiv1.ResourceCPU: cpuQuantity, - apiv1.ResourceMemory: memoryQuantity, - }, - }, - Command: []string{"/bin/sh"}, - Args: []string{"-c", "while true; do sleep 10 ; done"}, - } -} - -// SetupVPA creates and installs a simple hamster VPA for e2e test purposes. -func SetupVPA(f *framework.Framework, cpu string, mode vpa_types.UpdateMode, targetRef *autoscaling.CrossVersionObjectReference) { - SetupVPAForNHamsters(f, 1, cpu, mode, targetRef) -} - -// SetupVPAForNHamsters creates and installs a simple pod with n hamster containers for e2e test purposes. 
-func SetupVPAForNHamsters(f *framework.Framework, n int, cpu string, mode vpa_types.UpdateMode, targetRef *autoscaling.CrossVersionObjectReference) { - vpaCRD := NewVPA(f, "hamster-vpa", targetRef) - vpaCRD.Spec.UpdatePolicy.UpdateMode = &mode - - cpuQuantity := ParseQuantityOrDie(cpu) - resourceList := apiv1.ResourceList{apiv1.ResourceCPU: cpuQuantity} - - containerRecommendations := []vpa_types.RecommendedContainerResources{} - for i := 0; i < n; i++ { - containerRecommendations = append(containerRecommendations, - vpa_types.RecommendedContainerResources{ - ContainerName: GetHamsterContainerNameByIndex(i), - Target: resourceList, - LowerBound: resourceList, - UpperBound: resourceList, - }, - ) - } - vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ - ContainerRecommendations: containerRecommendations, - } - - InstallVPA(f, vpaCRD) -} - -// NewVPA creates a VPA object for e2e test purposes. -func NewVPA(f *framework.Framework, name string, targetRef *autoscaling.CrossVersionObjectReference) *vpa_types.VerticalPodAutoscaler { - updateMode := vpa_types.UpdateModeAuto - vpa := vpa_types.VerticalPodAutoscaler{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: f.Namespace.Name, - }, - Spec: vpa_types.VerticalPodAutoscalerSpec{ - TargetRef: targetRef, - UpdatePolicy: &vpa_types.PodUpdatePolicy{ - UpdateMode: &updateMode, - }, - ResourcePolicy: &vpa_types.PodResourcePolicy{ - ContainerPolicies: []vpa_types.ContainerResourcePolicy{}, - }, - }, - } - return &vpa -} - -type patchRecord struct { - Op string `json:"op,inline"` - Path string `json:"path,inline"` - Value interface{} `json:"value"` -} - -func getVpaClientSet(f *framework.Framework) vpa_clientset.Interface { - config, err := framework.LoadConfig() - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error loading framework") - return vpa_clientset.NewForConfigOrDie(config) -} - -// InstallVPA installs a VPA object in the test cluster. -func InstallVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) { - vpaClientSet := getVpaClientSet(f) - _, err := vpaClientSet.AutoscalingV1beta2().VerticalPodAutoscalers(f.Namespace.Name).Create(context.TODO(), vpa, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error creating VPA") - // apiserver ignore status in vpa create, so need to update status - if !isStatusEmpty(&vpa.Status) { - if vpa.Status.Recommendation != nil { - PatchVpaRecommendation(f, vpa, vpa.Status.Recommendation) - } - } -} - -func isStatusEmpty(status *vpa_types.VerticalPodAutoscalerStatus) bool { - if status == nil { - return true - } - - if len(status.Conditions) == 0 && status.Recommendation == nil { - return true - } - return false -} - -// InstallRawVPA installs a VPA object passed in as raw json in the test cluster. -func InstallRawVPA(f *framework.Framework, obj interface{}) error { - vpaClientSet := getVpaClientSet(f) - err := vpaClientSet.AutoscalingV1beta2().RESTClient().Post(). - Namespace(f.Namespace.Name). - Resource("verticalpodautoscalers"). - Body(obj). - Do(context.TODO()) - return err.Error() -} - -// PatchVpaRecommendation installs a new reocmmendation for VPA object. 
-func PatchVpaRecommendation(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler, - recommendation *vpa_types.RecommendedPodResources) { - newStatus := vpa.Status.DeepCopy() - newStatus.Recommendation = recommendation - bytes, err := json.Marshal([]patchRecord{{ - Op: "replace", - Path: "/status", - Value: *newStatus, - }}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - _, err = getVpaClientSet(f).AutoscalingV1beta2().VerticalPodAutoscalers(f.Namespace.Name).Patch(context.TODO(), vpa.Name, types.JSONPatchType, bytes, metav1.PatchOptions{}, "status") - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to patch VPA.") -} - -// AnnotatePod adds annotation for an existing pod. -func AnnotatePod(f *framework.Framework, podName, annotationName, annotationValue string) { - bytes, err := json.Marshal([]patchRecord{{ - Op: "add", - Path: fmt.Sprintf("/metadata/annotations/%v", annotationName), - Value: annotationValue, - }}) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Patch(context.TODO(), podName, types.JSONPatchType, bytes, metav1.PatchOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to patch pod.") - gomega.Expect(pod.Annotations[annotationName]).To(gomega.Equal(annotationValue)) -} - -// ParseQuantityOrDie parses quantity from string and dies with an error if -// unparsable. -func ParseQuantityOrDie(text string) resource.Quantity { - quantity, err := resource.ParseQuantity(text) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error parsing quantity: %s", text) - return quantity -} - -// PodSet is a simplified representation of PodList mapping names to UIDs. -type PodSet map[string]types.UID - -// MakePodSet converts PodList to podset for easier comparison of pod collections. -func MakePodSet(pods *apiv1.PodList) PodSet { - result := make(PodSet) - if pods == nil { - return result - } - for _, p := range pods.Items { - result[p.Name] = p.UID - } - return result -} - -// WaitForPodsRestarted waits until some pods from the list are restarted. -func WaitForPodsRestarted(f *framework.Framework, podList *apiv1.PodList) error { - initialPodSet := MakePodSet(podList) - - err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - currentPodList, err := GetHamsterPods(f) - if err != nil { - return false, err - } - currentPodSet := MakePodSet(currentPodList) - return WerePodsSuccessfullyRestarted(currentPodSet, initialPodSet), nil - }) - - if err != nil { - return fmt.Errorf("waiting for set of pods changed: %v", err) - } - return nil -} - -// WaitForPodsEvicted waits until some pods from the list are evicted. -func WaitForPodsEvicted(f *framework.Framework, podList *apiv1.PodList) error { - initialPodSet := MakePodSet(podList) - - err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - currentPodList, err := GetHamsterPods(f) - if err != nil { - return false, err - } - currentPodSet := MakePodSet(currentPodList) - return GetEvictedPodsCount(currentPodSet, initialPodSet) > 0, nil - }) - - if err != nil { - return fmt.Errorf("waiting for set of pods changed: %v", err) - } - return nil -} - -// WerePodsSuccessfullyRestarted returns true if some pods from initialPodSet have been -// successfully restarted comparing to currentPodSet (pods were evicted and -// are running). 
-func WerePodsSuccessfullyRestarted(currentPodSet PodSet, initialPodSet PodSet) bool { - if len(currentPodSet) < len(initialPodSet) { - // If we have less pods running than in the beginning, there is a restart - // in progress - a pod was evicted but not yet recreated. - framework.Logf("Restart in progress") - return false - } - evictedCount := GetEvictedPodsCount(currentPodSet, initialPodSet) - framework.Logf("%v of initial pods were already evicted", evictedCount) - return evictedCount > 0 -} - -// GetEvictedPodsCount returns the count of pods from initialPodSet that have -// been evicted comparing to currentPodSet. -func GetEvictedPodsCount(currentPodSet PodSet, initialPodSet PodSet) int { - diffs := 0 - for name, initialUID := range initialPodSet { - currentUID, inCurrent := currentPodSet[name] - if !inCurrent { - diffs += 1 - } else if initialUID != currentUID { - diffs += 1 - } - } - return diffs -} - -// CheckNoPodsEvicted waits for long enough period for VPA to start evicting -// pods and checks that no pods were restarted. -func CheckNoPodsEvicted(f *framework.Framework, initialPodSet PodSet) { - time.Sleep(VpaEvictionTimeout) - currentPodList, err := GetHamsterPods(f) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when listing hamster pods to check number of pod evictions") - restarted := GetEvictedPodsCount(MakePodSet(currentPodList), initialPodSet) - gomega.Expect(restarted).To(gomega.Equal(0), "there should be no pod evictions") -} - -// WaitForVPAMatch pools VPA object until match function returns true. Returns -// polled vpa object. On timeout returns error. -func WaitForVPAMatch(c vpa_clientset.Interface, vpa *vpa_types.VerticalPodAutoscaler, match func(vpa *vpa_types.VerticalPodAutoscaler) bool) (*vpa_types.VerticalPodAutoscaler, error) { - var polledVpa *vpa_types.VerticalPodAutoscaler - err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { - var err error - polledVpa, err = c.AutoscalingV1beta2().VerticalPodAutoscalers(vpa.Namespace).Get(context.TODO(), vpa.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - - if match(polledVpa) { - return true, nil - } - - return false, nil - }) - - if err != nil { - return nil, fmt.Errorf("error waiting for recommendation present in %v: %v", vpa.Name, err) - } - return polledVpa, nil -} - -// WaitForRecommendationPresent pools VPA object until recommendations are not empty. Returns -// polled vpa object. On timeout returns error. -func WaitForRecommendationPresent(c vpa_clientset.Interface, vpa *vpa_types.VerticalPodAutoscaler) (*vpa_types.VerticalPodAutoscaler, error) { - return WaitForVPAMatch(c, vpa, func(vpa *vpa_types.VerticalPodAutoscaler) bool { - return vpa.Status.Recommendation != nil && len(vpa.Status.Recommendation.ContainerRecommendations) != 0 - }) -} - -// WaitForUncappedCPURecommendationAbove pools VPA object until uncapped recommendation is above specified value. -// Returns polled VPA object. On timeout returns error. 
-func WaitForUncappedCPURecommendationAbove(c vpa_clientset.Interface, vpa *vpa_types.VerticalPodAutoscaler, minMilliCPU int64) (*vpa_types.VerticalPodAutoscaler, error) { - return WaitForVPAMatch(c, vpa, func(vpa *vpa_types.VerticalPodAutoscaler) bool { - if vpa.Status.Recommendation == nil || len(vpa.Status.Recommendation.ContainerRecommendations) == 0 { - return false - } - uncappedCpu := vpa.Status.Recommendation.ContainerRecommendations[0].UncappedTarget[apiv1.ResourceCPU] - return uncappedCpu.MilliValue() > minMilliCPU - }) -} - -func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity, lrType apiv1.LimitType) { - lr := &apiv1.LimitRange{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: f.Namespace.Name, - Name: "hamster-lr", - }, - Spec: apiv1.LimitRangeSpec{ - Limits: []apiv1.LimitRangeItem{}, - }, - } - - if maxMemoryLimit != nil || maxCpuLimit != nil { - lrItem := apiv1.LimitRangeItem{ - Type: lrType, - Max: apiv1.ResourceList{}, - } - if maxCpuLimit != nil { - lrItem.Max[apiv1.ResourceCPU] = *maxCpuLimit - } - if maxMemoryLimit != nil { - lrItem.Max[apiv1.ResourceMemory] = *maxMemoryLimit - } - lr.Spec.Limits = append(lr.Spec.Limits, lrItem) - } - - if minMemoryLimit != nil || minCpuLimit != nil { - lrItem := apiv1.LimitRangeItem{ - Type: lrType, - Min: apiv1.ResourceList{}, - } - if minCpuLimit != nil { - lrItem.Min[apiv1.ResourceCPU] = *minCpuLimit - } - if minMemoryLimit != nil { - lrItem.Min[apiv1.ResourceMemory] = *minMemoryLimit - } - lr.Spec.Limits = append(lr.Spec.Limits, lrItem) - } - _, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(context.TODO(), lr, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when creating limit range") -} - -// InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory. -func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string, lrType apiv1.LimitType) { - ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit)) - maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit) - maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit) - installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity, lrType) -} - -// InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory. -func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string, lrType apiv1.LimitType) { - ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit)) - minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit) - minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit) - installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil, lrType) -} diff --git a/vertical-pod-autoscaler/e2e/v1beta2/e2e.go b/vertical-pod-autoscaler/e2e/v1beta2/e2e.go deleted file mode 100644 index 8ca685cdf671..000000000000 --- a/vertical-pod-autoscaler/e2e/v1beta2/e2e.go +++ /dev/null @@ -1,345 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscaling - -// This file is a cut down fork of k8s/test/e2e/e2e.go - -import ( - "context" - "fmt" - "io/ioutil" - "path" - "testing" - "time" - - klog "k8s.io/klog/v2" - - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtimeutils "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/component-base/logs" - "k8s.io/component-base/version" - "k8s.io/kubernetes/test/e2e/framework" - e2edebug "k8s.io/kubernetes/test/e2e/framework/debug" - e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" - "k8s.io/kubernetes/test/e2e/framework/manifest" - e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - testutils "k8s.io/kubernetes/test/utils" - utilnet "k8s.io/utils/net" - - clientset "k8s.io/client-go/kubernetes" - // ensure auth plugins are loaded - _ "k8s.io/client-go/plugin/pkg/client/auth" - - // ensure that cloud providers are loaded - _ "k8s.io/kubernetes/test/e2e/framework/providers/gce" -) - -const ( - // namespaceCleanupTimeout is how long to wait for the namespace to be deleted. - // If there are any orphaned namespaces to clean up, this test is running - // on a long lived cluster. A long wait here is preferably to spurious test - // failures caused by leaked resources from a previous test run. - namespaceCleanupTimeout = 15 * time.Minute -) - -var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { - setupSuite() - return nil -}, func(data []byte) { - // Run on all Ginkgo nodes - setupSuitePerGinkgoNode() -}) - -var _ = ginkgo.SynchronizedAfterSuite(func() { - CleanupSuite() -}, func() { - AfterSuiteActions() -}) - -// RunE2ETests checks configuration parameters (specified through flags) and then runs -// E2E tests using the Ginkgo runner. -// If a "report directory" is specified, one or more JUnit test reports will be -// generated in this directory, and cluster logs will also be saved. -// This function is called on each Ginkgo node in parallel mode. -func RunE2ETests(t *testing.T) { - runtimeutils.ReallyCrash = true - logs.InitLogs() - defer logs.FlushLogs() - - gomega.RegisterFailHandler(framework.Fail) - suiteConfig, _ := ginkgo.GinkgoConfiguration() - // Disable skipped tests unless they are explicitly requested. - if len(suiteConfig.FocusStrings) == 0 && len(suiteConfig.SkipStrings) == 0 { - suiteConfig.SkipStrings = []string{`\[Flaky\]|\[Feature:.+\]`} - } - ginkgo.RunSpecs(t, "Kubernetes e2e suite") -} - -// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it -// to flip to Ready, log its output and delete it. 
-func runKubernetesServiceTestContainer(c clientset.Interface, ns string) { - path := "test/images/clusterapi-tester/pod.yaml" - framework.Logf("Parsing pod from %v", path) - p, err := manifest.PodFromManifest(path) - if err != nil { - framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err) - return - } - p.Namespace = ns - if _, err := c.CoreV1().Pods(ns).Create(context.TODO(), p, metav1.CreateOptions{}); err != nil { - framework.Logf("Failed to create %v: %v", p.Name, err) - return - } - defer func() { - if err := c.CoreV1().Pods(ns).Delete(context.TODO(), p.Name, metav1.DeleteOptions{}); err != nil { - framework.Logf("Failed to delete pod %v: %v", p.Name, err) - } - }() - timeout := 5 * time.Minute - if err := e2epod.WaitForPodCondition(context.TODO(), c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil { - framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err) - return - } - logs, err := e2epod.GetPodLogs(context.TODO(), c, ns, p.Name, p.Spec.Containers[0].Name) - if err != nil { - framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err) - } else { - framework.Logf("Output of clusterapi-tester:\n%v", logs) - } -} - -// getDefaultClusterIPFamily obtains the default IP family of the cluster -// using the Cluster IP address of the kubernetes service created in the default namespace -// This unequivocally identifies the default IP family because services are single family -// TODO: dual-stack may support multiple families per service -// but we can detect if a cluster is dual stack because pods have two addresses (one per family) -func getDefaultClusterIPFamily(c clientset.Interface) string { - // Get the ClusterIP of the kubernetes service created in the default namespace - svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{}) - if err != nil { - framework.Failf("Failed to get kubernetes service ClusterIP: %v", err) - } - - if utilnet.IsIPv6String(svc.Spec.ClusterIP) { - return "ipv6" - } - return "ipv4" -} - -// waitForDaemonSets for all daemonsets in the given namespace to be ready -// (defined as all but 'allowedNotReadyNodes' pods associated with that -// daemonset are ready). -func waitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes int32, timeout time.Duration) error { - start := time.Now() - framework.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", - timeout, ns) - - return wait.PollImmediate(framework.Poll, timeout, func() (bool, error) { - dsList, err := c.AppsV1().DaemonSets(ns).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err) - return false, err - } - var notReadyDaemonSets []string - for _, ds := range dsList.Items { - framework.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ds.Status.NumberReady, ds.Status.DesiredNumberScheduled, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds())) - if ds.Status.DesiredNumberScheduled-ds.Status.NumberReady > allowedNotReadyNodes { - notReadyDaemonSets = append(notReadyDaemonSets, ds.ObjectMeta.Name) - } - } - - if len(notReadyDaemonSets) > 0 { - framework.Logf("there are not ready daemonsets: %v", notReadyDaemonSets) - return false, nil - } - - return true, nil - }) -} - -// setupSuite is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step. 
-// There are certain operations we only want to run once per overall test invocation -// (such as deleting old namespaces, or verifying that all system pods are running. -// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite -// to ensure that these operations only run on the first parallel Ginkgo node. -// -// This function takes two parameters: one function which runs on only the first Ginkgo node, -// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes, -// accepting the byte array. -func setupSuite() { - // Run only on Ginkgo node 1 - c, err := framework.LoadClientset() - if err != nil { - klog.Fatal("Error loading client: ", err) - } - - // Delete any namespaces except those created by the system. This ensures no - // lingering resources are left over from a previous test run. - if framework.TestContext.CleanStart { - deleted, err := framework.DeleteNamespaces(context.TODO(), c, nil, /* deleteFilter */ - []string{ - metav1.NamespaceSystem, - metav1.NamespaceDefault, - metav1.NamespacePublic, - v1.NamespaceNodeLease, - }) - if err != nil { - framework.Failf("Error deleting orphaned namespaces: %v", err) - } - framework.Logf("Waiting for deletion of the following namespaces: %v", deleted) - if err := framework.WaitForNamespacesDeleted(context.TODO(), c, deleted, namespaceCleanupTimeout); err != nil { - framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err) - } - } - - timeoutCtx := framework.NewTimeoutContext() - - // In large clusters we may get to this point but still have a bunch - // of nodes without Routes created. Since this would make a node - // unschedulable, we need to wait until all of them are schedulable. - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(context.TODO(), c, timeoutCtx.NodeSchedulable)) - - // If NumNodes is not specified then auto-detect how many are scheduleable and not tainted - if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes { - nodes, err := e2enode.GetReadySchedulableNodes(context.TODO(), c) - framework.ExpectNoError(err) - framework.TestContext.CloudConfig.NumNodes = len(nodes.Items) - } - - // Ensure all pods are running and ready before starting tests (otherwise, - // cluster infrastructure pods that are being pulled or started can block - // test pods from running, and tests that ensure all pods are running and - // ready will fail). - podStartupTimeout := timeoutCtx.SystemPodsStartup - // TODO: In large clusters, we often observe a non-starting pods due to - // #41007. To avoid those pods preventing the whole test runs (and just - // wasting the whole run), we allow for some not-ready pods (with the - // number equal to the number of allowed not-ready nodes). 
- if err := e2epod.WaitForPodsRunningReady(context.TODO(), c, metav1.NamespaceSystem, framework.TestContext.MinStartupPods, podStartupTimeout); err != nil { - e2edebug.DumpAllNamespaceInfo(context.TODO(), c, metav1.NamespaceSystem) - e2ekubectl.LogFailedContainers(context.TODO(), c, metav1.NamespaceSystem, framework.Logf) - runKubernetesServiceTestContainer(c, metav1.NamespaceDefault) - framework.Failf("Error waiting for all pods to be running and ready: %v", err) - } - - if err := waitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), timeoutCtx.SystemDaemonsetStartup); err != nil { - framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err) - } - - // Log the version of the server and this client. - framework.Logf("e2e test version: %s", version.Get().GitVersion) - - dc := c.DiscoveryClient - - serverVersion, serverErr := dc.ServerVersion() - if serverErr != nil { - framework.Logf("Unexpected server error retrieving version: %v", serverErr) - } - if serverVersion != nil { - framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion) - } - - if framework.TestContext.NodeKiller.Enabled { - nodeKiller := e2enode.NewNodeKiller(framework.TestContext.NodeKiller, c, framework.TestContext.Provider) - go nodeKiller.Run(framework.TestContext.NodeKiller.NodeKillerStopCtx) - } -} - -// setupSuitePerGinkgoNode is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step. -// There are certain operations we only want to run once per overall test invocation on each Ginkgo node -// such as making some global variables accessible to all parallel executions -// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite -// Ref: https://onsi.github.io/ginkgo/#parallel-specs -func setupSuitePerGinkgoNode() { - // Obtain the default IP family of the cluster - // Some e2e test are designed to work on IPv4 only, this global variable - // allows to adapt those tests to work on both IPv4 and IPv6 - // TODO: dual-stack - // the dual stack clusters can be ipv4-ipv6 or ipv6-ipv4, order matters, - // and services use the primary IP family by default - c, err := framework.LoadClientset() - if err != nil { - klog.Fatal("Error loading client: ", err) - } - framework.TestContext.IPFamily = getDefaultClusterIPFamily(c) - framework.Logf("Cluster IP family: %s", framework.TestContext.IPFamily) -} - -// CleanupSuite is the boilerplate that can be used after tests on ginkgo were run, on the SynchronizedAfterSuite step. -// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs). -// Here, the order of functions is reversed; first, the function which runs everywhere, -// and then the function that only runs on the first Ginkgo node. 
-func CleanupSuite() { - // Run on all Ginkgo nodes - framework.Logf("Running AfterSuite actions on all nodes") -} - -// AfterSuiteActions are actions that are run on ginkgo's SynchronizedAfterSuite -func AfterSuiteActions() { - // Run only Ginkgo on node 1 - framework.Logf("Running AfterSuite actions on node 1") - if framework.TestContext.ReportDir != "" { - framework.CoreDump(framework.TestContext.ReportDir) - } - if framework.TestContext.GatherSuiteMetricsAfterTest { - if err := gatherTestSuiteMetrics(); err != nil { - framework.Logf("Error gathering metrics: %v", err) - } - } - if framework.TestContext.NodeKiller.Enabled { - framework.TestContext.NodeKiller.NodeKillerStop() - } -} - -func gatherTestSuiteMetrics() error { - framework.Logf("Gathering metrics") - c, err := framework.LoadClientset() - if err != nil { - return fmt.Errorf("error loading client: %v", err) - } - - // Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally). - grabber, err := e2emetrics.NewMetricsGrabber(context.TODO(), c, nil, nil, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false) - if err != nil { - return fmt.Errorf("failed to create MetricsGrabber: %v", err) - } - - received, err := grabber.Grab(context.TODO()) - if err != nil { - return fmt.Errorf("failed to grab metrics: %v", err) - } - - metricsForE2E := (*e2emetrics.ComponentCollection)(&received) - metricsJSON := metricsForE2E.PrintJSON() - if framework.TestContext.ReportDir != "" { - filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json") - if err := ioutil.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil { - return fmt.Errorf("error writing to %q: %v", filePath, err) - } - } else { - framework.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON) - } - - return nil -} diff --git a/vertical-pod-autoscaler/e2e/v1beta2/e2e_test.go b/vertical-pod-autoscaler/e2e/v1beta2/e2e_test.go deleted file mode 100644 index 98819575c43a..000000000000 --- a/vertical-pod-autoscaler/e2e/v1beta2/e2e_test.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscaling - -import ( - "flag" - "fmt" - "math/rand" - "os" - "testing" - "time" - - // Never, ever remove the line with "/ginkgo". Without it, - // the ginkgo test runner will not detect that this - // directory contains a Ginkgo test suite. - // See https://github.com/kubernetes/kubernetes/issues/74827 - // "github.com/onsi/ginkgo" - - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - "k8s.io/kubernetes/test/e2e/framework/testfiles" - "k8s.io/kubernetes/test/utils/image" -) - -var viperConfig = flag.String("viper-config", "", "The name of a viper config file (https://github.com/spf13/viper#what-is-viper). All e2e command line parameters can also be configured in such a file. 
May contain a path and may or may not contain the file suffix. The default is to look for an optional file with `e2e` as base name. If a file is specified explicitly, it must be present.") - -// handleFlags sets up all flags and parses the command line. -func handleFlags() { - config.CopyFlags(config.Flags, flag.CommandLine) - framework.RegisterCommonFlags(flag.CommandLine) - framework.RegisterClusterFlags(flag.CommandLine) - flag.Parse() -} - -func TestMain(m *testing.M) { - // Register test flags, then parse flags. - handleFlags() - - if framework.TestContext.ListImages { - for _, v := range image.GetImageConfigs() { - fmt.Println(v.GetE2EImage()) - } - os.Exit(0) - } - - framework.AfterReadingAllFlags(&framework.TestContext) - - // TODO: Deprecating repo-root over time... instead just use gobindata_util.go , see #23987. - // Right now it is still needed, for example by - // test/e2e/framework/ingress/ingress_utils.go - // for providing the optional secret.yaml file and by - // test/e2e/framework/util.go for cluster/log-dump. - if framework.TestContext.RepoRoot != "" { - testfiles.AddFileSource(testfiles.RootFileSource{Root: framework.TestContext.RepoRoot}) - } - - rand.Seed(time.Now().UnixNano()) - os.Exit(m.Run()) -} - -func TestE2E(t *testing.T) { - RunE2ETests(t) -} diff --git a/vertical-pod-autoscaler/e2e/v1beta2/full_vpa.go b/vertical-pod-autoscaler/e2e/v1beta2/full_vpa.go deleted file mode 100644 index 63d69687119e..000000000000 --- a/vertical-pod-autoscaler/e2e/v1beta2/full_vpa.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscaling - -import ( - "context" - "fmt" - "time" - - autoscaling "k8s.io/api/autoscaling/v1" - apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" - vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" - "k8s.io/kubernetes/test/e2e/framework" - podsecurity "k8s.io/pod-security-admission/api" - - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -const ( - minimalCPULowerBound = "0m" - minimalCPUUpperBound = "100m" - minimalMemoryLowerBound = "0Mi" - minimalMemoryUpperBound = "300Mi" - // the initial values should be outside minimal bounds - initialCPU = int64(10) // mCPU - initialMemory = int64(10) // MB - oomTestTimeout = 8 * time.Minute -) - -var _ = FullVpaE2eDescribe("Pods under VPA", func() { - var ( - rc *ResourceConsumer - vpaClientSet *vpa_clientset.Clientset - vpaCRD *vpa_types.VerticalPodAutoscaler - ) - replicas := 3 - - ginkgo.AfterEach(func() { - rc.CleanUp() - }) - - // This schedules AfterEach block that needs to run after the AfterEach above and - // BeforeEach that needs to run before the BeforeEach below - thus the order of these matters. 
- f := framework.NewDefaultFramework("vertical-pod-autoscaling") - f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline - - ginkgo.BeforeEach(func() { - ns := f.Namespace.Name - ginkgo.By("Setting up a hamster deployment") - rc = NewDynamicResourceConsumer("hamster", ns, KindDeployment, - replicas, - 1, /*initCPUTotal*/ - 10, /*initMemoryTotal*/ - 1, /*initCustomMetric*/ - initialCPU, /*requestCPU*/ - initialMemory, /*requestMemory*/ - f.ClientSet, - f.ScalesGetter) - - ginkgo.By("Setting up a VPA CRD") - config, err := framework.LoadConfig() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - vpaCRD = NewVPA(f, "hamster-vpa", &autoscaling.CrossVersionObjectReference{ - APIVersion: "apps/v1", - Kind: "Deployment", - Name: "hamster", - }) - - vpaClientSet = vpa_clientset.NewForConfigOrDie(config) - vpaClient := vpaClientSet.AutoscalingV1beta2() - _, err = vpaClient.VerticalPodAutoscalers(ns).Create(context.TODO(), vpaCRD, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - }) - - ginkgo.It("have cpu requests growing with usage", func() { - // initial CPU usage is low so a minimal recommendation is expected - err := waitForResourceRequestInRangeInPods( - f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU, - ParseQuantityOrDie(minimalCPULowerBound), ParseQuantityOrDie(minimalCPUUpperBound)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // consume more CPU to get a higher recommendation - rc.ConsumeCPU(600 * replicas) - err = waitForResourceRequestInRangeInPods( - f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceCPU, - ParseQuantityOrDie("500m"), ParseQuantityOrDie("1300m")) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - ginkgo.It("have memory requests growing with usage", func() { - // initial memory usage is low so a minimal recommendation is expected - err := waitForResourceRequestInRangeInPods( - f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory, - ParseQuantityOrDie(minimalMemoryLowerBound), ParseQuantityOrDie(minimalMemoryUpperBound)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - // consume more memory to get a higher recommendation - // NOTE: large range given due to unpredictability of actual memory usage - rc.ConsumeMem(1024 * replicas) - err = waitForResourceRequestInRangeInPods( - f, pollTimeout, metav1.ListOptions{LabelSelector: "name=hamster"}, apiv1.ResourceMemory, - ParseQuantityOrDie("900Mi"), ParseQuantityOrDie("4000Mi")) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) -}) - -var _ = FullVpaE2eDescribe("OOMing pods under VPA", func() { - var ( - vpaClientSet *vpa_clientset.Clientset - vpaCRD *vpa_types.VerticalPodAutoscaler - ) - const replicas = 3 - - f := framework.NewDefaultFramework("vertical-pod-autoscaling") - f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline - - ginkgo.BeforeEach(func() { - ns := f.Namespace.Name - ginkgo.By("Setting up a hamster deployment") - - runOomingReplicationController( - f.ClientSet, - ns, - "hamster", - replicas) - ginkgo.By("Setting up a VPA CRD") - config, err := framework.LoadConfig() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - vpaCRD = NewVPA(f, "hamster-vpa", &autoscaling.CrossVersionObjectReference{ - APIVersion: "apps/v1", - Kind: "Deployment", - Name: "hamster", - }) - - vpaClientSet = vpa_clientset.NewForConfigOrDie(config) - vpaClient := vpaClientSet.AutoscalingV1beta2() - _, err = 
vpaClient.VerticalPodAutoscalers(ns).Create(context.TODO(), vpaCRD, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - ginkgo.It("have memory requests growing with OOMs", func() { - listOptions := metav1.ListOptions{ - LabelSelector: "name=hamster", - FieldSelector: getPodSelectorExcludingDonePodsOrDie(), - } - err := waitForResourceRequestInRangeInPods( - f, oomTestTimeout, listOptions, apiv1.ResourceMemory, - ParseQuantityOrDie("1400Mi"), ParseQuantityOrDie("10000Mi")) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) -}) - -func waitForPodsMatch(f *framework.Framework, timeout time.Duration, listOptions metav1.ListOptions, matcher func(pod apiv1.Pod) bool) error { - return wait.PollImmediate(pollInterval, timeout, func() (bool, error) { - - ns := f.Namespace.Name - c := f.ClientSet - - podList, err := c.CoreV1().Pods(ns).List(context.TODO(), listOptions) - if err != nil { - return false, err - } - - if len(podList.Items) == 0 { - return false, nil - } - - // Run matcher on all pods, even if we find pod that doesn't match early. - // This allows the matcher to write logs for all pods. This in turns makes - // it easier to spot some problems (for example unexpected pods in the list - // results). - result := true - for _, pod := range podList.Items { - if !matcher(pod) { - result = false - } - } - return result, nil - - }) -} - -func waitForResourceRequestInRangeInPods(f *framework.Framework, timeout time.Duration, listOptions metav1.ListOptions, resourceName apiv1.ResourceName, lowerBound, upperBound resource.Quantity) error { - err := waitForPodsMatch(f, timeout, listOptions, - func(pod apiv1.Pod) bool { - resourceRequest, found := pod.Spec.Containers[0].Resources.Requests[resourceName] - framework.Logf("Comparing %v request %v against range of (%v, %v)", resourceName, resourceRequest, lowerBound, upperBound) - return found && resourceRequest.MilliValue() > lowerBound.MilliValue() && resourceRequest.MilliValue() < upperBound.MilliValue() - }) - - if err != nil { - return fmt.Errorf("error waiting for %s request in range of (%v,%v) for pods: %+v", resourceName, lowerBound, upperBound, listOptions) - } - return nil -} diff --git a/vertical-pod-autoscaler/e2e/v1beta2/recommender.go b/vertical-pod-autoscaler/e2e/v1beta2/recommender.go deleted file mode 100644 index 09f391911fc6..000000000000 --- a/vertical-pod-autoscaler/e2e/v1beta2/recommender.go +++ /dev/null @@ -1,417 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package autoscaling - -import ( - "context" - "fmt" - "strings" - "time" - - autoscaling "k8s.io/api/autoscaling/v1" - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" - vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - klog "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e/framework" - podsecurity "k8s.io/pod-security-admission/api" - - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -type resourceRecommendation struct { - target, lower, upper int64 -} - -func (r *resourceRecommendation) sub(other *resourceRecommendation) resourceRecommendation { - return resourceRecommendation{ - target: r.target - other.target, - lower: r.lower - other.lower, - upper: r.upper - other.upper, - } - -} - -func getResourceRecommendation(containerRecommendation *vpa_types.RecommendedContainerResources, r apiv1.ResourceName) resourceRecommendation { - getOrZero := func(resourceList apiv1.ResourceList) int64 { - value, found := resourceList[r] - if found { - return value.Value() - } - return 0 - } - return resourceRecommendation{ - target: getOrZero(containerRecommendation.Target), - lower: getOrZero(containerRecommendation.LowerBound), - upper: getOrZero(containerRecommendation.UpperBound), - } -} - -type recommendationChange struct { - oldMissing, newMissing bool - diff resourceRecommendation -} - -type observer struct { - channel chan recommendationChange -} - -func (*observer) OnAdd(obj interface{}, isInInitialList bool) {} -func (*observer) OnDelete(obj interface{}) {} - -func (o *observer) OnUpdate(oldObj, newObj interface{}) { - get := func(vpa *vpa_types.VerticalPodAutoscaler) (result resourceRecommendation, found bool) { - if vpa.Status.Recommendation == nil || len(vpa.Status.Recommendation.ContainerRecommendations) == 0 { - found = false - result = resourceRecommendation{} - } else { - found = true - result = getResourceRecommendation(&vpa.Status.Recommendation.ContainerRecommendations[0], apiv1.ResourceCPU) - } - return - } - oldVPA, _ := oldObj.(*vpa_types.VerticalPodAutoscaler) - NewVPA, _ := newObj.(*vpa_types.VerticalPodAutoscaler) - oldRecommendation, oldFound := get(oldVPA) - newRecommendation, newFound := get(NewVPA) - result := recommendationChange{ - oldMissing: !oldFound, - newMissing: !newFound, - diff: newRecommendation.sub(&oldRecommendation), - } - go func() { o.channel <- result }() -} - -func getVpaObserver(vpaClientSet vpa_clientset.Interface) *observer { - vpaListWatch := cache.NewListWatchFromClient(vpaClientSet.AutoscalingV1beta2().RESTClient(), "verticalpodautoscalers", apiv1.NamespaceAll, fields.Everything()) - vpaObserver := observer{channel: make(chan recommendationChange)} - _, controller := cache.NewIndexerInformer(vpaListWatch, - &vpa_types.VerticalPodAutoscaler{}, - 1*time.Hour, - &vpaObserver, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) - go controller.Run(make(chan struct{})) - if !cache.WaitForCacheSync(make(chan struct{}), controller.HasSynced) { - klog.Fatalf("Failed to sync VPA cache during initialization") - } else { - klog.InfoS("Initial VPA synced successfully") - } - return &vpaObserver -} - -var _ = RecommenderE2eDescribe("Checkpoints", func() { - f := framework.NewDefaultFramework("vertical-pod-autoscaling") - f.NamespacePodSecurityEnforceLevel = 
podsecurity.LevelBaseline - - ginkgo.It("with missing VPA objects are garbage collected", func() { - ns := f.Namespace.Name - vpaClientSet := getVpaClientSet(f) - - checkpoint := vpa_types.VerticalPodAutoscalerCheckpoint{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: ns, - }, - Spec: vpa_types.VerticalPodAutoscalerCheckpointSpec{ - VPAObjectName: "some-vpa", - }, - } - - _, err := vpaClientSet.AutoscalingV1beta2().VerticalPodAutoscalerCheckpoints(ns).Create(context.TODO(), &checkpoint, metav1.CreateOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - klog.InfoS("Sleeping for up to 15 minutes...") - - maxRetries := 90 - retryDelay := 10 * time.Second - for i := 0; i < maxRetries; i++ { - list, err := vpaClientSet.AutoscalingV1().VerticalPodAutoscalerCheckpoints(ns).List(context.TODO(), metav1.ListOptions{}) - if err == nil && len(list.Items) == 0 { - break - } - klog.InfoS("Still waiting...") - time.Sleep(retryDelay) - } - - list, err := vpaClientSet.AutoscalingV1beta2().VerticalPodAutoscalerCheckpoints(ns).List(context.TODO(), metav1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(list.Items).To(gomega.BeEmpty()) - }) -}) - -var _ = RecommenderE2eDescribe("VPA CRD object", func() { - f := framework.NewDefaultFramework("vertical-pod-autoscaling") - f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline - - ginkgo.It("serves recommendation for CronJob", func() { - ginkgo.By("Setting up hamster CronJob") - SetupHamsterCronJob(f, "*/5 * * * *", "100m", "100Mi", defaultHamsterReplicas) - - vpaClientSet := getVpaClientSet(f) - - ginkgo.By("Setting up VPA") - vpaCRD := NewVPA(f, "hamster-cronjob-vpa", &autoscaling.CrossVersionObjectReference{ - APIVersion: "batch/v1", - Kind: "CronJob", - Name: "hamster-cronjob", - }) - - InstallVPA(f, vpaCRD) - - ginkgo.By("Waiting for recommendation to be filled") - _, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) -}) - -var _ = RecommenderE2eDescribe("VPA CRD object", func() { - f := framework.NewDefaultFramework("vertical-pod-autoscaling") - f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline - - var ( - vpaCRD *vpa_types.VerticalPodAutoscaler - vpaClientSet vpa_clientset.Interface - ) - - ginkgo.BeforeEach(func() { - ginkgo.By("Setting up a hamster deployment") - _ = SetupHamsterDeployment( - f, /* framework */ - "100m", /* cpu */ - "100Mi", /* memory */ - 1, /* number of replicas */ - ) - - ginkgo.By("Setting up a VPA CRD") - vpaCRD = NewVPA(f, "hamster-vpa", hamsterTargetRef) - InstallVPA(f, vpaCRD) - - vpaClientSet = getVpaClientSet(f) - }) - - ginkgo.It("serves recommendation", func() { - ginkgo.By("Waiting for recommendation to be filled") - _, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - ginkgo.It("doesn't drop lower/upper after recommender's restart", func() { - - o := getVpaObserver(vpaClientSet) - - ginkgo.By("Waiting for recommendation to be filled") - _, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - ginkgo.By("Drain diffs") - out: - for { - select { - case recommendationDiff := <-o.channel: - fmt.Println("Dropping recommendation diff", recommendationDiff) - default: - break out - } - } - ginkgo.By("Deleting recommender") - gomega.Expect(deleteRecommender(f.ClientSet)).To(gomega.BeNil()) - ginkgo.By("Accumulating diffs after restart, sleeping for 5 minutes...") - 
time.Sleep(5 * time.Minute) - changeDetected := false - finish: - for { - select { - case recommendationDiff := <-o.channel: - fmt.Println("checking recommendation diff", recommendationDiff) - changeDetected = true - gomega.Expect(recommendationDiff.oldMissing).To(gomega.Equal(false)) - gomega.Expect(recommendationDiff.newMissing).To(gomega.Equal(false)) - gomega.Expect(recommendationDiff.diff.lower).Should(gomega.BeNumerically(">=", 0)) - gomega.Expect(recommendationDiff.diff.upper).Should(gomega.BeNumerically("<=", 0)) - default: - break finish - } - } - gomega.Expect(changeDetected).To(gomega.Equal(true)) - }) -}) - -var _ = RecommenderE2eDescribe("VPA CRD object", func() { - f := framework.NewDefaultFramework("vertical-pod-autoscaling") - f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline - - var ( - vpaClientSet vpa_clientset.Interface - ) - - ginkgo.BeforeEach(func() { - ginkgo.By("Setting up a hamster deployment") - _ = SetupHamsterDeployment( - f, /* framework */ - "100m", /* cpu */ - "100Mi", /* memory */ - 1, /* number of replicas */ - ) - - vpaClientSet = getVpaClientSet(f) - }) - - ginkgo.It("respects min allowed recommendation", func() { - const minMilliCpu = 10000 - ginkgo.By("Setting up a VPA CRD") - minAllowed := apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie(fmt.Sprintf("%dm", minMilliCpu)), - } - vpaCRD := createVpaCRDWithMinMaxAllowed(f, minAllowed, nil) - - ginkgo.By("Waiting for recommendation to be filled") - vpa, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(vpa.Status.Recommendation.ContainerRecommendations).Should(gomega.HaveLen(1)) - cpu := getMilliCpu(vpa.Status.Recommendation.ContainerRecommendations[0].Target) - gomega.Expect(cpu).Should(gomega.BeNumerically(">=", minMilliCpu), - fmt.Sprintf("target cpu recommendation should be greater than or equal to %dm", minMilliCpu)) - cpuUncapped := getMilliCpu(vpa.Status.Recommendation.ContainerRecommendations[0].UncappedTarget) - gomega.Expect(cpuUncapped).Should(gomega.BeNumerically("<", minMilliCpu), - fmt.Sprintf("uncapped target cpu recommendation should be less than %dm", minMilliCpu)) - }) - - ginkgo.It("respects max allowed recommendation", func() { - const maxMilliCpu = 1 - ginkgo.By("Setting up a VPA CRD") - maxAllowed := apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie(fmt.Sprintf("%dm", maxMilliCpu)), - } - vpaCRD := createVpaCRDWithMinMaxAllowed(f, nil, maxAllowed) - - ginkgo.By("Waiting for recommendation to be filled") - vpa, err := WaitForUncappedCPURecommendationAbove(vpaClientSet, vpaCRD, maxMilliCpu) - gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf( - "Timed out waiting for uncapped cpu recommendation above %d mCPU", maxMilliCpu)) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(vpa.Status.Recommendation.ContainerRecommendations).Should(gomega.HaveLen(1)) - cpu := getMilliCpu(vpa.Status.Recommendation.ContainerRecommendations[0].Target) - gomega.Expect(cpu).Should(gomega.BeNumerically("<=", maxMilliCpu), - fmt.Sprintf("target cpu recommendation should be less than or equal to %dm", maxMilliCpu)) - }) -}) - -func getMilliCpu(resources apiv1.ResourceList) int64 { - cpu := resources[apiv1.ResourceCPU] - return cpu.MilliValue() -} - -// createVpaCRDWithMinMaxAllowed creates vpa object with min and max resources allowed. 
-func createVpaCRDWithMinMaxAllowed(f *framework.Framework, minAllowed, maxAllowed apiv1.ResourceList) *vpa_types.VerticalPodAutoscaler { - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - containerResourcePolicies := []vpa_types.ContainerResourcePolicy{ - { - ContainerName: GetHamsterContainerNameByIndex(0), - MinAllowed: minAllowed, - MaxAllowed: maxAllowed, - }, - } - vpaCRD.Spec.ResourcePolicy = &vpa_types.PodResourcePolicy{ - ContainerPolicies: containerResourcePolicies, - } - InstallVPA(f, vpaCRD) - return vpaCRD -} - -var _ = RecommenderE2eDescribe("VPA CRD object", func() { - f := framework.NewDefaultFramework("vertical-pod-autoscaling") - f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline - - var vpaClientSet vpa_clientset.Interface - - ginkgo.BeforeEach(func() { - vpaClientSet = getVpaClientSet(f) - }) - - ginkgo.It("with no containers opted out all containers get recommendations", func() { - ginkgo.By("Setting up a hamster deployment") - d := NewNHamstersDeployment(f, 2 /*number of containers*/) - _ = startDeploymentPods(f, d) - ginkgo.By("Setting up VPA CRD") - vpaCRD := createVpaCRDWithContainerScalingModes(f, vpa_types.ContainerScalingModeAuto, vpa_types.ContainerScalingModeAuto) - - ginkgo.By("Waiting for recommendation to be filled for both containers") - vpa, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(vpa.Status.Recommendation.ContainerRecommendations).Should(gomega.HaveLen(2)) - }) - - ginkgo.It("only containers not-opted-out get recommendations", func() { - ginkgo.By("Setting up a hamster deployment") - d := NewNHamstersDeployment(f, 2 /*number of containers*/) - _ = startDeploymentPods(f, d) - vpaCRD := createVpaCRDWithContainerScalingModes(f, vpa_types.ContainerScalingModeOff, vpa_types.ContainerScalingModeAuto) - - ginkgo.By("Waiting for recommendation to be filled for just one container") - vpa, err := WaitForRecommendationPresent(vpaClientSet, vpaCRD) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - errMsg := fmt.Sprintf("%s container has recommendations turned off. We expect expect only recommendations for %s", - GetHamsterContainerNameByIndex(0), - GetHamsterContainerNameByIndex(1)) - gomega.Expect(vpa.Status.Recommendation.ContainerRecommendations).Should(gomega.HaveLen(1), errMsg) - gomega.Expect(vpa.Status.Recommendation.ContainerRecommendations[0].ContainerName).To(gomega.Equal(GetHamsterContainerNameByIndex(1)), errMsg) - }) -}) - -// createVpaCRDWithContainerScalingModes creates vpa object with containers policies -// having assigned given scaling modes respectively. 
-func createVpaCRDWithContainerScalingModes(f *framework.Framework, modes ...vpa_types.ContainerScalingMode) *vpa_types.VerticalPodAutoscaler { - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) - containerResourcePolicies := make([]vpa_types.ContainerResourcePolicy, len(modes), len(modes)) - for i := range modes { - containerResourcePolicies[i] = vpa_types.ContainerResourcePolicy{ - ContainerName: GetHamsterContainerNameByIndex(i), - Mode: &modes[i], - } - } - vpaCRD.Spec.ResourcePolicy = &vpa_types.PodResourcePolicy{ - ContainerPolicies: containerResourcePolicies, - } - InstallVPA(f, vpaCRD) - return vpaCRD -} - -func deleteRecommender(c clientset.Interface) error { - namespace := "kube-system" - listOptions := metav1.ListOptions{} - podList, err := c.CoreV1().Pods(namespace).List(context.TODO(), listOptions) - if err != nil { - fmt.Println("Could not list pods.", err) - return err - } - fmt.Println("Pods list items:", len(podList.Items)) - for _, pod := range podList.Items { - if strings.HasPrefix(pod.Name, "vpa-recommender") { - fmt.Println("Deleting pod.", namespace, pod.Name) - err := c.CoreV1().Pods(namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}) - if err != nil { - return err - } - return nil - } - } - return fmt.Errorf("vpa recommender not found") -} diff --git a/vertical-pod-autoscaler/e2e/v1beta2/updater.go b/vertical-pod-autoscaler/e2e/v1beta2/updater.go deleted file mode 100644 index 5b8b894d5658..000000000000 --- a/vertical-pod-autoscaler/e2e/v1beta2/updater.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscaling - -import ( - "context" - "fmt" - "time" - - autoscaling "k8s.io/api/autoscaling/v1" - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" - "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/status" - "k8s.io/kubernetes/test/e2e/framework" - podsecurity "k8s.io/pod-security-admission/api" - - ginkgo "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -var _ = UpdaterE2eDescribe("Updater", func() { - f := framework.NewDefaultFramework("vertical-pod-autoscaling") - f.NamespacePodSecurityEnforceLevel = podsecurity.LevelBaseline - - ginkgo.It("evicts pods when Admission Controller status available", func() { - const statusUpdateInterval = 10 * time.Second - - ginkgo.By("Setting up the Admission Controller status") - stopCh := make(chan struct{}) - statusUpdater := status.NewUpdater( - f.ClientSet, - status.AdmissionControllerStatusName, - status.AdmissionControllerStatusNamespace, - statusUpdateInterval, - "e2e test", - ) - defer func() { - // Schedule a cleanup of the Admission Controller status. - // Status is created outside the test namespace. - ginkgo.By("Deleting the Admission Controller status") - close(stopCh) - err := f.ClientSet.CoordinationV1().Leases(status.AdmissionControllerStatusNamespace). 
- Delete(context.TODO(), status.AdmissionControllerStatusName, metav1.DeleteOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }() - statusUpdater.Run(stopCh) - - podList := setupPodsForEviction(f) - - ginkgo.By("Waiting for pods to be evicted") - err := WaitForPodsEvicted(f, podList) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - }) - - ginkgo.It("doesn't evict pods when Admission Controller status unavailable", func() { - podList := setupPodsForEviction(f) - - ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) - CheckNoPodsEvicted(f, MakePodSet(podList)) - }) -}) - -func setupPodsForEviction(f *framework.Framework) *apiv1.PodList { - controller := &autoscaling.CrossVersionObjectReference{ - APIVersion: "apps/v1", - Kind: "Deployment", - Name: "hamster-deployment", - } - ginkgo.By(fmt.Sprintf("Setting up a hamster %v", controller.Kind)) - setupHamsterController(f, controller.Kind, "100m", "100Mi", defaultHamsterReplicas) - podList, err := GetHamsterPods(f) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - - ginkgo.By("Setting up a VPA CRD") - SetupVPA(f, "200m", vpa_types.UpdateModeAuto, controller) - - return podList -} diff --git a/vertical-pod-autoscaler/hack/run-e2e-tests.sh b/vertical-pod-autoscaler/hack/run-e2e-tests.sh index c87cfd8bf37f..e0970907a6d9 100755 --- a/vertical-pod-autoscaler/hack/run-e2e-tests.sh +++ b/vertical-pod-autoscaler/hack/run-e2e-tests.sh @@ -50,20 +50,14 @@ case ${SUITE} in recommender|updater|admission-controller|actuation|full-vpa) export KUBECONFIG=$HOME/.kube/config pushd ${SCRIPT_ROOT}/e2e - go test ./v1beta2/*go -v --test.timeout=90m --args --ginkgo.v=true --ginkgo.focus="\[VPA\] \[${SUITE}\]" --report-dir=${WORKSPACE} --disable-log-dump --ginkgo.timeout=90m - V1BETA2_RESULT=$? go test ./v1/*go -v --test.timeout=90m --args --ginkgo.v=true --ginkgo.focus="\[VPA\] \[${SUITE}\]" --report-dir=${WORKSPACE} --disable-log-dump --ginkgo.timeout=90m V1_RESULT=$? popd - echo v1beta2 test result: ${V1BETA2_RESULT} - if [ $V1BETA2_RESULT -gt 0 ]; then - echo "Please check v1beta2 \"go test\" logs!" - fi echo v1 test result: ${V1_RESULT} if [ $V1_RESULT -gt 0 ]; then echo "Please check v1 \"go test\" logs!" fi - if [ $V1BETA2_RESULT -gt 0 ] || [ $V1_RESULT -gt 0 ]; then + if [ $V1_RESULT -gt 0 ]; then echo "Tests failed" exit 1 fi From 1355f41ba4514a0771e6a679d416be925cda8f46 Mon Sep 17 00:00:00 2001 From: Adrian Moisey Date: Thu, 19 Dec 2024 07:21:34 +0200 Subject: [PATCH 3/3] Regenerate CRDs --- .../deploy/vpa-v1-crd-gen.yaml | 351 ------------------ 1 file changed, 351 deletions(-) diff --git a/vertical-pod-autoscaler/deploy/vpa-v1-crd-gen.yaml b/vertical-pod-autoscaler/deploy/vpa-v1-crd-gen.yaml index 6499413aa751..9339d1713ed3 100644 --- a/vertical-pod-autoscaler/deploy/vpa-v1-crd-gen.yaml +++ b/vertical-pod-autoscaler/deploy/vpa-v1-crd-gen.yaml @@ -118,107 +118,6 @@ spec: type: object served: true storage: true - - name: v1beta2 - schema: - openAPIV3Schema: - description: |- - VerticalPodAutoscalerCheckpoint is the checkpoint of the internal state of VPA that - is used for recovery after recommender's restart. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the checkpoint. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. - properties: - containerName: - description: Name of the checkpointed container. - type: string - vpaObjectName: - description: Name of the VPA object that stored VerticalPodAutoscalerCheckpoint - object. - type: string - type: object - status: - description: Data of the checkpoint. - properties: - cpuHistogram: - description: Checkpoint of histogram for consumption of CPU. - properties: - bucketWeights: - description: Map from bucket index to bucket weight. - type: object - x-kubernetes-preserve-unknown-fields: true - referenceTimestamp: - description: Reference timestamp for samples collected within - this histogram. - format: date-time - nullable: true - type: string - totalWeight: - description: Sum of samples to be used as denominator for weights - from BucketWeights. - type: number - type: object - firstSampleStart: - description: Timestamp of the fist sample from the histograms. - format: date-time - nullable: true - type: string - lastSampleStart: - description: Timestamp of the last sample from the histograms. - format: date-time - nullable: true - type: string - lastUpdateTime: - description: The time when the status was last refreshed. - format: date-time - nullable: true - type: string - memoryHistogram: - description: Checkpoint of histogram for consumption of memory. - properties: - bucketWeights: - description: Map from bucket index to bucket weight. - type: object - x-kubernetes-preserve-unknown-fields: true - referenceTimestamp: - description: Reference timestamp for samples collected within - this histogram. - format: date-time - nullable: true - type: string - totalWeight: - description: Sum of samples to be used as denominator for weights - from BucketWeights. - type: number - type: object - totalSamplesCount: - description: Total number of samples in the histograms. - type: integer - version: - description: Version of the format of the stored data. - type: string - type: object - type: object - served: true - storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -582,253 +481,3 @@ spec: storage: true subresources: status: {} - - deprecated: true - deprecationWarning: autoscaling.k8s.io/v1beta2 API is deprecated - name: v1beta2 - schema: - openAPIV3Schema: - description: |- - VerticalPodAutoscaler is the configuration for a vertical pod - autoscaler, which automatically manages pod resources based on historical and - real time resource utilization. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - Specification of the behavior of the autoscaler. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. - properties: - resourcePolicy: - description: |- - Controls how the autoscaler computes recommended resources. - The resource policy may be used to set constraints on the recommendations - for individual containers. If not specified, the autoscaler computes recommended - resources for all containers in the pod, without additional constraints. - properties: - containerPolicies: - description: Per-container resource policies. - items: - description: |- - ContainerResourcePolicy controls how autoscaler computes the recommended - resources for a specific container. - properties: - containerName: - description: |- - Name of the container or DefaultContainerResourcePolicy, in which - case the policy is used by the containers that don't have their own - policy specified. - type: string - maxAllowed: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Specifies the maximum amount of resources that will be recommended - for the container. The default is no maximum. - type: object - minAllowed: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Specifies the minimal amount of resources that will be recommended - for the container. The default is no minimum. - type: object - mode: - description: Whether autoscaler is enabled for the container. - The default is "Auto". - enum: - - Auto - - "Off" - type: string - type: object - type: array - type: object - targetRef: - description: |- - TargetRef points to the controller managing the set of pods for the - autoscaler to control - e.g. Deployment, StatefulSet. VerticalPodAutoscaler - can be targeted at controller implementing scale subresource (the pod set is - retrieved from the controller's ScaleStatus) or some well known controllers - (e.g. for DaemonSet the pod set is read from the controller's spec). - If VerticalPodAutoscaler cannot use specified target it will report - ConfigUnsupported condition. - Note that VerticalPodAutoscaler does not require full implementation - of scale subresource - it will not use it to modify the replica count. - The only thing retrieved is a label selector matching pods grouped by - the target resource. 
- properties: - apiVersion: - description: apiVersion is the API version of the referent - type: string - kind: - description: 'kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - name: - description: 'name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - updatePolicy: - description: |- - Describes the rules on how changes are applied to the pods. - If not specified, all fields in the `PodUpdatePolicy` are set to their - default values. - properties: - updateMode: - description: |- - Controls when autoscaler applies changes to the pod resources. - The default is 'Auto'. - enum: - - "Off" - - Initial - - Recreate - - Auto - type: string - type: object - required: - - targetRef - type: object - status: - description: Current information about the autoscaler. - properties: - conditions: - description: |- - Conditions is the set of conditions required for this autoscaler to scale its target, - and indicates whether or not those conditions are met. - items: - description: |- - VerticalPodAutoscalerCondition describes the state of - a VerticalPodAutoscaler at a certain point. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from - one status to another - format: date-time - type: string - message: - description: |- - message is a human-readable explanation containing details about - the transition - type: string - reason: - description: reason is the reason for the condition's last transition. - type: string - status: - description: status is the status of the condition (True, False, - Unknown) - type: string - type: - description: type describes the current condition - type: string - required: - - status - - type - type: object - type: array - recommendation: - description: |- - The most recently computed amount of resources recommended by the - autoscaler for the controlled pods. - properties: - containerRecommendations: - description: Resources recommended by the autoscaler for each - container. - items: - description: |- - RecommendedContainerResources is the recommendation of resources computed by - autoscaler for a specific container. Respects the container resource policy - if present in the spec. In particular the recommendation is not produced for - containers with `ContainerScalingMode` set to 'Off'. - properties: - containerName: - description: Name of the container. - type: string - lowerBound: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Minimum recommended amount of resources. Observes ContainerResourcePolicy. - This amount is not guaranteed to be sufficient for the application to operate in a stable way, however - running with less resources is likely to have significant impact on performance/availability. - type: object - target: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Recommended amount of resources. Observes ContainerResourcePolicy. 
- type: object - uncappedTarget: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - The most recent recommended resources target computed by the autoscaler - for the controlled pods, based only on actual resource usage, not taking - into account the ContainerResourcePolicy. - May differ from the Recommendation if the actual resource usage causes - the target to violate the ContainerResourcePolicy (lower than MinAllowed - or higher that MaxAllowed). - Used only as status indication, will not affect actual resource assignment. - type: object - upperBound: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: |- - Maximum recommended amount of resources. Observes ContainerResourcePolicy. - Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum - amount of application is actually capable of consuming. - type: object - required: - - target - type: object - type: array - type: object - type: object - required: - - spec - type: object - served: true - storage: false - subresources: - status: {}
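
Note (editor): with v1beta2 no longer served, any remaining VerticalPodAutoscaler objects have to be expressed against the v1 API, which this series keeps as the only served and storage version. As a minimal sketch only — assuming the same "hamster" Deployment and Auto update mode used by the e2e fixtures removed above, not anything added by this patch — a v1 object would look like:

    # Minimal sketch of a v1 VerticalPodAutoscaler; the object name, target
    # Deployment name and Auto update mode are assumptions taken from the
    # deleted e2e fixtures, not part of this patch.
    apiVersion: autoscaling.k8s.io/v1
    kind: VerticalPodAutoscaler
    metadata:
      name: hamster-vpa
    spec:
      targetRef:
        apiVersion: apps/v1
        kind: Deployment
        name: hamster
      updatePolicy:
        updateMode: "Auto"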