From 023453251c178191b84d934764aeb32e2e2927e5 Mon Sep 17 00:00:00 2001
From: cyclinder
Date: Fri, 5 Jan 2024 13:52:16 +0800
Subject: [PATCH] Spidercoordinator: it is able to get the CIDR from kubeadm-config

If kube-controller-manager is running as a systemd process rather than
as a Pod, we can't get the CIDR from the KCM Pod. In this case, we can
get the CIDR from the kubeadm-config ConfigMap.

Signed-off-by: cyclinder
---
 .../overlay/get-started-calico-zh_cn.md       |  27 +-
 .../install/overlay/get-started-calico.md     |  27 +-
 .../overlay/get-started-cilium-zh_cn.md       |  27 +-
 .../install/overlay/get-started-cilium.md     |  27 +-
 .../coordinator_informer.go                   |  91 ++--
 test/doc/spidercoodinator.md                  |  21 +-
 test/e2e/common/constant.go                   |  17 +-
 test/e2e/common/mode.go                       |   5 +
 .../spidercoordinator_suite_test.go           |  96 ++++
 .../spidercoordinator_test.go                 | 445 ++++++++++++++++++
 test/scripts/install-default-cni.sh           |   1 +
 11 files changed, 731 insertions(+), 53 deletions(-)
 create mode 100644 test/e2e/spidercoordinator/spidercoordinator_suite_test.go
 create mode 100644 test/e2e/spidercoordinator/spidercoordinator_test.go

diff --git a/docs/usage/install/overlay/get-started-calico-zh_cn.md b/docs/usage/install/overlay/get-started-calico-zh_cn.md
index 31ce6c3d87..8113228544 100644
--- a/docs/usage/install/overlay/get-started-calico-zh_cn.md
+++ b/docs/usage/install/overlay/get-started-calico-zh_cn.md
@@ -87,10 +87,31 @@ status:
   serviceCIDR:
   - 10.233.0.0/18
 ```
+
+> 目前 Spiderpool 优先通过查询 `kube-system/kubeadm-config` ConfigMap 获取集群的 Pod 和 Service 子网。 如果 kubeadm-config 不存在导致无法获取集群子网,那么 Spiderpool 会从 Kube-controller-manager Pod 中获取集群 Pod 和 Service 的子网。 如果您集群的 Kube-controller-manager 组件以 `systemd` 方式而不是以静态 Pod 运行,那么 Spiderpool 仍然无法获取集群的子网信息。
 
-> 1.如果 phase 不为 Synced, 那么将会阻止 Pod 被创建
->
-> 2.如果 overlayPodCIDR 不正常, 可能会导致通信问题
+如果上面两种方式都失败,Spiderpool 会同步 status.phase 为 NotReady, 这将会阻止 Pod 被创建。我们可以通过下面的方式解决异常情况:
+
+- 手动创建 kubeadm-config ConfigMap, 并正确配置集群的子网信息:
+
+```shell
+export POD_SUBNET=
+export SERVICE_SUBNET=
+cat << EOF | kubectl apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kubeadm-config
+  namespace: kube-system
+data:
+  ClusterConfiguration: |
+    networking:
+      podSubnet: ${POD_SUBNET}
+      serviceSubnet: ${SERVICE_SUBNET}
+EOF
+```
+
+一旦创建完成,Spiderpool 将会自动同步其状态。
 
 ### 创建 SpiderIPPool
 
diff --git a/docs/usage/install/overlay/get-started-calico.md b/docs/usage/install/overlay/get-started-calico.md
index 9dd5e6f1c2..1b4c66f3af 100644
--- a/docs/usage/install/overlay/get-started-calico.md
+++ b/docs/usage/install/overlay/get-started-calico.md
@@ -83,9 +83,30 @@ status:
   - 10.233.0.0/18
 ```
 
-> 1.If the phase is not synced, the pod will be prevented from being created.
->
-> 2.If the overlayPodCIDR does not meet expectations, it may cause pod communication issue.
+> At present, Spiderpool prioritizes obtaining the cluster's Pod and Service subnets by querying the `kube-system/kubeadm-config` ConfigMap. If the kubeadm-config ConfigMap does not exist and the cluster subnets therefore cannot be obtained from it, Spiderpool will attempt to retrieve the cluster's Pod and Service subnets from the kube-controller-manager Pod. If the kube-controller-manager component in your cluster runs as a systemd service instead of as a static Pod, Spiderpool will still be unable to retrieve the cluster's subnet information.
+
+If both of the above methods fail, Spiderpool will set status.phase to NotReady, which prevents Pods from being created. To address such an abnormal situation, we can take the following approach:
+
+- Manually create the kubeadm-config ConfigMap and correctly configure the cluster's subnet information:
+
+```shell
+export POD_SUBNET=
+export SERVICE_SUBNET=
+cat << EOF | kubectl apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kubeadm-config
+  namespace: kube-system
+data:
+  ClusterConfiguration: |
+    networking:
+      podSubnet: ${POD_SUBNET}
+      serviceSubnet: ${SERVICE_SUBNET}
+EOF
+```
+
+Once created, Spiderpool will automatically synchronize its status.
 
 ### Create SpiderIPPool
 
diff --git a/docs/usage/install/overlay/get-started-cilium-zh_cn.md b/docs/usage/install/overlay/get-started-cilium-zh_cn.md
index 49b5589584..6bf697f4cf 100644
--- a/docs/usage/install/overlay/get-started-cilium-zh_cn.md
+++ b/docs/usage/install/overlay/get-started-cilium-zh_cn.md
@@ -84,9 +84,30 @@ status:
   - 10.233.0.0/18
 ```
 
-> 1.如果 phase 不为 Synced, 那么将会阻止 Pod 被创建
->
-> 2.如果 overlayPodCIDR 不正常, 可能会导致通信问题
+> 目前 Spiderpool 优先通过查询 `kube-system/kubeadm-config` ConfigMap 获取集群的 Pod 和 Service 子网。 如果 kubeadm-config 不存在导致无法获取集群子网,那么 Spiderpool 会从 Kube-controller-manager Pod 中获取集群 Pod 和 Service 的子网。 如果您集群的 Kube-controller-manager 组件以 `systemd` 方式而不是以静态 Pod 运行,那么 Spiderpool 仍然无法获取集群的子网信息。
+
+如果上面两种方式都失败,Spiderpool 会同步 status.phase 为 NotReady, 这将会阻止 Pod 被创建。我们可以通过下面的方式解决异常情况:
+
+- 手动创建 kubeadm-config ConfigMap, 并正确配置集群的子网信息:
+
+```shell
+export POD_SUBNET=
+export SERVICE_SUBNET=
+cat << EOF | kubectl apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kubeadm-config
+  namespace: kube-system
+data:
+  ClusterConfiguration: |
+    networking:
+      podSubnet: ${POD_SUBNET}
+      serviceSubnet: ${SERVICE_SUBNET}
+EOF
+```
+
+一旦创建完成,Spiderpool 将会自动同步其状态。
 
 ### 创建 SpiderIPPool
 
diff --git a/docs/usage/install/overlay/get-started-cilium.md b/docs/usage/install/overlay/get-started-cilium.md
index d485e8a5a9..febe73d4c8 100644
--- a/docs/usage/install/overlay/get-started-cilium.md
+++ b/docs/usage/install/overlay/get-started-cilium.md
@@ -84,9 +84,30 @@ status:
   - 10.233.0.0/18
 ```
 
-> 1.If the phase is not synced, the pod will be prevented from being created.
->
-> 2.If the overlayPodCIDR does not meet expectations, it may cause pod communication issue.
+> At present, Spiderpool prioritizes obtaining the cluster's Pod and Service subnets by querying the `kube-system/kubeadm-config` ConfigMap. If the kubeadm-config ConfigMap does not exist and the cluster subnets therefore cannot be obtained from it, Spiderpool will attempt to retrieve the cluster's Pod and Service subnets from the kube-controller-manager Pod. If the kube-controller-manager component in your cluster runs as a systemd service instead of as a static Pod, Spiderpool will still be unable to retrieve the cluster's subnet information.
+
+If both of the above methods fail, Spiderpool will set status.phase to NotReady, which prevents Pods from being created. To address such an abnormal situation, we can take the following approach:
+
+- Manually create the kubeadm-config ConfigMap and correctly configure the cluster's subnet information:
+
+```shell
+export POD_SUBNET=
+export SERVICE_SUBNET=
+cat << EOF | kubectl apply -f -
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: kubeadm-config
+  namespace: kube-system
+data:
+  ClusterConfiguration: |
+    networking:
+      podSubnet: ${POD_SUBNET}
+      serviceSubnet: ${SERVICE_SUBNET}
+EOF
+```
+
+Once created, Spiderpool will automatically synchronize its status.
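+
+As a quick check (assuming the default SpiderCoordinator resource is named `default`, as in a default installation), you can confirm that the subnets were picked up from kubeadm-config:
+
+```shell
+# The phase should turn to Synced once the ConfigMap is readable.
+kubectl get spidercoordinators.spiderpool.spidernet.io default -o jsonpath='{.status.phase}{"\n"}'
+
+# These CIDRs should match the podSubnet/serviceSubnet configured in kubeadm-config.
+kubectl get spidercoordinators.spiderpool.spidernet.io default -o jsonpath='{.status.overlayPodCIDR}{"\n"}{.status.serviceCIDR}{"\n"}'
+```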
 
 ### Create SpiderIPPool
 
diff --git a/pkg/coordinatormanager/coordinator_informer.go b/pkg/coordinatormanager/coordinator_informer.go
index 6b61259e6c..980c107723 100644
--- a/pkg/coordinatormanager/coordinator_informer.go
+++ b/pkg/coordinatormanager/coordinator_informer.go
@@ -334,36 +334,8 @@ }
 func (cc *CoordinatorController) syncHandler(ctx context.Context, coordinatorNam
 }
 
 func (cc *CoordinatorController) fetchPodAndServerCIDR(ctx context.Context, logger *zap.Logger, coordCopy *spiderpoolv2beta1.SpiderCoordinator) (*spiderpoolv2beta1.SpiderCoordinator, error) {
-	var err error
-	var cmPodList corev1.PodList
-	if err := cc.APIReader.List(ctx, &cmPodList, client.MatchingLabels{"component": "kube-controller-manager"}); err != nil {
-		event.EventRecorder.Eventf(
-			coordCopy,
-			corev1.EventTypeWarning,
-			"ClusterNotReady",
-			err.Error(),
-		)
-
-		setStatus2NoReady(logger, coordCopy)
-		return coordCopy, err
-	}
-	if len(cmPodList.Items) == 0 {
-		msg := `Failed to get kube-controller-manager Pod with label "component: kube-controller-manager"`
-		event.EventRecorder.Eventf(
-			coordCopy,
-			corev1.EventTypeWarning,
-			"ClusterNotReady",
-			msg,
-		)
-
-		setStatus2NoReady(logger, coordCopy)
-		return coordCopy, err
-	}
-
-	k8sPodCIDR, k8sServiceCIDR := extractK8sCIDR(&cmPodList.Items[0])
 	if *coordCopy.Spec.PodCIDRType == auto {
-		var podCidrType string
-		podCidrType, err = fetchType(cc.DefaultCniConfDir)
+		podCidrType, err := fetchType(cc.DefaultCniConfDir)
 		if err != nil {
 			if apierrors.IsNotFound(err) {
 				event.EventRecorder.Eventf(
@@ -381,6 +353,30 @@ func (cc *CoordinatorController) fetchPodAndServerCIDR(ctx context.Context, logg
 		coordCopy.Spec.PodCIDRType = &podCidrType
 	}
 
+	var err error
+	cm := &corev1.ConfigMap{}
+	var k8sPodCIDR, k8sServiceCIDR []string
+	if err := cc.APIReader.Get(ctx, types.NamespacedName{Namespace: metav1.NamespaceSystem, Name: "kubeadm-config"}, cm); err == nil {
+		logger.Sugar().Infof("Trying to fetch the ClusterCIDR from kube-system/kubeadm-config")
+		k8sPodCIDR, k8sServiceCIDR = ExtractK8sCIDRFromKubeadmConfigMap(cm)
+	} else {
+		logger.Sugar().Warn("kube-system/kubeadm-config is not found, trying to fetch the ClusterCIDR from kube-controller-manager Pod")
+		var cmPodList corev1.PodList
+		err = cc.APIReader.List(ctx, &cmPodList, client.MatchingLabels{"component": "kube-controller-manager"})
+		if err != nil || len(cmPodList.Items) == 0 {
+			logger.Sugar().Errorf("failed to get kube-controller-manager Pod with label \"component: kube-controller-manager\": %v", err)
+			event.EventRecorder.Eventf(
+				coordCopy,
+				corev1.EventTypeWarning,
+				"ClusterNotReady",
+				"Neither kubeadm-config ConfigMap nor kube-controller-manager Pod can be found",
+			)
+			setStatus2NoReady(logger, coordCopy)
+			return coordCopy, err
+		}
+		k8sPodCIDR, k8sServiceCIDR = ExtractK8sCIDRFromKCMPod(&cmPodList.Items[0])
+	}
+
 	switch *coordCopy.Spec.PodCIDRType {
 	case cluster:
 		if cc.caliCtrlCanncel != nil {
@@ -538,7 +534,42 @@ func (cc *CoordinatorController) fetchCiliumCIDR(ctx context.Context, logger *za
 	return nil
 }
 
-func extractK8sCIDR(kcm *corev1.Pod) ([]string, []string) {
+func ExtractK8sCIDRFromKubeadmConfigMap(cm *corev1.ConfigMap) ([]string, []string) {
+	var podCIDR, serviceCIDR []string
+
+	podReg := regexp.MustCompile(`podSubnet: (.*)`)
+	serviceReg := regexp.MustCompile(`serviceSubnet: (.*)`)
+
+	var podSubnets, serviceSubnets []string
+	for _, data := range cm.Data {
+		podSubnets = podReg.FindStringSubmatch(data)
+		serviceSubnets = serviceReg.FindStringSubmatch(data)
+	}
+
+	if len(podSubnets) != 0 {
+		for _, cidr :=
range strings.Split(podSubnets[1], ",") { + _, _, err := net.ParseCIDR(cidr) + if err != nil { + continue + } + podCIDR = append(podCIDR, cidr) + } + } + + if len(serviceSubnets) != 0 { + for _, cidr := range strings.Split(serviceSubnets[1], ",") { + _, _, err := net.ParseCIDR(cidr) + if err != nil { + continue + } + serviceCIDR = append(serviceCIDR, cidr) + } + } + + return podCIDR, serviceCIDR +} + +func ExtractK8sCIDRFromKCMPod(kcm *corev1.Pod) ([]string, []string) { var podCIDR, serviceCIDR []string podReg := regexp.MustCompile(`--cluster-cidr=(.*)`) diff --git a/test/doc/spidercoodinator.md b/test/doc/spidercoodinator.md index b2d71aa303..dde1179b71 100644 --- a/test/doc/spidercoodinator.md +++ b/test/doc/spidercoodinator.md @@ -1,12 +1,13 @@ # E2E Cases for spidercoordinator -| Case ID | Title | Priority | Smoke | Status | Other | -| ------- | ------------------------------------------------------------------------ | -------- | ----- | ------ | ----- | -| V00001 | Switch podClusterType to `auto`, see if it could auto fetch the type | p3 | | | | -| V00002 | Switch podClusterType to `auto` but no cni files in /etc/cni/net.d, see if the phase is NotReady | p3 | | | | -| V00003 | Switch podClusterType to `calico`, see if it could auto fetch the cidr from calico ippools | p3 | | | | -| V00004 | Switch podClusterType to `cilium`, see if it works in ipam-mode: [cluster-pool,kubernetes,multi-pool] | p3 | | | | -| V00005 | Switch podClusterType to `none`, expect the cidr of status to be empty | p3 | | | | -| V00006 | status.phase is not-ready, expect the cidr of status to be empty | p3 | | | | -| V00007 | spidercoordinator has the lowest priority | p3 | | | | -| V00008 | status.phase is not-ready, pods will fail to run | p3 | | | | +| Case ID | Title | Priority | Smoke | Status | Other | +| ------- | --------------------------------------------------------------------------------------------------------- | -------- | ----- | ------ | ----- | +| V00001 | Switch podCIDRType to `auto`, see if it could auto fetch the type | p3 | | done | | +| V00002 | Switch podCIDRType to `auto` but no cni files in /etc/cni/net.d, Viewing should be consistent with `none` | p3 | | done | | +| V00003 | Switch podCIDRType to `calico`, see if it could auto fetch the cidr from calico ippools | p3 | | done | | +| V00004 | Switch podCIDRType to `cilium`, see if it works in ipam-mode: [cluster-pool,kubernetes,multi-pool] | p3 | | done | | +| V00005 | Switch podCIDRType to `none`, expect the cidr of status to be empty | p3 | | done | | +| V00006 | status.phase is not-ready, expect the cidr of status to be empty | p3 | | done | | +| V00007 | spidercoordinator has the lowest priority | p3 | | done | | +| V00008 | status.phase is not-ready, pods will fail to run | p3 | | done | | +| V00009 | it can get the clusterCIDR from kubeadmConfig or kube-controller-manager pod | p3 | | done| diff --git a/test/e2e/common/constant.go b/test/e2e/common/constant.go index 08146e8e81..33ef075217 100644 --- a/test/e2e/common/constant.go +++ b/test/e2e/common/constant.go @@ -40,6 +40,8 @@ var ForcedWaitingTime = time.Second const ( SpiderPoolConfigmapName = "spiderpool-conf" SpiderPoolConfigmapNameSpace = "kube-system" + SpiderPoolLeases = "spiderpool-controller-leases" + SpiderPoolLeasesNamespace = "kube-system" ) // Kubeadm configurations @@ -88,6 +90,17 @@ var ( NIC2 string = "net1" NIC3 string = "eth0.100" NIC4 string = "eth0.200" + NIC5 string = "eth1" + + // Spidercoodinator podCIDRType + PodCIDRTypeAuto = "auto" + PodCIDRTypeCluster = 
"cluster" + PodCIDRTypeCalico = "calico" + PodCIDRTypeCilium = "cilium" + PodCIDRTypeNone = "none" + + // Spidercoodinator default config + SpidercoodinatorDefaultName = "default" ) // Error @@ -104,7 +117,9 @@ const ( // Webhook Port const ( - WebhookPort = "5722" + WebhookPort = "5722" + SpiderControllerMetricsPort = "5721" + SpiderAgentMetricsPort = "5711" ) func init() { diff --git a/test/e2e/common/mode.go b/test/e2e/common/mode.go index 5afaab36a3..66f38af48c 100644 --- a/test/e2e/common/mode.go +++ b/test/e2e/common/mode.go @@ -11,6 +11,7 @@ import ( const ( ENV_INSTALL_OVERLAY = "INSTALL_OVERLAY_CNI" E2E_SPIDERPOOL_ENABLE_SUBNET = "E2E_SPIDERPOOL_ENABLE_SUBNET" + INSTALL_CALICO = "INSTALL_CALICO" INSTALL_CILIUM = "INSTALL_CILIUM" ) @@ -31,6 +32,10 @@ func CheckSubnetFeatureOn() bool { return checkBoolEnv(E2E_SPIDERPOOL_ENABLE_SUBNET) } +func CheckCalicoFeatureOn() bool { + return checkBoolEnv(INSTALL_CALICO) +} + func CheckCiliumFeatureOn() bool { return checkBoolEnv(INSTALL_CILIUM) } diff --git a/test/e2e/spidercoordinator/spidercoordinator_suite_test.go b/test/e2e/spidercoordinator/spidercoordinator_suite_test.go new file mode 100644 index 0000000000..1c07ec0c5f --- /dev/null +++ b/test/e2e/spidercoordinator/spidercoordinator_suite_test.go @@ -0,0 +1,96 @@ +// Copyright 2022 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 +package spidercoordinator_suite_test + +import ( + "fmt" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + e2e "github.com/spidernet-io/e2eframework/framework" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + "github.com/spidernet-io/spiderpool/test/e2e/common" +) + +const ( + CLUSTER_POD_SUBNET_V4 = "10.233.64.0/18" + CLUSTER_POD_SUBNET_V6 = "fd00:10:233:64::/64" + CALICO_CLUSTER_POD_SUBNET_V4 = "10.243.64.0/18" + CALICO_CLUSTER_POD_SUBNET_V6 = "fd00:10:243::/112" + CILIUM_CLUSTER_POD_SUBNET_V4 = "10.244.64.0/18" + CILIUM_CLUSTER_POD_SUBNET_V6 = "fd00:10:244::/112" +) + +func TestSpiderCoordinator(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "SpiderCoordinator Suite") +} + +var frame *e2e.Framework +var v4PodCIDRString, v6PodCIDRString string + +var _ = BeforeSuite(func() { + defer GinkgoRecover() + var e error + frame, e = e2e.NewFramework(GinkgoT(), []func(*runtime.Scheme) error{spiderpoolv2beta1.AddToScheme}) + Expect(e).NotTo(HaveOccurred()) + + if !common.CheckRunOverlayCNI() && !common.CheckCalicoFeatureOn() && !common.CheckCiliumFeatureOn() { + if frame.Info.IpV4Enabled { + v4PodCIDRString = CLUSTER_POD_SUBNET_V4 + } + if frame.Info.IpV6Enabled { + v6PodCIDRString = CLUSTER_POD_SUBNET_V6 + } + GinkgoWriter.Println("This environment is in underlay mode.") + } + + if common.CheckRunOverlayCNI() && common.CheckCalicoFeatureOn() && !common.CheckCiliumFeatureOn() { + if frame.Info.IpV4Enabled { + v4PodCIDRString = CALICO_CLUSTER_POD_SUBNET_V4 + } + if frame.Info.IpV6Enabled { + v6PodCIDRString = CALICO_CLUSTER_POD_SUBNET_V6 + } + GinkgoWriter.Println("The environment is calico mode.") + } + + if common.CheckRunOverlayCNI() && common.CheckCiliumFeatureOn() && !common.CheckCalicoFeatureOn() { + if frame.Info.IpV4Enabled { + v4PodCIDRString = CILIUM_CLUSTER_POD_SUBNET_V4 + } + if frame.Info.IpV6Enabled { + v6PodCIDRString = CILIUM_CLUSTER_POD_SUBNET_V6 + } + GinkgoWriter.Println("The environment is cilium mode.") + } +}) + +func 
GetSpiderCoordinator(name string) (*spiderpoolv2beta1.SpiderCoordinator, error) { + var spc spiderpoolv2beta1.SpiderCoordinator + err := frame.GetResource(types.NamespacedName{ + Name: name, + }, &spc) + if nil != err { + return nil, err + } + + return &spc, nil +} + +func PatchSpiderCoordinator(desired, original *spiderpoolv2beta1.SpiderCoordinator, opts ...client.PatchOption) error { + + mergePatch := client.MergeFrom(original) + d, err := mergePatch.Data(desired) + GinkgoWriter.Printf("the patch is: %v. \n", string(d)) + if err != nil { + return fmt.Errorf("failed to generate patch, err is %v", err) + } + + return frame.PatchResource(desired, mergePatch, opts...) +} diff --git a/test/e2e/spidercoordinator/spidercoordinator_test.go b/test/e2e/spidercoordinator/spidercoordinator_test.go new file mode 100644 index 0000000000..45e149039e --- /dev/null +++ b/test/e2e/spidercoordinator/spidercoordinator_test.go @@ -0,0 +1,445 @@ +// Copyright 2023 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 +package spidercoordinator_suite_test + +import ( + "context" + "fmt" + "reflect" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/spidernet-io/spiderpool/pkg/constant" + "github.com/spidernet-io/spiderpool/pkg/coordinatormanager" + "github.com/spidernet-io/spiderpool/pkg/ip" + spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + "github.com/spidernet-io/spiderpool/test/e2e/common" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("SpiderCoordinator", Label("spidercoordinator", "overlay"), Serial, func() { + + Context("auto mode of spidercoordinator", func() { + // This case adaptation runs in different network modes, such as macvlan, calico, and cilium. + // Prerequisite: The podCIDRType of the spidercoodinator deployed by default in the spiderpool environment is auto mode. + It("Switch podCIDRType to `auto`, see if it could auto fetch the type", Label("V00001"), func() { + + By("Get the default spidercoodinator.") + spc, err := GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator,error is %v", err) + GinkgoWriter.Printf("Display the default spider coordinator information: %+v \n", spc) + + By("Checking podCIDRType for status.overlayPodCIDR in auto mode is as expected.") + // Loop through all of the OverlayPodCIDRs to avoid the possibility of a value mismatch. 
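+			// Note: v4PodCIDRString and v6PodCIDRString are set in BeforeSuite according to the CNI the
+			// suite detected (underlay, calico, or cilium), so the expected values below follow whichever
+			// cluster flavor this environment was brought up with.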
+ for _, cidr := range spc.Status.OverlayPodCIDR { + if ip.IsIPv4CIDR(cidr) { + Expect(cidr).To(Equal(v4PodCIDRString)) + GinkgoWriter.Printf("ipv4 podCIDR is as expected, value %v=%v \n", cidr, v4PodCIDRString) + } else { + Expect(cidr).To(Equal(v6PodCIDRString)) + GinkgoWriter.Printf("ipv6 podCIDR is as expected, value %v=%v \n", cidr, v6PodCIDRString) + } + } + }) + }) + + Context("There is no cni file in /etc/cni/net.d.", func() { + var calicoCNIConfigName, ciliumCNIConfigName string + var newCalicoCNIConfigName, newCiliumCNIConfigName string + + BeforeEach(func() { + podList, err := frame.GetPodListByLabel(map[string]string{"app.kubernetes.io/component": constant.SpiderpoolController}) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderpoolController, error is %v", err) + + ctx, cancel := context.WithTimeout(context.Background(), common.ExecCommandTimeout) + defer cancel() + var mvCNIConfig string + if !common.CheckRunOverlayCNI() && !common.CheckCalicoFeatureOn() && !common.CheckCiliumFeatureOn() { + GinkgoWriter.Println("This environment is in underlay mode.") + Skip("Not applicable to underlay mode") + } + + if common.CheckRunOverlayCNI() && common.CheckCalicoFeatureOn() && !common.CheckCiliumFeatureOn() { + GinkgoWriter.Println("The environment is calico mode.") + calicoCNIConfigName = "10-calico.conflist" + newCalicoCNIConfigName = "10-calico.conflist-bak" + mvCNIConfig = fmt.Sprintf("mv /etc/cni/net.d/%s /etc/cni/net.d/%s", calicoCNIConfigName, newCalicoCNIConfigName) + } + + if common.CheckRunOverlayCNI() && common.CheckCiliumFeatureOn() && !common.CheckCalicoFeatureOn() { + GinkgoWriter.Println("The environment is cilium mode.") + ciliumCNIConfigName = "05-cilium.conflist" + newCiliumCNIConfigName = "05-cilium.conflist-bak" + mvCNIConfig = fmt.Sprintf("mv /etc/cni/net.d/%s /etc/cni/net.d/%s", ciliumCNIConfigName, newCiliumCNIConfigName) + } + for _, pod := range podList.Items { + _, err := frame.DockerExecCommand(ctx, pod.Spec.NodeName, mvCNIConfig) + Expect(err).NotTo(HaveOccurred(), "Failed to execute mv command on the node %s ; error is %v", pod.Spec.NodeName, err) + } + + DeferCleanup(func() { + if common.CheckRunOverlayCNI() && common.CheckCalicoFeatureOn() && !common.CheckCiliumFeatureOn() { + GinkgoWriter.Println("The environment is calico mode.") + mvCNIConfig = fmt.Sprintf("mv /etc/cni/net.d/%s /etc/cni/net.d/%s", newCalicoCNIConfigName, calicoCNIConfigName) + } + + if common.CheckRunOverlayCNI() && common.CheckCiliumFeatureOn() && !common.CheckCalicoFeatureOn() { + GinkgoWriter.Println("The environment is cilium mode.") + mvCNIConfig = fmt.Sprintf("mv /etc/cni/net.d/%s /etc/cni/net.d/%s", newCiliumCNIConfigName, ciliumCNIConfigName) + } + + ctx, cancel := context.WithTimeout(context.Background(), common.ExecCommandTimeout) + defer cancel() + for _, pod := range podList.Items { + _, err := frame.DockerExecCommand(ctx, pod.Spec.NodeName, mvCNIConfig) + Expect(err).NotTo(HaveOccurred(), "Failed to execute mv command on the node %s ; error is %v", pod.Spec.NodeName, err) + } + + Eventually(func() bool { + By("Get the default spidercoodinator.") + spc, err := GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + + By("After restoring the cni configuration under /etc/cni/net.d, the environment returns to normal.") + if spc.Status.OverlayPodCIDR == nil || spc.Status.Phase != coordinatormanager.Synced { + GinkgoWriter.Printf("status.overlayPodCIDR status is still 
synchronizing, status %+v \n", spc.Status.OverlayPodCIDR) + return false + } + for _, cidr := range spc.Status.OverlayPodCIDR { + if ip.IsIPv4CIDR(cidr) { + Expect(cidr).To(Equal(v4PodCIDRString)) + GinkgoWriter.Printf("ipv4 podCIDR is as expected, value %v=%v \n", cidr, v4PodCIDRString) + } else { + Expect(cidr).To(Equal(v6PodCIDRString)) + GinkgoWriter.Printf("ipv6 podCIDR is as expected, value %v=%v \n", cidr, v6PodCIDRString) + } + } + return true + }, common.ExecCommandTimeout, common.ForcedWaitingTime).Should(BeTrue()) + }) + }) + + It("Switch podCIDRType to `auto` but no cni files in /etc/cni/net.d, Viewing should be consistent with `none`.", Label("V00002"), func() { + + Eventually(func() bool { + By("Get the default spidercoodinator.") + spc, err := GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + + By("Checking status.overlayPodCIDR in automatic mode for pod CIDR type should be nil.") + if spc.Status.OverlayPodCIDR != nil { + GinkgoWriter.Printf("status.overlayPodCIDR status is still synchronizing, status %+v \n", spc.Status.OverlayPodCIDR) + return false + } + + if spc.Status.Phase != coordinatormanager.Synced { + GinkgoWriter.Printf("status.Phase is still synchronizing, status is %+v \n", spc.Status.Phase) + return false + } + + return true + }, common.ExecCommandTimeout, common.ForcedWaitingTime).Should(BeTrue()) + }) + }) + + Context("Switch podCIDRType to `calico` or `cilium`、`none` ", func() { + var invalidPodCIDRType, validPodCIDRType, depName, namespace string + + BeforeEach(func() { + if !common.CheckRunOverlayCNI() && !common.CheckCalicoFeatureOn() && !common.CheckCiliumFeatureOn() { + GinkgoWriter.Println("This environment is in underlay mode.") + Skip("Not applicable to underlay mode") + } + + if common.CheckCalicoFeatureOn() && !common.CheckCiliumFeatureOn() { + GinkgoWriter.Println("The environment is calico mode.") + invalidPodCIDRType = common.PodCIDRTypeCilium + validPodCIDRType = common.PodCIDRTypeCalico + } + + if common.CheckCiliumFeatureOn() && !common.CheckCalicoFeatureOn() { + GinkgoWriter.Println("The environment is cilium mode.") + invalidPodCIDRType = common.PodCIDRTypeCalico + validPodCIDRType = common.PodCIDRTypeCilium + } + + namespace = "ns-" + common.GenerateString(10, true) + depName = "dep-name-" + common.GenerateString(10, true) + err := frame.CreateNamespaceUntilDefaultServiceAccountReady(namespace, common.ServiceAccountReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + DeferCleanup(func() { + // The default podCIDRType for all environments is `auto` and should eventually fall back to auto mode in any case. + // Avoid failure of other use cases. + spc, err := GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + GinkgoWriter.Printf("Display the default spider coordinator information: %+v \n", spc) + + // Switch podCIDRType to `auto`. 
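+				// With `auto`, the coordinator is expected to re-detect the CNI from the files under
+				// /etc/cni/net.d, so after this patch the status should converge back to the suite-level
+				// CIDR expectations checked below.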
+ spcCopy := spc.DeepCopy() + spcCopy.Spec.PodCIDRType = pointer.String(common.PodCIDRTypeAuto) + Expect(PatchSpiderCoordinator(spcCopy, spc)).NotTo(HaveOccurred()) + + Eventually(func() bool { + spc, err := GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + + if spc.Status.OverlayPodCIDR == nil || spc.Status.Phase != coordinatormanager.Synced { + GinkgoWriter.Printf("status.overlayPodCIDR status is still synchronizing, status %+v \n", spc.Status.OverlayPodCIDR) + return false + } + for _, cidr := range spc.Status.OverlayPodCIDR { + if ip.IsIPv4CIDR(cidr) { + Expect(cidr).To(Equal(v4PodCIDRString)) + GinkgoWriter.Printf("ipv4 podCIDR is as expected, value %v=%v \n", cidr, v4PodCIDRString) + } else { + Expect(cidr).To(Equal(v6PodCIDRString)) + GinkgoWriter.Printf("ipv6 podCIDR is as expected, value %v=%v \n", cidr, v6PodCIDRString) + } + } + return true + }, common.ExecCommandTimeout, common.ForcedWaitingTime).Should(BeTrue()) + + GinkgoWriter.Printf("delete namespace %v. \n", namespace) + Expect(frame.DeleteNamespace(namespace)).NotTo(HaveOccurred()) + }) + }) + + // This case adaptation runs in different network modes, such as macvlan, calico, and cilium. + // Prerequisite: The podCIDRType of the spidercoodinator deployed by default in the spiderpool environment is auto mode. + It("Switch podCIDRType to `calico` or `cilium`, see if it could auto fetch the cidr from calico ippools", Label("V00003", "V00004", "V00006", "V00008"), func() { + + By("Get the default spidercoodinator.") + spc, err := GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + GinkgoWriter.Printf("Display the default spider coordinator information: %+v \n", spc) + + // Switch podCIDRType to `calico` or `cilium`. + // This is a failure scenario where the cluster's default CNI is calico, but the podCIDRType is set to cilium. + // Instead, when defaulting to Cilium, set podCIDRType to Calico + spcCopy := spc.DeepCopy() + spcCopy.Spec.PodCIDRType = pointer.String(invalidPodCIDRType) + Expect(PatchSpiderCoordinator(spcCopy, spc)).NotTo(HaveOccurred()) + Eventually(func() bool { + spc, err := GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + + if spc.Status.Phase == coordinatormanager.Synced { + GinkgoWriter.Printf("status.Phase and OverlayPodCIDR status is still synchronizing, status %+v \n", spc.Status.OverlayPodCIDR) + return false + } + + // status.phase is not-ready, expect the cidr of status to be empty + if spc.Status.Phase == coordinatormanager.NotReady { + Expect(spc.Status.OverlayPodCIDR).Should(BeNil()) + } + + GinkgoWriter.Printf("status.Phase status is %+v \n", spc.Status.Phase) + + // Pod creation in the Not Ready state should fail. 
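+				// When the default SpiderCoordinator is NotReady, Spiderpool is expected to refuse IP allocation,
+				// so the Pods of this Deployment are not expected to become Ready; instead, the check below waits
+				// for the "spidercoordinator: default no ready" event to be emitted for each Pod.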
+ var annotations = make(map[string]string) + annotations[common.MultusDefaultNetwork] = fmt.Sprintf("%s/%s", common.MultusNs, common.MacvlanUnderlayVlan0) + deployObject := common.GenerateExampleDeploymentYaml(depName, namespace, int32(1)) + deployObject.Spec.Template.Annotations = annotations + ctx, cancel := context.WithTimeout(context.Background(), common.PodStartTimeout) + defer cancel() + podList, err := common.CreateDeployUntilExpectedReplicas(frame, deployObject, ctx) + Expect(err).NotTo(HaveOccurred()) + ctx, cancel = context.WithTimeout(context.Background(), common.EventOccurTimeout) + defer cancel() + errLog := "spidercoordinator: default no ready" + for _, pod := range podList.Items { + err = frame.WaitExceptEventOccurred(ctx, common.OwnerPod, pod.Name, pod.Namespace, errLog) + Expect(err).To(Succeed(), "Failed to get 'spidercoordinator not ready', error is: %v", err) + } + + return true + }, common.ExecCommandTimeout, common.ForcedWaitingTime).Should(BeTrue()) + + spc, err = GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + GinkgoWriter.Printf("Display the default spider coordinator information: %+v \n", spc) + + spcCopy = spc.DeepCopy() + spcCopy.Spec.PodCIDRType = pointer.String(validPodCIDRType) + Expect(PatchSpiderCoordinator(spcCopy, spc)).NotTo(HaveOccurred()) + Eventually(func() bool { + spc, err := GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + + if spc.Status.Phase != coordinatormanager.Synced { + GinkgoWriter.Printf("status.Phase status is still synchronizing, status %+v \n", spc.Status.Phase) + return false + } + if spc.Status.OverlayPodCIDR == nil { + GinkgoWriter.Printf("status.overlayPodCIDR status is still synchronizing, status %+v \n", spc.Status.OverlayPodCIDR) + return false + } + for _, cidr := range spc.Status.OverlayPodCIDR { + if ip.IsIPv4CIDR(cidr) { + Expect(cidr).To(Equal(v4PodCIDRString)) + GinkgoWriter.Printf("ipv4 podCIDR is as expected, value %v=%v \n", cidr, v4PodCIDRString) + } else { + Expect(cidr).To(Equal(v6PodCIDRString)) + GinkgoWriter.Printf("ipv6 podCIDR is as expected, value %v=%v \n", cidr, v6PodCIDRString) + } + } + return true + }, common.ExecCommandTimeout, common.ForcedWaitingTime).Should(BeTrue()) + }) + + It("Switch podCIDRType to `none`, expect the cidr of status to be empty", Label("V00005"), func() { + + By("Get the default spidercoodinator.") + spc, err := GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + GinkgoWriter.Printf("Display the default spider coordinator information: %+v \n", spc) + + // Switch podCIDRType to `None`. 
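+			// With podCIDRType set to `none`, Spiderpool is expected to stop syncing any CNI CIDR, so once
+			// the phase returns to Synced, status.overlayPodCIDR should be empty.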
+ spcCopy := spc.DeepCopy() + spcCopy.Spec.PodCIDRType = pointer.String(common.PodCIDRTypeNone) + Expect(PatchSpiderCoordinator(spcCopy, spc)).NotTo(HaveOccurred()) + Eventually(func() bool { + spc, err := GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + + if spc.Status.Phase != coordinatormanager.Synced { + GinkgoWriter.Printf("status.Phase status is still synchronizing, status %+v \n", spc.Status.Phase) + return false + } + + if spc.Status.OverlayPodCIDR != nil { + GinkgoWriter.Printf("status.overlayPodCIDR status is still synchronizing, status %+v \n", spc.Status.OverlayPodCIDR) + return false + } + + GinkgoWriter.Println("status.overlayPodCIDR is nil, as expected.") + return true + }, common.ExecCommandTimeout, common.ForcedWaitingTime).Should(BeTrue()) + }) + }) + + Context("It can get the clusterCIDR from kubeadmConfig and kube-controller-manager pod", Label("V00009"), func() { + var spc *spiderpoolv2beta1.SpiderCoordinator + var cm *corev1.ConfigMap + var err error + BeforeEach(func() { + if !common.CheckRunOverlayCNI() { + GinkgoWriter.Println("This environment is in underlay mode.") + Skip("Not applicable to underlay mode") + } + + if !common.CheckCalicoFeatureOn() { + GinkgoWriter.Println("The CNI isn't calico.") + Skip("This case only run in calico") + } + + cm, err = frame.GetConfigmap("kubeadm-config", "kube-system") + Expect(err).NotTo(HaveOccurred()) + + spc, err = GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + + // Switch podCIDRType to `cluster`. + spcCopy := spc.DeepCopy() + spcCopy.Spec.PodCIDRType = pointer.String(common.PodCIDRTypeCluster) + Expect(PatchSpiderCoordinator(spcCopy, spc)).NotTo(HaveOccurred()) + + DeferCleanup(func() { + spc, err := GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + GinkgoWriter.Printf("Display the default spider coordinator information: %+v \n", spc) + + // Switch podCIDRType to `auto`. 
+ spcCopy := spc.DeepCopy() + spcCopy.Spec.PodCIDRType = pointer.String(common.PodCIDRTypeAuto) + Expect(PatchSpiderCoordinator(spcCopy, spc)).NotTo(HaveOccurred()) + + Eventually(func() bool { + spc, err := GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + GinkgoWriter.Printf("Display the default spider coordinator information: %+v \n", spc) + + if spc.Status.OverlayPodCIDR == nil || spc.Status.Phase != coordinatormanager.Synced { + GinkgoWriter.Printf("status.overlayPodCIDR status is still synchronizing, status %+v \n", spc.Status.OverlayPodCIDR) + return false + } + + for _, cidr := range spc.Status.OverlayPodCIDR { + if ip.IsIPv4CIDR(cidr) { + if cidr != v4PodCIDRString { + return false + } + GinkgoWriter.Printf("ipv4 podCIDR is as expected, value %v=%v \n", cidr, v4PodCIDRString) + } else { + if cidr != v6PodCIDRString { + return false + } + GinkgoWriter.Printf("ipv6 podCIDR is as expected, value %v=%v \n", cidr, v6PodCIDRString) + } + } + return true + }, common.ExecCommandTimeout, common.ForcedWaitingTime).Should(BeTrue()) + }) + }) + + It("Prioritize getting ClusterCIDR from kubeadm-config", func() { + GinkgoWriter.Printf("podCIDR and serviceCIDR from spidercoordinator: %v,%v\n", spc.Status.OverlayPodCIDR, spc.Status.ServiceCIDR) + + podCIDR, serviceCIDr := coordinatormanager.ExtractK8sCIDRFromKubeadmConfigMap(cm) + GinkgoWriter.Printf("podCIDR and serviceCIDR from kubeadm-config : %v,%v\n", podCIDR, serviceCIDr) + + Eventually(func() bool { + spc, err = GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + + if spc.Status.Phase != coordinatormanager.Synced { + return false + } + + if reflect.DeepEqual(podCIDR, spc.Status.OverlayPodCIDR) && reflect.DeepEqual(serviceCIDr, spc.Status.ServiceCIDR) { + return true + } + + return false + }, common.ExecCommandTimeout, common.ForcedWaitingTime).Should(BeTrue()) + }) + + It("Getting clusterCIDR from kube-controller-manager Pod when kubeadm-config does not exist", func() { + // delete the kubeadm-config configMap + GinkgoWriter.Print("deleting kubeadm-config\n") + err = frame.DeleteConfigmap("kubeadm-config", "kube-system") + Expect(err).NotTo(HaveOccurred()) + + defer func() { + cm.ResourceVersion = "" + cm.Generation = 0 + err = frame.CreateConfigmap(cm) + Expect(err).NotTo(HaveOccurred()) + }() + + allPods, err := frame.GetPodList(client.MatchingLabels{"component": "kube-controller-manager"}) + Expect(err).NotTo(HaveOccurred()) + + kcmPodCIDR, kcmServiceCIDR := coordinatormanager.ExtractK8sCIDRFromKCMPod(&allPods.Items[0]) + GinkgoWriter.Printf("podCIDR and serviceCIDR from kube-controller-manager pod : %v,%v\n", kcmPodCIDR, kcmServiceCIDR) + + Eventually(func() bool { + spc, err = GetSpiderCoordinator(common.SpidercoodinatorDefaultName) + Expect(err).NotTo(HaveOccurred(), "failed to get SpiderCoordinator, error is %v", err) + + if spc.Status.Phase != coordinatormanager.Synced { + return false + } + + if reflect.DeepEqual(kcmPodCIDR, spc.Status.OverlayPodCIDR) && reflect.DeepEqual(kcmServiceCIDR, spc.Status.ServiceCIDR) { + return true + } + + return false + }, common.ExecCommandTimeout, common.ForcedWaitingTime).Should(BeTrue()) + }) + }) +}) diff --git a/test/scripts/install-default-cni.sh b/test/scripts/install-default-cni.sh index 5b66759d7c..3018a24c76 100755 --- a/test/scripts/install-default-cni.sh +++ b/test/scripts/install-default-cni.sh @@ 
-149,6 +149,7 @@ function install_calico() { kubectl patch ippools default-ipv4-ippool --patch '{"spec": {"cidr": "'"${CALICO_IPV4POOL_CIDR}"'"}}' --type=merge ;; ipv6) + kubectl delete ippools default-ipv4-ippool --force kubectl patch ippools default-ipv6-ippool --patch '{"spec": {"cidr": "'"${CALICO_IPV6POOL_CIDR}"'"}}' --type=merge ;; dual)