From 039b7234581c7333652bd6937f95c02efc203e5a Mon Sep 17 00:00:00 2001
From: guowei17
Date: Tue, 18 Jun 2024 14:59:01 +0800
Subject: [PATCH] Release cce-network-v2/2.10.1

---
 cce-network-v2/VERSION                         |   2 +-
 cce-network-v2/docs/release.md                 |   9 +
 cce-network-v2/pkg/bce/api/cloud/error.go      |   3 +-
 .../pkg/bce/bcesync/bcc_primary_eni.go         |   5 +
 cce-network-v2/pkg/bce/bcesync/eni.go          |   6 +-
 .../pkg/bce/bcesync/physical_eni.go            |   5 +-
 .../pkg/ipam/allocator/podcidr/podcidr.go      | 113 +++--
 .../ipam/allocator/podcidr/podcidr_test.go     |   2 +-
 .../pkg/ipam/allocator/podcidr/podcidr_v2.go   | 324 --------
 .../ipam/allocator/podcidr/podcidr_v2_test.go  | 417 ------------------
 cce-network-v2/plugins/cptp/cptp.go            |  22 -
 .../plugins/pluginmanager/plugin_manager.go    |   2 +-
 12 files changed, 80 insertions(+), 830 deletions(-)
 delete mode 100644 cce-network-v2/pkg/ipam/allocator/podcidr/podcidr_v2.go
 delete mode 100644 cce-network-v2/pkg/ipam/allocator/podcidr/podcidr_v2_test.go

diff --git a/cce-network-v2/VERSION b/cce-network-v2/VERSION
index 10c2c0c..8bbb6e4 100644
--- a/cce-network-v2/VERSION
+++ b/cce-network-v2/VERSION
@@ -1 +1 @@
-2.10.0
+2.10.1
diff --git a/cce-network-v2/docs/release.md b/cce-network-v2/docs/release.md
index 3908b1a..d5ee5a3 100644
--- a/cce-network-v2/docs/release.md
+++ b/cce-network-v2/docs/release.md
@@ -2,6 +2,11 @@ New v2 architecture, supporting VPC-ENI secondary IPs and VPC routing. Release history:
 ### 2.10 (2024/03/05)
+### 2.10.1 [20240325]
+1. [BUG] Fixed an issue in vpc-route mode where restarting the operator could assign duplicate CIDRs to multiple nodes
+2. [BUG] Fixed a possible stack overflow when a bce sdk call returned an error, which caused the operator to restart
+3. [Optimize] vpc-eni now validates the MAC address to avoid accidentally operating on other network interfaces
+
 ### 2.10.0 (2024/03/05)
 1. [Feature] VPC-ENI automatically obtains the node ENI quota, removing the custom ENI quota parameter.
 2. [Feature] VPC-ENI supports secondary IP mode on the ebc primary ENI
@@ -22,6 +27,10 @@ New v2 architecture, supporting VPC-ENI secondary IPs and VPC routing. Release history:
 3. New feature: support Ubuntu 22.04; in container network environments, set the systemd-networkd MacAddressPolicy to none.
 4. New feature: support pod-level QoS
+### 2.9.5 [20240325]
+1. [BUG] Fixed an issue in vpc-route mode where restarting the operator could assign duplicate CIDRs to multiple nodes
+2. [BUG] Fixed a possible stack overflow when a bce sdk call returned an error, which caused the operator to restart
+
 ### 2.9.4 [20240305]
 1. [Feature] Support configuring the ENI subnet of a node for BBC instances by adding the `network.cce.baidubce.com/node-eni-subnet` annotation to the Node.
diff --git a/cce-network-v2/pkg/bce/api/cloud/error.go b/cce-network-v2/pkg/bce/api/cloud/error.go
index 65299be..c09acdd 100644
--- a/cce-network-v2/pkg/bce/api/cloud/error.go
+++ b/cce-network-v2/pkg/bce/api/cloud/error.go
@@ -119,8 +119,7 @@ func IsErrorRouteRuleRepeated(err error) bool {
 }
 
 func IsErrorQuotaLimitExceeded(err error) bool {
-    return ReasonForError(err) == ErrorReasonQuotaLimitExceeded ||
-        IsErrorQuotaLimitExceeded(err)
+    return ReasonForError(err) == ErrorReasonQuotaLimitExceeded
 }
 
 func IsErrorCreateRouteRuleExceededQuota(err error) bool {
diff --git a/cce-network-v2/pkg/bce/bcesync/bcc_primary_eni.go b/cce-network-v2/pkg/bce/bcesync/bcc_primary_eni.go
index c0b6479..8219cb7 100644
--- a/cce-network-v2/pkg/bce/bcesync/bcc_primary_eni.go
+++ b/cce-network-v2/pkg/bce/bcesync/bcc_primary_eni.go
@@ -2,6 +2,7 @@ package bcesync
 
 import (
     "context"
+    "errors"
     "fmt"
 
     enisdk "github.com/baidubce/bce-sdk-go/services/eni"
@@ -82,6 +83,10 @@ func (es *remoteBCCPrimarySyncher) statENI(ctx context.Context, eniID string) (*
         if bcceni.EniId != k8seni.Spec.ENI.ID {
             continue
         }
+
+        if bcceni.MacAddress == "" {
+            return nil, errors.New("vpc mac address is empty")
+        }
         trancelateENI := eni.Eni{
             Eni: enisdk.Eni{
                 EniId: bcceni.EniId,
diff --git a/cce-network-v2/pkg/bce/bcesync/eni.go b/cce-network-v2/pkg/bce/bcesync/eni.go
index d3528a1..89e59fd 100644
--- a/cce-network-v2/pkg/bce/bcesync/eni.go
+++ b/cce-network-v2/pkg/bce/bcesync/eni.go
@@ -377,7 +377,11 @@ func (es *eniSyncher) refreshENI(ctx context.Context, newObj *ccev2.ENI) error {
         return err
     }
 
-    if eniCache != nil && eniCache.MacAddress != "" {
+    if eniCache != nil {
+        if eniCache.MacAddress == "" {
+            return errors.New("vpc mac address is empty")
+        }
+
         newObj.Spec.ENI.ID = eniCache.EniId
         newObj.Spec.ENI.Name = eniCache.Name
         newObj.Spec.ENI.MacAddress = eniCache.MacAddress
diff --git a/cce-network-v2/pkg/bce/bcesync/physical_eni.go b/cce-network-v2/pkg/bce/bcesync/physical_eni.go
index e5cb359..a7f1c85 100644
--- a/cce-network-v2/pkg/bce/bcesync/physical_eni.go
+++ b/cce-network-v2/pkg/bce/bcesync/physical_eni.go
@@ -212,7 +212,10 @@ func (es *physicalENISyncer) refreshENI(ctx context.Context, newObj *ccev2.ENI)
         return err
     }
 
-    if eniCache != nil && eniCache.MacAddress != "" {
+    if eniCache != nil {
+        if eniCache.MacAddress == "" {
+            return errors.New("vpc mac address is empty")
+        }
         newObj.Spec.ENI.ID = eniCache.Id
         newObj.Spec.ENI.Name = eniCache.Name
         newObj.Spec.ENI.MacAddress = eniCache.MacAddress
diff --git a/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr.go b/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr.go
index 78a6697..d8c2f6e 100644
--- a/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr.go
+++ b/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr.go
@@ -24,17 +24,16 @@ import (
     "github.com/sirupsen/logrus"
     k8sErrors "k8s.io/apimachinery/pkg/api/errors"
+    "k8s.io/apimachinery/pkg/labels"
 
     "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/cidr"
     "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/controller"
     ipPkg "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/ip"
     "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/ipam"
-    ipamOption "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/ipam/option"
     v2 "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/k8s/apis/cce.baidubce.com/v2"
"github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/lock" "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/logging" "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/logging/logfields" - "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/option" "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/revert" "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/trigger" ) @@ -162,6 +161,7 @@ var updateK8sInterval = 15 * time.Second type NodesPodCIDRManager struct { k8sReSyncController *controller.Manager k8sReSync *trigger.Trigger + nodeGetter ipam.NetResourceSetGetterUpdater // Lock protects all fields below lock.Mutex @@ -217,6 +217,7 @@ func NewNodesPodCIDRManager( nodes: map[string]*nodeCIDRs{}, netResourceSetsToK8s: map[string]*netResourceSetK8sOp{}, k8sReSyncController: controller.NewManager(), + nodeGetter: nodeGetter, } // Have a trigger so that multiple calls, within a second, to sync with k8s @@ -369,64 +370,8 @@ func (n *NodesPodCIDRManager) Create(node *v2.NetResourceSet) error { func (n *NodesPodCIDRManager) Update(node *v2.NetResourceSet) error { n.Mutex.Lock() defer n.Mutex.Unlock() - return n.update(node) -} -// Needs n.Mutex to be held. -func (n *NodesPodCIDRManager) update(node *v2.NetResourceSet) error { - var ( - updateStatus, updateSpec bool - cn *v2.NetResourceSet - err error - ) - if option.Config.IPAMMode() == ipamOption.IPAMClusterPoolV2 || option.Config.IPAMMode() == ipamOption.IPAMVpcRoute { - cn, updateSpec, updateStatus, err = n.allocateNodeV2(node) - if err != nil { - return err - } - } else { - // FIXME: This code block falls back to the old behavior of clusterpool, - // where we only assign one pod CIDR for IPv4 and IPv6. Once v2 becomes - // fully backwards compatible with v1, we can remove this else block. - var allocated bool - cn, allocated, updateStatus, err = n.allocateNode(node) - if err != nil { - return err - } - // if allocated is false it means that we were unable to allocate - // a CIDR so we need to update the status of the node into k8s. - updateStatus = !allocated && updateStatus - // ClusterPool v1 never updates both the spec and the status - updateSpec = !updateStatus - } - if cn == nil { - // no-op - return nil - } - if updateStatus { - // the n.syncNode will never fail because it's only adding elements to a - // map. - // NodesPodCIDRManager will later on sync the node into k8s by the - // controller defined, which keeps retrying to create the node in k8s - // until it succeeds. - - // If the resource version is != "" it means the object already exists - // in kubernetes so we should perform an update status instead of a create. - if cn.GetResourceVersion() != "" { - n.syncNode(k8sOpUpdateStatus, cn) - } else { - n.syncNode(k8sOpCreate, cn) - } - } - if updateSpec { - // If the resource version is != "" it means the object already exists - // in kubernetes so we should perform an update instead of a create. 
-        if cn.GetResourceVersion() != "" {
-            n.syncNode(k8sOpUpdate, cn)
-        } else {
-            n.syncNode(k8sOpCreate, cn)
-        }
-    }
+    n.upsertLocked(node)
     return nil
 }
 
@@ -455,20 +400,68 @@ func (n *NodesPodCIDRManager) Delete(nodeName string) error {
 func (n *NodesPodCIDRManager) Resync(context.Context, time.Time) {
     n.Mutex.Lock()
     if !n.canAllocatePodCIDRs {
+        nrsDatas, err := n.nodeGetter.Lister().List(labels.Everything())
+        if err != nil {
+            log.WithError(err).Fatal("Failed to list NetResourceSet")
+        }
+        for _, nrs := range nrsDatas {
+            n.upsertLocked(nrs)
+        }
+
+        log.Infof("resynced pod CIDRs for %d existing NetResourceSets", len(nrsDatas))
+
         // We can now allocate podCIDRs
         n.canAllocatePodCIDRs = true
         // Iterate over all nodes that we have kept stored up until Resync
         // is called as now we are allowed to allocate podCIDRs for nodes
        // without any podCIDR.
         for _, cn := range n.nodesToAllocate {
-            n.update(cn)
+            n.upsertLocked(cn)
         }
+        log.Infof("allocated pod CIDRs for %d postponed nodes", len(n.nodesToAllocate))
         n.nodesToAllocate = nil
     }
     n.Mutex.Unlock()
     n.k8sReSync.Trigger()
 }
 
+// Needs n.Mutex to be held.
+func (n *NodesPodCIDRManager) upsertLocked(node *v2.NetResourceSet) {
+    cn, allocated, updateStatus, err := n.allocateNode(node)
+    if err != nil {
+        return
+    }
+    if cn == nil {
+        // no-op
+        return
+    }
+    // if allocated is false it means that we were unable to allocate
+    // a CIDR so we need to update the status of the node in k8s.
+    if !allocated && updateStatus {
+        // the n.syncNode will never fail because it's only adding elements to a
+        // map.
+        // NodesPodCIDRManager will later on sync the node into k8s by the
+        // controller defined, which keeps retrying to create the node in k8s
+        // until it succeeds.
+
+        // If the resource version is != "" it means the object already exists
+        // in kubernetes so we should perform an update status instead of a create.
+        if cn.GetResourceVersion() != "" {
+            n.syncNode(k8sOpUpdateStatus, cn)
+        } else {
+            n.syncNode(k8sOpCreate, cn)
+        }
+        return
+    }
+    // If the resource version is != "" it means the object already exists
+    // in kubernetes so we should perform an update instead of a create.
+    if cn.GetResourceVersion() != "" {
+        n.syncNode(k8sOpUpdate, cn)
+    } else {
+        n.syncNode(k8sOpCreate, cn)
+    }
+}
+
 // AllocateNode allocates the podCIDRs for the given node. Returns a DeepCopied
 // node with the podCIDRs allocated. In case there weren't CIDRs allocated
 // the returned node will be nil.
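Note on the vpc-route duplicate-CIDR fix above: the ordering inside Resync is what matters. Every NetResourceSet that already owns pod CIDRs is first replayed through upsertLocked, which marks those ranges as occupied in the CIDR allocators, and only then is canAllocatePodCIDRs switched on so postponed nodes can receive fresh ranges. The sketch below is a minimal, self-contained illustration of that occupy-before-allocate idea; the toy allocator and its names are illustrative assumptions, not the real CIDRAllocator API.

// Sketch only: shows why CIDRs already recorded on existing nodes must be
// occupied before any new allocation happens after an operator restart.
package main

import "fmt"

type toyAllocator struct{ used map[string]bool }

// Occupy marks a CIDR that is already assigned to an existing node.
func (a *toyAllocator) Occupy(cidr string) { a.used[cidr] = true }

// AllocateNext hands out the first /24 that is not yet occupied.
func (a *toyAllocator) AllocateNext() string {
    for i := 0; ; i++ {
        c := fmt.Sprintf("10.10.%d.0/24", i)
        if !a.used[c] {
            a.used[c] = true
            return c
        }
    }
}

func main() {
    a := &toyAllocator{used: map[string]bool{}}

    // Resync phase: replay CIDRs already owned by existing nodes so a
    // restarted operator cannot hand them out a second time.
    for _, existing := range []string{"10.10.0.0/24", "10.10.1.0/24"} {
        a.Occupy(existing)
    }

    // Only afterwards allocate for nodes that still need a pod CIDR.
    fmt.Println(a.AllocateNext()) // prints 10.10.2.0/24 -> no duplicate
}

Without that occupy pass, a restarted operator would start handing out CIDRs from the beginning of the cluster range again and could collide with ranges already assigned to existing nodes, which is the duplicate-CIDR symptom described in the 2.10.1 release notes.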
diff --git a/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr_test.go b/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr_test.go index 2879477..b57d367 100644 --- a/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr_test.go +++ b/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr_test.go @@ -71,7 +71,7 @@ func mustNewTrigger(f func(), minInterval time.Duration) *trigger.Trigger { return t } -var defaultIPAMModes = []string{ipamOption.IPAMClusterPool, ipamOption.IPAMClusterPoolV2} +var defaultIPAMModes = []string{ipamOption.IPAMClusterPool} func runWithIPAMModes(ipamModes []string, testFunc func(mode string)) { oldIPAMMode := option.Config.IPAM diff --git a/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr_v2.go b/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr_v2.go deleted file mode 100644 index 158638c..0000000 --- a/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr_v2.go +++ /dev/null @@ -1,324 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright 2021 Authors of CCE - -package podcidr - -import ( - "fmt" - "net" - "sort" - - "github.com/sirupsen/logrus" - "go.uber.org/multierr" - - "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/cidr" - ipPkg "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/ip" - "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/ipam/types" - v2 "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/k8s/apis/cce.baidubce.com/v2" - "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/logging/logfields" -) - -type specPodCIDRs []*net.IPNet - -func (s specPodCIDRs) Contains(other *net.IPNet) bool { - for _, ipNet := range s { - if cidr.Equal(ipNet, other) { - return true - } - } - return false -} - -type podCIDRStatus struct { - ipNet *net.IPNet - status types.PodCIDRStatus -} - -type statusPodCIDRs []podCIDRStatus - -func (s statusPodCIDRs) Contains(other *net.IPNet) bool { - for _, c := range s { - if cidr.Equal(c.ipNet, other) { - return true - } - } - return false -} - -func (s statusPodCIDRs) Sort() { - sort.SliceStable(s, func(i, j int) bool { - return s[i].ipNet.String() < s[j].ipNet.String() - }) -} - -type nodeAction struct { - // allocateNext is set to true to indicate that a new pod CIDR should be - // allocated and added to this node's podCIDR list - allocateNext bool - // release contains a list of CIDRs which can be deallocated and removed - // from this node's podCIDR list - release []*net.IPNet - // reuse contains a list of CIDRs which we want mark as occupied and keep - // in this node's podCIDR list. This list is guaranteed to have the same - // pod CIDRs order as node.Spec.IPAM.PodCIDRs. - reuse []*net.IPNet - // needsResync is set to true if the internal allocator state is not - // reflected in the NetResourceSet CRD and therefore needs to be resynced. - needsResync bool -} - -func (a *nodeAction) performNodeAction( - allocators []CIDRAllocator, - allocType allocatorType, - allocatedCIDRs []*net.IPNet, -) (result []*net.IPNet, changed bool, errs error) { - result = append([]*net.IPNet(nil), allocatedCIDRs...) - - if len(a.reuse) > 0 { - _, err := allocateIPNet(allocType, allocators, a.reuse) - if err != nil { - errs = multierr.Append(errs, err) - } else { - result = append(result, a.reuse...) 
- changed = true - } - } - - if len(a.release) > 0 { - releaseCIDRs(allocators, a.release) - result = cidr.RemoveAll(result, a.release) - changed = true - } - - if a.allocateNext { - _, cidr, err := allocateFirstFreeCIDR(allocators) - if err != nil { - errs = multierr.Append(errs, err) - } else { - result = append(result, cidr) - changed = true - } - } - - return result, changed, errs -} - -func buildNodeAction( - spec specPodCIDRs, - status statusPodCIDRs, - allocatedCIDRs []*net.IPNet, - hasAllocators bool, -) (action nodeAction) { - // Keeps track of any CIDRs we do not want to reuse, i.e. any CIDRs which - // are either already allocated, marked for released, or already released - noReuseCIDRs := map[string]struct{}{} - for _, podCIDR := range allocatedCIDRs { - noReuseCIDRs[podCIDR.String()] = struct{}{} - } - - // Check if node has any in in-use or released pod CIDRs - hasAvailablePodCIDR := false - for _, statusCIDR := range status { - podCIDR := statusCIDR.ipNet - switch statusCIDR.status { - case types.PodCIDRStatusReleased: - // Never reuse CIDRs marked for release - noReuseCIDRs[podCIDR.String()] = struct{}{} - // Only actually release the CIDRs which have been allocated to this node - if cidr.Contains(allocatedCIDRs, podCIDR) { - action.release = append(action.release, podCIDR) - } - case types.PodCIDRStatusDepleted: - // If the node only contains depleted and released CIDRs, the next - // case ("in-use") will never be hit and we will allocate a new - // CIDR for this node. - case types.PodCIDRStatusInUse: - hasAvailablePodCIDR = true - } - } - - // If we find an unused CIDR, i.e. one that is present in .Spec, but absent - // in .Status, we do not have to allocate a new CIDR for this node. - for _, specCIDR := range spec { - if status.Contains(specCIDR) { - continue - } - hasAvailablePodCIDR = true - } - - // Only allocate if a node has no available pod CIDRs in either .Spec or .Status - action.allocateNext = hasAllocators && !hasAvailablePodCIDR - - // If there are any existing pod CIDRs in either .Spec or .Status which - // have neither been allocated to the node yet nor are marked for release, - // we want to reuse them, meaning marking them as allocated such that - // they are not accidentally handed out to any other node. We add each - // reused pod CIDR to noReuseCIDRs to avoid duplicates. - // - // Note: We iterate over spec and then status to preserve the order - // in which the CIDRs are listed in the NetResourceSet CRD. - for _, podCIDR := range spec { - podCIDRStr := podCIDR.String() - if _, ok := noReuseCIDRs[podCIDRStr]; !ok { - action.reuse = append(action.reuse, podCIDR) - noReuseCIDRs[podCIDRStr] = struct{}{} - } - } - for _, podCIDR := range status { - podCIDRStr := podCIDR.ipNet.String() - if _, ok := noReuseCIDRs[podCIDRStr]; !ok { - action.reuse = append(action.reuse, podCIDR.ipNet) - noReuseCIDRs[podCIDRStr] = struct{}{} - } - } - - // If we find any allocated pod CIDRs which are absent in the - // NetResourceSet CRD, we want to resync the CRD to ensure they get added back in. 
- for _, podCIDR := range allocatedCIDRs { - if !spec.Contains(podCIDR) && !cidr.Contains(action.release, podCIDR) { - action.needsResync = true - break - } - } - - return action -} - -// updateNode is set to true if the NetResourceSet CRD needs to be updated -// based on the determined node actions -func determineNodeActions(node *v2.NetResourceSet, hasV4Allocators, hasV6Allocators bool, v4PodCIDRs, v6PodCIDRs []*net.IPNet) (v4Action, v6Action nodeAction, err error) { - v4PodCIDRSpec, v6PodCIDRSpec, v4PodCIDRStatus, v6PodCIDRStatus, err := extractPodCIDRs(node) - if err != nil { - return v4Action, v6Action, err - } - - v4Action = buildNodeAction(v4PodCIDRSpec, v4PodCIDRStatus, v4PodCIDRs, hasV4Allocators) - v6Action = buildNodeAction(v6PodCIDRSpec, v6PodCIDRStatus, v6PodCIDRs, hasV6Allocators) - - return v4Action, v6Action, nil -} - -func extractPodCIDRs(node *v2.NetResourceSet) ( - v4PodCIDRSpec, v6PodCIDRSpec specPodCIDRs, - v4PodCIDRStatus, v6PodCIDRStatus statusPodCIDRs, - err error, -) { - for _, podCIDRStr := range node.Spec.IPAM.PodCIDRs { - _, podCIDR, err := net.ParseCIDR(podCIDRStr) - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("invalid pod CIDR in .Spec.IPAM.PodCIDRs: %w", err) - } - - if ipPkg.IsIPv4(podCIDR.IP) { - v4PodCIDRSpec = append(v4PodCIDRSpec, podCIDR) - } else { - v6PodCIDRSpec = append(v6PodCIDRSpec, podCIDR) - } - } - - for podCIDRStr, s := range node.Status.IPAM.PodCIDRs { - _, podCIDR, err := net.ParseCIDR(podCIDRStr) - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("invalid pod CIDR in .Status.IPAM.PodCIDRs: %w", err) - } - - status := podCIDRStatus{ - ipNet: podCIDR, - status: s.Status, - } - - if ipPkg.IsIPv4(podCIDR.IP) { - v4PodCIDRStatus = append(v4PodCIDRStatus, status) - } else { - v6PodCIDRStatus = append(v6PodCIDRStatus, status) - } - } - - // The iteration order of Golang maps is random. 
Sort status CIDRs to ensure - // deterministic behavior - v4PodCIDRStatus.Sort() - v6PodCIDRStatus.Sort() - - return v4PodCIDRSpec, v6PodCIDRSpec, v4PodCIDRStatus, v6PodCIDRStatus, nil -} - -func (n *NodesPodCIDRManager) allocateNodeV2(node *v2.NetResourceSet) (cn *v2.NetResourceSet, updateSpec, updateStatus bool, err error) { - log = log.WithFields(logrus.Fields{ - "node-name": node.Name, - }) - - // list of pod CIDRs already allocated to this node - allocated, ok := n.nodes[node.Name] - if !ok { - allocated = &nodeCIDRs{} - } - - // determines the allocation actions to be performed on this node - hasV4Allocators := len(n.v4CIDRAllocators) != 0 - hasV6Allocators := len(n.v6CIDRAllocators) != 0 - v4Action, v6Action, err := determineNodeActions(node, hasV4Allocators, hasV6Allocators, allocated.v4PodCIDRs, allocated.v6PodCIDRs) - if err != nil { - cn = node.DeepCopy() - cn.Status.IPAM.OperatorStatus.Error = err.Error() - return cn, false, true, nil - } - - // cannot allocate until we have received all existing node objects - postponeAllocation := (v4Action.allocateNext || v6Action.allocateNext) && !n.canAllocatePodCIDRs - if postponeAllocation { - v4Action.allocateNext = false - v6Action.allocateNext = false - } - - v4PodCIDRs, v4Changed, v4Errors := v4Action.performNodeAction(n.v4CIDRAllocators, v4AllocatorType, allocated.v4PodCIDRs) - v6PodCIDRs, v6Changed, v6Errors := v6Action.performNodeAction(n.v6CIDRAllocators, v6AllocatorType, allocated.v6PodCIDRs) - err = multierr.Combine(v4Errors, v6Errors) - - updateStatus = err != nil - updateSpec = (v4Changed || v4Action.needsResync) || (v6Changed || v6Action.needsResync) - - log.WithFields(logrus.Fields{ - "v4-pod-cidrs": logfields.Repr(v4PodCIDRs), - "v6-pod-cidrs": logfields.Repr(v6PodCIDRs), - "v4-changed": v4Changed, - "v6-changed": v6Changed, - "update-spec": updateSpec, - "update-status": updateStatus, - "error": err, - "postpone-allocation": postponeAllocation, - }).Debug("Performed node actions") - - if !(postponeAllocation || updateSpec || updateStatus) { - return nil, false, false, nil // no-op - } - - cn = node.DeepCopy() - if updateSpec { - n.nodes[node.Name] = &nodeCIDRs{ - v4PodCIDRs: v4PodCIDRs, - v6PodCIDRs: v6PodCIDRs, - } - - cn.Spec.IPAM.PodCIDRs = make([]string, 0, len(v4PodCIDRs)+len(v6PodCIDRs)) - for _, v4CIDR := range v4PodCIDRs { - cn.Spec.IPAM.PodCIDRs = append(cn.Spec.IPAM.PodCIDRs, v4CIDR.String()) - } - for _, v6CIDR := range v6PodCIDRs { - cn.Spec.IPAM.PodCIDRs = append(cn.Spec.IPAM.PodCIDRs, v6CIDR.String()) - } - } - - // Clear any previous errors - cn.Status.IPAM.OperatorStatus.Error = "" - if err != nil { - cn.Status.IPAM.OperatorStatus.Error = err.Error() - } - - // queue this node for new CIDR allocation once we've reused all other CIDRs - if postponeAllocation { - log.Debug("Postponing new CIDR allocation") - n.nodesToAllocate[node.Name] = cn - } - - return cn, updateSpec, updateStatus, nil -} diff --git a/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr_v2_test.go b/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr_v2_test.go deleted file mode 100644 index 71c4284..0000000 --- a/cce-network-v2/pkg/ipam/allocator/podcidr/podcidr_v2_test.go +++ /dev/null @@ -1,417 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright 2021 Authors of CCE - -//go:build !privileged_tests - -package podcidr - -import ( - "net" - - . 
"gopkg.in/check.v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/checker" - ipamTypes "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/ipam/types" - v2 "github.com/baidubce/baiducloud-cce-cni-driver/cce-network-v2/pkg/k8s/apis/cce.baidubce.com/v2" -) - -func (s *PodCIDRSuite) TestNodesPodCIDRManager_allocateNodeV2(c *C) { - type fields struct { - v4ClusterCIDRs []CIDRAllocator - v6ClusterCIDRs []CIDRAllocator - nodes map[string]*nodeCIDRs - canAllocatePodCIDRs bool - nodesToAllocate map[string]*v2.NetResourceSet - } - type args struct { - node *v2.NetResourceSet - } - tests := []struct { - testSetup func() *fields - testPostRun func(fields *fields) - name string - fields *fields - args args - wantNetResourceSet *v2.NetResourceSet - wantUpdateSpec bool - wantUpdateStatus bool - wantErr error - }{ - { - name: "test occupy, release, and allocate v4", - testSetup: func() *fields { - return &fields{ - v4ClusterCIDRs: []CIDRAllocator{ - &mockCIDRAllocator{ - OnIsFull: func() bool { - return false - }, - OnAllocateNext: func() (*net.IPNet, error) { - return mustNewCIDRs("10.10.3.0/24")[0], nil - }, - OnOccupy: func(cidr *net.IPNet) error { - c.Assert(cidr.String(), checker.DeepEquals, "10.10.2.0/24") - return nil - }, - OnIsAllocated: func(_ *net.IPNet) (bool, error) { - return false, nil - }, - OnInRange: func(_ *net.IPNet) bool { - return true - }, - OnRelease: func(cidr *net.IPNet) error { - c.Assert(cidr.String(), checker.DeepEquals, "10.10.1.0/24") - return nil - }, - }, - }, - nodes: map[string]*nodeCIDRs{ - "node-1": { - v4PodCIDRs: mustNewCIDRs("10.10.1.0/24"), - }, - }, - nodesToAllocate: map[string]*v2.NetResourceSet{}, - canAllocatePodCIDRs: true, - } - }, - testPostRun: func(fields *fields) { - c.Assert(fields.nodes, checker.DeepEquals, map[string]*nodeCIDRs{ - "node-1": { - v4PodCIDRs: mustNewCIDRs("10.10.2.0/24", "10.10.3.0/24"), - }, - }) - }, - args: args{ - node: &v2.NetResourceSet{ - ObjectMeta: metav1.ObjectMeta{Name: "node-1"}, - Spec: v2.NetResourceSpec{ - IPAM: ipamTypes.IPAMSpec{ - PodCIDRs: []string{ - "10.10.1.0/24", - "10.10.2.0/24", - }, - }, - }, - Status: v2.NetResourceStatus{ - IPAM: ipamTypes.IPAMStatus{ - PodCIDRs: ipamTypes.PodCIDRMap{ - "10.10.1.0/24": {Status: ipamTypes.PodCIDRStatusReleased}, - "10.10.2.0/24": {Status: ipamTypes.PodCIDRStatusDepleted}, - }, - }, - }, - }, - }, - wantNetResourceSet: &v2.NetResourceSet{ - ObjectMeta: metav1.ObjectMeta{Name: "node-1"}, - Spec: v2.NetResourceSpec{ - IPAM: ipamTypes.IPAMSpec{ - PodCIDRs: []string{ - "10.10.2.0/24", - "10.10.3.0/24", - }, - }, - }, - Status: v2.NetResourceStatus{ - IPAM: ipamTypes.IPAMStatus{ - PodCIDRs: ipamTypes.PodCIDRMap{ - "10.10.1.0/24": {Status: ipamTypes.PodCIDRStatusReleased}, - "10.10.2.0/24": {Status: ipamTypes.PodCIDRStatusDepleted}, - }, - }, - }, - }, - wantUpdateStatus: false, - wantUpdateSpec: true, - wantErr: nil, - }, - { - name: "test allocate v4 and occupy v6 from status", - testSetup: func() *fields { - return &fields{ - v4ClusterCIDRs: []CIDRAllocator{ - &mockCIDRAllocator{ - OnIsFull: func() bool { - return false - }, - OnAllocateNext: func() (*net.IPNet, error) { - return mustNewCIDRs("10.10.1.0/24")[0], nil - }, - }, - }, - v6ClusterCIDRs: []CIDRAllocator{ - &mockCIDRAllocator{ - OnOccupy: func(_ *net.IPNet) error { - return nil - }, - OnIsAllocated: func(_ *net.IPNet) (bool, error) { - return false, nil - }, - OnIsFull: func() bool { - return false - }, - OnInRange: func(cidr 
*net.IPNet) bool { - return true - }, - }, - }, - nodes: map[string]*nodeCIDRs{}, - nodesToAllocate: map[string]*v2.NetResourceSet{}, - canAllocatePodCIDRs: true, - } - }, - testPostRun: func(fields *fields) { - c.Assert(fields.nodes, checker.DeepEquals, map[string]*nodeCIDRs{ - "node-1": { - v4PodCIDRs: mustNewCIDRs("10.10.1.0/24"), - v6PodCIDRs: mustNewCIDRs("fd00::/80", "fd01::/80"), - }, - }) - }, - args: args{ - node: &v2.NetResourceSet{ - ObjectMeta: metav1.ObjectMeta{Name: "node-1"}, - Spec: v2.NetResourceSpec{ - IPAM: ipamTypes.IPAMSpec{ - PodCIDRs: []string{}, - }, - }, - Status: v2.NetResourceStatus{ - IPAM: ipamTypes.IPAMStatus{ - PodCIDRs: ipamTypes.PodCIDRMap{ - "fd00::/80": {Status: ipamTypes.PodCIDRStatusInUse}, - "fd01::/80": {Status: ipamTypes.PodCIDRStatusDepleted}, - }, - }, - }, - }, - }, - wantNetResourceSet: &v2.NetResourceSet{ - ObjectMeta: metav1.ObjectMeta{Name: "node-1"}, - Spec: v2.NetResourceSpec{ - IPAM: ipamTypes.IPAMSpec{ - PodCIDRs: []string{ - "10.10.1.0/24", - "fd00::/80", "fd01::/80", - }, - }, - }, - Status: v2.NetResourceStatus{ - IPAM: ipamTypes.IPAMStatus{ - PodCIDRs: ipamTypes.PodCIDRMap{ - "fd00::/80": {Status: ipamTypes.PodCIDRStatusInUse}, - "fd01::/80": {Status: ipamTypes.PodCIDRStatusDepleted}, - }, - }, - }, - }, - wantUpdateStatus: false, - wantUpdateSpec: true, - wantErr: nil, - }, - { - name: "test occupy depleted but delay allocation v4", - testSetup: func() *fields { - return &fields{ - canAllocatePodCIDRs: false, - v4ClusterCIDRs: []CIDRAllocator{ - &mockCIDRAllocator{ - OnOccupy: func(_ *net.IPNet) error { - return nil - }, - OnIsAllocated: func(_ *net.IPNet) (bool, error) { - return false, nil - }, - OnIsFull: func() bool { - return false - }, - OnInRange: func(cidr *net.IPNet) bool { - return true - }, - }, - }, - nodes: map[string]*nodeCIDRs{}, - nodesToAllocate: map[string]*v2.NetResourceSet{}, - } - }, - testPostRun: func(fields *fields) { - c.Assert(fields.nodes, checker.DeepEquals, map[string]*nodeCIDRs{ - "node-1": { - v4PodCIDRs: mustNewCIDRs("10.10.1.0/24"), - }, - }) - c.Assert(fields.nodesToAllocate, checker.DeepEquals, map[string]*v2.NetResourceSet{ - "node-1": { - ObjectMeta: metav1.ObjectMeta{Name: "node-1"}, - Spec: v2.NetResourceSpec{ - IPAM: ipamTypes.IPAMSpec{ - PodCIDRs: []string{"10.10.1.0/24"}, - }, - }, - Status: v2.NetResourceStatus{ - IPAM: ipamTypes.IPAMStatus{ - PodCIDRs: ipamTypes.PodCIDRMap{ - "10.10.1.0/24": {Status: ipamTypes.PodCIDRStatusDepleted}, - "10.10.2.0/24": {Status: ipamTypes.PodCIDRStatusReleased}, - }, - }, - }, - }, - }) - }, - args: args{ - node: &v2.NetResourceSet{ - ObjectMeta: metav1.ObjectMeta{Name: "node-1"}, - Spec: v2.NetResourceSpec{ - IPAM: ipamTypes.IPAMSpec{ - PodCIDRs: []string{"10.10.1.0/24"}, - }, - }, - Status: v2.NetResourceStatus{ - IPAM: ipamTypes.IPAMStatus{ - PodCIDRs: ipamTypes.PodCIDRMap{ - "10.10.1.0/24": {Status: ipamTypes.PodCIDRStatusDepleted}, - "10.10.2.0/24": {Status: ipamTypes.PodCIDRStatusReleased}, - }, - }, - }, - }, - }, - wantNetResourceSet: &v2.NetResourceSet{ - ObjectMeta: metav1.ObjectMeta{Name: "node-1"}, - Spec: v2.NetResourceSpec{ - IPAM: ipamTypes.IPAMSpec{ - PodCIDRs: []string{"10.10.1.0/24"}, - }, - }, - Status: v2.NetResourceStatus{ - IPAM: ipamTypes.IPAMStatus{ - PodCIDRs: ipamTypes.PodCIDRMap{ - "10.10.1.0/24": {Status: ipamTypes.PodCIDRStatusDepleted}, - "10.10.2.0/24": {Status: ipamTypes.PodCIDRStatusReleased}, - }, - }, - }, - }, - wantUpdateStatus: false, - wantUpdateSpec: true, - wantErr: nil, - }, - { - name: "test allocate and occupy v4 
errors, but allocate and occupy v6 succeeds", - testSetup: func() *fields { - return &fields{ - v4ClusterCIDRs: []CIDRAllocator{ - &mockCIDRAllocator{ - OnIsFull: func() bool { - return true - }, - OnAllocateNext: func() (*net.IPNet, error) { - return nil, &ErrAllocatorFull{} - }, - OnInRange: func(_ *net.IPNet) bool { - return true - }, - }, - }, - v6ClusterCIDRs: []CIDRAllocator{ - &mockCIDRAllocator{ - OnIsFull: func() bool { - return false - }, - OnAllocateNext: func() (*net.IPNet, error) { - return mustNewCIDRs("fd01::/80")[0], nil - }, - OnOccupy: func(cidr *net.IPNet) error { - c.Assert(cidr.String(), checker.DeepEquals, "fd00::/80") - return nil - }, - OnIsAllocated: func(_ *net.IPNet) (bool, error) { - return false, nil - }, - OnInRange: func(_ *net.IPNet) bool { - return true - }, - }, - }, - nodes: map[string]*nodeCIDRs{}, - nodesToAllocate: map[string]*v2.NetResourceSet{}, - canAllocatePodCIDRs: true, - } - }, - testPostRun: func(fields *fields) { - c.Assert(fields.nodes, checker.DeepEquals, map[string]*nodeCIDRs{ - "node-1": { - v6PodCIDRs: mustNewCIDRs("fd00::/80", "fd01::/80"), - }, - }) - }, - args: args{ - node: &v2.NetResourceSet{ - ObjectMeta: metav1.ObjectMeta{Name: "node-1"}, - Status: v2.NetResourceStatus{ - IPAM: ipamTypes.IPAMStatus{ - PodCIDRs: ipamTypes.PodCIDRMap{ - "fd00::/80": ipamTypes.PodCIDRMapEntry{ - Status: ipamTypes.PodCIDRStatusDepleted, - }, - "10.10.0.0/24": ipamTypes.PodCIDRMapEntry{ - Status: ipamTypes.PodCIDRStatusDepleted, - }, - }, - }, - }, - }, - }, - wantNetResourceSet: &v2.NetResourceSet{ - ObjectMeta: metav1.ObjectMeta{Name: "node-1"}, - Spec: v2.NetResourceSpec{ - IPAM: ipamTypes.IPAMSpec{ - PodCIDRs: []string{ - "fd00::/80", "fd01::/80", - }, - }, - }, - Status: v2.NetResourceStatus{ - IPAM: ipamTypes.IPAMStatus{ - PodCIDRs: ipamTypes.PodCIDRMap{ - "fd00::/80": ipamTypes.PodCIDRMapEntry{ - Status: ipamTypes.PodCIDRStatusDepleted, - }, - "10.10.0.0/24": ipamTypes.PodCIDRMapEntry{ - Status: ipamTypes.PodCIDRStatusDepleted, - }, - }, - OperatorStatus: ipamTypes.OperatorStatus{ - Error: "allocator clusterCIDR: 10.0.0.0/24, nodeMask: 24 full; allocator full", - }, - }, - }, - }, - wantUpdateStatus: true, - wantUpdateSpec: true, - wantErr: nil, - }, - } - - for _, tt := range tests { - tt.fields = tt.testSetup() - n := &NodesPodCIDRManager{ - v4CIDRAllocators: tt.fields.v4ClusterCIDRs, - v6CIDRAllocators: tt.fields.v6ClusterCIDRs, - nodes: tt.fields.nodes, - nodesToAllocate: tt.fields.nodesToAllocate, - canAllocatePodCIDRs: tt.fields.canAllocatePodCIDRs, - } - cn, updateSpec, updateStatus, err := n.allocateNodeV2(tt.args.node) - c.Assert(err, checker.DeepEquals, tt.wantErr, Commentf("Test Name: %s", tt.name)) - c.Assert(updateSpec, checker.DeepEquals, tt.wantUpdateSpec, Commentf("Test Name: %s", tt.name)) - c.Assert(updateStatus, checker.DeepEquals, tt.wantUpdateStatus, Commentf("Test Name: %s", tt.name)) - c.Assert(cn, checker.DeepEquals, tt.wantNetResourceSet, Commentf("Test Name: %s", tt.name)) - - if tt.testPostRun != nil { - tt.testPostRun(tt.fields) - } - } -} diff --git a/cce-network-v2/plugins/cptp/cptp.go b/cce-network-v2/plugins/cptp/cptp.go index abd9f73..fc49230 100644 --- a/cce-network-v2/plugins/cptp/cptp.go +++ b/cce-network-v2/plugins/cptp/cptp.go @@ -45,28 +45,6 @@ func containerSet(hostVeth, contVeth *net.Interface, pr *current.Result) error { return err } -// hostSet sets up the host interface -// this method is called at host network namespace -func hostSet(hostVeth, contVeth *net.Interface, pr *current.Result) error { - var 
err error - for _, ipc := range pr.IPs { - // Add a permanent ARP entry for the gateway - err = netlink.NeighAdd(&netlink.Neigh{ - LinkIndex: hostVeth.Index, - State: netlink.NUD_PERMANENT, - IP: ipc.Address.IP, - HardwareAddr: func() net.HardwareAddr { - return contVeth.HardwareAddr - }(), - }) - if err != nil { - return fmt.Errorf("failed to add permanent ARP entry for the gateway %q: %v", ipc.Gateway, err) - } - } - - return err -} - // AddLinkRoute adds a link-scoped route to a device. func AddLinkRoute(ipn *net.IPNet, gw net.IP, dev netlink.Link) error { return netlink.RouteAdd(&netlink.Route{ diff --git a/cce-network-v2/plugins/pluginmanager/plugin_manager.go b/cce-network-v2/plugins/pluginmanager/plugin_manager.go index b76d4d3..ca7b495 100644 --- a/cce-network-v2/plugins/pluginmanager/plugin_manager.go +++ b/cce-network-v2/plugins/pluginmanager/plugin_manager.go @@ -131,7 +131,7 @@ func defaultCNIPlugin() *CniListConfig { } // primary eni plugin - if option.Config.ENI.UseMode == string(ccev2.ENIUseModePrimaryIP) { + if option.Config.ENI != nil && option.Config.ENI.UseMode == string(ccev2.ENIUseModePrimaryIP) { result.Plugins = append(result.Plugins, ccePlugins[pluginNameExclusiveDevice]) } else { // use cptp plugin defalt