From 1995b3df01d34bc209b1eb255bedd27ea3f14716 Mon Sep 17 00:00:00 2001 From: Simon Tien Date: Fri, 24 Jan 2025 14:21:22 +1100 Subject: [PATCH] feat: add hub election leader controller fix: update configuration param --- .../crds/apps.openyurt.io_nodepools.yaml | 6 + .../yurt-manager-auto-generated.yaml | 34 + .../app/options/hubleadercontroller.go | 69 ++ cmd/yurt-manager/app/options/options.go | 12 +- cmd/yurt-manager/names/controller_names.go | 1 + pkg/apis/apps/v1alpha1/nodepool_conversion.go | 1 + pkg/apis/apps/v1beta1/nodepool_conversion.go | 1 + pkg/apis/apps/v1beta2/default.go | 3 + pkg/apis/apps/v1beta2/nodepool_types.go | 5 + .../controller/apis/config/types.go | 4 + .../controller/hubleader/config/types.go | 22 + .../hubleader/hubleader_controller.go | 297 ++++++++ .../hubleader/hubleader_controller_test.go | 690 ++++++++++++++++++ .../nodepool/nodepool_controller.go | 3 +- .../nodepool/nodepool_enqueue_handlers.go | 6 +- pkg/yurtmanager/controller/nodepool/util.go | 8 - .../controller/nodepool/util_test.go | 59 -- .../controller/util/node/controller_utils.go | 102 ++- .../util/node/controller_utils_test.go | 82 +++ .../nodepool/v1beta2/nodepool_default.go | 7 +- .../nodepool/v1beta2/nodepool_default_test.go | 132 +++- .../nodepool/v1beta2/nodepool_handler.go | 2 +- .../nodepool/v1beta2/nodepool_validation.go | 2 +- .../v1beta2/nodepool_validation_test.go | 2 +- 24 files changed, 1460 insertions(+), 90 deletions(-) create mode 100644 cmd/yurt-manager/app/options/hubleadercontroller.go create mode 100644 pkg/yurtmanager/controller/hubleader/config/types.go create mode 100644 pkg/yurtmanager/controller/hubleader/hubleader_controller.go create mode 100644 pkg/yurtmanager/controller/hubleader/hubleader_controller_test.go create mode 100644 pkg/yurtmanager/controller/util/node/controller_utils_test.go diff --git a/charts/yurt-manager/crds/apps.openyurt.io_nodepools.yaml b/charts/yurt-manager/crds/apps.openyurt.io_nodepools.yaml index 628c3b73cb1..1139efd4461 100644 --- a/charts/yurt-manager/crds/apps.openyurt.io_nodepools.yaml +++ b/charts/yurt-manager/crds/apps.openyurt.io_nodepools.yaml @@ -373,6 +373,12 @@ spec: LeaderNodeLabelSelector is used only when LeaderElectionStrategy is mark. leader Yurhub will be elected from nodes that filtered by this label selector. type: object + leaderReplicas: + description: |- + LeaderReplicas is used for specifying the number of leader replicas in the nodepool. + If the field is not specified, the default value is 1. + format: int32 + type: integer poolScopeMetadata: description: |- PoolScopeMetadata is used for specifying resources which will be shared in the nodepool. 
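For illustration (outside the patch itself): the leaderReplicas field added to the CRD above maps onto the v1beta2 Go types touched later in this patch. A minimal sketch of a NodePool spec that asks for two hub leaders under the mark strategy — the pool name, the leader label value, and the replica count are made up; values <= 0 (or an omitted field) are defaulted to 1 by the webhook:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2"
)

func main() {
	// A NodePool requesting two hub leaders via the new leaderReplicas field.
	np := &v1beta2.NodePool{
		ObjectMeta: metav1.ObjectMeta{Name: "hangzhou"},
		Spec: v1beta2.NodePoolSpec{
			Type:                    v1beta2.Edge,
			InterConnectivity:       true,
			LeaderElectionStrategy:  string(v1beta2.ElectionStrategyMark),
			LeaderNodeLabelSelector: map[string]string{"apps.openyurt.io/leader": "true"},
			LeaderReplicas:          2, // defaulted to 1 when omitted or <= 0
		},
	}
	fmt.Println(np.Spec.LeaderReplicas)
}
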
diff --git a/charts/yurt-manager/templates/yurt-manager-auto-generated.yaml b/charts/yurt-manager/templates/yurt-manager-auto-generated.yaml index bb0f42ee84f..3295ac27366 100644 --- a/charts/yurt-manager/templates/yurt-manager-auto-generated.yaml +++ b/charts/yurt-manager/templates/yurt-manager-auto-generated.yaml @@ -50,6 +50,12 @@ metadata: --- apiVersion: v1 kind: ServiceAccount +metadata: + name: yurt-manager-hubleader-controller + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount metadata: name: yurt-manager-load-balancer-set-controller namespace: {{ .Release.Namespace }} @@ -471,6 +477,21 @@ rules: --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole +metadata: + name: yurt-manager-hubleader-controller +rules: +- apiGroups: + - apps.openyurt.io + resources: + - nodepool + - nodepool/status + verbs: + - get + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole metadata: name: yurt-manager-load-balancer-set-controller rules: @@ -1035,6 +1056,19 @@ subjects: --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + name: yurt-manager-hubleader-controller-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: yurt-manager-hubleader-controller +subjects: +- kind: ServiceAccount + name: yurt-manager-hubleader-controller + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: name: yurt-manager-load-balancer-set-controller-binding roleRef: diff --git a/cmd/yurt-manager/app/options/hubleadercontroller.go b/cmd/yurt-manager/app/options/hubleadercontroller.go new file mode 100644 index 00000000000..db6e45fa14b --- /dev/null +++ b/cmd/yurt-manager/app/options/hubleadercontroller.go @@ -0,0 +1,69 @@ +/* +Copyright 2025 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the License); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an AS IS BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package options + +import ( + "github.com/spf13/pflag" + + "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/hubleader/config" +) + +type HubLeaderControllerOptions struct { + *config.HubLeaderControllerConfiguration +} + +func NewHubLeaderControllerOptions() *HubLeaderControllerOptions { + return &HubLeaderControllerOptions{ + &config.HubLeaderControllerConfiguration{ + ConcurrentHubLeaderWorkers: 3, + }, + } +} + +// AddFlags adds flags related to hubleader for yurt-manager to the specified FlagSet. +func (h *HubLeaderControllerOptions) AddFlags(fs *pflag.FlagSet) { + if h == nil { + return + } + + fs.Int32Var( + &h.ConcurrentHubLeaderWorkers, + "concurrent-hubleader-workers", + h.ConcurrentHubLeaderWorkers, + "The number of nodepool objects that are allowed to reconcile concurrently.", + ) +} + +// ApplyTo fills up hubleader config with options. +func (h *HubLeaderControllerOptions) ApplyTo(cfg *config.HubLeaderControllerConfiguration) error { + if h == nil { + return nil + } + + cfg.ConcurrentHubLeaderWorkers = h.ConcurrentHubLeaderWorkers + + return nil +} + +// Validate checks validation of HubLeaderControllerOptions. 
+func (h *HubLeaderControllerOptions) Validate() []error { + if h == nil { + return nil + } + errs := []error{} + return errs +} diff --git a/cmd/yurt-manager/app/options/options.go b/cmd/yurt-manager/app/options/options.go index b7306677536..6a6058f5898 100644 --- a/cmd/yurt-manager/app/options/options.go +++ b/cmd/yurt-manager/app/options/options.go @@ -46,6 +46,7 @@ type YurtManagerOptions struct { GatewayDNSController *GatewayDNSControllerOptions GatewayInternalSvcController *GatewayInternalSvcControllerOptions GatewayPublicSvcController *GatewayPublicSvcControllerOptions + HubLeaderController *HubLeaderControllerOptions } // NewYurtManagerOptions creates a new YurtManagerOptions with a default config. @@ -73,6 +74,7 @@ func NewYurtManagerOptions() (*YurtManagerOptions, error) { GatewayDNSController: NewGatewayDNSControllerOptions(), GatewayInternalSvcController: NewGatewayInternalSvcControllerOptions(), GatewayPublicSvcController: NewGatewayPublicSvcControllerOptions(), + HubLeaderController: NewHubLeaderControllerOptions(), } return &s, nil @@ -101,6 +103,7 @@ func (y *YurtManagerOptions) Flags(allControllers, disabledByDefaultControllers y.GatewayDNSController.AddFlags(fss.FlagSet("gatewaydns controller")) y.GatewayInternalSvcController.AddFlags(fss.FlagSet("gatewayinternalsvc controller")) y.GatewayPublicSvcController.AddFlags(fss.FlagSet("gatewaypublicsvc controller")) + y.HubLeaderController.AddFlags(fss.FlagSet("hubleader controller")) return fss } @@ -128,6 +131,7 @@ func (y *YurtManagerOptions) Validate(allControllers []string, controllerAliases errs = append(errs, y.GatewayDNSController.Validate()...) errs = append(errs, y.GatewayInternalSvcController.Validate()...) errs = append(errs, y.GatewayPublicSvcController.Validate()...) + errs = append(errs, y.HubLeaderController.Validate()...) 
return utilerrors.NewAggregate(errs) } @@ -196,11 +200,17 @@ func (y *YurtManagerOptions) ApplyTo(c *config.Config, controllerAliases map[str if err := y.GatewayPublicSvcController.ApplyTo(&c.ComponentConfig.GatewayPublicSvcController); err != nil { return err } + if err := y.HubLeaderController.ApplyTo(&c.ComponentConfig.HubLeaderController); err != nil { + return err + } return nil } // Config return a yurt-manager config objective -func (y *YurtManagerOptions) Config(allControllers []string, controllerAliases map[string]string) (*config.Config, error) { +func (y *YurtManagerOptions) Config( + allControllers []string, + controllerAliases map[string]string, +) (*config.Config, error) { if err := y.Validate(allControllers, controllerAliases); err != nil { return nil, err } diff --git a/cmd/yurt-manager/names/controller_names.go b/cmd/yurt-manager/names/controller_names.go index 4001aa0cee1..be8b5281113 100644 --- a/cmd/yurt-manager/names/controller_names.go +++ b/cmd/yurt-manager/names/controller_names.go @@ -37,6 +37,7 @@ const ( NodeLifeCycleController = "node-life-cycle-controller" NodeBucketController = "node-bucket-controller" LoadBalancerSetController = "load-balancer-set-controller" + HubLeaderController = "hubleader-controller" ) func YurtManagerControllerAliases() map[string]string { diff --git a/pkg/apis/apps/v1alpha1/nodepool_conversion.go b/pkg/apis/apps/v1alpha1/nodepool_conversion.go index 72cc66b3e8d..4e6342d4cf7 100644 --- a/pkg/apis/apps/v1alpha1/nodepool_conversion.go +++ b/pkg/apis/apps/v1alpha1/nodepool_conversion.go @@ -46,6 +46,7 @@ func (src *NodePool) ConvertTo(dstRaw conversion.Hub) error { // Set interconnectivity to false which will not use leader election strategy or reuse list/watch events dst.Spec.InterConnectivity = false dst.Spec.LeaderElectionStrategy = string(v1beta2.ElectionStrategyRandom) + dst.Spec.LeaderReplicas = 1 klog.V(4).Infof("convert from v1alpha1 to v1beta1 for nodepool %s", dst.Name) diff --git a/pkg/apis/apps/v1beta1/nodepool_conversion.go b/pkg/apis/apps/v1beta1/nodepool_conversion.go index 97c6d3fb42b..e7201a973b3 100644 --- a/pkg/apis/apps/v1beta1/nodepool_conversion.go +++ b/pkg/apis/apps/v1beta1/nodepool_conversion.go @@ -46,6 +46,7 @@ func (src *NodePool) ConvertTo(dstRaw conversion.Hub) error { // Set interconnectivity to false which will not use leader election strategy or reuse list/watch events dst.Spec.InterConnectivity = false dst.Spec.LeaderElectionStrategy = string(v1beta2.ElectionStrategyRandom) + dst.Spec.LeaderReplicas = 1 klog.V(4).Infof("convert from v1beta to v1beta2 for nodepool %s", dst.Name) diff --git a/pkg/apis/apps/v1beta2/default.go b/pkg/apis/apps/v1beta2/default.go index 7d9b75cdc7b..977b5de82f7 100644 --- a/pkg/apis/apps/v1beta2/default.go +++ b/pkg/apis/apps/v1beta2/default.go @@ -23,4 +23,7 @@ func SetDefaultsNodePool(obj *NodePool) { obj.Annotations = make(map[string]string) } + if obj.Spec.LeaderReplicas <= 0 { + obj.Spec.LeaderReplicas = 1 + } } diff --git a/pkg/apis/apps/v1beta2/nodepool_types.go b/pkg/apis/apps/v1beta2/nodepool_types.go index b722104d2fe..5a69aaaccc5 100644 --- a/pkg/apis/apps/v1beta2/nodepool_types.go +++ b/pkg/apis/apps/v1beta2/nodepool_types.go @@ -84,6 +84,11 @@ type NodePoolSpec struct { // PoolScopeMetadata is used for specifying resources which will be shared in the nodepool. // And it is supported to modify dynamically. and the default value is v1.Service and discovery.Endpointslice. 
PoolScopeMetadata []metav1.GroupVersionKind `json:"poolScopeMetadata,omitempty"` + + // LeaderReplicas is used for specifying the number of leader replicas in the nodepool. + // If the field is not specified, the default value is 1. + // + optional + LeaderReplicas int32 `json:"leaderReplicas,omitempty"` } // NodePoolStatus defines the observed state of NodePool diff --git a/pkg/yurtmanager/controller/apis/config/types.go b/pkg/yurtmanager/controller/apis/config/types.go index 86aabcb8442..ad8667917b1 100644 --- a/pkg/yurtmanager/controller/apis/config/types.go +++ b/pkg/yurtmanager/controller/apis/config/types.go @@ -23,6 +23,7 @@ import ( csrapproverconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/csrapprover/config" daemonpodupdaterconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/daemonpodupdater/config" + hubleaderconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/hubleader/config" loadbalancersetconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/config" nodebucketconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/nodebucket/config" nodepoolconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/nodepool/config" @@ -105,6 +106,9 @@ type YurtManagerConfiguration struct { // GatewayPublicSvcController holds configuration for GatewayPublicSvcController related features. GatewayPublicSvcController gatewaypublicsvcconfig.GatewayPublicSvcControllerConfiguration + + // HubLeaderController holds configuration for HubLeaderController related features. + HubLeaderController hubleaderconfig.HubLeaderControllerConfiguration } type GenericConfiguration struct { diff --git a/pkg/yurtmanager/controller/hubleader/config/types.go b/pkg/yurtmanager/controller/hubleader/config/types.go new file mode 100644 index 00000000000..24eaca898ab --- /dev/null +++ b/pkg/yurtmanager/controller/hubleader/config/types.go @@ -0,0 +1,22 @@ +/* +Copyright 2023 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the License); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an AS IS BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +// HubLeaderControllerConfiguration contains elements describing HubLeaderController. +type HubLeaderControllerConfiguration struct { + ConcurrentHubLeaderWorkers int32 +} diff --git a/pkg/yurtmanager/controller/hubleader/hubleader_controller.go b/pkg/yurtmanager/controller/hubleader/hubleader_controller.go new file mode 100644 index 00000000000..96b06fe451d --- /dev/null +++ b/pkg/yurtmanager/controller/hubleader/hubleader_controller.go @@ -0,0 +1,297 @@ +/* +Copyright 2025 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the License); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an AS IS BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hubleader + +import ( + "context" + "fmt" + "maps" + "slices" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + yurtClient "github.com/openyurtio/openyurt/cmd/yurt-manager/app/client" + appconfig "github.com/openyurtio/openyurt/cmd/yurt-manager/app/config" + "github.com/openyurtio/openyurt/cmd/yurt-manager/names" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" + "github.com/openyurtio/openyurt/pkg/projectinfo" + "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/hubleader/config" + nodeutil "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/util/node" +) + +var ( + controllerKind = appsv1beta2.SchemeGroupVersion.WithKind("Nodepool") +) + +// Add creates a new HubLeader Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller +// and Start it when the Manager is Started. +func Add(ctx context.Context, cfg *appconfig.CompletedConfig, mgr manager.Manager) error { + klog.Infof("hubleader-controller add controller %s", controllerKind.String()) + + reconciler := &ReconcileHubLeader{ + Client: yurtClient.GetClientByControllerNameOrDie(mgr, names.HubLeaderController), + recorder: mgr.GetEventRecorderFor(names.HubLeaderController), + Configuration: cfg.ComponentConfig.HubLeaderController, + } + + // Create a new controller + c, err := controller.New( + names.HubLeaderController, + mgr, + controller.Options{ + Reconciler: reconciler, + MaxConcurrentReconciles: int(cfg.ComponentConfig.HubLeaderController.ConcurrentHubLeaderWorkers), + }, + ) + if err != nil { + return err + } + + poolPredicate := predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return true + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldPool, ok := e.ObjectOld.(*appsv1beta2.NodePool) + if !ok { + return false + } + newNode, ok := e.ObjectNew.(*appsv1beta2.NodePool) + if !ok { + return false + } + + // Only update if: + // 1. Leader election strategy has changed + // 2. Leader replicas has changed + // 3. Node readiness count has changed + // 4. 
Leader node label selector has changed (if mark strategy) + if oldPool.Spec.LeaderElectionStrategy != newNode.Spec.LeaderElectionStrategy || + oldPool.Spec.LeaderReplicas != newNode.Spec.LeaderReplicas || + oldPool.Status.ReadyNodeNum != newNode.Status.ReadyNodeNum || + oldPool.Status.UnreadyNodeNum != newNode.Status.UnreadyNodeNum || + (oldPool.Spec.LeaderElectionStrategy == string(appsv1beta2.ElectionStrategyMark) && + !maps.Equal(oldPool.Spec.LeaderNodeLabelSelector, newNode.Spec.LeaderNodeLabelSelector)) { + return true + + } + return false + }, + } + + // Watch for changes to NodePool + err = c.Watch( + source.Kind[client.Object]( + mgr.GetCache(), + &appsv1beta2.NodePool{}, + &handler.EnqueueRequestForObject{}, + poolPredicate, + ), + ) + if err != nil { + return err + } + + return nil +} + +var _ reconcile.Reconciler = &ReconcileHubLeader{} + +// ReconcileHubLeader reconciles a HubLeader object +type ReconcileHubLeader struct { + client.Client + recorder record.EventRecorder + Configuration config.HubLeaderControllerConfiguration +} + +// +kubebuilder:rbac:groups=apps.openyurt.io,resources=nodepool,verbs=get;update;patch +// +kubebuilder:rbac:groups=apps.openyurt.io,resources=nodepool/status,verbs=get;update;patch + +// Reconcile reads that state of the cluster for a HubLeader object and makes changes based on the state read +// and what is in the HubLeader.Spec +func (r *ReconcileHubLeader) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + klog.Infof("Reconcile NodePool leader %s/%s", request.Namespace, request.Name) + + // Fetch the NodePool instance + nodepool := &appsv1beta2.NodePool{} + if err := r.Get(ctx, request.NamespacedName, nodepool); err != nil { + return reconcile.Result{}, client.IgnoreNotFound(err) + } + + if !nodepool.Spec.InterConnectivity { + // If the NodePool is not interconnectivity, it should not reconcile + return reconcile.Result{}, nil + } + + // Reconcile the NodePool + if err := r.reconcileHubLeader(ctx, nodepool); err != nil { + r.recorder.Eventf(nodepool, corev1.EventTypeWarning, "ReconcileError", "Failed to reconcile NodePool: %v", err) + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil +} + +func (r *ReconcileHubLeader) reconcileHubLeader(ctx context.Context, nodepool *appsv1beta2.NodePool) error { + // Get all nodes that belong to the nodepool + var currentNodeList corev1.NodeList + + // Set match labels + matchLabels := make(map[string]string) + if nodepool.Spec.LeaderElectionStrategy == string(appsv1beta2.ElectionStrategyMark) { + // Add mark strategy match labels + matchLabels = nodepool.Spec.LeaderNodeLabelSelector + } + matchLabels[projectinfo.GetNodePoolLabel()] = nodepool.GetName() + + err := r.List(ctx, ¤tNodeList, client.MatchingLabels(matchLabels)) + if err != nil { + return client.IgnoreNotFound(err) + } + + // Copy the nodepool to update + updatedNodePool := nodepool.DeepCopy() + + // Cache nodes in the list by internalIP -> Node + // if they are ready and have internal IP + endpointsMap := make(map[string]*corev1.Node) + for _, n := range currentNodeList.Items { + internalIP, ok := nodeutil.GetInternalIP(&n) + if !ok { + // Can't be leader + klog.V(5).InfoS("Node is missing Internal IP, skip consideration for hub leader", "node", n.Name) + continue + } + + if !nodeutil.IsNodeReady(n) { + klog.V(5).InfoS("Node is not ready, skip consideration for hub leader", "node", n.Name) + // Can't be leader if not ready + continue + } + + endpointsMap[internalIP] = &n + } + + // Delete leader 
endpoints that are not in endpoints map + // They are either not ready or not longer the node list and need to be removed + leaderDeleteFn := func(endpoint string) bool { + _, ok := endpointsMap[endpoint] + return !ok + } + updatedLeaders := slices.DeleteFunc(updatedNodePool.Status.LeaderEndpoints, leaderDeleteFn) + + // If the number of leaders is not equal to the desired number of leaders + if len(updatedLeaders) < int(nodepool.Spec.LeaderReplicas) { + // Remove current leaders from candidates + for _, leader := range updatedLeaders { + delete(endpointsMap, leader) + } + + leaders, ok := electNLeaders( + nodepool.Spec.LeaderElectionStrategy, + int(nodepool.Spec.LeaderReplicas)-len(updatedLeaders), + endpointsMap, + ) + if !ok { + klog.Errorf("Failed to elect a leader for NodePool %s", nodepool.Name) + return fmt.Errorf("failed to elect a leader for NodePool %s", nodepool.Name) + } + + updatedLeaders = append(updatedLeaders, leaders...) + } else if len(updatedLeaders) > int(nodepool.Spec.LeaderReplicas) { + // Remove extra leaders + updatedLeaders = updatedLeaders[:nodepool.Spec.LeaderReplicas] + } + + updatedNodePool.Status.LeaderEndpoints = updatedLeaders + + if !hasLeadersChanged(nodepool.Status.LeaderEndpoints, updatedNodePool.Status.LeaderEndpoints) { + return nil + } + + // Update Status since changed + if err = r.Status().Update(ctx, updatedNodePool); err != nil { + klog.ErrorS(err, "Update NodePool status error", "nodepool", updatedNodePool.Name) + return err + } + + return nil +} + +// hasLeadersChanged checks if the leader endpoints have changed +func hasLeadersChanged(old, new []string) bool { + if len(old) != len(new) { + return true + } + + oldSet := make(map[string]struct{}, len(old)) + + for i := range old { + oldSet[old[i]] = struct{}{} + } + + for i := range new { + if _, ok := oldSet[new[i]]; !ok { + return true + } + } + + return false +} + +// electNLeaders elects N leaders from the candidates based on the strategy +func electNLeaders( + strategy string, + numLeaders int, + candidates map[string]*corev1.Node, +) ([]string, bool) { + leaderEndpoints := make([]string, 0, len(candidates)) + + switch strategy { + case string(appsv1beta2.ElectionStrategyMark), string(appsv1beta2.ElectionStrategyRandom): + // Iterate candidates and append endpoints until + // desired number of leaders is reached + // Note: Iterating a map in Go is non-deterministic enough to be considered random + // for this purpose + for k := range candidates { + leaderEndpoints = append(leaderEndpoints, k) + numLeaders-- + + if numLeaders == 0 { + break + } + } + default: + klog.Errorf("Unknown leader election strategy %s", strategy) + return nil, false + } + + return leaderEndpoints, true +} diff --git a/pkg/yurtmanager/controller/hubleader/hubleader_controller_test.go b/pkg/yurtmanager/controller/hubleader/hubleader_controller_test.go new file mode 100644 index 00000000000..95d12b6c694 --- /dev/null +++ b/pkg/yurtmanager/controller/hubleader/hubleader_controller_test.go @@ -0,0 +1,690 @@ +/* +Copyright 2025 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the License); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an AS IS BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hubleader + +import ( + "context" + "slices" + "testing" + + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/openyurtio/openyurt/pkg/apis" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" + "github.com/openyurtio/openyurt/pkg/projectinfo" + "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/hubleader/config" +) + +// prepareNodes returns a list of nodes for testing +// 5 nodes are in hangzhou are in various readiness statuses. Only 1 will be a valid leader for random election strategy. +// 4 nodes are in shanghai. 2 are valid candidates for leader election. 1 isn't marked and the other isn't ready. +// For deterministic test results, hangzhou is used for random election strategy +// and shanghai is used for mark election strategy. +func prepareNodes() []client.Object { + return []client.Object{ + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ready no internal IP", + Labels: map[string]string{ + projectinfo.GetNodePoolLabel(): "hangzhou", + }, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "not ready no internal IP", + Labels: map[string]string{ + projectinfo.GetNodePoolLabel(): "hangzhou", + }, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeNetworkUnavailable, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "not ready internal IP", + Labels: map[string]string{ + projectinfo.GetNodePoolLabel(): "hangzhou", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "10.0.0.1", + }, + }, + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeNetworkUnavailable, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "no condition", + Labels: map[string]string{ + projectinfo.GetNodePoolLabel(): "hangzhou", + }, + }, + }, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ready with internal IP", + Labels: map[string]string{ + projectinfo.GetNodePoolLabel(): "hangzhou", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "10.0.0.1", + }, + }, + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ready with internal IP and marked as leader", + Labels: map[string]string{ + projectinfo.GetNodePoolLabel(): "shanghai", + "apps.openyurt.io/leader": "true", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "10.0.0.2", + }, + }, + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ready 
with internal IP and not marked as leader", + Labels: map[string]string{ + projectinfo.GetNodePoolLabel(): "shanghai", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "10.0.0.3", + }, + }, + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "not ready with internal IP marked as leader", + Labels: map[string]string{ + projectinfo.GetNodePoolLabel(): "shanghai", + "apps.openyurt.io/leader": "true", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "10.0.0.4", + }, + }, + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeNetworkUnavailable, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ready with internal IP and marked as 2nd leader", + Labels: map[string]string{ + projectinfo.GetNodePoolLabel(): "shanghai", + "apps.openyurt.io/leader": "true", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "10.0.0.5", + }, + }, + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + } +} + +func TestReconcile(t *testing.T) { + nodes := prepareNodes() + scheme := runtime.NewScheme() + + err := clientgoscheme.AddToScheme(scheme) + require.NoError(t, err) + err = apis.AddToScheme(scheme) + require.NoError(t, err) + + testCases := map[string]struct { + pool *appsv1beta2.NodePool + expectedNodePool *appsv1beta2.NodePool + expectErr bool + }{ + "random election strategy": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hangzhou", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "hangzhou", + }, + LeaderReplicas: 1, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), + InterConnectivity: true, + }, + }, + expectedNodePool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hangzhou", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "hangzhou", + }, + LeaderReplicas: 1, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), + InterConnectivity: true, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []string{"10.0.0.1"}, + }, + }, + expectErr: false, + }, + "mark election strategy": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + LeaderReplicas: 2, + InterConnectivity: true, + }, + }, + expectedNodePool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: true, + LeaderReplicas: 2, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []string{"10.0.0.2", "10.0.0.5"}, + }, + }, + expectErr: false, + }, + "no 
potential leaders in hangzhou with mark strategy": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hangzhou", // there are no marked leaders in hangzhou + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "hangzhou", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: true, + }, + }, + expectedNodePool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hangzhou", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "hangzhou", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: true, + }, + }, + expectErr: false, + }, + "interconnectivity false with mark strategy": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hangzhou", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "hangzhou", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: false, // should not change nodepool + }, + }, + expectedNodePool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hangzhou", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "hangzhou", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: false, + }, + }, + expectErr: false, + }, + "interconnectivity false with random strategy": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hangzhou", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "hangzhou", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: false, // should not change nodepool + }, + }, + expectedNodePool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hangzhou", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "hangzhou", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: false, + }, + }, + expectErr: false, + }, + "invalid election strategy": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hangzhou", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "hangzhou", + }, + LeaderReplicas: 1, + LeaderElectionStrategy: "", // invalid strategy + InterConnectivity: true, + }, + }, + expectedNodePool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hangzhou", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "hangzhou", + }, + LeaderReplicas: 1, + LeaderElectionStrategy: "", + InterConnectivity: true, + }, + }, + expectErr: true, + }, + "no election required with mark strategy": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + LeaderReplicas: 1, // set to 1 as there's 2 possible leaders in pool + InterConnectivity: true, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []string{"10.0.0.2"}, // leader already set + }, + }, + expectedNodePool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + LeaderReplicas: 1, + InterConnectivity: true, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []string{"10.0.0.2"}, // should not change leader as replicas met + }, + }, + expectErr: false, + }, + "re election required": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderReplicas: 3, + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: true, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []string{"10.0.0.2", "10.0.0.4"}, // .4 was leader (node not ready) + }, + }, + expectedNodePool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderReplicas: 3, + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: true, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []string{"10.0.0.2", "10.0.0.5"}, // new leader is .5 + }, + }, + expectErr: false, + }, + "mark strategy multiple leaders": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderReplicas: 3, // higher than number of available leaders + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: true, + }, + }, + expectedNodePool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderReplicas: 3, + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: true, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []string{"10.0.0.2", "10.0.0.5"}, // multiple marked leaders + }, + }, + expectErr: false, + }, + "random strategy multiple leaders": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: 
map[string]string{ + "region": "shanghai", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), + LeaderReplicas: 3, // higher than number of available leaders + InterConnectivity: true, + }, + }, + expectedNodePool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), + LeaderReplicas: 3, + InterConnectivity: true, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []string{"10.0.0.2", "10.0.0.3", "10.0.0.5"}, // multiple marked leaders + }, + }, + expectErr: false, + }, + "leader replicas reduced": { + pool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderReplicas: 1, // Nodepool leader replicas reduced + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: true, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []string{"10.0.0.2", "10.0.0.5"}, // 2 leaders set, last should be dropped + }, + }, + expectedNodePool: &appsv1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "shanghai", + }, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + Labels: map[string]string{ + "region": "shanghai", + }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), + LeaderReplicas: 1, + LeaderNodeLabelSelector: map[string]string{ + "apps.openyurt.io/leader": "true", + }, + InterConnectivity: true, + }, + Status: appsv1beta2.NodePoolStatus{ + LeaderEndpoints: []string{"10.0.0.2"}, + }, + }, + expectErr: false, + }, + } + + ctx := context.TODO() + for k, tc := range testCases { + t.Run(k, func(t *testing.T) { + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithObjects(tc.pool). + WithStatusSubresource(tc.pool). + WithObjects(nodes...). 
+ Build() + + r := &ReconcileHubLeader{ + Client: c, + Configuration: config.HubLeaderControllerConfiguration{}, + recorder: record.NewFakeRecorder(1000), + } + req := reconcile.Request{NamespacedName: types.NamespacedName{Name: tc.pool.Name}} + _, err := r.Reconcile(ctx, req) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + + var actualPool appsv1beta2.NodePool + err = r.Get(ctx, req.NamespacedName, &actualPool) + require.NoError(t, err) + + // Reset resource version - it's not important for the test + actualPool.ResourceVersion = "" + // Sort leader endpoints for comparison - it is not important for the order + slices.Sort(actualPool.Status.LeaderEndpoints) + + require.Equal(t, *tc.expectedNodePool, actualPool) + }) + } +} diff --git a/pkg/yurtmanager/controller/nodepool/nodepool_controller.go b/pkg/yurtmanager/controller/nodepool/nodepool_controller.go index bbe1835c3a0..3037139cea3 100644 --- a/pkg/yurtmanager/controller/nodepool/nodepool_controller.go +++ b/pkg/yurtmanager/controller/nodepool/nodepool_controller.go @@ -37,6 +37,7 @@ import ( appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" poolconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/nodepool/config" + nodeutil "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/util/node" ) var ( @@ -144,7 +145,7 @@ func (r *ReconcileNodePool) Reconcile(ctx context.Context, req reconcile.Request for _, node := range currentNodeList.Items { // prepare nodepool status nodes = append(nodes, node.GetName()) - if isNodeReady(node) { + if nodeutil.IsNodeReady(node) { readyNode += 1 } else { notReadyNode += 1 diff --git a/pkg/yurtmanager/controller/nodepool/nodepool_enqueue_handlers.go b/pkg/yurtmanager/controller/nodepool/nodepool_enqueue_handlers.go index 41c9b3426b0..3efe14efc33 100644 --- a/pkg/yurtmanager/controller/nodepool/nodepool_enqueue_handlers.go +++ b/pkg/yurtmanager/controller/nodepool/nodepool_enqueue_handlers.go @@ -31,6 +31,7 @@ import ( "github.com/openyurtio/openyurt/pkg/apis/apps" "github.com/openyurtio/openyurt/pkg/projectinfo" + nodeutil "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/util/node" ) type EnqueueNodePoolForNode struct { @@ -91,7 +92,7 @@ func (e *EnqueueNodePoolForNode) Update(ctx context.Context, evt event.UpdateEve } // check node ready status - if isNodeReady(*newNode) != isNodeReady(*oldNode) { + if nodeutil.IsNodeReady(*newNode) != nodeutil.IsNodeReady(*oldNode) { klog.V(4).Info(Format("Node ready status has been changed,"+ " will enqueue pool(%s) for node(%s)", newNp, newNode.GetName())) addNodePoolToWorkQueue(newNp, q) @@ -104,7 +105,8 @@ func (e *EnqueueNodePoolForNode) Update(ctx context.Context, evt event.UpdateEve !reflect.DeepEqual(newNode.Annotations, oldNode.Annotations) || !reflect.DeepEqual(newNode.Spec.Taints, oldNode.Spec.Taints) { // TODO only consider the pool related attributes - klog.V(5).Info(Format("NodePool related attributes has been changed,will enqueue pool(%s) for node(%s)", newNp, newNode.Name)) + klog.V(5). 
+ Info(Format("NodePool related attributes has been changed,will enqueue pool(%s) for node(%s)", newNp, newNode.Name)) addNodePoolToWorkQueue(newNp, q) } } diff --git a/pkg/yurtmanager/controller/nodepool/util.go b/pkg/yurtmanager/controller/nodepool/util.go index b57c467734e..0d46b24c1b8 100644 --- a/pkg/yurtmanager/controller/nodepool/util.go +++ b/pkg/yurtmanager/controller/nodepool/util.go @@ -25,7 +25,6 @@ import ( "github.com/openyurtio/openyurt/pkg/apis/apps" appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" - nodeutil "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/util/node" ) // conciliatePoolRelatedAttrs will update the node's attributes that related to @@ -144,13 +143,6 @@ func containTaint(taint corev1.Taint, taints []corev1.Taint) (int, bool) { return 0, false } -// isNodeReady checks if the `node` is `corev1.NodeReady` -func isNodeReady(node corev1.Node) bool { - _, nc := nodeutil.GetNodeCondition(&node.Status, corev1.NodeReady) - // GetNodeCondition will return nil and -1 if the condition is not present - return nc != nil && nc.Status == corev1.ConditionTrue -} - func mergeMap(m1, m2 map[string]string) map[string]string { if m1 == nil { m1 = make(map[string]string) diff --git a/pkg/yurtmanager/controller/nodepool/util_test.go b/pkg/yurtmanager/controller/nodepool/util_test.go index 69ab96db27a..6e1fa663d43 100644 --- a/pkg/yurtmanager/controller/nodepool/util_test.go +++ b/pkg/yurtmanager/controller/nodepool/util_test.go @@ -804,62 +804,3 @@ func TestDecodePoolAttrs(t *testing.T) { t.Errorf("Expected %v, got %v", wantNpra, gotNpra) } } - -func TestIsNodeReady(t *testing.T) { - tests := []struct { - name string - node corev1.Node - want bool - }{ - { - name: "NodeReady and ConditionTrue", - node: corev1.Node{ - Status: corev1.NodeStatus{ - Conditions: []corev1.NodeCondition{ - { - Type: corev1.NodeReady, - Status: corev1.ConditionTrue, - }, - }, - }, - }, - want: true, - }, - { - name: "NodeReady but ConditionFalse", - node: corev1.Node{ - Status: corev1.NodeStatus{ - Conditions: []corev1.NodeCondition{ - { - Type: corev1.NodeReady, - Status: corev1.ConditionFalse, - }, - }, - }, - }, - want: false, - }, - { - name: "Node status not NodeReady", - node: corev1.Node{ - Status: corev1.NodeStatus{ - Conditions: []corev1.NodeCondition{ - { - Type: corev1.NodeMemoryPressure, - Status: corev1.ConditionTrue, - }, - }, - }, - }, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := isNodeReady(tt.node); got != tt.want { - t.Errorf("isNodeReady() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/yurtmanager/controller/util/node/controller_utils.go b/pkg/yurtmanager/controller/util/node/controller_utils.go index 7ecedf3cbeb..f0487e3f077 100644 --- a/pkg/yurtmanager/controller/util/node/controller_utils.go +++ b/pkg/yurtmanager/controller/util/node/controller_utils.go @@ -69,12 +69,26 @@ var UpdateLabelBackoff = wait.Backoff{ // DeletePods will delete all pods from master running on given node, // and return true if any pods were deleted, or were found pending // deletion. 
-func DeletePods(ctx context.Context, c client.Client, pods []*corev1.Pod, recorder record.EventRecorder, nodeName, nodeUID string) (bool, error) { +func DeletePods( + ctx context.Context, + c client.Client, + pods []*corev1.Pod, + recorder record.EventRecorder, + nodeName, nodeUID string, +) (bool, error) { remaining := false var updateErrList []error if len(pods) > 0 { - RecordNodeEvent(ctx, recorder, nodeName, nodeUID, corev1.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName)) + RecordNodeEvent( + ctx, + recorder, + nodeName, + nodeUID, + corev1.EventTypeNormal, + "DeletingAllPods", + fmt.Sprintf("Deleting all Pods from Node %v.", nodeName), + ) } for i := range pods { @@ -100,7 +114,14 @@ func DeletePods(ctx context.Context, c client.Client, pods []*corev1.Pod, record } klog.InfoS("Starting deletion of pod", "pod", klog.KObj(pod)) - recorder.Eventf(pod, corev1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName) + recorder.Eventf( + pod, + corev1.EventTypeNormal, + "NodeControllerEviction", + "Marking for deletion Pod %s from Node %s", + pod.Name, + nodeName, + ) //if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}); err != nil { if err := c.Delete(ctx, pod); err != nil { if apierrors.IsNotFound(err) { @@ -122,7 +143,12 @@ func DeletePods(ctx context.Context, c client.Client, pods []*corev1.Pod, record // SetPodTerminationReason attempts to set a reason and message in the // pod status, updates it in the apiserver, and returns an error if it // encounters one. -func SetPodTerminationReason(ctx context.Context, c client.Client, pod *corev1.Pod, nodeName string) (*corev1.Pod, error) { +func SetPodTerminationReason( + ctx context.Context, + c client.Client, + pod *corev1.Pod, + nodeName string, +) (*corev1.Pod, error) { if pod.Status.Reason == NodeUnreachablePodReason { return pod, nil } @@ -140,7 +166,13 @@ func SetPodTerminationReason(ctx context.Context, c client.Client, pod *corev1.P // MarkPodsNotReady updates ready status of given pods running on // given node from master return true if success -func MarkPodsNotReady(ctx context.Context, c client.Client, recorder record.EventRecorder, pods []*corev1.Pod, nodeName string) error { +func MarkPodsNotReady( + ctx context.Context, + c client.Client, + recorder record.EventRecorder, + pods []*corev1.Pod, + nodeName string, +) error { klog.V(2).InfoS("Update ready status of pods on node", "node", klog.KRef("", nodeName)) errs := []error{} @@ -183,7 +215,11 @@ func MarkPodsNotReady(ctx context.Context, c client.Client, recorder record.Even } // RecordNodeEvent records a event related to a node. -func RecordNodeEvent(ctx context.Context, recorder record.EventRecorder, nodeName, nodeUID, eventtype, reason, event string) { +func RecordNodeEvent( + ctx context.Context, + recorder record.EventRecorder, + nodeName, nodeUID, eventtype, reason, event string, +) { ref := &corev1.ObjectReference{ APIVersion: "v1", Kind: "Node", @@ -212,7 +248,12 @@ func RecordNodeStatusChange(recorder record.EventRecorder, node *corev1.Node, ne // SwapNodeControllerTaint returns true in case of success and false // otherwise. 
-func SwapNodeControllerTaint(ctx context.Context, kubeClient clientset.Interface, taintsToAdd, taintsToRemove []*corev1.Taint, node *corev1.Node) bool { +func SwapNodeControllerTaint( + ctx context.Context, + kubeClient clientset.Interface, + taintsToAdd, taintsToRemove []*corev1.Taint, + node *corev1.Node, +) bool { for _, taintToAdd := range taintsToAdd { now := metav1.Now() taintToAdd.TimeAdded = &now @@ -247,7 +288,12 @@ func SwapNodeControllerTaint(ctx context.Context, kubeClient clientset.Interface // AddOrUpdateLabelsOnNode updates the labels on the node and returns true on // success and false on failure. -func AddOrUpdateLabelsOnNode(ctx context.Context, kubeClient clientset.Interface, labelsToUpdate map[string]string, node *corev1.Node) bool { +func AddOrUpdateLabelsOnNode( + ctx context.Context, + kubeClient clientset.Interface, + labelsToUpdate map[string]string, + node *corev1.Node, +) bool { if err := addOrUpdateLabelsOnNode(kubeClient, node.Name, labelsToUpdate); err != nil { utilruntime.HandleError( fmt.Errorf( @@ -277,7 +323,12 @@ func GetNodeCondition(status *corev1.NodeStatus, conditionType corev1.NodeCondit // AddOrUpdateTaintOnNode add taints to the node. If taint was added into node, it'll issue API calls // to update nodes; otherwise, no API calls. Return error if any. -func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName string, taints ...*corev1.Taint) error { +func AddOrUpdateTaintOnNode( + ctx context.Context, + c clientset.Interface, + nodeName string, + taints ...*corev1.Taint, +) error { if len(taints) == 0 { return nil } @@ -320,7 +371,13 @@ func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName // won't fail if target taint doesn't exist or has been removed. // If passed a node it'll check if there's anything to be done, if taint is not present it won't issue // any API calls. -func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName string, node *corev1.Node, taints ...*corev1.Taint) error { +func RemoveTaintOffNode( + ctx context.Context, + c clientset.Interface, + nodeName string, + node *corev1.Node, + taints ...*corev1.Taint, +) error { if len(taints) == 0 { return nil } @@ -374,7 +431,13 @@ func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName str } // PatchNodeTaints patches node's taints. -func PatchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *corev1.Node, newNode *corev1.Node) error { +func PatchNodeTaints( + ctx context.Context, + c clientset.Interface, + nodeName string, + oldNode *corev1.Node, + newNode *corev1.Node, +) error { // Strip base diff node from RV to ensure that our Patch request will set RV to check for conflicts over .spec.taints. // This is needed because .spec.taints does not specify patchMergeKey and patchStrategy and adding them is no longer an option for compatibility reasons. // Using other Patch strategy works for adding new taints, however will not resolve problem with taint removal. @@ -462,3 +525,20 @@ func IsPodBoundenToNode(node *corev1.Node) bool { node.Annotations[projectinfo.GetAutonomyAnnotation()] == "true" || node.Annotations[projectinfo.GetNodeAutonomyDurationAnnotation()] != "" } + +// GetInternalIP returns the internal IP of the node. 
+func GetInternalIP(node *corev1.Node) (string, bool) { + for _, addr := range node.Status.Addresses { + if addr.Type == corev1.NodeInternalIP { + return addr.Address, true + } + } + return "", false +} + +// IsNodeReady checks if the `node` is `corev1.NodeReady` +func IsNodeReady(node corev1.Node) bool { + _, nc := GetNodeCondition(&node.Status, corev1.NodeReady) + // GetNodeCondition will return nil and -1 if the condition is not present + return nc != nil && nc.Status == corev1.ConditionTrue +} diff --git a/pkg/yurtmanager/controller/util/node/controller_utils_test.go b/pkg/yurtmanager/controller/util/node/controller_utils_test.go new file mode 100644 index 00000000000..55cf0bda472 --- /dev/null +++ b/pkg/yurtmanager/controller/util/node/controller_utils_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2023 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package node + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" +) + +func TestIsNodeReady(t *testing.T) { + tests := []struct { + name string + node corev1.Node + want bool + }{ + { + name: "NodeReady and ConditionTrue", + node: corev1.Node{ + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + want: true, + }, + { + name: "NodeReady but ConditionFalse", + node: corev1.Node{ + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: corev1.ConditionFalse, + }, + }, + }, + }, + want: false, + }, + { + name: "Node status not NodeReady", + node: corev1.Node{ + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeMemoryPressure, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IsNodeReady(tt.node); got != tt.want { + t.Errorf("isNodeReady() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default.go b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default.go index 092ac1fcc31..46a5bfc07ae 100644 --- a/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default.go +++ b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default.go @@ -1,5 +1,5 @@ /* -Copyright 2023 The OpenYurt Authors. +Copyright 2025 The OpenYurt Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -61,6 +61,11 @@ func (webhook *NodePoolHandler) Default(ctx context.Context, obj runtime.Object)
 		np.Spec.LeaderElectionStrategy = string(v1beta2.ElectionStrategyRandom)
 	}
 
+	// Set default LeaderReplicas
+	if np.Spec.LeaderReplicas <= 0 {
+		np.Spec.LeaderReplicas = 1
+	}
+
 	// Set default PoolScopeMetadata
 	defaultPoolScopeMetadata := []v1.GroupVersionKind{
 		{
diff --git a/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default_test.go b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default_test.go
index 42499384984..c65676a1c64 100644
--- a/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default_test.go
+++ b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default_test.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2024 The OpenYurt Authors.
+Copyright 2025 The OpenYurt Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -45,7 +45,8 @@ func TestDefault(t *testing.T) {
 					Name: "foo",
 				},
 				Spec: v1beta2.NodePoolSpec{
-					HostNetwork: true,
+					HostNetwork:    true,
+					LeaderReplicas: 3,
 				},
 			},
 			wantedNodePool: &v1beta2.NodePool{
@@ -59,6 +60,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Edge,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyRandom),
+					LeaderReplicas:         3,
 					PoolScopeMetadata: []metav1.GroupVersionKind{
 						{
 							Group:   "core",
@@ -88,8 +90,9 @@ func TestDefault(t *testing.T) {
 					},
 				},
 				Spec: v1beta2.NodePoolSpec{
-					HostNetwork: true,
-					Type:        v1beta2.Cloud,
+					HostNetwork:    true,
+					Type:           v1beta2.Cloud,
+					LeaderReplicas: 3,
 				},
 			},
 			wantedNodePool: &v1beta2.NodePool{
@@ -104,6 +107,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyRandom),
+					LeaderReplicas:         3,
 					PoolScopeMetadata: []metav1.GroupVersionKind{
 						{
 							Group:   "core",
@@ -136,6 +140,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: "",
+					LeaderReplicas:         3,
 				},
 			},
 			wantedNodePool: &v1beta2.NodePool{
@@ -150,6 +155,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyRandom),
+					LeaderReplicas:         3,
 					PoolScopeMetadata: []metav1.GroupVersionKind{
 						{
 							Group:   "core",
@@ -182,6 +188,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         3,
 				},
 			},
 			wantedNodePool: &v1beta2.NodePool{
@@ -196,6 +203,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         3,
 					PoolScopeMetadata: []metav1.GroupVersionKind{
 						{
 							Group:   "core",
@@ -228,6 +236,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         3,
 				},
 			},
 			wantedNodePool: &v1beta2.NodePool{
@@ -242,6 +251,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         3,
 					PoolScopeMetadata: []metav1.GroupVersionKind{
 						{
 							Group:   "core",
@@ -274,6 +284,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         3,
 					PoolScopeMetadata: []metav1.GroupVersionKind{
 						{
 							Group:   "discovery.k8s.io",
@@ -295,6 +306,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         3,
 					PoolScopeMetadata: []metav1.GroupVersionKind{
 						{
 							Group:   "discovery.k8s.io",
@@ -332,6 +344,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         3,
 					PoolScopeMetadata: []metav1.GroupVersionKind{
 						{
 							Group:   "core",
@@ -353,6 +366,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         3,
 					PoolScopeMetadata: []metav1.GroupVersionKind{
 						{
 							Group:   "core",
@@ -385,6 +399,7 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         3,
 					PoolScopeMetadata: []metav1.GroupVersionKind{
 						{
 							Group:   "discovery.k8s.io",
@@ -406,6 +421,115 @@ func TestDefault(t *testing.T) {
 					HostNetwork:            true,
 					Type:                   v1beta2.Cloud,
 					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         3,
+					PoolScopeMetadata: []metav1.GroupVersionKind{
+						{
+							Group:   "discovery.k8s.io",
+							Version: "v1",
+							Kind:    "EndpointSlice",
+						},
+						{
+							Group:   "core",
+							Version: "v1",
+							Kind:    "Service",
+						},
+					},
+				},
+				Status: v1beta2.NodePoolStatus{
+					ReadyNodeNum:   0,
+					UnreadyNodeNum: 0,
+					Nodes:          []string{},
+				},
+			},
+		},
+		"nodepool has leader replicas": {
+			obj: &v1beta2.NodePool{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "foo",
+					Labels: map[string]string{
+						"foo": "bar",
+					},
+				},
+				Spec: v1beta2.NodePoolSpec{
+					HostNetwork:            true,
+					Type:                   v1beta2.Cloud,
+					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         2,
+				},
+			},
+			wantedNodePool: &v1beta2.NodePool{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "foo",
+					Labels: map[string]string{
+						"foo":                       "bar",
+						"nodepool.openyurt.io/type": "cloud",
+					},
+				},
+				Spec: v1beta2.NodePoolSpec{
+					HostNetwork:            true,
+					Type:                   v1beta2.Cloud,
+					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         2,
+					PoolScopeMetadata: []metav1.GroupVersionKind{
+						{
+							Group:   "core",
+							Version: "v1",
+							Kind:    "Service",
+						},
+						{
+							Group:   "discovery.k8s.io",
+							Version: "v1",
+							Kind:    "EndpointSlice",
+						},
+					},
+				},
+				Status: v1beta2.NodePoolStatus{
+					ReadyNodeNum:   0,
+					UnreadyNodeNum: 0,
+					Nodes:          []string{},
+				},
+			},
+		},
+		"nodepool has no leader replicas": {
+			obj: &v1beta2.NodePool{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "foo",
+					Labels: map[string]string{
+						"foo": "bar",
+					},
+				},
+				Spec: v1beta2.NodePoolSpec{
+					HostNetwork:            true,
+					Type:                   v1beta2.Cloud,
+					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         0,
+					PoolScopeMetadata: []metav1.GroupVersionKind{
+						{
+							Group:   "discovery.k8s.io",
+							Version: "v1",
+							Kind:    "EndpointSlice",
+						},
+						{
+							Group:   "core",
+							Version: "v1",
+							Kind:    "Service",
+						},
+					},
+				},
+			},
+			wantedNodePool: &v1beta2.NodePool{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "foo",
+					Labels: map[string]string{
+						"foo":                       "bar",
+						"nodepool.openyurt.io/type": "cloud",
+					},
+				},
+				Spec: v1beta2.NodePoolSpec{
+					HostNetwork:            true,
+					Type:                   v1beta2.Cloud,
+					LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark),
+					LeaderReplicas:         1,
 					PoolScopeMetadata: []metav1.GroupVersionKind{
 						{
 							Group:   "discovery.k8s.io",
diff --git a/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_handler.go b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_handler.go
index f05eb6de22b..d8eb9f8ce85 100644
--- a/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_handler.go
+++ b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_handler.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2023 The OpenYurt Authors.
+Copyright 2025 The OpenYurt Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation.go b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation.go
index feeaa4e16cb..294914dcde5 100644
--- a/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation.go
+++ b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2023 The OpenYurt Authors.
+Copyright 2025 The OpenYurt Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
diff --git a/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation_test.go b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation_test.go
index 0be9d6e12b0..0ce2033b770 100644
--- a/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation_test.go
+++ b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation_test.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2023 The OpenYurt Authors.
+Copyright 2025 The OpenYurt Authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
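
Note (not part of the patch): a minimal sketch of how the new node helpers added in controller_utils.go might be consumed by a controller that needs the addresses of ready nodes, for example when picking hub leader candidates. Only IsNodeReady, GetInternalIP and the package path come from this change; the readyInternalIPs helper and the nodeutil alias are illustrative assumptions.

package example

import (
	corev1 "k8s.io/api/core/v1"

	nodeutil "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/util/node"
)

// readyInternalIPs returns the InternalIP of every node whose NodeReady
// condition is True; nodes without an InternalIP address are skipped.
func readyInternalIPs(nodes []corev1.Node) []string {
	ips := make([]string, 0, len(nodes))
	for _, n := range nodes {
		if !nodeutil.IsNodeReady(n) {
			continue
		}
		if ip, ok := nodeutil.GetInternalIP(&n); ok {
			ips = append(ips, ip)
		}
	}
	return ips
}

Filtering on readiness before collecting addresses is the kind of candidate selection a leader-election controller would need, since an unready node should not be handed a leader role.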
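Note (not part of the patch): the defaulting rule added to the v1beta2 webhook can be summarised as "any non-positive spec.leaderReplicas becomes 1, any positive value is preserved". The standalone sketch below mirrors that rule outside the webhook; defaultLeaderReplicas is a hypothetical helper, not a function introduced by this change.

package main

import "fmt"

// defaultLeaderReplicas mirrors the rule added to nodepool_default.go:
// a non-positive value falls back to 1, a positive value is kept as-is.
func defaultLeaderReplicas(replicas int32) int32 {
	if replicas <= 0 {
		return 1
	}
	return replicas
}

func main() {
	fmt.Println(defaultLeaderReplicas(0)) // 1: an unset LeaderReplicas is defaulted
	fmt.Println(defaultLeaderReplicas(3)) // 3: an explicit value is preserved
}

This matches the new webhook test cases above, where LeaderReplicas: 0 is rewritten to 1 while LeaderReplicas: 2 is left untouched.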