From 578a3554835994e6e8a7b242d2ba4fc3d9511e2e Mon Sep 17 00:00:00 2001
From: andrew
Date: Wed, 18 Sep 2024 17:08:22 +0700
Subject: [PATCH] =?UTF-8?q?=F0=9F=94=A8=20fix:=20import=20consistent=20cor?=
 =?UTF-8?q?ev1?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
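Note for reviewers (not part of the commit message): the change is purely
mechanical. Every touched file now imports k8s.io/api/core/v1 under the single
alias corev1 instead of the mixed apiv1 / lCoreV1 aliases used before; no
behavior changes. A minimal sketch of the convention after this patch, using a
hypothetical helper that is not part of the diff:

    package example

    import (
        corev1 "k8s.io/api/core/v1" // one alias everywhere, instead of apiv1 / lCoreV1
    )

    // internalIPs is a hypothetical helper showing the unified alias in use.
    func internalIPs(nodes []*corev1.Node) []string {
        var addrs []string
        for _, node := range nodes {
            for _, addr := range node.Status.Addresses {
                if addr.Type == corev1.NodeInternalIP {
                    addrs = append(addrs, addr.Address)
                }
            }
        }
        return addrs
    }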
 pkg/ingress/controller/controller.go | 35 ++++++++++-----------
 pkg/utils/k8s.go                     | 14 ++++-----
 pkg/utils/utils.go                   | 46 ++++++++++++++--------------
 pkg/vngcloud/annotation.go           | 19 ++++++------
 pkg/vngcloud/vcontainer.go           |  8 ++---
 pkg/vngcloud/vlb.go                  | 42 ++++++++++++-------------
 6 files changed, 82 insertions(+), 82 deletions(-)

diff --git a/pkg/ingress/controller/controller.go b/pkg/ingress/controller/controller.go
index 828b428..8f838a2 100644
--- a/pkg/ingress/controller/controller.go
+++ b/pkg/ingress/controller/controller.go
@@ -27,7 +27,6 @@ import (
 	"github.com/vngcloud/vngcloud-go-sdk/vngcloud/services/loadbalancer/v2/policy"
 	"github.com/vngcloud/vngcloud-go-sdk/vngcloud/services/loadbalancer/v2/pool"
 	"github.com/vngcloud/vngcloud-go-sdk/vngcloud/services/network/v2/extensions/secgroup_rule"
-	apiv1 "k8s.io/api/core/v1"
 	corev1 "k8s.io/api/core/v1"
 	nwv1 "k8s.io/api/networking/v1"
 	apimetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -73,7 +72,7 @@ type Controller struct {
 	kubeClient kubernetes.Interface
 	stopCh     chan struct{}
-	knownNodes []*apiv1.Node
+	knownNodes []*corev1.Node
 	queue      workqueue.RateLimitingInterface
 	informer   informers.SharedInformerFactory
 	recorder   record.EventRecorder
@@ -126,7 +125,7 @@ func NewController(conf config.Config) *Controller {
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
 		Interface: kubeClient.CoreV1().Events(""),
 	})
-	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{Component: "vngcloud-ingress-controller"})
+	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "vngcloud-ingress-controller"})
 	controller := &Controller{
@@ -140,7 +139,7 @@ func NewController(conf config.Config) *Controller {
 		serviceListerSynced: serviceInformer.Informer().HasSynced,
 		nodeLister:          nodeInformer.Lister(),
 		nodeListerSynced:    nodeInformer.Informer().HasSynced,
-		knownNodes:          []*apiv1.Node{},
+		knownNodes:          []*corev1.Node{},
 		trackLBUpdate:       utils.NewUpdateTracker(),
 		numOfUpdatingThread: 0,
 		queues:              make(map[string][]interface{}),
@@ -159,7 +158,7 @@ func NewController(conf config.Config) *Controller {
 				return
 			}
-			recorder.Event(addIng, apiv1.EventTypeNormal, "Creating", fmt.Sprintf("Ingress %s", key))
+			recorder.Event(addIng, corev1.EventTypeNormal, "Creating", fmt.Sprintf("Ingress %s", key))
 			controller.queue.AddRateLimited(Event{Obj: addIng, Type: CreateEvent, oldObj: nil})
 		},
 		UpdateFunc: func(old, new interface{}) {
@@ -179,13 +178,13 @@ func NewController(conf config.Config) *Controller {
 			validOld := IsValid(oldIng)
 			validCur := IsValid(newIng)
 			if !validOld && validCur {
-				recorder.Event(newIng, apiv1.EventTypeNormal, "Creating", fmt.Sprintf("Ingress %s", key))
+				recorder.Event(newIng, corev1.EventTypeNormal, "Creating", fmt.Sprintf("Ingress %s", key))
 				controller.queue.AddRateLimited(Event{Obj: newIng, Type: CreateEvent, oldObj: nil})
 			} else if validOld && !validCur {
-				recorder.Event(newIng, apiv1.EventTypeNormal, "Deleting", fmt.Sprintf("Ingress %s", key))
+				recorder.Event(newIng, corev1.EventTypeNormal, "Deleting", fmt.Sprintf("Ingress %s", key))
 				controller.queue.AddRateLimited(Event{Obj: newIng, Type: DeleteEvent, oldObj: nil})
 			} else if validCur && (!reflect.DeepEqual(newIng.Spec, oldIng.Spec) || !reflect.DeepEqual(newAnnotations, oldAnnotations)) {
-				recorder.Event(newIng, apiv1.EventTypeNormal, "Updating", fmt.Sprintf("Ingress %s", key))
+				recorder.Event(newIng, corev1.EventTypeNormal, "Updating", fmt.Sprintf("Ingress %s", key))
 				controller.queue.AddRateLimited(Event{Obj: newIng, Type: UpdateEvent, oldObj: oldIng})
 			} else {
 				return
@@ -213,7 +212,7 @@ func NewController(conf config.Config) *Controller {
 				return
 			}
-			recorder.Event(delIng, apiv1.EventTypeNormal, "Deleting", fmt.Sprintf("Ingress %s", key))
+			recorder.Event(delIng, corev1.EventTypeNormal, "Deleting", fmt.Sprintf("Ingress %s", key))
 			controller.queue.AddRateLimited(Event{Obj: delIng, Type: DeleteEvent, oldObj: nil})
 		},
 	})
@@ -483,36 +482,36 @@ func (c *Controller) processItem(event Event) {
 		if err := c.ensureIngress(oldIng, ing); err != nil {
 			utilruntime.HandleError(fmt.Errorf("failed to create vngcloud resources for ingress %s: %v", key, err))
-			c.recorder.Event(ing, apiv1.EventTypeWarning, "Failed", fmt.Sprintf("Failed to create vngcloud resources for ingress %s: %v", key, err))
+			c.recorder.Event(ing, corev1.EventTypeWarning, "Failed", fmt.Sprintf("Failed to create vngcloud resources for ingress %s: %v", key, err))
 		} else {
-			c.recorder.Event(ing, apiv1.EventTypeNormal, "Created", fmt.Sprintf("Ingress %s", key))
+			c.recorder.Event(ing, corev1.EventTypeNormal, "Created", fmt.Sprintf("Ingress %s", key))
 		}
 	case UpdateEvent:
 		logger.Info("updating ingress")
 		if err := c.ensureIngress(oldIng, ing); err != nil {
 			utilruntime.HandleError(fmt.Errorf("failed to update vngcloud resources for ingress %s: %v", key, err))
-			c.recorder.Event(ing, apiv1.EventTypeWarning, "Failed", fmt.Sprintf("Failed to update vngcloud resources for ingress %s: %v", key, err))
+			c.recorder.Event(ing, corev1.EventTypeWarning, "Failed", fmt.Sprintf("Failed to update vngcloud resources for ingress %s: %v", key, err))
 		} else {
-			c.recorder.Event(ing, apiv1.EventTypeNormal, "Updated", fmt.Sprintf("Ingress %s", key))
+			c.recorder.Event(ing, corev1.EventTypeNormal, "Updated", fmt.Sprintf("Ingress %s", key))
 		}
 	case DeleteEvent:
 		logger.Info("deleting ingress")
 		if err := c.deleteIngress(ing); err != nil {
 			utilruntime.HandleError(fmt.Errorf("failed to delete vngcloud resources for ingress %s: %v", key, err))
-			c.recorder.Event(ing, apiv1.EventTypeWarning, "Failed", fmt.Sprintf("Failed to delete vngcloud resources for ingress %s: %v", key, err))
+			c.recorder.Event(ing, corev1.EventTypeWarning, "Failed", fmt.Sprintf("Failed to delete vngcloud resources for ingress %s: %v", key, err))
 		} else {
-			c.recorder.Event(ing, apiv1.EventTypeNormal, "Deleted", fmt.Sprintf("Ingress %s", key))
+			c.recorder.Event(ing, corev1.EventTypeNormal, "Deleted", fmt.Sprintf("Ingress %s", key))
 		}
 	case SyncEvent:
 		logger.Info("sync ingress")
 		if err := c.ensureIngress(oldIng, ing); err != nil {
 			utilruntime.HandleError(fmt.Errorf("failed to sync vngcloud resources for ingress %s: %v", key, err))
-			c.recorder.Event(ing, apiv1.EventTypeWarning, "Failed", fmt.Sprintf("Failed to sync vngcloud resources for ingress %s: %v", key, err))
+			c.recorder.Event(ing, corev1.EventTypeWarning, "Failed", fmt.Sprintf("Failed to sync vngcloud resources for ingress %s: %v", key, err))
 		} else {
-			c.recorder.Event(ing, apiv1.EventTypeNormal, "Synced", fmt.Sprintf("Ingress %s", key))
+			c.recorder.Event(ing, corev1.EventTypeNormal, "Synced", fmt.Sprintf("Ingress %s", key))
 		}
 	}
@@ -586,7 +585,7 @@ func (c *Controller) updateIngressStatus(ing *nwv1.Ingress, lb *lObjects.LoadBal
 	if err != nil {
 		return nil, err
 	}
-	c.recorder.Event(ing, apiv1.EventTypeNormal, "Updated", fmt.Sprintf("Successfully associated IP address %s to ingress %s", lb.Address, newIng.Name))
+	c.recorder.Event(ing, corev1.EventTypeNormal, "Updated", fmt.Sprintf("Successfully associated IP address %s to ingress %s", lb.Address, newIng.Name))
 	return newObj, nil
 }
diff --git a/pkg/utils/k8s.go b/pkg/utils/k8s.go
index b2cbdf9..4108153 100644
--- a/pkg/utils/k8s.go
+++ b/pkg/utils/k8s.go
@@ -7,7 +7,7 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/vngcloud/cloud-provider-vngcloud/pkg/consts"
 	"github.com/vngcloud/cloud-provider-vngcloud/pkg/utils/errors"
-	apiv1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	nwv1 "k8s.io/api/networking/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/kubernetes"
@@ -60,7 +60,7 @@ func GetIngress(ingressLister nwlisters.IngressLister, key string) (*nwv1.Ingres
 	return ingress, nil
 }
 
-func GetService(serviceLister corelisters.ServiceLister, key string) (*apiv1.Service, error) {
+func GetService(serviceLister corelisters.ServiceLister, key string) (*corev1.Service, error) {
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
 		return nil, err
@@ -112,7 +112,7 @@ func GetServiceNodePort(serviceLister corelisters.ServiceLister, name string, se
 	return targetPort, nodePort, nil
 }
 
-func GetNodeMembersAddr(nodeObjs []*apiv1.Node) []string {
+func GetNodeMembersAddr(nodeObjs []*corev1.Node) []string {
 	var nodeAddr []string
 	for _, node := range nodeObjs {
 		addr, err := getNodeAddressForLB(node)
@@ -126,14 +126,14 @@ func GetNodeMembersAddr(nodeObjs []*apiv1.Node) []string {
 	return nodeAddr
 }
 
-func getNodeAddressForLB(node *apiv1.Node) (string, error) {
+func getNodeAddressForLB(node *corev1.Node) (string, error) {
 	addrs := node.Status.Addresses
 	if len(addrs) == 0 {
 		return "", errors.NewErrNodeAddressNotFound(node.Name, "")
 	}
 
 	for _, addr := range addrs {
-		if addr.Type == apiv1.NodeInternalIP {
+		if addr.Type == corev1.NodeInternalIP {
 			return addr.Address, nil
 		}
 	}
@@ -142,7 +142,7 @@ func getNodeAddressForLB(node *apiv1.Node) (string, error) {
 }
 
 // NodeNames get all the node names.
-func NodeNames(nodes []*apiv1.Node) []string {
+func NodeNames(nodes []*corev1.Node) []string {
 	ret := make([]string, len(nodes))
 	for i, node := range nodes {
 		ret[i] = node.Name
@@ -151,7 +151,7 @@ func NodeNames(nodes []*apiv1.Node) []string {
 }
 
 // NodeSlicesEqual check if two nodes equals to each other.
-func NodeSlicesEqual(x, y []*apiv1.Node) bool {
+func NodeSlicesEqual(x, y []*corev1.Node) bool {
 	if len(x) != len(y) {
 		return false
 	}
diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
index 76492b4..46a9865 100644
--- a/pkg/utils/utils.go
+++ b/pkg/utils/utils.go
@@ -17,7 +17,7 @@ import (
 	lListenerV2 "github.com/vngcloud/vngcloud-go-sdk/vngcloud/services/loadbalancer/v2/listener"
 	lLoadBalancerV2 "github.com/vngcloud/vngcloud-go-sdk/vngcloud/services/loadbalancer/v2/loadbalancer"
 	lPoolV2 "github.com/vngcloud/vngcloud-go-sdk/vngcloud/services/loadbalancer/v2/pool"
-	apiv1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
@@ -32,7 +32,7 @@ type MyDuration struct {
 }
 
 // PatchService makes patch request to the Service object.
-func PatchService(ctx context.Context, client clientset.Interface, cur, mod *apiv1.Service) error {
+func PatchService(ctx context.Context, client clientset.Interface, cur, mod *corev1.Service) error {
 	curJSON, err := json.Marshal(cur)
 	if err != nil {
 		return fmt.Errorf("failed to serialize current service object: %s", err)
@@ -43,7 +43,7 @@ func PatchService(ctx context.Context, client clientset.Interface, cur, mod *api
 		return fmt.Errorf("failed to serialize modified service object: %s", err)
 	}
 
-	patch, err := strategicpatch.CreateTwoWayMergePatch(curJSON, modJSON, apiv1.Service{})
+	patch, err := strategicpatch.CreateTwoWayMergePatch(curJSON, modJSON, corev1.Service{})
 	if err != nil {
 		return fmt.Errorf("failed to create 2-way merge patch: %s", err)
 	}
@@ -60,7 +60,7 @@ func PatchService(ctx context.Context, client clientset.Interface, cur, mod *api
 	return nil
 }
 
-func IsPoolProtocolValid(pPool *lObjects.Pool, pPort apiv1.ServicePort, pPoolName string) bool {
+func IsPoolProtocolValid(pPool *lObjects.Pool, pPort corev1.ServicePort, pPoolName string) bool {
 	if pPool != nil &&
 		!lStr.EqualFold(lStr.TrimSpace(pPool.Protocol), lStr.TrimSpace(string(pPort.Protocol))) &&
 		pPoolName == pPool.Name {
@@ -77,8 +77,8 @@ func MinInt(a, b int) int {
 	return b
 }
 
-func ListWorkerNodes(pNodes []*apiv1.Node, pOnlyReadyNode bool) []*apiv1.Node {
-	var workerNodes []*apiv1.Node
+func ListWorkerNodes(pNodes []*corev1.Node, pOnlyReadyNode bool) []*corev1.Node {
+	var workerNodes []*corev1.Node
 
 	for _, node := range pNodes {
 		// Ignore master nodes
@@ -99,7 +99,7 @@ func ListWorkerNodes(pNodes []*apiv1.Node, pOnlyReadyNode bool) []*apiv1.Node {
 
 		// Only consider ready nodes
 		for _, condition := range node.Status.Conditions {
-			if condition.Type == apiv1.NodeReady && condition.Status != apiv1.ConditionTrue {
+			if condition.Type == corev1.NodeReady && condition.Status != corev1.ConditionTrue {
 				continue
 			}
 		}
@@ -136,7 +136,7 @@ func ParsePoolProtocol(pPoolProtocol string) lPoolV2.CreateOptsProtocolOpt {
 }
 
 func ParseMonitorProtocol(
-	pPoolProtocol apiv1.Protocol, pMonitorProtocol string) lPoolV2.CreateOptsHealthCheckProtocolOpt {
+	pPoolProtocol corev1.Protocol, pMonitorProtocol string) lPoolV2.CreateOptsHealthCheckProtocolOpt {
 
 	switch lStr.TrimSpace(lStr.ToUpper(string(pPoolProtocol))) {
 	case string(lPoolV2.CreateOptsProtocolOptUDP):
@@ -189,7 +189,7 @@ func ParseLoadBalancerScheme(pInternal bool) lLoadBalancerV2.CreateOptsSchemeOpt
 	return lLoadBalancerV2.CreateOptsSchemeOptInternet
 }
 
-func ParseListenerProtocol(pPort apiv1.ServicePort) lListenerV2.CreateOptsListenerProtocolOpt {
+func ParseListenerProtocol(pPort corev1.ServicePort) lListenerV2.CreateOptsListenerProtocolOpt {
 	opt := lStr.TrimSpace(lStr.ToUpper(string(pPort.Protocol)))
 	switch opt {
 	case string(lListenerV2.CreateOptsListenerProtocolOptUDP):
@@ -199,7 +199,7 @@ func ParseListenerProtocol(pPort apiv1.ServicePort) lListenerV2.CreateOptsListen
 	return lListenerV2.CreateOptsListenerProtocolOptTCP
 }
 
-func GetStringFromServiceAnnotation(pService *apiv1.Service, annotationKey string, defaultSetting string) string {
+func GetStringFromServiceAnnotation(pService *corev1.Service, annotationKey string, defaultSetting string) string {
 	klog.V(4).Infof("getStringFromServiceAnnotation(%s/%s, %v, %v)", pService.Namespace, pService.Name, annotationKey, defaultSetting)
 	if annotationValue, ok := pService.Annotations[annotationKey]; ok {
 		//if there is an annotation for this setting, set the "setting" var to it
@@ -217,7 +217,7 @@ func GetStringFromServiceAnnotation(pService *apiv1.Service, annotationKey strin
 	return defaultSetting
 }
 
-func GetIntFromServiceAnnotation(service *apiv1.Service, annotationKey string, defaultSetting int) int {
+func GetIntFromServiceAnnotation(service *corev1.Service, annotationKey string, defaultSetting int) int {
 	klog.V(4).Infof("getIntFromServiceAnnotation(%s/%s, %v, %v)", service.Namespace, service.Name, annotationKey, defaultSetting)
 	if annotationValue, ok := service.Annotations[annotationKey]; ok {
 		returnValue, err := strconv.Atoi(annotationValue)
@@ -297,14 +297,14 @@ func ParseStringMapAnnotation(s, annotation string) map[string]string {
 	return validStr
 }
 
-func ListNodeWithPredicate(nodeLister corelisters.NodeLister, nodeLabels map[string]string) ([]*apiv1.Node, error) {
+func ListNodeWithPredicate(nodeLister corelisters.NodeLister, nodeLabels map[string]string) ([]*corev1.Node, error) {
 	labelSelector := labels.SelectorFromSet(nodeLabels)
 	nodes, err := nodeLister.List(labelSelector)
 	if err != nil {
 		return nil, err
 	}
 
-	var filtered []*apiv1.Node
+	var filtered []*corev1.Node
 	for i := range nodes {
 		if getNodeConditionPredicate(nodes[i]) {
 			filtered = append(filtered, nodes[i])
@@ -315,8 +315,8 @@ func ListNodeWithPredicate(nodeLister corelisters.NodeLister, nodeLabels map[str
 }
 
 // FilterByNodeLabel filters the given list of nodes by the given node labels.
-func FilterByNodeLabel(nodes []*apiv1.Node, nodeLabels map[string]string) []*apiv1.Node {
-	var filtered []*apiv1.Node
+func FilterByNodeLabel(nodes []*corev1.Node, nodeLabels map[string]string) []*corev1.Node {
+	var filtered []*corev1.Node
 	for _, node := range nodes {
 		if node == nil {
 			continue
@@ -331,7 +331,7 @@ func FilterByNodeLabel(nodes []*apiv1.Node, nodeLabels map[string]string) []*api
 	return filtered
 }
 
-func getNodeConditionPredicate(node *apiv1.Node) bool {
+func getNodeConditionPredicate(node *corev1.Node) bool {
 	// We add the master to the node list, but its unschedulable. So we use this to filter
 	// the master.
 	if node.Spec.Unschedulable {
@@ -355,7 +355,7 @@ func getNodeConditionPredicate(node *apiv1.Node) bool {
 	// for _, cond := range node.Status.Conditions {
 	// 	// We consider the node for load balancing only when its NodeReady condition status
 	// 	// is ConditionTrue
-	// 	if cond.Type == apiv1.NodeReady && cond.Status != apiv1.ConditionTrue {
+	// 	if cond.Type == corev1.NodeReady && cond.Status != corev1.ConditionTrue {
 	// 		klog.Info("ignoring node:", "name", node.Name, "status", cond.Status)
 	// 		// log.WithFields(log.Fields{"name": node.Name, "status": cond.Status}).Info("ignoring node")
 	// 		return false
@@ -363,13 +363,13 @@ func getNodeConditionPredicate(node *apiv1.Node) bool {
 	// }
 	return true
 }
-func ListServiceWithPredicate(serviceLister corelisters.ServiceLister) ([]*apiv1.Service, error) {
+func ListServiceWithPredicate(serviceLister corelisters.ServiceLister) ([]*corev1.Service, error) {
 	services, err := serviceLister.List(labels.Everything())
 	if err != nil {
 		return nil, err
 	}
 
-	var filtered []*apiv1.Service
+	var filtered []*corev1.Service
 	for i := range services {
 		if getServiceConditionPredicate(services[i]) {
 			filtered = append(filtered, services[i])
@@ -378,9 +378,9 @@ func ListServiceWithPredicate(serviceLister corelisters.ServiceLister) ([]*apiv1
 	return filtered, nil
 }
 
-func getServiceConditionPredicate(service *apiv1.Service) bool {
+func getServiceConditionPredicate(service *corev1.Service) bool {
 	// We only consider services with type LoadBalancer
-	return service.Spec.Type == apiv1.ServiceTypeLoadBalancer
+	return service.Spec.Type == corev1.ServiceTypeLoadBalancer
 }
 
 // providerID
@@ -395,7 +395,7 @@ var (
 	vngCloudProviderIDRegex = regexp.MustCompile(pattern)
 )
 
-func GetListProviderID(pnodes []*apiv1.Node) []string {
+func GetListProviderID(pnodes []*corev1.Node) []string {
 	var providerIDs []string
 	for _, node := range pnodes {
 		if node != nil && (matchCloudProviderPattern(node.Spec.ProviderID)) {
@@ -410,7 +410,7 @@ func matchCloudProviderPattern(pproviderID string) bool {
 	return vngCloudProviderIDRegex.MatchString(pproviderID)
 }
 
-func getProviderID(pnode *apiv1.Node) string {
+func getProviderID(pnode *corev1.Node) string {
 	return pnode.Spec.ProviderID[len(rawPrefix):len(pnode.Spec.ProviderID)]
 }
diff --git a/pkg/vngcloud/annotation.go b/pkg/vngcloud/annotation.go
index 40836d4..b87d34f 100644
--- a/pkg/vngcloud/annotation.go
+++ b/pkg/vngcloud/annotation.go
@@ -2,12 +2,13 @@ package vngcloud
 
 import (
 	"fmt"
+
 	"github.com/vngcloud/cloud-provider-vngcloud/pkg/consts"
 	"github.com/vngcloud/cloud-provider-vngcloud/pkg/utils"
 	"github.com/vngcloud/vngcloud-go-sdk/vngcloud/services/loadbalancer/v2/listener"
 	"github.com/vngcloud/vngcloud-go-sdk/vngcloud/services/loadbalancer/v2/loadbalancer"
 	"github.com/vngcloud/vngcloud-go-sdk/vngcloud/services/loadbalancer/v2/pool"
-	apiv1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/klog/v2"
 )
@@ -108,7 +109,7 @@ type ServiceConfig struct {
 	TargetType TargetType
 }
 
-func NewServiceConfig(pService *apiv1.Service) *ServiceConfig {
+func NewServiceConfig(pService *corev1.Service) *ServiceConfig {
 	opt := &ServiceConfig{
 		LoadBalancerID:   "",
 		LoadBalancerName: "",
@@ -294,7 +295,7 @@ func (s *ServiceConfig) CreateLoadbalancerOptions() *loadbalancer.CreateOpts {
 	return opt
 }
 
-func (s *ServiceConfig) CreateListenerOptions(pPort apiv1.ServicePort) *listener.CreateOpts {
+func (s *ServiceConfig) CreateListenerOptions(pPort corev1.ServicePort) *listener.CreateOpts {
 	opt := &listener.CreateOpts{
 		ListenerName:     "",
 		ListenerProtocol: utils.ParseListenerProtocol(pPort),
@@ -311,7 +312,7 @@ func (s *ServiceConfig) CreateListenerOptions(pPort apiv1.ServicePort) *listener
 	return opt
 }
 
-func (s *ServiceConfig) CreatePoolOptions(pPort apiv1.ServicePort) *pool.CreateOpts {
+func (s *ServiceConfig) CreatePoolOptions(pPort corev1.ServicePort) *pool.CreateOpts {
 	healthMonitor := pool.HealthMonitor{
 		HealthyThreshold:   s.HealthyThresholdCount,
 		UnhealthyThreshold: s.UnhealthyThresholdCount,
@@ -344,7 +345,7 @@ func (s *ServiceConfig) CreatePoolOptions(pPort apiv1.ServicePort) *pool.CreateO
 		Members: []*pool.Member{},
 	}
 	for _, name := range s.EnableProxyProtocol {
-		if (name == "*" || name == pPort.Name) && pPort.Protocol == apiv1.ProtocolTCP {
+		if (name == "*" || name == pPort.Name) && pPort.Protocol == corev1.ProtocolTCP {
 			opt.PoolProtocol = pool.CreateOptsProtocolOptProxy
 			break
 		}
@@ -352,16 +353,16 @@ func (s *ServiceConfig) CreatePoolOptions(pPort apiv1.ServicePort) *pool.CreateO
 	return opt
 }
 
-func (s *ServiceConfig) MappingProtocol(pPort apiv1.ServicePort) string {
+func (s *ServiceConfig) MappingProtocol(pPort corev1.ServicePort) string {
 	for _, name := range s.EnableProxyProtocol {
-		if (name == "*" || name == pPort.Name) && pPort.Protocol == apiv1.ProtocolTCP {
+		if (name == "*" || name == pPort.Name) && pPort.Protocol == corev1.ProtocolTCP {
 			return string(pool.CreateOptsProtocolOptProxy)
 		}
 	}
 	return string(pPort.Protocol)
 }
 
-func (s *ServiceConfig) GenListenerName(clusterName string, pService *apiv1.Service, resourceType string, pPort apiv1.ServicePort) string {
+func (s *ServiceConfig) GenListenerName(clusterName string, pService *corev1.Service, resourceType string, pPort corev1.ServicePort) string {
 	hash := utils.GenerateHashName(clusterName, pService.Namespace, pService.Name, resourceType)
 	name := fmt.Sprintf("%s_%s_%s_%s_%s_%s_%d",
 		consts.DEFAULT_LB_PREFIX_NAME,
@@ -374,7 +375,7 @@ func (s *ServiceConfig) GenListenerName(clusterName string, pService *apiv1.Serv
 	return utils.ValidateName(name)
 }
 
-func (s *ServiceConfig) GenPoolName(clusterName string, pService *apiv1.Service, resourceType string, pPort apiv1.ServicePort) string {
+func (s *ServiceConfig) GenPoolName(clusterName string, pService *corev1.Service, resourceType string, pPort corev1.ServicePort) string {
 	realProtocol := s.MappingProtocol(pPort)
 	hash := utils.GenerateHashName(clusterName, pService.Namespace, pService.Name, resourceType)
diff --git a/pkg/vngcloud/vcontainer.go b/pkg/vngcloud/vcontainer.go
index 852c748..abb58b2 100644
--- a/pkg/vngcloud/vcontainer.go
+++ b/pkg/vngcloud/vcontainer.go
@@ -9,7 +9,7 @@ import (
 	vngcloudutil "github.com/vngcloud/cloud-provider-vngcloud/pkg/utils/vngcloud"
 	vconSdkClient "github.com/vngcloud/vngcloud-go-sdk/client"
 	"github.com/vngcloud/vngcloud-go-sdk/vngcloud"
-	lCoreV1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -45,7 +45,7 @@ func (s *VContainer) Initialize(clientBuilder lcloudProvider.ControllerClientBui
 	s.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: s.kubeClient.CoreV1().Events("")})
 	s.eventRecorder = s.eventBroadcaster.NewRecorder(
 		scheme.Scheme,
-		lCoreV1.EventSource{Component: fmt.Sprintf("cloud-provider-%s", consts.PROVIDER_NAME)})
+		corev1.EventSource{Component: fmt.Sprintf("cloud-provider-%s", consts.PROVIDER_NAME)})
 }
 
 func (s *VContainer) LoadBalancer() (lcloudProvider.LoadBalancer, bool) {
@@ -69,9 +69,9 @@ func (s *VContainer) LoadBalancer() (lcloudProvider.LoadBalancer, bool) {
 		vLbConfig:         s.vLbOpts,
 		config:            s.config,
 		trackLBUpdate:     utils.NewUpdateTracker(),
-		serviceCache:      make(map[string]*lCoreV1.Service),
+		serviceCache:      make(map[string]*corev1.Service),
 		isReApplyNextTime: false,
-		knownNodes:        []*lCoreV1.Node{},
+		knownNodes:        []*corev1.Node{},
 	}
 	go lb.Init()
 	s.vlb = lb
diff --git a/pkg/vngcloud/vlb.go b/pkg/vngcloud/vlb.go
index f709dbd..f1760aa 100644
--- a/pkg/vngcloud/vlb.go
+++ b/pkg/vngcloud/vlb.go
@@ -22,7 +22,7 @@ import (
 	"github.com/vngcloud/vngcloud-go-sdk/vngcloud/services/loadbalancer/v2/listener"
 	"github.com/vngcloud/vngcloud-go-sdk/vngcloud/services/loadbalancer/v2/loadbalancer"
 	"github.com/vngcloud/vngcloud-go-sdk/vngcloud/services/loadbalancer/v2/pool"
-	lCoreV1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/informers"
@@ -67,9 +67,9 @@ type (
 		kubeClient    kubernetes.Interface
 		eventRecorder record.EventRecorder
 		vLbConfig     VLbOpts
-		serviceCache  map[string]*lCoreV1.Service
+		serviceCache  map[string]*corev1.Service
 
-		knownNodes          []*lCoreV1.Node
+		knownNodes          []*corev1.Node
 		serviceLister       corelisters.ServiceLister
 		serviceListerSynced cache.InformerSynced
 		endpointLister      corelisters.EndpointsLister
@@ -115,7 +115,7 @@ func (c *vLB) Init() {
 	endpointInformer := kubeInformerFactory.Core().V1().Endpoints()
 	_, err := endpointInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			object := obj.(*lCoreV1.Endpoints)
+			object := obj.(*corev1.Endpoints)
 			requests := c.resourceDependant.GetServiceNeedReconcile("endpoint", object.GetNamespace(), object.GetName())
 			if len(requests) == 0 {
 				return
@@ -133,8 +133,8 @@ func (c *vLB) Init() {
 			}
 		},
 		UpdateFunc: func(old, new interface{}) {
-			objectOld := old.(*lCoreV1.Endpoints)
-			objectNew := new.(*lCoreV1.Endpoints)
+			objectOld := old.(*corev1.Endpoints)
+			objectNew := new.(*corev1.Endpoints)
 			if reflect.DeepEqual(objectOld.Subsets, objectNew.Subsets) {
 				return
 			}
@@ -182,18 +182,18 @@
 // ****************************** IMPLEMENTATIONS OF KUBERNETES CLOUD PROVIDER INTERFACE *******************************
 
-func (c *vLB) GetLoadBalancer(pCtx context.Context, clusterName string, pService *lCoreV1.Service) (*lCoreV1.LoadBalancerStatus, bool, error) {
+func (c *vLB) GetLoadBalancer(pCtx context.Context, clusterName string, pService *corev1.Service) (*corev1.LoadBalancerStatus, bool, error) {
 	mc := lMetrics.NewMetricContext("loadbalancer", "ensure")
 	klog.InfoS("GetLoadBalancer", "cluster", clusterName, "service", klog.KObj(pService))
 	status, existed, err := c.ensureGetLoadBalancer(pCtx, clusterName, pService)
 	return status, existed, mc.ObserveReconcile(err)
 }
 
-func (c *vLB) GetLoadBalancerName(_ context.Context, clusterName string, pService *lCoreV1.Service) string {
+func (c *vLB) GetLoadBalancerName(_ context.Context, clusterName string, pService *corev1.Service) string {
 	return utils.GenerateLBName(c.getClusterID(), pService.Namespace, pService.Name, consts.RESOURCE_TYPE_SERVICE)
 }
 
-func (c *vLB) EnsureLoadBalancer(pCtx context.Context, clusterName string, pService *lCoreV1.Service, pNodes []*lCoreV1.Node) (*lCoreV1.LoadBalancerStatus, error) {
+func (c *vLB) EnsureLoadBalancer(pCtx context.Context, clusterName string, pService *corev1.Service, pNodes []*corev1.Node) (*corev1.LoadBalancerStatus, error) {
 	key := fmt.Sprintf("%s/%s", pService.Namespace, pService.Name)
 	c.stringKeyLock.Lock(key)
 	defer c.stringKeyLock.Unlock(key)
@@ -211,7 +211,7 @@ func (c *vLB) EnsureLoadBalancer(pCtx context.Context, clusterName string, pServ
 
 // UpdateLoadBalancer updates hosts under the specified load balancer. This will be executed when user add or remove nodes
 // from the cluster
-func (c *vLB) UpdateLoadBalancer(pCtx context.Context, clusterName string, pService *lCoreV1.Service, pNodes []*lCoreV1.Node) error {
+func (c *vLB) UpdateLoadBalancer(pCtx context.Context, clusterName string, pService *corev1.Service, pNodes []*corev1.Node) error {
 	key := fmt.Sprintf("%s/%s", pService.Namespace, pService.Name)
 	c.stringKeyLock.Lock(key)
 	defer c.stringKeyLock.Unlock(key)
@@ -233,7 +233,7 @@ func (c *vLB) UpdateLoadBalancer(pCtx context.Context, clusterName string, pServ
 	return mc.ObserveReconcile(err)
 }
 
-func (c *vLB) EnsureLoadBalancerDeleted(pCtx context.Context, clusterName string, pService *lCoreV1.Service) error {
+func (c *vLB) EnsureLoadBalancerDeleted(pCtx context.Context, clusterName string, pService *corev1.Service) error {
 	key := fmt.Sprintf("%s/%s", pService.Namespace, pService.Name)
 	c.stringKeyLock.Lock(key)
 	defer c.stringKeyLock.Unlock(key)
@@ -248,8 +248,8 @@ func (c *vLB) EnsureLoadBalancerDeleted(pCtx context.Context, clusterName string
 // ************************************************** PRIVATE METHODS **************************************************
 
 func (c *vLB) ensureLoadBalancer(
-	pCtx context.Context, _ string, pService *lCoreV1.Service, pNodes []*lCoreV1.Node) ( // params
-	rLb *lCoreV1.LoadBalancerStatus, rErr error) { // returns
+	pCtx context.Context, _ string, pService *corev1.Service, pNodes []*corev1.Node) ( // params
+	rLb *corev1.LoadBalancerStatus, rErr error) { // returns
 
 	if option, ok := pService.Annotations[ServiceAnnotationIgnore]; ok {
 		if isIgnore := utils.ParseBoolAnnotation(option, ServiceAnnotationIgnore, false); isIgnore {
@@ -306,7 +306,7 @@ func (c *vLB) ensureLoadBalancer(
 	return lbStatus, nil
 }
 
-func (c *vLB) createLoadBalancerStatus(pService *lCoreV1.Service, lb *lObjects.LoadBalancer) *lCoreV1.LoadBalancerStatus {
+func (c *vLB) createLoadBalancerStatus(pService *corev1.Service, lb *lObjects.LoadBalancer) *corev1.LoadBalancerStatus {
 	if pService == nil {
 		klog.Warningln("can't createLoadBalancerStatus, service is nil")
 		return nil
@@ -317,12 +317,12 @@ func (c *vLB) createLoadBalancerStatus(pService *lCoreV1.Service, lb *lObjects.L
 	pService.ObjectMeta.Annotations[ServiceAnnotationLoadBalancerID] = lb.UUID
 	delete(pService.ObjectMeta.Annotations, ServiceAnnotationLoadBalancerName)
 
-	status := &lCoreV1.LoadBalancerStatus{}
+	status := &corev1.LoadBalancerStatus{}
 	addr := net.ParseIP(lb.Address)
 	if addr != nil {
-		status.Ingress = []lCoreV1.LoadBalancerIngress{{IP: lb.Address}}
+		status.Ingress = []corev1.LoadBalancerIngress{{IP: lb.Address}}
 	} else {
-		status.Ingress = []lCoreV1.LoadBalancerIngress{{Hostname: lb.Address}}
+		status.Ingress = []corev1.LoadBalancerIngress{{Hostname: lb.Address}}
 	}
 	return status
 }
@@ -331,7 +331,7 @@ func (c *vLB) getProjectID() string {
 	return c.extraInfo.ProjectID
 }
 
-func (c *vLB) ensureDeleteLoadBalancer(_ context.Context, _ string, pService *lCoreV1.Service) error {
+func (c *vLB) ensureDeleteLoadBalancer(_ context.Context, _ string, pService *corev1.Service) error {
 	c.resourceDependant.ClearService(pService.Namespace, pService.Name)
 	if option, ok := pService.Annotations[ServiceAnnotationIgnore]; ok {
 		if isIgnore := utils.ParseBoolAnnotation(option, ServiceAnnotationIgnore, false); isIgnore {
@@ -438,7 +438,7 @@ func (c *vLB) ensureDeleteLoadBalancer(_ context.Context, _ string, pService *lC
 	return nil
 }
 
-func (c *vLB) ensureGetLoadBalancer(_ context.Context, _ string, pService *lCoreV1.Service) (*lCoreV1.LoadBalancerStatus, bool, error) {
+func (c *vLB) ensureGetLoadBalancer(_ context.Context, _ string, pService *corev1.Service) (*corev1.LoadBalancerStatus, bool, error) {
 	lbID, _ := c.GetLoadbalancerIDByService(pService)
 	if lbID == "" {
 		klog.Infof("Load balancer is not existed")
@@ -519,7 +519,7 @@ func (c *vLB) getClusterID() string {
 	return c.config.Cluster.ClusterID
 }
 
-func (c *vLB) inspectService(pService *lCoreV1.Service, pNodes []*lCoreV1.Node) (*Expander, error) {
+func (c *vLB) inspectService(pService *corev1.Service, pNodes []*corev1.Node) (*Expander, error) {
 	if pService == nil {
 		return &Expander{
 			serviceConf: NewServiceConfig(nil),
@@ -756,7 +756,7 @@ func (c *vLB) ensureLoadBalancerInstance(inspect *Expander) (string, error) {
 	return inspect.serviceConf.LoadBalancerID, nil
 }
 
-func (c *vLB) GetLoadbalancerIDByService(pService *lCoreV1.Service) (string, error) {
+func (c *vLB) GetLoadbalancerIDByService(pService *corev1.Service) (string, error) {
 	lbsInSubnet, err := vngcloudutil.ListLB(c.vLBSC, c.getProjectID())
 	if err != nil {
 		klog.Errorf("error when list lb by subnet id: %v", err)