Prevent LoadBalancer updates on follower. #6872

Draft · wants to merge 1 commit into base: main
1 change: 1 addition & 0 deletions changelogs/unreleased/6872-tsaarni-small.md
@@ -0,0 +1 @@
+Fixed a memory leak in the Contour follower instance caused by unprocessed LoadBalancer status updates.
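
For context on the bug class the changelog describes: if only the leader-elected component drains the status channel, follower instances that keep pushing LoadBalancer status updates into it end up with sends that block or values that accumulate. The following is a minimal, generic Go sketch of that failure mode; it is not Contour code, and the channel name and simulated callbacks are illustrative assumptions:

package main

import (
    "fmt"
    "runtime"
    "time"
)

func main() {
    // Nothing drains this channel on a "follower" instance.
    updates := make(chan string)

    // Simulate informer callbacks that each try to report a status update.
    // Every send blocks forever, pinning the goroutine and its payload in memory.
    for i := 0; i < 100; i++ {
        go func(i int) {
            updates <- fmt.Sprintf("load balancer status update %d", i)
        }(i)
    }

    time.Sleep(100 * time.Millisecond)
    fmt.Println("goroutines leaked while blocked on send:", runtime.NumGoroutine()-1)
}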
9 changes: 6 additions & 3 deletions cmd/contour/serve.go
@@ -703,6 +703,10 @@
        return err
    }

+   notifier := &leadership.Notifier{
+       ToNotify: []leadership.NeedLeaderElectionNotification{contourHandler, observer},
+   }
+
(Codecov / codecov/patch: added lines cmd/contour/serve.go#L706-L709 were not covered by tests.)
    // Register an informer to watch envoy's service if we haven't been given static details.
    if lbAddress := contourConfiguration.Ingress.StatusAddress; len(lbAddress) > 0 {
        s.log.WithField("loadbalancer-address", lbAddress).Info("Using supplied information for Ingress status")
@@ -723,6 +727,8 @@
            s.log.WithError(err).WithField("resource", "services").Fatal("failed to create informer")
        }

+       notifier.ToNotify = append(notifier.ToNotify, serviceHandler)
+
(Codecov / codecov/patch: added lines cmd/contour/serve.go#L730-L731 were not covered by tests.)
        s.log.WithField("envoy-service-name", contourConfiguration.Envoy.Service.Name).
            WithField("envoy-service-namespace", contourConfiguration.Envoy.Service.Namespace).
            Info("Watching Service for Ingress status")
@@ -740,9 +746,6 @@
        return err
    }

-   notifier := &leadership.Notifier{
-       ToNotify: []leadership.NeedLeaderElectionNotification{contourHandler, observer},
-   }
    if err := s.mgr.Add(notifier); err != nil {
        return err
    }
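
The diff above moves the Notifier construction earlier so that serviceHandler can be registered for leader-election callbacks before the informer is added. For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of how such a notifier can fan out an OnElectedLeader() callback; the interface and field names follow the diff, but the channel-driven Start method and the demo watcher are assumptions rather than Contour's actual implementation:

package main

import "fmt"

// NeedLeaderElectionNotification is implemented by components that must only
// start acting once this instance has won the leader election.
type NeedLeaderElectionNotification interface {
    OnElectedLeader()
}

// Notifier fans the leader-election signal out to the registered components.
type Notifier struct {
    ToNotify []NeedLeaderElectionNotification
}

// Start blocks until the election signal fires, then notifies every component.
// In Contour this would be driven by the manager's leader election; here a
// plain channel stands in for it.
func (n *Notifier) Start(elected <-chan struct{}) {
    <-elected
    for _, c := range n.ToNotify {
        c.OnElectedLeader()
    }
}

type demoWatcher struct{ name string }

func (d *demoWatcher) OnElectedLeader() {
    fmt.Printf("%s: now acting as leader\n", d.name)
}

func main() {
    elected := make(chan struct{})
    n := &Notifier{
        ToNotify: []NeedLeaderElectionNotification{&demoWatcher{name: "serviceHandler"}},
    }
    close(elected) // simulate winning the election
    n.Start(elected)
}

With this ordering, serviceHandler can be appended to ToNotify before the informer is registered, which is what the second hunk does.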
10 changes: 9 additions & 1 deletion internal/k8s/statusaddress.go
@@ -16,6 +16,7 @@ package k8s
import (
    "fmt"
    "sync"
+   "sync/atomic"

    "github.com/sirupsen/logrus"
    core_v1 "k8s.io/api/core/v1"
@@ -186,6 +187,7 @@ type ServiceStatusLoadBalancerWatcher struct {
    ServiceName string
    LBStatus chan core_v1.LoadBalancerStatus
    Log logrus.FieldLogger
+   leader atomic.Bool
}

func (s *ServiceStatusLoadBalancerWatcher) OnAdd(obj any, _ bool) {
@@ -234,8 +236,14 @@ func (s *ServiceStatusLoadBalancerWatcher) OnDelete(obj any) {
    })
}

+func (s *ServiceStatusLoadBalancerWatcher) OnElectedLeader() {
+   s.leader.Store(true)
+}
+
func (s *ServiceStatusLoadBalancerWatcher) notify(lbstatus core_v1.LoadBalancerStatus) {
-   s.LBStatus <- lbstatus
+   if s.leader.Load() {
+       s.LBStatus <- lbstatus
+   }
}

func coreToNetworkingLBStatus(lbs core_v1.LoadBalancerStatus) networking_v1.IngressLoadBalancerStatus {

Review thread on this hunk:

Reviewer: Is it possible to lose the envoy service event this way?

@tsaarni (Member, Author), Jan 21, 2025: True. Currently the event-processing logic relies on Added/Updated/Deleted events alone. That works for the other resource types because they are processed by all Contour instances, but I think a new approach is needed for the load balancer status updates.

Follow-up: Maybe we can handle it simply: notify with the latest envoy Service when this instance becomes the leader.
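
To make the follow-up suggestion concrete, here is a hedged sketch (not part of this PR) of a watcher that remembers the most recent status while following and replays it once OnElectedLeader fires, so an event received before the election is not lost. The type name, the mutex-guarded latest field, and the replay logic are illustrative assumptions:

package sketch

import (
    "sync"

    core_v1 "k8s.io/api/core/v1"
)

// leaderAwareWatcher caches the last-seen status on followers and replays it
// to the status channel once this instance becomes the leader.
type leaderAwareWatcher struct {
    LBStatus chan core_v1.LoadBalancerStatus

    mu     sync.Mutex
    leader bool
    latest *core_v1.LoadBalancerStatus
}

func (w *leaderAwareWatcher) OnElectedLeader() {
    w.mu.Lock()
    w.leader = true
    pending := w.latest
    w.latest = nil
    w.mu.Unlock()

    if pending != nil {
        // Replay the status observed while this instance was still a follower.
        w.LBStatus <- *pending
    }
}

func (w *leaderAwareWatcher) notify(lbstatus core_v1.LoadBalancerStatus) {
    w.mu.Lock()
    if !w.leader {
        // Follower: remember only the most recent status instead of sending it.
        w.latest = &lbstatus
        w.mu.Unlock()
        return
    }
    w.mu.Unlock()

    w.LBStatus <- lbstatus
}

Whether the replay belongs in the watcher itself or in a higher-level component's OnElectedLeader is a design choice the thread leaves open.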
28 changes: 28 additions & 0 deletions internal/k8s/statusaddress_test.go
@@ -41,6 +41,7 @@ func TestServiceStatusLoadBalancerWatcherOnAdd(t *testing.T) {
        LBStatus: lbstatus,
        Log: fixture.NewTestLogger(t),
    }
+   sw.OnElectedLeader()

    recv := func() (core_v1.LoadBalancerStatus, bool) {
        select {
@@ -89,6 +90,7 @@ func TestServiceStatusLoadBalancerWatcherOnUpdate(t *testing.T) {
        LBStatus: lbstatus,
        Log: fixture.NewTestLogger(t),
    }
+   sw.OnElectedLeader()

    recv := func() (core_v1.LoadBalancerStatus, bool) {
        select {
@@ -139,6 +141,7 @@ func TestServiceStatusLoadBalancerWatcherOnDelete(t *testing.T) {
        LBStatus: lbstatus,
        Log: fixture.NewTestLogger(t),
    }
+   sw.OnElectedLeader()

    recv := func() (core_v1.LoadBalancerStatus, bool) {
        select {
@@ -179,6 +182,31 @@
    assert.Equal(t, want, got)
}

+func TestServiceStatusLoadBalancerWatcherNoNotificationsOnFollower(t *testing.T) {
+   lbstatus := make(chan core_v1.LoadBalancerStatus, 1)
+
+   sw := ServiceStatusLoadBalancerWatcher{
+       ServiceName: "envoy",
+       LBStatus: lbstatus,
+       Log: fixture.NewTestLogger(t),
+   }
+
+   recv := func() bool {
+       select {
+       case <-sw.LBStatus:
+           return true
+       default:
+           return false
+       }
+   }
+
+   // assert that when not elected leader, no notifications are sent.
+   var svc core_v1.Service
+   svc.Name = "envoy"
+   sw.OnAdd(&svc, false)
+   assert.False(t, recv())
+}
+
func TestStatusAddressUpdater(t *testing.T) {
    const objName = "someobjfoo"
