From 9ae1fc366b86e21481f3e68a87209fa07245c75f Mon Sep 17 00:00:00 2001
From: Alexander Constantinescu
Date: Wed, 18 Oct 2023 18:22:28 +0200
Subject: [PATCH] Store nodes before calling EnsureLoadBalancer

I am having difficulties convincing myself whether this is better or
worse. I didn't implement it this way originally because I didn't want
to store nodes that we weren't sure we had configured. However: if
EnsureLoadBalancer fails, we should retry the call from the service
controller. Doing it like this might save us one update call from the
node controller's side for calls which have already started executing
from the service controller's side... but is that really so expensive
at this point? And is it really that dangerous to not do either, given
that we retry failed calls?

Ahhhhh!!! Opinions, please! Help, please!

(A rough sketch of the node-sync bookkeeping this reasoning assumes
follows the diff.)
---
 .../controllers/service/controller.go      |  2 +-
 .../controllers/service/controller_test.go | 17 ++++++++---------
 2 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/staging/src/k8s.io/cloud-provider/controllers/service/controller.go b/staging/src/k8s.io/cloud-provider/controllers/service/controller.go
index 27cd0f051df..f731e0c7a86 100644
--- a/staging/src/k8s.io/cloud-provider/controllers/service/controller.go
+++ b/staging/src/k8s.io/cloud-provider/controllers/service/controller.go
@@ -443,13 +443,13 @@ func (c *Controller) ensureLoadBalancer(ctx context.Context, service *v1.Service
 	if len(nodes) == 0 {
 		c.eventRecorder.Event(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer")
 	}
+	c.storeLastSyncedNodes(service, nodes)
 	// - Not all cloud providers support all protocols and the next step is expected to return
 	// an error for unsupported protocols
 	status, err := c.balancer.EnsureLoadBalancer(ctx, c.clusterName, service, nodes)
 	if err != nil {
 		return nil, err
 	}
-	c.storeLastSyncedNodes(service, nodes)
 	return status, nil
 }

diff --git a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go
index 157a6676746..3e05012c24b 100644
--- a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go
+++ b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go
@@ -1413,13 +1413,14 @@ func TestSlowNodeSync(t *testing.T) {
 		updateCallCh <- update
 	}

-	// Three update calls are expected. This is because this test calls
-	// controller.syncNodes once with two existing services, so we will have an
-	// update call for each service, and controller.syncService once. The end
-	// result is therefore three update calls. Each update call takes
-	// cloudProvider.RequestDelay to process. The test asserts that the order of
-	// the Hosts defined by the update calls is respected, but doesn't
-	// necessarily assert the order of the Service. This is because the
+	// Two update calls are expected. This is because this test calls
+	// controller.syncNodes once with two existing services, while one
+	// controller.syncService runs concurrently. The end result is therefore two
+	// update calls: the second update from controller.syncNodes is skipped, since
+	// the concurrent syncService already synced that service with the new nodes.
+	// Each update call takes cloudProvider.RequestDelay to process. The test
+	// asserts that the order of the Hosts defined by the update calls is respected,
+	// but doesn't necessarily assert the order of the Service. This is because the
 	// controller implementation doesn't use a deterministic order when syncing
 	// services. The test therefor works out which service is impacted by the
 	// slow node sync (which will be whatever service is not synced first) and
@@ -1429,8 +1430,6 @@ func TestSlowNodeSync(t *testing.T) {
 		{Service: service1, Hosts: []*v1.Node{node1, node2}},
 		// Second update call for impacted service from controller.syncService
 		{Service: service2, Hosts: []*v1.Node{node1, node2, node3}},
-		// Third update call for second service from controller.syncNodes.
-		{Service: service2, Hosts: []*v1.Node{node1, node2, node3}},
 	}

 	wg := sync.WaitGroup{}
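
To make the trade-off above concrete, here is a minimal, self-contained Go
sketch of the node-sync bookkeeping the commit message reasons about. It is
not the actual k8s.io/cloud-provider controller code: the map[string][]string
bookkeeping and the nodesChanged helper are simplifications assumed for
illustration. The point it shows is that recording the node set before the
(possibly slow) EnsureLoadBalancer call lets a concurrent node-triggered sync
observe an up-to-date set and skip a redundant hosts update; if
EnsureLoadBalancer then fails, the stored set is optimistic, but the service
is requeued and retried, which is exactly the risk being weighed.

// sketch.go: a simplified, hypothetical model of the lastSyncedNodes
// bookkeeping; not the real k8s.io/cloud-provider implementation.
package main

import (
	"fmt"
	"sync"
)

type controller struct {
	mu              sync.Mutex
	lastSyncedNodes map[string][]string // service key -> node names recorded at last sync
}

// storeLastSyncedNodes records the node set a service is being configured with.
// The patch calls this before EnsureLoadBalancer instead of after it.
func (c *controller) storeLastSyncedNodes(svc string, nodes []string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.lastSyncedNodes[svc] = append([]string(nil), nodes...)
}

// nodesChanged is the check a node-triggered sync would consult (hypothetical
// name): if the recorded set already matches the current nodes, the extra
// hosts-update call can be skipped.
func (c *controller) nodesChanged(svc string, nodes []string) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	last := c.lastSyncedNodes[svc]
	if len(last) != len(nodes) {
		return true
	}
	seen := make(map[string]bool, len(last))
	for _, n := range last {
		seen[n] = true
	}
	for _, n := range nodes {
		if !seen[n] {
			return true
		}
	}
	return false
}

func main() {
	c := &controller{lastSyncedNodes: map[string][]string{}}
	nodes := []string{"node1", "node2", "node3"}

	// Service-controller path: record the nodes before the slow
	// EnsureLoadBalancer call, as the patch does.
	c.storeLastSyncedNodes("svc-a", nodes)

	// Node-controller path running concurrently: it sees the same node set
	// and can skip its own update call for this service.
	fmt.Println("node sync needs update:", c.nodesChanged("svc-a", nodes)) // prints: false
}

If the store stayed after a failed EnsureLoadBalancer, nodesChanged would keep
returning true and the node sync would issue the extra update call that the
commit message says might be saved.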