mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-08-08 11:38:15 +00:00
Store nodes before calling EnsureLoadBalancer
I am having difficulties convincing myself whether this is better or worse. I didn't implement this originally because I didn't want to store nodes that we weren't sure we'd configured. However: if EnsureLoadBalancer fails we should retry the call from the service controller. Doing it like this might save us one update call from the node controller side for calls which have already started executing from the service controller's side... is this really that expensive at this point though? Is it really that dangerous to not do either, given that we retry failed calls? Ahhhhh!!! Opinions, please! Help, please!
This commit is contained in:
parent
60338c79d7
commit
9ae1fc366b
@ -443,13 +443,13 @@ func (c *Controller) ensureLoadBalancer(ctx context.Context, service *v1.Service
|
|||||||
if len(nodes) == 0 {
|
if len(nodes) == 0 {
|
||||||
c.eventRecorder.Event(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer")
|
c.eventRecorder.Event(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer")
|
||||||
}
|
}
|
||||||
|
c.storeLastSyncedNodes(service, nodes)
|
||||||
// - Not all cloud providers support all protocols and the next step is expected to return
|
// - Not all cloud providers support all protocols and the next step is expected to return
|
||||||
// an error for unsupported protocols
|
// an error for unsupported protocols
|
||||||
status, err := c.balancer.EnsureLoadBalancer(ctx, c.clusterName, service, nodes)
|
status, err := c.balancer.EnsureLoadBalancer(ctx, c.clusterName, service, nodes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
c.storeLastSyncedNodes(service, nodes)
|
|
||||||
return status, nil
|
return status, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1413,13 +1413,14 @@ func TestSlowNodeSync(t *testing.T) {
|
|||||||
updateCallCh <- update
|
updateCallCh <- update
|
||||||
}
|
}
|
||||||
|
|
||||||
// Three update calls are expected. This is because this test calls
|
// Two update calls are expected. This is because this test calls
|
||||||
// controller.syncNodes once with two existing services, so we will have an
|
// controller.syncNodes once with two existing services, but with one
|
||||||
// update call for each service, and controller.syncService once. The end
|
// controller.syncService while that is happening. The end result is
|
||||||
// result is therefore three update calls. Each update call takes
|
// therefore two update calls - since the second controller.syncNodes won't
|
||||||
// cloudProvider.RequestDelay to process. The test asserts that the order of
|
// trigger an update call because the syncService already did. Each update
|
||||||
// the Hosts defined by the update calls is respected, but doesn't
|
// call takes cloudProvider.RequestDelay to process. The test asserts that
|
||||||
// necessarily assert the order of the Service. This is because the
|
// the order of the Hosts defined by the update calls is respected, but
|
||||||
|
// doesn't necessarily assert the order of the Service. This is because the
|
||||||
// controller implementation doesn't use a deterministic order when syncing
|
// controller implementation doesn't use a deterministic order when syncing
|
||||||
// services. The test therefore works out which service is impacted by the
|
// services. The test therefore works out which service is impacted by the
|
||||||
// slow node sync (which will be whatever service is not synced first) and
|
// slow node sync (which will be whatever service is not synced first) and
|
||||||
@ -1429,8 +1430,6 @@ func TestSlowNodeSync(t *testing.T) {
|
|||||||
{Service: service1, Hosts: []*v1.Node{node1, node2}},
|
{Service: service1, Hosts: []*v1.Node{node1, node2}},
|
||||||
// Second update call for impacted service from controller.syncService
|
// Second update call for impacted service from controller.syncService
|
||||||
{Service: service2, Hosts: []*v1.Node{node1, node2, node3}},
|
{Service: service2, Hosts: []*v1.Node{node1, node2, node3}},
|
||||||
// Third update call for second service from controller.syncNodes.
|
|
||||||
{Service: service2, Hosts: []*v1.Node{node1, node2, node3}},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
wg := sync.WaitGroup{}
|
wg := sync.WaitGroup{}
|
||||||
|
Loading…
Reference in New Issue
Block a user