Refine test case

Alexander Constantinescu 2023-11-13 17:03:51 +01:00
parent 9ae1fc366b
commit f9ab24bf48
2 changed files with 57 additions and 72 deletions


@@ -1386,13 +1386,9 @@ func TestNeedsCleanup(t *testing.T) {
// make sure that the slow node sync never removes the Node from LB set because it
// has stale data.
func TestSlowNodeSync(t *testing.T) {
-stopCh, updateCallCh := make(chan struct{}), make(chan fakecloud.UpdateBalancerCall)
+stopCh, syncServiceDone, syncService := make(chan struct{}), make(chan string), make(chan string)
defer close(stopCh)
-defer close(updateCallCh)
duration := time.Millisecond
-syncService := make(chan string)
defer close(syncService)
node1 := makeNode(tweakName("node1"))
node2 := makeNode(tweakName("node2"))
@@ -1405,14 +1401,16 @@ func TestSlowNodeSync(t *testing.T) {
serviceKeys := sets.New(sKey1, sKey2)
controller, cloudProvider, kubeClient := newController(stopCh, node1, node2, service1, service2)
cloudProvider.RequestDelay = 4 * duration
cloudProvider.UpdateCallCb = func(update fakecloud.UpdateBalancerCall) {
-updateCallCh <- update
+key, _ := cache.MetaNamespaceKeyFunc(update.Service)
+impactedService := serviceKeys.Difference(sets.New(key)).UnsortedList()[0]
+syncService <- impactedService
+<-syncServiceDone
}
cloudProvider.EnsureCallCb = func(update fakecloud.UpdateBalancerCall) {
-updateCallCh <- update
+syncServiceDone <- update.Service.Name
}
// Two update calls are expected. This is because this test calls
// controller.syncNodes once with two existing services, but with one
// controller.syncService while that is happening. The end result is
@@ -1428,6 +1426,8 @@ func TestSlowNodeSync(t *testing.T) {
expectedUpdateCalls := []fakecloud.UpdateBalancerCall{
// First update call for first service from controller.syncNodes
{Service: service1, Hosts: []*v1.Node{node1, node2}},
+}
+expectedEnsureCalls := []fakecloud.UpdateBalancerCall{
// Second update call for impacted service from controller.syncService
{Service: service2, Hosts: []*v1.Node{node1, node2, node3}},
}
@@ -1439,51 +1439,34 @@ func TestSlowNodeSync(t *testing.T) {
controller.syncNodes(context.TODO(), 1)
}()
-wg.Add(1)
-go func() {
-defer wg.Done()
-updateCallIdx := 0
-impactedService := ""
-for update := range updateCallCh {
-// Validate that the call hosts are what we expect
-if !compareHostSets(t, expectedUpdateCalls[updateCallIdx].Hosts, update.Hosts) {
-t.Errorf("unexpected updated hosts for update: %v, expected: %v, got: %v", updateCallIdx, expectedUpdateCalls[updateCallIdx].Hosts, update.Hosts)
-return
-}
-key, _ := cache.MetaNamespaceKeyFunc(update.Service)
-// For call 0: determine impacted service
-if updateCallIdx == 0 {
-impactedService = serviceKeys.Difference(sets.New(key)).UnsortedList()[0]
-syncService <- impactedService
-}
-// For calls > 0: validate the impacted service
-if updateCallIdx > 0 {
-if key != impactedService {
-t.Error("unexpected impacted service")
-return
-}
-}
-if updateCallIdx == len(expectedUpdateCalls)-1 {
-return
-}
-updateCallIdx++
-}
-}()
key := <-syncService
if _, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node3, metav1.CreateOptions{}); err != nil {
t.Fatalf("error creating node3, err: %v", err)
}
// Give it some time to update the informer cache, needs to be lower than
// cloudProvider.RequestDelay
time.Sleep(duration)
// Sync the service
if err := controller.syncService(context.TODO(), key); err != nil {
t.Errorf("unexpected service sync error, err: %v", err)
t.Fatalf("unexpected service sync error, err: %v", err)
}
wg.Wait()
if len(expectedUpdateCalls) != len(cloudProvider.UpdateCalls) {
t.Fatalf("unexpected amount of update calls, expected: %v, got: %v", len(expectedUpdateCalls), len(cloudProvider.UpdateCalls))
}
+for idx, update := range cloudProvider.UpdateCalls {
+if !compareHostSets(t, expectedUpdateCalls[idx].Hosts, update.Hosts) {
+t.Fatalf("unexpected updated hosts for update: %v, expected: %v, got: %v", idx, expectedUpdateCalls[idx].Hosts, update.Hosts)
+}
+}
+if len(expectedEnsureCalls) != len(cloudProvider.EnsureCalls) {
+t.Fatalf("unexpected amount of ensure calls, expected: %v, got: %v", len(expectedEnsureCalls), len(cloudProvider.EnsureCalls))
+}
+for idx, ensure := range cloudProvider.EnsureCalls {
+if !compareHostSets(t, expectedEnsureCalls[idx].Hosts, ensure.Hosts) {
+t.Fatalf("unexpected updated hosts for ensure: %v, expected: %v, got: %v", idx, expectedEnsureCalls[idx].Hosts, ensure.Hosts)
+}
+}
}
func TestNeedsUpdate(t *testing.T) {
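
The rewritten test replaces the old validation goroutine with a two-channel handshake: UpdateCallCb (fired from the slow controller.syncNodes path) publishes the impacted service key on syncService and then blocks on syncServiceDone, which only EnsureCallCb (fired from the racing controller.syncService call) can signal. Below is a minimal, self-contained sketch of that handshake pattern; the goroutine split and the literal "service2" are illustrative stand-ins, not controller code.

package main

import "fmt"

func main() {
	syncService, syncServiceDone := make(chan string), make(chan string)

	// Slow path: stands in for UpdateCallCb. It names the service the
	// other worker must sync, then blocks until that sync has completed.
	go func() {
		syncService <- "service2" // hand off the impacted service
		name := <-syncServiceDone // park until the fast path finishes
		fmt.Println("slow path resumed after syncing", name)
	}()

	// Fast path: stands in for the test's main goroutine plus EnsureCallCb.
	key := <-syncService // learn which service was impacted
	fmt.Println("fast path syncing", key)
	syncServiceDone <- key // signal completion, unblocking the slow path
}

This forces exactly the interleaving the test needs: the slow node sync cannot finish before the racing service sync has run, so the assertion that node3 ends up in the ensured host set is deterministic rather than timing-dependent.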


@@ -73,27 +73,29 @@ type Cloud struct {
ErrShutdownByProviderID error
MetadataErr error
-Calls []string
-Addresses []v1.NodeAddress
-addressesMux sync.Mutex
-ExtID map[types.NodeName]string
-ExtIDErr map[types.NodeName]error
-InstanceTypes map[types.NodeName]string
-Machines []types.NodeName
-NodeResources *v1.NodeResources
-ClusterList []string
-MasterName string
-ExternalIP net.IP
-Balancers map[string]Balancer
-UpdateCalls []UpdateBalancerCall
-EnsureCalls []UpdateBalancerCall
-EnsureCallCb func(UpdateBalancerCall)
-UpdateCallCb func(UpdateBalancerCall)
-RouteMap map[string]*Route
-Lock sync.Mutex
-Provider string
-ProviderID map[types.NodeName]string
-addCallLock sync.Mutex
+Calls []string
+Addresses []v1.NodeAddress
+addressesMux sync.Mutex
+ExtID map[types.NodeName]string
+ExtIDErr map[types.NodeName]error
+InstanceTypes map[types.NodeName]string
+Machines []types.NodeName
+NodeResources *v1.NodeResources
+ClusterList []string
+MasterName string
+ExternalIP net.IP
+Balancers map[string]Balancer
+updateCallLock sync.Mutex
+UpdateCalls []UpdateBalancerCall
+ensureCallLock sync.Mutex
+EnsureCalls []UpdateBalancerCall
+EnsureCallCb func(UpdateBalancerCall)
+UpdateCallCb func(UpdateBalancerCall)
+RouteMap map[string]*Route
+Lock sync.Mutex
+Provider string
+ProviderID map[types.NodeName]string
+addCallLock sync.Mutex
cloudprovider.Zone
VolumeLabelMap map[string]map[string]string
@@ -203,8 +205,8 @@ func (f *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, ser
// EnsureLoadBalancer is a test-spy implementation of LoadBalancer.EnsureLoadBalancer.
// It adds an entry "create" into the internal method call record.
func (f *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
+f.markEnsureCall(service, nodes)
f.addCall("create")
-f.markEnsureCall(service, nodes)
if f.Balancers == nil {
f.Balancers = make(map[string]Balancer)
}
@@ -227,8 +229,8 @@ func (f *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, serv
}
func (f *Cloud) markUpdateCall(service *v1.Service, nodes []*v1.Node) {
-f.Lock.Lock()
-defer f.Lock.Unlock()
+f.updateCallLock.Lock()
+defer f.updateCallLock.Unlock()
update := UpdateBalancerCall{service, nodes}
f.UpdateCalls = append(f.UpdateCalls, update)
if f.UpdateCallCb != nil {
@@ -237,8 +239,8 @@ func (f *Cloud) markUpdateCall(service *v1.Service, nodes []*v1.Node) {
}
func (f *Cloud) markEnsureCall(service *v1.Service, nodes []*v1.Node) {
-f.Lock.Lock()
-defer f.Lock.Unlock()
+f.ensureCallLock.Lock()
+defer f.ensureCallLock.Unlock()
update := UpdateBalancerCall{service, nodes}
f.EnsureCalls = append(f.EnsureCalls, update)
if f.EnsureCallCb != nil {
@@ -249,8 +251,8 @@ func (f *Cloud) markEnsureCall(service *v1.Service, nodes []*v1.Node) {
// UpdateLoadBalancer is a test-spy implementation of LoadBalancer.UpdateLoadBalancer.
// It adds an entry "update" into the internal method call record.
func (f *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
+f.markUpdateCall(service, nodes)
f.addCall("update")
-f.markUpdateCall(service, nodes)
return f.Err
}
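
The lock split in markUpdateCall and markEnsureCall is what makes the blocking callbacks above safe. Previously both spies serialized on the shared f.Lock, so a UpdateCallCb parked on <-syncServiceDone would hold f.Lock and keep markEnsureCall, the only code path that can signal syncServiceDone, from ever running: a deadlock. Giving each recorded slice its own mutex breaks that cycle. A hedged sketch of the pattern follows; Spy, markUpdate, markEnsure, and the cb parameter are stand-ins, not the real fakecloud.Cloud API.

package main

import "sync"

type call struct{ name string }

type Spy struct {
	updateMu    sync.Mutex // guards UpdateCalls only
	UpdateCalls []call

	ensureMu    sync.Mutex // guards EnsureCalls only
	EnsureCalls []call
}

// markUpdate may block inside the caller-supplied callback without ever
// holding the lock that markEnsure needs.
func (s *Spy) markUpdate(c call, cb func(call)) {
	s.updateMu.Lock()
	defer s.updateMu.Unlock()
	s.UpdateCalls = append(s.UpdateCalls, c)
	if cb != nil {
		cb(c)
	}
}

func (s *Spy) markEnsure(c call, cb func(call)) {
	s.ensureMu.Lock()
	defer s.ensureMu.Unlock()
	s.EnsureCalls = append(s.EnsureCalls, c)
	if cb != nil {
		cb(c)
	}
}

func main() {
	s := &Spy{}
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Blocks inside its callback; would deadlock the markEnsure call
		// below if both marks shared a single mutex.
		s.markUpdate(call{"update"}, func(call) { <-done })
	}()
	s.markEnsure(call{"create"}, func(call) { close(done) })
	wg.Wait()
}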