Refine test case

commit f9ab24bf48
parent 9ae1fc366b
@@ -1386,13 +1386,9 @@ func TestNeedsCleanup(t *testing.T) {
 // make sure that the slow node sync never removes the Node from LB set because it
 // has stale data.
 func TestSlowNodeSync(t *testing.T) {
-    stopCh, updateCallCh := make(chan struct{}), make(chan fakecloud.UpdateBalancerCall)
+    stopCh, syncServiceDone, syncService := make(chan struct{}), make(chan string), make(chan string)
     defer close(stopCh)
-    defer close(updateCallCh)
+    defer close(syncService)
 
-    duration := time.Millisecond
-
-    syncService := make(chan string)
-
     node1 := makeNode(tweakName("node1"))
     node2 := makeNode(tweakName("node2"))
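The refined test drops the updateCallCh collector and the time.Millisecond pacing in favor of two rendezvous channels: syncService carries the key of the service the test must sync concurrently, and syncServiceDone signals that this sync has finished. A minimal standalone sketch of that handshake (the channel payload here is made up):

package main

import "fmt"

func main() {
    syncService := make(chan string)     // carries the impacted service key
    syncServiceDone := make(chan string) // signals that its sync completed

    go func() {
        // Stand-in for the slow node-sync side: announce the impacted
        // service, then block until the concurrent sync has finished.
        syncService <- "ns/impacted-service"
        <-syncServiceDone
    }()

    key := <-syncService        // the test learns which service to sync
    fmt.Println("syncing", key) // stand-in for controller.syncService(key)
    syncServiceDone <- key      // unblocks the parked node sync
}

Because both channels are unbuffered, the slow side cannot proceed until the fast side has actually completed its step: no timers, no sleeps.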
@@ -1405,14 +1401,16 @@ func TestSlowNodeSync(t *testing.T) {
     serviceKeys := sets.New(sKey1, sKey2)
 
     controller, cloudProvider, kubeClient := newController(stopCh, node1, node2, service1, service2)
-    cloudProvider.RequestDelay = 4 * duration
     cloudProvider.UpdateCallCb = func(update fakecloud.UpdateBalancerCall) {
-        updateCallCh <- update
+        key, _ := cache.MetaNamespaceKeyFunc(update.Service)
+        impactedService := serviceKeys.Difference(sets.New(key)).UnsortedList()[0]
+        syncService <- impactedService
+        <-syncServiceDone
     }
     cloudProvider.EnsureCallCb = func(update fakecloud.UpdateBalancerCall) {
-        updateCallCh <- update
+        syncServiceDone <- update.Service.Name
     }
 
     // Two update calls are expected. This is because this test calls
     // controller.syncNodes once with two existing services, but with one
     // controller.syncService while that is happening. The end result is
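The densest line above is the set difference: with exactly two service keys in play, removing the key the update call was made for leaves the one service impacted by the concurrent sync. A runnable sketch, assuming a recent k8s.io/apimachinery (the generic sets.New) is on the module path and using made-up keys:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

func main() {
    serviceKeys := sets.New("ns/service1", "ns/service2")

    // Key of the service the update call was made for.
    key := "ns/service1"

    // Difference removes that key; the single remaining element is the
    // service impacted by the concurrent sync.
    impactedService := serviceKeys.Difference(sets.New(key)).UnsortedList()[0]
    fmt.Println(impactedService) // ns/service2
}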
@@ -1428,6 +1426,8 @@ func TestSlowNodeSync(t *testing.T) {
     expectedUpdateCalls := []fakecloud.UpdateBalancerCall{
         // First update call for first service from controller.syncNodes
         {Service: service1, Hosts: []*v1.Node{node1, node2}},
+    }
+    expectedEnsureCalls := []fakecloud.UpdateBalancerCall{
         // Second update call for impacted service from controller.syncService
         {Service: service2, Hosts: []*v1.Node{node1, node2, node3}},
     }
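Reading the last two hunks together: UpdateCallCb fires for the first service during the slow syncNodes, names the other service as impacted, and parks until EnsureCallCb, reached via the concurrent controller.syncService, reports done. A self-contained sketch of that choreography, using a hypothetical fakeCloud stand-in for the callback hooks:

package main

import "fmt"

// fakeCloud is a hypothetical stand-in for the fake provider's callback
// hooks; only the choreography is the point here.
type fakeCloud struct {
    UpdateCallCb func(service string)
    EnsureCallCb func(service string)
}

func main() {
    syncService := make(chan string)
    syncServiceDone := make(chan string)

    cloud := &fakeCloud{}
    cloud.UpdateCallCb = func(service string) {
        syncService <- "service2" // name the impacted service
        <-syncServiceDone         // park the node sync here
    }
    cloud.EnsureCallCb = func(service string) {
        syncServiceDone <- service // release the parked node sync
    }

    // Slow node-sync path: updates service1, then blocks in the callback.
    go cloud.UpdateCallCb("service1")

    // Concurrent service-sync path: syncs the impacted service, whose
    // ensure call unblocks the node sync.
    impacted := <-syncService
    fmt.Println("concurrently syncing", impacted)
    cloud.EnsureCallCb(impacted)
}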
@@ -1439,51 +1439,34 @@ func TestSlowNodeSync(t *testing.T) {
         controller.syncNodes(context.TODO(), 1)
     }()
 
-    wg.Add(1)
-    go func() {
-        defer wg.Done()
-        updateCallIdx := 0
-        impactedService := ""
-        for update := range updateCallCh {
-            // Validate that the call hosts are what we expect
-            if !compareHostSets(t, expectedUpdateCalls[updateCallIdx].Hosts, update.Hosts) {
-                t.Errorf("unexpected updated hosts for update: %v, expected: %v, got: %v", updateCallIdx, expectedUpdateCalls[updateCallIdx].Hosts, update.Hosts)
-                return
-            }
-            key, _ := cache.MetaNamespaceKeyFunc(update.Service)
-            // For call 0: determine impacted service
-            if updateCallIdx == 0 {
-                impactedService = serviceKeys.Difference(sets.New(key)).UnsortedList()[0]
-                syncService <- impactedService
-            }
-            // For calls > 0: validate the impacted service
-            if updateCallIdx > 0 {
-                if key != impactedService {
-                    t.Error("unexpected impacted service")
-                    return
-                }
-            }
-            if updateCallIdx == len(expectedUpdateCalls)-1 {
-                return
-            }
-            updateCallIdx++
-        }
-    }()
-
     key := <-syncService
     if _, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node3, metav1.CreateOptions{}); err != nil {
         t.Fatalf("error creating node3, err: %v", err)
     }
 
-    // Give it some time to update the informer cache, needs to be lower than
-    // cloudProvider.RequestDelay
-    time.Sleep(duration)
     // Sync the service
     if err := controller.syncService(context.TODO(), key); err != nil {
-        t.Errorf("unexpected service sync error, err: %v", err)
+        t.Fatalf("unexpected service sync error, err: %v", err)
     }
 
     wg.Wait()
 
+    if len(expectedUpdateCalls) != len(cloudProvider.UpdateCalls) {
+        t.Fatalf("unexpected amount of update calls, expected: %v, got: %v", len(expectedUpdateCalls), len(cloudProvider.UpdateCalls))
+    }
+    for idx, update := range cloudProvider.UpdateCalls {
+        if !compareHostSets(t, expectedUpdateCalls[idx].Hosts, update.Hosts) {
+            t.Fatalf("unexpected updated hosts for update: %v, expected: %v, got: %v", idx, expectedUpdateCalls[idx].Hosts, update.Hosts)
+        }
+    }
+    if len(expectedEnsureCalls) != len(cloudProvider.EnsureCalls) {
+        t.Fatalf("unexpected amount of ensure calls, expected: %v, got: %v", len(expectedEnsureCalls), len(cloudProvider.EnsureCalls))
+    }
+    for idx, ensure := range cloudProvider.EnsureCalls {
+        if !compareHostSets(t, expectedEnsureCalls[idx].Hosts, ensure.Hosts) {
+            t.Fatalf("unexpected updated hosts for ensure: %v, expected: %v, got: %v", idx, expectedEnsureCalls[idx].Hosts, ensure.Hosts)
+        }
+    }
 }
 
 func TestNeedsUpdate(t *testing.T) {
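With the deterministic handshake in place, the in-flight validation goroutine collapses into plain assertions after wg.Wait(). The compareHostSets helper is referenced but not part of this diff; a plausible, purely hypothetical reconstruction compares the host lists order-insensitively by node name (the real helper also takes a *testing.T as its first argument):

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/sets"
)

// compareHostSets reports whether two host lists contain the same node
// names, ignoring order. Hypothetical sketch only.
func compareHostSets(expected, got []*v1.Node) bool {
    e, g := sets.New[string](), sets.New[string]()
    for _, n := range expected {
        e.Insert(n.Name)
    }
    for _, n := range got {
        g.Insert(n.Name)
    }
    return e.Equal(g)
}

func main() {
    n1 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}}
    n2 := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}}
    fmt.Println(compareHostSets([]*v1.Node{n1, n2}, []*v1.Node{n2, n1})) // true
}

The remaining hunks touch the fake cloud provider that backs this test.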
@@ -73,27 +73,29 @@ type Cloud struct {
     ErrShutdownByProviderID error
     MetadataErr             error
 
     Calls          []string
     Addresses      []v1.NodeAddress
     addressesMux   sync.Mutex
     ExtID          map[types.NodeName]string
     ExtIDErr       map[types.NodeName]error
     InstanceTypes  map[types.NodeName]string
     Machines       []types.NodeName
     NodeResources  *v1.NodeResources
     ClusterList    []string
     MasterName     string
     ExternalIP     net.IP
     Balancers      map[string]Balancer
+    updateCallLock sync.Mutex
     UpdateCalls    []UpdateBalancerCall
+    ensureCallLock sync.Mutex
     EnsureCalls    []UpdateBalancerCall
     EnsureCallCb   func(UpdateBalancerCall)
     UpdateCallCb   func(UpdateBalancerCall)
     RouteMap       map[string]*Route
     Lock           sync.Mutex
     Provider       string
     ProviderID     map[types.NodeName]string
     addCallLock    sync.Mutex
     cloudprovider.Zone
     VolumeLabelMap map[string]map[string]string
 
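A likely motivation for the dedicated locks (hedged; the commit message doesn't say): the callbacks can now block on channels while a call is being recorded, so guarding UpdateCalls and EnsureCalls with independent mutexes keeps a parked update callback from serializing against ensure-call recording under a single shared Lock. The pattern in isolation, with a made-up spy type:

package main

import (
    "fmt"
    "sync"
)

// spy mirrors the shape of the change: one mutex per recorded slice
// instead of a single shared lock.
type spy struct {
    updateCallLock sync.Mutex
    UpdateCalls    []string

    ensureCallLock sync.Mutex
    EnsureCalls    []string
}

func (s *spy) markUpdateCall(v string) {
    s.updateCallLock.Lock()
    defer s.updateCallLock.Unlock()
    s.UpdateCalls = append(s.UpdateCalls, v)
}

func (s *spy) markEnsureCall(v string) {
    s.ensureCallLock.Lock()
    defer s.ensureCallLock.Unlock()
    s.EnsureCalls = append(s.EnsureCalls, v)
}

func main() {
    s := &spy{}
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            s.markUpdateCall(fmt.Sprintf("update-%d", i))
            s.markEnsureCall(fmt.Sprintf("ensure-%d", i))
        }(i)
    }
    wg.Wait()
    fmt.Println(len(s.UpdateCalls), len(s.EnsureCalls)) // 4 4
}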
@@ -203,8 +205,8 @@ func (f *Cloud) GetLoadBalancerName(ctx context.Context, clusterName string, ser
 // EnsureLoadBalancer is a test-spy implementation of LoadBalancer.EnsureLoadBalancer.
 // It adds an entry "create" into the internal method call record.
 func (f *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
-    f.markEnsureCall(service, nodes)
     f.addCall("create")
+    f.markEnsureCall(service, nodes)
     if f.Balancers == nil {
         f.Balancers = make(map[string]Balancer)
     }
@@ -227,8 +229,8 @@ func (f *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, serv
 }
 
 func (f *Cloud) markUpdateCall(service *v1.Service, nodes []*v1.Node) {
-    f.Lock.Lock()
-    defer f.Lock.Unlock()
+    f.updateCallLock.Lock()
+    defer f.updateCallLock.Unlock()
     update := UpdateBalancerCall{service, nodes}
     f.UpdateCalls = append(f.UpdateCalls, update)
     if f.UpdateCallCb != nil {
@@ -237,8 +239,8 @@ func (f *Cloud) markUpdateCall(service *v1.Service, nodes []*v1.Node) {
 }
 
 func (f *Cloud) markEnsureCall(service *v1.Service, nodes []*v1.Node) {
-    f.Lock.Lock()
-    defer f.Lock.Unlock()
+    f.ensureCallLock.Lock()
+    defer f.ensureCallLock.Unlock()
     update := UpdateBalancerCall{service, nodes}
     f.EnsureCalls = append(f.EnsureCalls, update)
     if f.EnsureCallCb != nil {
@@ -249,8 +251,8 @@ func (f *Cloud) markEnsureCall(service *v1.Service, nodes []*v1.Node) {
 // UpdateLoadBalancer is a test-spy implementation of LoadBalancer.UpdateLoadBalancer.
 // It adds an entry "update" into the internal method call record.
 func (f *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error {
-    f.markUpdateCall(service, nodes)
     f.addCall("update")
+    f.markUpdateCall(service, nodes)
     return f.Err
 }
 
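Both spies now record the call name before invoking the mark function. A plausible reason (again hedged; the commit doesn't state it): the mark functions run the user callbacks, which can now block, so recording first keeps Calls accurate even while a callback is parked. In miniature:

package main

import "fmt"

func main() {
    var calls []string
    recorded := make(chan struct{})
    release := make(chan struct{})

    // Stand-in for a callback that parks on a channel, as UpdateCallCb
    // now does in the refined test.
    blockingCb := func() { <-release }

    go func() {
        calls = append(calls, "update") // addCall runs first...
        close(recorded)
        blockingCb() // ...so the record exists while the callback is parked
    }()

    <-recorded
    fmt.Println(calls) // [update], even though the callback is still blocked
    close(release)
}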