diff --git a/pkg/controller/daemon/BUILD b/pkg/controller/daemon/BUILD
index 6fff75a7449..04961a81e83 100644
--- a/pkg/controller/daemon/BUILD
+++ b/pkg/controller/daemon/BUILD
@@ -55,6 +55,7 @@ go_test(
     name = "go_default_test",
     srcs = [
         "daemon_controller_test.go",
+        "init_test.go",
         "update_test.go",
     ],
    embed = [":go_default_library"],
@@ -74,6 +75,7 @@ go_test(
        "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
+       "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
@@ -82,6 +84,7 @@ go_test(
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
+       "//vendor/k8s.io/klog/v2:go_default_library",
    ],
 )
diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go
index 48a56c073c5..f8f2581d9ce 100644
--- a/pkg/controller/daemon/daemon_controller.go
+++ b/pkg/controller/daemon/daemon_controller.go
@@ -163,17 +163,8 @@ func NewDaemonSetsController(
 	}
 
 	daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc: func(obj interface{}) {
-			ds := obj.(*apps.DaemonSet)
-			klog.V(4).Infof("Adding daemon set %s", ds.Name)
-			dsc.enqueueDaemonSet(ds)
-		},
-		UpdateFunc: func(old, cur interface{}) {
-			oldDS := old.(*apps.DaemonSet)
-			curDS := cur.(*apps.DaemonSet)
-			klog.V(4).Infof("Updating daemon set %s", oldDS.Name)
-			dsc.enqueueDaemonSet(curDS)
-		},
+		AddFunc:    dsc.addDaemonset,
+		UpdateFunc: dsc.updateDaemonset,
 		DeleteFunc: dsc.deleteDaemonset,
 	})
 	dsc.dsLister = daemonSetInformer.Lister()
@@ -231,22 +222,59 @@ func indexByPodNodeName(obj interface{}) ([]string, error) {
 	return []string{pod.Spec.NodeName}, nil
 }
 
+func (dsc *DaemonSetsController) addDaemonset(obj interface{}) {
+	ds := obj.(*apps.DaemonSet)
+	klog.V(4).Infof("Adding daemon set %s", ds.Name)
+	dsc.enqueueDaemonSet(ds)
+}
+
+func (dsc *DaemonSetsController) updateDaemonset(old, cur interface{}) {
+	oldDS := old.(*apps.DaemonSet)
+	curDS := cur.(*apps.DaemonSet)
+
+	// TODO: make a KEP and fix informers to always call the delete event handler on re-create
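+	// Until that happens, detect a re-create here by comparing UIDs: a
+	// DaemonSet deleted and re-created under the same name arrives as a
+	// single update event whose old and new objects differ in UID.
+	// Synthesize the missed delete event so per-key state, such as the
+	// expectations, is cleaned up before the new object is enqueued.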
+	if curDS.UID != oldDS.UID {
+		key, err := controller.KeyFunc(oldDS)
+		if err != nil {
+			utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", oldDS, err))
+			return
+		}
+		dsc.deleteDaemonset(cache.DeletedFinalStateUnknown{
+			Key: key,
+			Obj: oldDS,
+		})
+	}
+
+	klog.V(4).Infof("Updating daemon set %s", oldDS.Name)
+	dsc.enqueueDaemonSet(curDS)
+}
+
 func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
 	ds, ok := obj.(*apps.DaemonSet)
 	if !ok {
 		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 		if !ok {
-			utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
+			utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj))
 			return
 		}
 		ds, ok = tombstone.Obj.(*apps.DaemonSet)
 		if !ok {
-			utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a DaemonSet %#v", obj))
+			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a DaemonSet %#v", obj))
 			return
 		}
 	}
 	klog.V(4).Infof("Deleting daemon set %s", ds.Name)
-	dsc.enqueueDaemonSet(ds)
+
+	key, err := controller.KeyFunc(ds)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", ds, err))
+		return
+	}
+
+	// Delete expectations for the DaemonSet so if we create a new one with the same name it starts clean
+	dsc.expectations.DeleteExpectations(key)
+
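+	// The key is already needed for DeleteExpectations above, so enqueue it
+	// directly instead of recomputing it via enqueueDaemonSet.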
+	dsc.queue.Add(key)
 }
 
 // Run begins watching and syncing daemon sets.
diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go
index da90a3b3d8c..040b05bdebd 100644
--- a/pkg/controller/daemon/daemon_controller_test.go
+++ b/pkg/controller/daemon/daemon_controller_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package daemon
 
 import (
+	"context"
 	"fmt"
 	"reflect"
 	"sort"
@@ -33,6 +34,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apiserver/pkg/storage/names"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
@@ -41,6 +43,7 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/scheduling"
@@ -55,6 +58,7 @@ var (
 	simpleNodeLabel     = map[string]string{"color": "blue", "speed": "fast"}
 	simpleNodeLabel2    = map[string]string{"color": "red", "speed": "fast"}
 	alwaysReady         = func() bool { return true }
+	informerSyncTimeout = 30 * time.Second
 )
 
 var (
@@ -369,41 +373,52 @@ func resetCounters(manager *daemonSetsController) {
 	manager.fakeRecorder = fakeRecorder
 }
 
-func validateSyncDaemonSets(t *testing.T, manager *daemonSetsController, fakePodControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) {
+func validateSyncDaemonSets(manager *daemonSetsController, fakePodControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) error {
 	if len(fakePodControl.Templates) != expectedCreates {
-		t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates))
+		return fmt.Errorf("unexpected number of creates; expected %d, saw %d", expectedCreates, len(fakePodControl.Templates))
 	}
 	if len(fakePodControl.DeletePodName) != expectedDeletes {
-		t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", expectedDeletes, len(fakePodControl.DeletePodName))
+		return fmt.Errorf("unexpected number of deletes; expected %d, saw %d", expectedDeletes, len(fakePodControl.DeletePodName))
 	}
 	if len(manager.fakeRecorder.Events) != expectedEvents {
-		t.Errorf("Unexpected number of events. Expected %d, saw %d\n", expectedEvents, len(manager.fakeRecorder.Events))
+		return fmt.Errorf("unexpected number of events; expected %d, saw %d", expectedEvents, len(manager.fakeRecorder.Events))
 	}
 	// Every Pod created should have a ControllerRef.
 	if got, want := len(fakePodControl.ControllerRefs), expectedCreates; got != want {
-		t.Errorf("len(ControllerRefs) = %v, want %v", got, want)
+		return fmt.Errorf("len(ControllerRefs) = %v, want %v", got, want)
 	}
 	// Make sure the ControllerRefs are correct.
 	for _, controllerRef := range fakePodControl.ControllerRefs {
 		if got, want := controllerRef.APIVersion, "apps/v1"; got != want {
-			t.Errorf("controllerRef.APIVersion = %q, want %q", got, want)
+			return fmt.Errorf("controllerRef.APIVersion = %q, want %q", got, want)
 		}
 		if got, want := controllerRef.Kind, "DaemonSet"; got != want {
-			t.Errorf("controllerRef.Kind = %q, want %q", got, want)
+			return fmt.Errorf("controllerRef.Kind = %q, want %q", got, want)
 		}
 		if controllerRef.Controller == nil || *controllerRef.Controller != true {
-			t.Errorf("controllerRef.Controller is not set to true")
+			return fmt.Errorf("controllerRef.Controller is not set to true")
 		}
 	}
+	return nil
 }
 
-func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) {
+func syncAndValidateDaemonSets(manager *daemonSetsController, ds *apps.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) error {
 	key, err := controller.KeyFunc(ds)
 	if err != nil {
-		t.Errorf("Could not get key for daemon.")
+		return fmt.Errorf("could not get key for daemon: %v", err)
 	}
-	manager.syncHandler(key)
-	validateSyncDaemonSets(t, manager, podControl, expectedCreates, expectedDeletes, expectedEvents)
+
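+	// syncHandler can return an error in tests that exercise failure paths
+	// (e.g. pod create errors); log it and still validate the observable
+	// side effects below.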
+	err = manager.syncHandler(key)
+	if err != nil {
+		klog.Warning(err)
+	}
+
+	err = validateSyncDaemonSets(manager, podControl, expectedCreates, expectedDeletes, expectedEvents)
+	if err != nil {
+		return err
+	}
+
+	return nil
 }
 
 // clearExpectations copies the FakePodControl to PodStore and clears the create and delete expectations.
@@ -436,6 +451,185 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
 	}
 }
 
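+// TestExpectationsOnRecreate verifies that deleting a DaemonSet clears its
+// expectations, so a DaemonSet re-created with the same name (and thus the
+// same key, but a different UID) syncs from a clean state instead of
+// waiting on the deleted object's unfulfilled expectations.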
+func TestExpectationsOnRecreate(t *testing.T) {
+	client := fake.NewSimpleClientset()
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+
+	f := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
+	dsc, err := NewDaemonSetsController(
+		f.Apps().V1().DaemonSets(),
+		f.Apps().V1().ControllerRevisions(),
+		f.Core().V1().Pods(),
+		f.Core().V1().Nodes(),
+		client,
+		flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, clock.NewFakeClock(time.Now())),
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	fakeRecorder := record.NewFakeRecorder(100)
+	dsc.eventRecorder = fakeRecorder
+
+	fakePodControl := newFakePodControl()
+	fakePodControl.podStore = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) // fake store that we don't use
+	fakePodControl.expectations = controller.NewControllerExpectations()                 // fake expectations that we don't use
+	dsc.podControl = fakePodControl
+
+	manager := &daemonSetsController{
+		DaemonSetsController: dsc,
+		fakeRecorder:         fakeRecorder,
+	}
+
+	_, err = client.CoreV1().Nodes().Create(context.Background(), newNode("master-0", nil), metav1.CreateOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	f.Start(stopCh)
+
+	cacheCtx, cancelCacheCtx := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancelCacheCtx()
+	ok := cache.WaitForNamedCacheSync(
+		"test dsc",
+		cacheCtx.Done(), f.Core().V1().Nodes().Informer().HasSynced,
+	)
+	if !ok {
+		t.Fatal("caches failed to sync")
+	}
+
+	if dsc.queue.Len() != 0 {
+		t.Fatal("Unexpected item in the queue")
+	}
+
+	oldDS := newDaemonSet("test")
+	oldDS, err = client.AppsV1().DaemonSets(oldDS.Namespace).Create(context.Background(), oldDS, metav1.CreateOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
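+	// The informer delivers events asynchronously, so poll until the
+	// controller has observed the create event and enqueued the key.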
+	err = wait.PollImmediate(100*time.Millisecond, informerSyncTimeout, func() (bool, error) {
+		klog.V(8).Infof("Waiting for queue to have 1 item, currently has: %d", dsc.queue.Len())
+		return dsc.queue.Len() == 1, nil
+	})
+	if err != nil {
+		t.Fatalf("initial DS didn't result in new item in the queue: %v", err)
+	}
+
+	ok = dsc.processNextWorkItem()
+	if !ok {
+		t.Fatal("queue is shutting down")
+	}
+
+	err = validateSyncDaemonSets(manager, fakePodControl, 1, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
+	fakePodControl.Clear()
+
+	oldDSKey, err := controller.KeyFunc(oldDS)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	dsExp, exists, err := dsc.expectations.GetExpectations(oldDSKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Errorf("No expectations found for DaemonSet %q", oldDSKey)
+	}
+	if dsExp.Fulfilled() {
+		t.Errorf("There should be an unfulfilled expectation for creating new pods for DaemonSet %q", oldDSKey)
+	}
+
+	if dsc.queue.Len() != 0 {
+		t.Fatal("Unexpected item in the queue")
+	}
+
+	err = client.AppsV1().DaemonSets(oldDS.Namespace).Delete(context.Background(), oldDS.Name, metav1.DeleteOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = wait.PollImmediate(100*time.Millisecond, informerSyncTimeout, func() (bool, error) {
+		klog.V(8).Infof("Waiting for queue to have 1 item, currently has: %d", dsc.queue.Len())
+		return dsc.queue.Len() == 1, nil
+	})
+	if err != nil {
+		t.Fatalf("Deleting DS didn't result in new item in the queue: %v", err)
+	}
+
+	_, exists, err = dsc.expectations.GetExpectations(oldDSKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if exists {
+		t.Errorf("There should be no expectations for DaemonSet %q after it was deleted", oldDSKey)
+	}
+
+	// Skip the sync for the delete event so we only see the new DS in the next sync.
+	key, quit := dsc.queue.Get()
+	if quit {
+		t.Fatal("Queue is shutting down!")
+	}
+	dsc.queue.Done(key)
+	if key != oldDSKey {
+		t.Fatal("Keys should be equal!")
+	}
+
+	if dsc.queue.Len() != 0 {
+		t.Fatal("Unexpected item in the queue")
+	}
+
+	newDS := oldDS.DeepCopy()
+	newDS.UID = uuid.NewUUID()
+	newDS, err = client.AppsV1().DaemonSets(newDS.Namespace).Create(context.Background(), newDS, metav1.CreateOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Sanity check
+	if newDS.UID == oldDS.UID {
+		t.Fatal("New DS has the same UID as the old one!")
+	}
+
+	err = wait.PollImmediate(100*time.Millisecond, informerSyncTimeout, func() (bool, error) {
+		klog.V(8).Infof("Waiting for queue to have 1 item, currently has: %d", dsc.queue.Len())
+		return dsc.queue.Len() == 1, nil
+	})
+	if err != nil {
+		t.Fatalf("Re-creating DS didn't result in new item in the queue: %v", err)
+	}
+
+	ok = dsc.processNextWorkItem()
+	if !ok {
+		t.Fatal("Queue is shutting down!")
+	}
+
+	newDSKey, err := controller.KeyFunc(newDS)
+	if err != nil {
+		t.Fatal(err)
+	}
+	dsExp, exists, err = dsc.expectations.GetExpectations(newDSKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Errorf("No expectations found for DaemonSet %q", newDSKey)
+	}
+	if dsExp.Fulfilled() {
+		t.Errorf("There should be an unfulfilled expectation for creating new pods for DaemonSet %q", newDSKey)
+	}
+
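+	// The old expectations were dropped on delete, so this sync must have
+	// created a pod for the re-created DaemonSet as well.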
+	err = validateSyncDaemonSets(manager, fakePodControl, 1, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
+	fakePodControl.Clear()
+}
+
 func markPodsReady(store cache.Store) {
 	// mark pods as ready
 	for _, obj := range store.List() {
@@ -459,8 +653,15 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
 		addNodes(manager.nodeStore, 0, 5, nil)
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -475,8 +676,15 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
 		addNodes(manager.nodeStore, 0, nodeNum, nil)
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, nodeNum, 0, 0)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		err = syncAndValidateDaemonSets(manager, ds, podControl, nodeNum, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 
 		if len(podControl.podIDMap) != nodeNum {
 			t.Fatalf("failed to create pods for DaemonSet")
@@ -549,8 +757,16 @@ func TestSimpleDaemonSetPodCreateErrors(t *testing.T) {
 		}
 		podControl.FakePodControl.CreateLimit = 10
 		addNodes(manager.nodeStore, 0, podControl.FakePodControl.CreateLimit*10, nil)
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		err = syncAndValidateDaemonSets(manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
+
 		expectedLimit := 0
 		for pass := uint8(0); expectedLimit <= podControl.FakePodControl.CreateLimit; pass++ {
 			expectedLimit += controller.SlowStartInitialBatchSize << pass
@@ -573,8 +789,16 @@ func TestDaemonSetPodCreateExpectationsError(t *testing.T) {
 		podControl.FakePodControl.CreateLimit = 10
 		creationExpectations := 100
 		addNodes(manager.nodeStore, 0, 100, nil)
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		err = syncAndValidateDaemonSets(manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
+
 		dsKey, err := controller.KeyFunc(ds)
 		if err != nil {
 			t.Fatalf("error get DaemonSets controller key: %v", err)
@@ -608,7 +832,10 @@ func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
 
 		manager.dsStore.Add(ds)
 		addNodes(manager.nodeStore, 0, 5, nil)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 
 		// Make sure the single sync() updated Status already for the change made
 		// during the manage() phase.
@@ -627,8 +854,16 @@ func TestNoNodesDoesNothing(t *testing.T) {
 		}
 		ds := newDaemonSet("foo")
 		ds.Spec.UpdateStrategy = *strategy
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
+
 	}
 }
 
@@ -642,9 +877,19 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
-		manager.nodeStore.Add(newNode("only-node", nil))
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+		err = manager.nodeStore.Add(newNode("only-node", nil))
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -661,9 +906,20 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
 		node.Status.Conditions = []v1.NodeCondition{
 			{Type: v1.NodeReady, Status: v1.ConditionFalse},
 		}
-		manager.nodeStore.Add(node)
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -716,16 +972,31 @@ func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T)
 		}
 		node := newNode("too-much-mem", nil)
 		node.Status.Allocatable = allocatableResources("100M", "200m")
-		manager.nodeStore.Add(node)
-		manager.podStore.Add(&v1.Pod{
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.podStore.Add(&v1.Pod{
 			Spec: podSpec,
 		})
-		manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 		switch strategy.Type {
 		case apps.OnDeleteDaemonSetStrategyType:
-			syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+			err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+			if err != nil {
+				t.Error(err)
+			}
 		case apps.RollingUpdateDaemonSetStrategyType:
-			syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+			err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+			if err != nil {
+				t.Error(err)
+			}
 		default:
 			t.Fatalf("unexpected UpdateStrategy %+v", strategy)
 		}
@@ -746,10 +1017,22 @@ func TestInsufficientCapacityNodeSufficientCapacityWithNodeLabelDaemonLaunchPod(
 		node1.Status.Allocatable = allocatableResources("10M", "20m")
 		node2 := newNode("enough-resource", simpleNodeLabel)
 		node2.Status.Allocatable = allocatableResources("100M", "200m")
-		manager.nodeStore.Add(node1)
-		manager.nodeStore.Add(node2)
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+		err = manager.nodeStore.Add(node1)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.nodeStore.Add(node2)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 		// we do not expect any event for insufficient free resource
 		if len(manager.fakeRecorder.Events) != 0 {
 			t.Fatalf("unexpected events, got %v, expected %v: %+v", len(manager.fakeRecorder.Events), 0, manager.fakeRecorder.Events)
@@ -770,10 +1053,19 @@ func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) {
 		node.Status.Conditions = []v1.NodeCondition{
 			{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue},
 		}
-		manager.nodeStore.Add(node)
-		manager.dsStore.Add(ds)
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -792,12 +1084,24 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
 		}
 		node := newNode("not-too-much-mem", nil)
 		node.Status.Allocatable = allocatableResources("200M", "200m")
-		manager.nodeStore.Add(node)
-		manager.podStore.Add(&v1.Pod{
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.podStore.Add(&v1.Pod{
 			Spec: podSpec,
 		})
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -817,13 +1121,22 @@ func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {
 		// Lister (cache) says it's NOT deleted.
 		ds2 := *ds
 		ds2.DeletionTimestamp = nil
-		manager.dsStore.Add(&ds2)
+		err = manager.dsStore.Add(&ds2)
+		if err != nil {
+			t.Fatal(err)
+		}
 
 		// The existence of a matching orphan should block all actions in this state.
 		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil)
-		manager.podStore.Add(pod)
+		err = manager.podStore.Add(pod)
+		if err != nil {
+			t.Fatal(err)
+		}
 
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -846,14 +1159,26 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
 		node := newNode("port-conflict", nil)
-		manager.nodeStore.Add(node)
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
 		ds := newDaemonSet("foo")
 		ds.Spec.UpdateStrategy = *strategy
 		ds.Spec.Template.Spec = podSpec
-		manager.dsStore.Add(ds)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 		pod := newPod(ds.Name+"-", node.Name, simpleDaemonSetLabel, ds)
-		manager.podStore.Add(pod)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		err = manager.podStore.Add(pod)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -884,12 +1209,24 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
 		node := newNode("no-port-conflict", nil)
-		manager.nodeStore.Add(node)
-		manager.podStore.Add(&v1.Pod{
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.podStore.Add(&v1.Pod{
 			Spec: podSpec1,
 		})
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -917,9 +1254,12 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 	if err != nil {
 		t.Fatalf("error creating DaemonSets controller: %v", err)
 	}
-	manager.nodeStore.Add(newNode("node1", nil))
+	err = manager.nodeStore.Add(newNode("node1", nil))
+	if err != nil {
+		t.Fatal(err)
+	}
 	// Create pod not controlled by a daemonset.
-	manager.podStore.Add(&v1.Pod{
+	err = manager.podStore.Add(&v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Labels:    map[string]string{"bang": "boom"},
 			Namespace: metav1.NamespaceDefault,
@@ -928,9 +1268,18 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 			NodeName: "node1",
 		},
 	})
-	manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
 
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 1)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 1)
+	if err != nil {
+		t.Error(err)
+	}
 }
 
@@ -943,13 +1292,19 @@ func TestDealsWithExistingPods(t *testing.T) {
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
-		manager.dsStore.Add(ds)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 		addNodes(manager.nodeStore, 0, 5, nil)
 		addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1)
 		addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 2)
 		addPods(manager.podStore, "node-3", simpleDaemonSetLabel, ds, 5)
 		addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 2, 5, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -965,8 +1320,14 @@ func TestSelectorDaemonLaunchesPods(t *testing.T) {
 		}
 		addNodes(manager.nodeStore, 0, 4, nil)
 		addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
-		manager.dsStore.Add(daemon)
-		syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0, 0)
+		err = manager.dsStore.Add(daemon)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, daemon, podControl, 3, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -980,14 +1341,20 @@ func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
-		manager.dsStore.Add(ds)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 		addNodes(manager.nodeStore, 0, 5, nil)
 		addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
 		addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, ds, 2)
 		addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3)
 		addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 1)
 		addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 4, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 4, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1001,7 +1368,10 @@ func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
-		manager.dsStore.Add(ds)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 		addNodes(manager.nodeStore, 0, 5, nil)
 		addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
 		addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
@@ -1012,7 +1382,10 @@ func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
 		addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, ds, 4)
 		addPods(manager.podStore, "node-9", simpleDaemonSetLabel, ds, 1)
 		addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, ds, 1)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 3, 20, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1028,8 +1401,14 @@ func TestBadSelectorDaemonDoesNothing(t *testing.T) {
 		ds := newDaemonSet("foo")
 		ds.Spec.UpdateStrategy = *strategy
 		ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1044,8 +1423,14 @@ func TestNameDaemonSetLaunchesPods(t *testing.T) {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
 		addNodes(manager.nodeStore, 0, 5, nil)
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1060,8 +1445,14 @@ func TestBadNameDaemonSetDoesNothing(t *testing.T) {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
 		addNodes(manager.nodeStore, 0, 5, nil)
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1078,8 +1469,14 @@ func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
 		}
 		addNodes(manager.nodeStore, 0, 4, nil)
 		addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1096,8 +1493,14 @@ func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
 		}
 		addNodes(manager.nodeStore, 0, 4, nil)
 		addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1111,8 +1514,14 @@ func TestSelectorDaemonSetLaunchesPods(t *testing.T) {
 	}
 	addNodes(manager.nodeStore, 0, 4, nil)
 	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
-	manager.dsStore.Add(ds)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 0, 0)
+	err = manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 3, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 }
 
 // Daemon with node affinity should launch pods on nodes matching affinity.
@@ -1144,8 +1553,14 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
 		}
 		addNodes(manager.nodeStore, 0, 4, nil)
 		addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
-		manager.dsStore.Add(daemon)
-		syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0, 0)
+		err = manager.dsStore.Add(daemon)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, daemon, podControl, 3, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1170,9 +1585,15 @@ func TestNumberReadyStatus(t *testing.T) {
 		addNodes(manager.nodeStore, 0, 2, simpleNodeLabel)
 		addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
 		addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1)
-		manager.dsStore.Add(ds)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 		if updated.Status.NumberReady != 0 {
 			t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
 		}
@@ -1184,7 +1605,10 @@ func TestNumberReadyStatus(t *testing.T) {
 			pod.Status.Conditions = append(pod.Status.Conditions, condition)
 		}
 
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 		if updated.Status.NumberReady != 2 {
 			t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
 		}
@@ -1213,9 +1637,15 @@ func TestObservedGeneration(t *testing.T) {
 		addNodes(manager.nodeStore, 0, 1, simpleNodeLabel)
 		addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
-		manager.dsStore.Add(ds)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 		if updated.Status.ObservedGeneration != ds.Generation {
 			t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, ds.Generation, updated.Status.ObservedGeneration)
 		}
@@ -1243,11 +1673,17 @@ func TestDaemonKillFailedPods(t *testing.T) {
 				if err != nil {
 					t.Fatalf("error creating DaemonSets controller: %v", err)
 				}
-				manager.dsStore.Add(ds)
+				err = manager.dsStore.Add(ds)
+				if err != nil {
+					t.Fatal(err)
+				}
 				addNodes(manager.nodeStore, 0, 1, nil)
 				addFailedPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numFailedPods)
 				addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numNormalPods)
-				syncAndValidateDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes, test.expectedEvents)
+				err = syncAndValidateDaemonSets(manager, ds, podControl, test.expectedCreates, test.expectedDeletes, test.expectedEvents)
+				if err != nil {
+					t.Error(err)
+				}
 			}
 		})
 	}
 }
 
@@ -1265,7 +1701,10 @@ func TestDaemonKillFailedPodsBackoff(t *testing.T) {
 				t.Fatalf("error creating DaemonSets controller: %v", err)
 			}
 
-			manager.dsStore.Add(ds)
+			err = manager.dsStore.Add(ds)
+			if err != nil {
+				t.Fatal(err)
+			}
 			addNodes(manager.nodeStore, 0, 1, nil)
 
 			nodeName := "node-0"
@@ -1281,7 +1720,10 @@ func TestDaemonKillFailedPodsBackoff(t *testing.T) {
 			backoffKey := failedPodsBackoffKey(ds, nodeName)
 
 			// First sync will delete the pod, initializing backoff
-			syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 1)
+			err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 1, 1)
+			if err != nil {
+				t.Error(err)
+			}
 			initialDelay := manager.failedPodsBackoff.Get(backoffKey)
 			if initialDelay <= 0 {
 				t.Fatal("Initial delay is expected to be set.")
@@ -1290,7 +1732,10 @@ func TestDaemonKillFailedPodsBackoff(t *testing.T) {
 			resetCounters(manager)
 
 			// Immediate (second) sync gets limited by the backoff
-			syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+			err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+			if err != nil {
+				t.Error(err)
+			}
 			delay := manager.failedPodsBackoff.Get(backoffKey)
 			if delay != initialDelay {
 				t.Fatal("Backoff delay shouldn't be raised while waiting.")
@@ -1314,7 +1759,10 @@ func TestDaemonKillFailedPodsBackoff(t *testing.T) {
 			}
 
 			// After backoff time, it will delete the failed pod
-			syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 1)
+			err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 1, 1)
+			if err != nil {
+				t.Error(err)
+			}
 		})
 	}
 }
 
@@ -1331,12 +1779,24 @@ func TestNoScheduleTaintedDoesntEvicitRunningIntolerantPod(t *testing.T) {
 		}
 
 		node := newNode("tainted", nil)
-		manager.nodeStore.Add(node)
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
 		setNodeTaint(node, noScheduleTaints)
-		manager.podStore.Add(newPod("keep-running-me", "tainted", simpleDaemonSetLabel, ds))
-		manager.dsStore.Add(ds)
+		err = manager.podStore.Add(newPod("keep-running-me", "tainted", simpleDaemonSetLabel, ds))
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1352,12 +1812,24 @@ func TestNoExecuteTaintedDoesEvicitRunningIntolerantPod(t *testing.T) {
 		}
 
 		node := newNode("tainted", nil)
-		manager.nodeStore.Add(node)
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
 		setNodeTaint(node, noExecuteTaints)
-		manager.podStore.Add(newPod("stop-running-me", "tainted", simpleDaemonSetLabel, ds))
-		manager.dsStore.Add(ds)
+		err = manager.podStore.Add(newPod("stop-running-me", "tainted", simpleDaemonSetLabel, ds))
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 1, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1373,10 +1845,19 @@ func TestTaintedNodeDaemonDoesNotLaunchIntolerantPod(t *testing.T) {
 
 		node := newNode("tainted", nil)
 		setNodeTaint(node, noScheduleTaints)
-		manager.nodeStore.Add(node)
-		manager.dsStore.Add(ds)
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1393,10 +1874,19 @@ func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) {
 
 		node := newNode("tainted", nil)
 		setNodeTaint(node, noScheduleTaints)
-		manager.nodeStore.Add(node)
-		manager.dsStore.Add(ds)
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1415,10 +1905,19 @@ func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
 		node.Status.Conditions = []v1.NodeCondition{
 			{Type: v1.NodeReady, Status: v1.ConditionFalse},
 		}
-		manager.nodeStore.Add(node)
-		manager.dsStore.Add(ds)
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1437,10 +1936,19 @@ func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) {
 		node.Status.Conditions = []v1.NodeCondition{
 			{Type: v1.NodeReady, Status: v1.ConditionUnknown},
 		}
-		manager.nodeStore.Add(node)
-		manager.dsStore.Add(ds)
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1455,9 +1963,15 @@ func TestNodeDaemonLaunchesToleratePod(t *testing.T) {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
 		addNodes(manager.nodeStore, 0, 1, nil)
-		manager.dsStore.Add(ds)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1475,9 +1989,18 @@ func TestDaemonSetRespectsTermination(t *testing.T) {
 		pod := newPod(fmt.Sprintf("%s-", "node-0"), "node-0", simpleDaemonSetLabel, ds)
 		dt := metav1.Now()
 		pod.DeletionTimestamp = &dt
-		manager.podStore.Add(pod)
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+		err = manager.podStore.Add(pod)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1511,9 +2034,18 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
 			{Key: v1.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule},
 			{Key: v1.TaintNodePIDPressure, Effect: v1.TaintEffectNoSchedule},
 		}
-		manager.nodeStore.Add(node)
-		manager.dsStore.Add(ds)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+		err = manager.nodeStore.Add(node)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -1914,9 +2446,15 @@ func TestUpdateNode(t *testing.T) {
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
-		manager.nodeStore.Add(c.oldNode)
+		err = manager.nodeStore.Add(c.oldNode)
+		if err != nil {
+			t.Fatal(err)
+		}
 		c.ds.Spec.UpdateStrategy = *strategy
-		manager.dsStore.Add(c.ds)
+		err = manager.dsStore.Add(c.ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 
 		expectedEvents := 0
 		if c.expectedEventsFunc != nil {
@@ -1926,7 +2464,10 @@ func TestUpdateNode(t *testing.T) {
 		if c.expectedCreates != nil {
 			expectedCreates = c.expectedCreates()
 		}
-		syncAndValidateDaemonSets(t, manager, c.ds, podControl, expectedCreates, 0, expectedEvents)
+		err = syncAndValidateDaemonSets(manager, c.ds, podControl, expectedCreates, 0, expectedEvents)
+		if err != nil {
+			t.Error(err)
+		}
 
 		manager.enqueueDaemonSet = func(ds *apps.DaemonSet) {
 			if ds.Name == "ds" {
@@ -2089,15 +2630,27 @@ func TestDeleteNoDaemonPod(t *testing.T) {
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
-		manager.nodeStore.Add(c.node)
+		err = manager.nodeStore.Add(c.node)
+		if err != nil {
+			t.Fatal(err)
+		}
 		c.ds.Spec.UpdateStrategy = *strategy
-		manager.dsStore.Add(c.ds)
+		err = manager.dsStore.Add(c.ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 		for _, pod := range c.existPods {
-			manager.podStore.Add(pod)
+			err = manager.podStore.Add(pod)
+			if err != nil {
+				t.Fatal(err)
+			}
 		}
 		switch strategy.Type {
 		case apps.OnDeleteDaemonSetStrategyType, apps.RollingUpdateDaemonSetStrategyType:
-			syncAndValidateDaemonSets(t, manager, c.ds, podControl, 1, 0, 0)
+			err = syncAndValidateDaemonSets(manager, c.ds, podControl, 1, 0, 0)
+			if err != nil {
+				t.Error(err)
+			}
 		default:
 			t.Fatalf("unexpected UpdateStrategy %+v", strategy)
 		}
@@ -2119,7 +2672,10 @@ func TestDeleteUnscheduledPodForNotExistingNode(t *testing.T) {
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
-		manager.dsStore.Add(ds)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
 		addNodes(manager.nodeStore, 0, 1, nil)
 		addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
 		addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1)
@@ -2142,8 +2698,14 @@ func TestDeleteUnscheduledPodForNotExistingNode(t *testing.T) {
 				},
 			},
 		}
-		manager.podStore.Add(podScheduledUsingAffinity)
-		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0)
+		err = manager.podStore.Add(podScheduledUsingAffinity)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 1, 0)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 }
 
@@ -2157,8 +2719,14 @@ func TestGetNodesToDaemonPods(t *testing.T) {
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
 		}
-		manager.dsStore.Add(ds)
-		manager.dsStore.Add(ds2)
+		err = manager.dsStore.Add(ds)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds2)
+		if err != nil {
+			t.Fatal(err)
+		}
 		addNodes(manager.nodeStore, 0, 2, nil)
 
 		// These pods should be returned.
@@ -2182,7 +2750,10 @@ func TestGetNodesToDaemonPods(t *testing.T) {
 			newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2),
 		}
 		for _, pod := range ignoredPods {
-			manager.podStore.Add(pod)
+			err = manager.podStore.Add(pod)
+			if err != nil {
+				t.Fatal(err)
+			}
 		}
 
 		nodesToDaemonPods, err := manager.getNodesToDaemonPods(ds)
@@ -2218,7 +2789,10 @@ func TestAddNode(t *testing.T) {
 	node1 := newNode("node1", nil)
 	ds := newDaemonSet("ds")
 	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
-	manager.dsStore.Add(ds)
+	err = manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	manager.addNode(node1)
 	if got, want := manager.queue.Len(), 0; got != want {
@@ -2246,8 +2820,14 @@ func TestAddPod(t *testing.T) {
 		ds1.Spec.UpdateStrategy = *strategy
 		ds2 := newDaemonSet("foo2")
 		ds2.Spec.UpdateStrategy = *strategy
-		manager.dsStore.Add(ds1)
-		manager.dsStore.Add(ds2)
+		err = manager.dsStore.Add(ds1)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds2)
+		if err != nil {
+			t.Fatal(err)
+		}
 
 		pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1)
 		manager.addPod(pod1)
@@ -2292,9 +2872,18 @@ func TestAddPodOrphan(t *testing.T) {
 		ds3 := newDaemonSet("foo3")
 		ds3.Spec.UpdateStrategy = *strategy
 		ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2
-		manager.dsStore.Add(ds1)
-		manager.dsStore.Add(ds2)
-		manager.dsStore.Add(ds3)
+		err = manager.dsStore.Add(ds1)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds2)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds3)
+		if err != nil {
+			t.Fatal(err)
+		}
 
 		// Make pod an orphan. Expect matching sets to be queued.
 		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil)
@@ -2318,8 +2907,14 @@ func TestUpdatePod(t *testing.T) {
 		ds1.Spec.UpdateStrategy = *strategy
 		ds2 := newDaemonSet("foo2")
 		ds2.Spec.UpdateStrategy = *strategy
-		manager.dsStore.Add(ds1)
-		manager.dsStore.Add(ds2)
+		err = manager.dsStore.Add(ds1)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds2)
+		if err != nil {
+			t.Fatal(err)
+		}
 
 		pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1)
 		prev := *pod1
@@ -2365,8 +2960,14 @@ func TestUpdatePodOrphanSameLabels(t *testing.T) {
 		ds1.Spec.UpdateStrategy = *strategy
 		ds2 := newDaemonSet("foo2")
 		ds2.Spec.UpdateStrategy = *strategy
-		manager.dsStore.Add(ds1)
-		manager.dsStore.Add(ds2)
+		err = manager.dsStore.Add(ds1)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds2)
+		if err != nil {
+			t.Fatal(err)
+		}
 
 		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil)
 		prev := *pod
@@ -2388,8 +2989,14 @@ func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
 		ds1.Spec.UpdateStrategy = *strategy
 		ds2 := newDaemonSet("foo2")
 		ds2.Spec.UpdateStrategy = *strategy
-		manager.dsStore.Add(ds1)
-		manager.dsStore.Add(ds2)
+		err = manager.dsStore.Add(ds1)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds2)
+		if err != nil {
+			t.Fatal(err)
+		}
 
 		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil)
 		prev := *pod
@@ -2415,8 +3022,14 @@ func TestUpdatePodChangeControllerRef(t *testing.T) {
 		}
 		ds1 := newDaemonSet("foo1")
 		ds2 := newDaemonSet("foo2")
-		manager.dsStore.Add(ds1)
-		manager.dsStore.Add(ds2)
+		err = manager.dsStore.Add(ds1)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds2)
+		if err != nil {
+			t.Fatal(err)
+		}
 
 		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1)
 		prev := *pod
@@ -2439,8 +3052,14 @@ func TestUpdatePodControllerRefRemoved(t *testing.T) {
 		ds1.Spec.UpdateStrategy = *strategy
 		ds2 := newDaemonSet("foo2")
 		ds2.Spec.UpdateStrategy = *strategy
-		manager.dsStore.Add(ds1)
-		manager.dsStore.Add(ds2)
+		err = manager.dsStore.Add(ds1)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds2)
+		if err != nil {
+			t.Fatal(err)
+		}
 
 		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1)
 		prev := *pod
@@ -2463,8 +3082,14 @@ func TestDeletePod(t *testing.T) {
 		ds1.Spec.UpdateStrategy = *strategy
 		ds2 := newDaemonSet("foo2")
 		ds2.Spec.UpdateStrategy = *strategy
-		manager.dsStore.Add(ds1)
-		manager.dsStore.Add(ds2)
+		err = manager.dsStore.Add(ds1)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds2)
+		if err != nil {
+			t.Fatal(err)
+		}
 
 		pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1)
 		manager.deletePod(pod1)
@@ -2509,9 +3134,18 @@ func TestDeletePodOrphan(t *testing.T) {
 		ds3 := newDaemonSet("foo3")
 		ds3.Spec.UpdateStrategy = *strategy
 		ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2
-		manager.dsStore.Add(ds1)
-		manager.dsStore.Add(ds2)
-		manager.dsStore.Add(ds3)
+		err = manager.dsStore.Add(ds1)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds2)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = manager.dsStore.Add(ds3)
+		if err != nil {
+			t.Fatal(err)
+		}
 
 		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil)
 		manager.deletePod(pod)
diff --git a/pkg/controller/daemon/init_test.go b/pkg/controller/daemon/init_test.go
new file mode 100644
index 00000000000..2713797f8e6
--- /dev/null
+++ b/pkg/controller/daemon/init_test.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package daemon
+
+import (
+	"k8s.io/klog/v2"
+)
+
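+// init registers klog's flags with the default flag set so test runs can
+// raise log verbosity (e.g. -v=8) when debugging informer and queue
+// interactions in this package's tests.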
+func init() {
+	klog.InitFlags(nil)
+}
diff --git a/pkg/controller/daemon/update_test.go b/pkg/controller/daemon/update_test.go
index 851363a4930..ffbd5815895 100644
--- a/pkg/controller/daemon/update_test.go
+++ b/pkg/controller/daemon/update_test.go
@@ -34,36 +34,66 @@ func TestDaemonSetUpdatesPods(t *testing.T) {
 	}
 	maxUnavailable := 2
 	addNodes(manager.nodeStore, 0, 5, nil)
-	manager.dsStore.Add(ds)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+	err = manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	markPodsReady(podControl.podStore)
 
 	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
 	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
 	intStr := intstr.FromInt(maxUnavailable)
 	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
-	manager.dsStore.Update(ds)
+	err = manager.dsStore.Update(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, maxUnavailable, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, maxUnavailable, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	markPodsReady(podControl.podStore)
 
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, maxUnavailable, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, maxUnavailable, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	markPodsReady(podControl.podStore)
 
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 1, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 1, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	markPodsReady(podControl.podStore)
 
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
 }
 
@@ -75,24 +105,42 @@ func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
 	}
 	maxUnavailable := 3
 	addNodes(manager.nodeStore, 0, 5, nil)
-	manager.dsStore.Add(ds)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+	err = manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	markPodsReady(podControl.podStore)
 
 	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
 	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
 	intStr := intstr.FromInt(maxUnavailable)
 	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
-	manager.dsStore.Update(ds)
+	err = manager.dsStore.Update(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	// new pods are not ready numUnavailable == maxUnavailable
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, maxUnavailable, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, maxUnavailable, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
 }
 
@@ -104,23 +152,41 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
 	}
 	maxUnavailable := 3
 	addNodes(manager.nodeStore, 0, 5, nil)
-	manager.dsStore.Add(ds)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+	err = manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 
 	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
 	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
 	intStr := intstr.FromInt(maxUnavailable)
 	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
-	manager.dsStore.Update(ds)
+	err = manager.dsStore.Update(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	// all old pods are unavailable so should be removed
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 5, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 5, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
 }
 
@@ -132,17 +198,29 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
 	}
 	maxUnavailable := 3
 	addNodes(manager.nodeStore, 0, 5, nil)
-	manager.dsStore.Add(ds)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+	err = manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 5, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 
 	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
 	intStr := intstr.FromInt(maxUnavailable)
 	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
-	manager.dsStore.Update(ds)
+	err = manager.dsStore.Update(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	// template is not changed no pod should be removed
 	clearExpectations(t, manager, ds, podControl)
-	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+	err = syncAndValidateDaemonSets(manager, ds, podControl, 0, 0, 0)
+	if err != nil {
+		t.Error(err)
+	}
 	clearExpectations(t, manager, ds, podControl)
 }