From 35d772e3540e986ccc772ee50a09b8bb58416b5a Mon Sep 17 00:00:00 2001 From: draveness Date: Wed, 18 Sep 2019 00:06:37 +0800 Subject: [PATCH] feat: graduate ScheduleDaemonSetPods to GA --- pkg/controller/daemon/BUILD | 3 - pkg/controller/daemon/daemon_controller.go | 87 +- .../daemon/daemon_controller_test.go | 3045 +++++++---------- pkg/controller/daemon/util/BUILD | 1 - pkg/controller/daemon/util/daemonset_util.go | 4 +- .../daemon/util/daemonset_util_test.go | 5 +- pkg/features/kube_features.go | 3 +- test/integration/daemonset/BUILD | 4 - test/integration/daemonset/daemonset_test.go | 526 ++- 9 files changed, 1525 insertions(+), 2153 deletions(-) diff --git a/pkg/controller/daemon/BUILD b/pkg/controller/daemon/BUILD index 3ca274c7240..372d7e3598f 100644 --- a/pkg/controller/daemon/BUILD +++ b/pkg/controller/daemon/BUILD @@ -18,8 +18,6 @@ go_library( "//pkg/api/v1/pod:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/daemon/util:go_default_library", - "//pkg/features:go_default_library", - "//pkg/kubelet/types:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//pkg/util/labels:go_default_library", @@ -36,7 +34,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library", "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index bac4ddc4c20..7a0a92d4790 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -26,7 +26,7 @@ import ( "k8s.io/klog" apps "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -35,7 +35,6 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" appsinformers "k8s.io/client-go/informers/apps/v1" coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" @@ -52,8 +51,6 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/daemon/util" - "k8s.io/kubernetes/pkg/features" - kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" "k8s.io/utils/integer" @@ -973,9 +970,7 @@ func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, nodeList []*v1.Node, // Remove unscheduled pods assigned to not existing nodes when daemonset pods are scheduled by scheduler. // If node doesn't exist then pods are never scheduled and can't be deleted by PodGCController. - if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - podsToDelete = append(podsToDelete, getUnscheduledPodsWithoutNode(nodeList, nodeToDaemonPods)...) - } + podsToDelete = append(podsToDelete, getUnscheduledPodsWithoutNode(nodeList, nodeToDaemonPods)...) 
// Label new pods using the hash label value of the current history when creating them if err = dsc.syncNodes(ds, podsToDelete, nodesNeedingDaemonPods, hash); err != nil { @@ -1033,25 +1028,16 @@ func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nod for i := pos; i < pos+batchSize; i++ { go func(ix int) { defer createWait.Done() - var err error podTemplate := template.DeepCopy() - if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - // The pod's NodeAffinity will be updated to make sure the Pod is bound - // to the target node by default scheduler. It is safe to do so because there - // should be no conflicting node affinity with the target node. - podTemplate.Spec.Affinity = util.ReplaceDaemonSetPodNodeNameNodeAffinity( - podTemplate.Spec.Affinity, nodesNeedingDaemonPods[ix]) + // The pod's NodeAffinity will be updated to make sure the Pod is bound + // to the target node by default scheduler. It is safe to do so because there + // should be no conflicting node affinity with the target node. + podTemplate.Spec.Affinity = util.ReplaceDaemonSetPodNodeNameNodeAffinity( + podTemplate.Spec.Affinity, nodesNeedingDaemonPods[ix]) - err = dsc.podControl.CreatePodsWithControllerRef(ds.Namespace, podTemplate, - ds, metav1.NewControllerRef(ds, controllerKind)) - } else { - // If pod is scheduled by DaemonSetController, set its '.spec.scheduleName'. - podTemplate.Spec.SchedulerName = "kubernetes.io/daemonset-controller" - - err = dsc.podControl.CreatePodsOnNode(nodesNeedingDaemonPods[ix], ds.Namespace, podTemplate, - ds, metav1.NewControllerRef(ds, controllerKind)) - } + err := dsc.podControl.CreatePodsWithControllerRef(ds.Namespace, podTemplate, + ds, metav1.NewControllerRef(ds, controllerKind)) if err != nil && errors.IsTimeout(err) { // Pod is created but its initialization has timed out. @@ -1355,14 +1341,10 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps. // TODO(k82cn): When 'ScheduleDaemonSetPods' upgrade to beta or GA, remove unnecessary check on failure reason, // e.g. InsufficientResourceError; and simplify "wantToRun, shouldSchedule, shouldContinueRunning" // into one result, e.g. selectedNode. - var insufficientResourceErr error for _, r := range reasons { klog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason()) switch reason := r.(type) { - case *predicates.InsufficientResourceError: - insufficientResourceErr = reason case *predicates.PredicateFailureError: - var emitEvent bool // we try to partition predicates into two partitions here: intentional on the part of the operator and not. switch reason { // intentional @@ -1384,18 +1366,6 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps. return false, false, false, nil } wantToRun, shouldSchedule = false, false - // unintentional - case - predicates.ErrDiskConflict, - predicates.ErrVolumeZoneConflict, - predicates.ErrMaxVolumeCountExceeded, - predicates.ErrNodeUnderMemoryPressure, - predicates.ErrNodeUnderDiskPressure: - // wantToRun and shouldContinueRunning are likely true here. They are - // absolutely true at the time of writing the comment. See first comment - // of this method. - shouldSchedule = false - emitEvent = true // unexpected case predicates.ErrPodAffinityNotMatch, @@ -1405,19 +1375,10 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps. 
default: klog.V(4).Infof("unknown predicate failure reason: %s", reason.GetReason()) wantToRun, shouldSchedule, shouldContinueRunning = false, false, false - emitEvent = true - } - if emitEvent { dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedPlacementReason, "failed to place pod on %q: %s", node.ObjectMeta.Name, reason.GetReason()) } } } - // only emit this event if insufficient resource is the only thing - // preventing the daemon pod from scheduling - if shouldSchedule && insufficientResourceErr != nil { - dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, FailedPlacementReason, "failed to place pod on %q: %s", node.ObjectMeta.Name, insufficientResourceErr.Error()) - shouldSchedule = false - } return } @@ -1471,35 +1432,7 @@ func checkNodeFitness(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo * func Predicates(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) { var predicateFails []predicates.PredicateFailureReason - // If ScheduleDaemonSetPods is enabled, only check nodeSelector, nodeAffinity and toleration/taint match. - if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - fit, reasons, err := checkNodeFitness(pod, nil, nodeInfo) - if err != nil { - return false, predicateFails, err - } - if !fit { - predicateFails = append(predicateFails, reasons...) - } - - return len(predicateFails) == 0, predicateFails, nil - } - - critical := kubelettypes.IsCriticalPod(pod) - - fit, reasons, err := predicates.PodToleratesNodeTaints(pod, nil, nodeInfo) - if err != nil { - return false, predicateFails, err - } - if !fit { - predicateFails = append(predicateFails, reasons...) - } - if critical { - // If the pod is marked as critical and support for critical pod annotations is enabled, - // check predicates for critical pods only. - fit, reasons, err = predicates.EssentialPredicates(pod, nil, nodeInfo) - } else { - fit, reasons, err = predicates.GeneralPredicates(pod, nil, nodeInfo) - } + fit, reasons, err := checkNodeFitness(pod, nil, nodeInfo) if err != nil { return false, predicateFails, err } diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index 9c60d359cde..3108c4542ce 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -54,10 +54,6 @@ import ( labelsutil "k8s.io/kubernetes/pkg/util/labels" ) -// IMPORTANT NOTE: Some tests in file need to pass irrespective of ScheduleDaemonSetPods feature is enabled. For rest -// of the tests, an explicit comment is mentioned whether we are testing codepath specific to ScheduleDaemonSetPods or -// without that feature. 
- var ( simpleDaemonSetLabel = map[string]string{"name": "simple-daemon", "type": "production"} simpleDaemonSetLabel2 = map[string]string{"name": "simple-daemon", "type": "test"} @@ -433,22 +429,19 @@ func clearExpectations(t *testing.T, manager *daemonSetsController, ds *apps.Dae } func TestDeleteFinalStateUnknown(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 1, nil) - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - // DeletedFinalStateUnknown should queue the embedded DS if found. - manager.deleteDaemonset(cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds}) - enqueuedKey, _ := manager.queue.Get() - if enqueuedKey.(string) != "default/foo" { - t.Errorf("expected delete of DeletedFinalStateUnknown to enqueue the daemonset but found: %#v", enqueuedKey) - } + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + addNodes(manager.nodeStore, 0, 1, nil) + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + // DeletedFinalStateUnknown should queue the embedded DS if found. + manager.deleteDaemonset(cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds}) + enqueuedKey, _ := manager.queue.Get() + if enqueuedKey.(string) != "default/foo" { + t.Errorf("expected delete of DeletedFinalStateUnknown to enqueue the daemonset but found: %#v", enqueuedKey) } } } @@ -468,27 +461,21 @@ func markPodReady(pod *v1.Pod) { // DaemonSets without node selectors should launch pods on every node. func TestSimpleDaemonSetLaunchesPods(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 5, nil) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 5, nil) + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) } } -// When ScheduleDaemonSetPods is enabled, DaemonSets without node selectors should -// launch pods on every node by NodeAffinity. +// DaemonSets without node selectors should launch pods on every node by NodeAffinity. 
func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, true)() - nodeNum := 5 for _, strategy := range updateStrategies() { ds := newDaemonSet("foo") @@ -500,9 +487,9 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) { addNodes(manager.nodeStore, 0, nodeNum, nil) manager.dsStore.Add(ds) syncAndValidateDaemonSets(t, manager, ds, podControl, nodeNum, 0, 0) - // Check for ScheduleDaemonSetPods feature + if len(podControl.podIDMap) != nodeNum { - t.Fatalf("failed to create pods for DaemonSet when enabled ScheduleDaemonSetPods.") + t.Fatalf("failed to create pods for DaemonSet") } nodeMap := make(map[string]*v1.Node) @@ -563,149 +550,130 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) { // Simulate a cluster with 100 nodes, but simulate a limit (like a quota limit) // of 10 pods, and verify that the ds doesn't make 100 create calls per sync pass func TestSimpleDaemonSetPodCreateErrors(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - podControl.FakePodControl.CreateLimit = 10 - addNodes(manager.nodeStore, 0, podControl.FakePodControl.CreateLimit*10, nil) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0) - expectedLimit := 0 - for pass := uint8(0); expectedLimit <= podControl.FakePodControl.CreateLimit; pass++ { - expectedLimit += controller.SlowStartInitialBatchSize << pass - } - if podControl.FakePodControl.CreateCallCount > expectedLimit { - t.Errorf("Unexpected number of create calls. Expected <= %d, saw %d\n", podControl.FakePodControl.CreateLimit*2, podControl.FakePodControl.CreateCallCount) - } - + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + podControl.FakePodControl.CreateLimit = 10 + addNodes(manager.nodeStore, 0, podControl.FakePodControl.CreateLimit*10, nil) + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0) + expectedLimit := 0 + for pass := uint8(0); expectedLimit <= podControl.FakePodControl.CreateLimit; pass++ { + expectedLimit += controller.SlowStartInitialBatchSize << pass + } + if podControl.FakePodControl.CreateCallCount > expectedLimit { + t.Errorf("Unexpected number of create calls. 
Expected <= %d, saw %d\n", podControl.FakePodControl.CreateLimit*2, podControl.FakePodControl.CreateCallCount) } } } func TestDaemonSetPodCreateExpectationsError(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - strategies := updateStrategies() - for _, strategy := range strategies { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - podControl.FakePodControl.CreateLimit = 10 - creationExpectations := 100 - addNodes(manager.nodeStore, 0, 100, nil) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0) - dsKey, err := controller.KeyFunc(ds) - if err != nil { - t.Fatalf("error get DaemonSets controller key: %v", err) - } + strategies := updateStrategies() + for _, strategy := range strategies { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + podControl.FakePodControl.CreateLimit = 10 + creationExpectations := 100 + addNodes(manager.nodeStore, 0, 100, nil) + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0) + dsKey, err := controller.KeyFunc(ds) + if err != nil { + t.Fatalf("error get DaemonSets controller key: %v", err) + } - if !manager.expectations.SatisfiedExpectations(dsKey) { - t.Errorf("Unsatisfied pod creation expectatitons. Expected %d", creationExpectations) - } + if !manager.expectations.SatisfiedExpectations(dsKey) { + t.Errorf("Unsatisfied pod creation expectatitons. Expected %d", creationExpectations) } } } func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, clientset, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, clientset, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } - var updated *apps.DaemonSet - clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { - if action.GetSubresource() != "status" { - return false, nil, nil - } - if u, ok := action.(core.UpdateAction); ok { - updated = u.GetObject().(*apps.DaemonSet) - } + var updated *apps.DaemonSet + clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + if action.GetSubresource() != "status" { return false, nil, nil - }) - - manager.dsStore.Add(ds) - addNodes(manager.nodeStore, 0, 5, nil) - syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) - - // Make sure the single sync() updated Status already for the change made - // during the manage() phase. 
- if got, want := updated.Status.CurrentNumberScheduled, int32(5); got != want { - t.Errorf("Status.CurrentNumberScheduled = %v, want %v", got, want) } + if u, ok := action.(core.UpdateAction); ok { + updated = u.GetObject().(*apps.DaemonSet) + } + return false, nil, nil + }) + + manager.dsStore.Add(ds) + addNodes(manager.nodeStore, 0, 5, nil) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0) + + // Make sure the single sync() updated Status already for the change made + // during the manage() phase. + if got, want := updated.Status.CurrentNumberScheduled, int32(5); got != want { + t.Errorf("Status.CurrentNumberScheduled = %v, want %v", got, want) } } } // DaemonSets should do nothing if there aren't any nodes func TestNoNodesDoesNothing(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - manager, podControl, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + manager, podControl, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } // DaemonSets without node selectors should launch on a single node in a // single node cluster. func TestOneNodeDaemonLaunchesPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - manager.nodeStore.Add(newNode("only-node", nil)) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + manager.nodeStore.Add(newNode("only-node", nil)) + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSets should place onto NotReady nodes func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - node := newNode("not-ready", nil) - node.Status.Conditions = []v1.NodeCondition{ - {Type: v1.NodeReady, Status: v1.ConditionFalse}, - } - manager.nodeStore.Add(node) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + 
ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + node := newNode("not-ready", nil) + node.Status.Conditions = []v1.NodeCondition{ + {Type: v1.NodeReady, Status: v1.ConditionFalse}, + } + manager.nodeStore.Add(node) + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -744,11 +712,11 @@ func allocatableResources(memory, cpu string) v1.ResourceList { } } -// When ScheduleDaemonSetPods is disabled, DaemonSets should not place onto nodes with insufficient free resource -func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)() +// DaemonSets should not unschedule a daemonset pod from a node with insufficient free resource +func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) { for _, strategy := range updateStrategies() { podSpec := resourcePodSpec("too-much-mem", "75M", "75m") + podSpec.NodeName = "too-much-mem" ds := newDaemonSet("foo") ds.Spec.UpdateStrategy = *strategy ds.Spec.Template.Spec = podSpec @@ -765,136 +733,69 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) { manager.dsStore.Add(ds) switch strategy.Type { case apps.OnDeleteDaemonSetStrategyType: - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) case apps.RollingUpdateDaemonSetStrategyType: - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) default: t.Fatalf("unexpected UpdateStrategy %+v", strategy) } } } -// DaemonSets should not unschedule a daemonset pod from a node with insufficient free resource -func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - podSpec := resourcePodSpec("too-much-mem", "75M", "75m") - podSpec.NodeName = "too-much-mem" - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec = podSpec - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - node := newNode("too-much-mem", nil) - node.Status.Allocatable = allocatableResources("100M", "200m") - manager.nodeStore.Add(node) - manager.podStore.Add(&v1.Pod{ - Spec: podSpec, - }) - manager.dsStore.Add(ds) - switch strategy.Type { - case apps.OnDeleteDaemonSetStrategyType: - if !utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2) - } else { - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) - } - case apps.RollingUpdateDaemonSetStrategyType: - if !utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3) - } else { - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) - } - default: - t.Fatalf("unexpected UpdateStrategy %+v", strategy) - } - } - } -} - // DaemonSets should only place onto nodes with sufficient free resource and matched node selector func 
TestInsufficientCapacityNodeSufficientCapacityWithNodeLabelDaemonLaunchPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - podSpec := resourcePodSpecWithoutNodeName("50M", "75m") - ds := newDaemonSet("foo") - ds.Spec.Template.Spec = podSpec - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - node1 := newNode("not-enough-resource", nil) - node1.Status.Allocatable = allocatableResources("10M", "20m") - node2 := newNode("enough-resource", simpleNodeLabel) - node2.Status.Allocatable = allocatableResources("100M", "200m") - manager.nodeStore.Add(node1) - manager.nodeStore.Add(node2) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) - // we do not expect any event for insufficient free resource - if len(manager.fakeRecorder.Events) != 0 { - t.Fatalf("unexpected events, got %v, expected %v: %+v", len(manager.fakeRecorder.Events), 0, manager.fakeRecorder.Events) - } + podSpec := resourcePodSpecWithoutNodeName("50M", "75m") + ds := newDaemonSet("foo") + ds.Spec.Template.Spec = podSpec + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + node1 := newNode("not-enough-resource", nil) + node1.Status.Allocatable = allocatableResources("10M", "20m") + node2 := newNode("enough-resource", simpleNodeLabel) + node2.Status.Allocatable = allocatableResources("100M", "200m") + manager.nodeStore.Add(node1) + manager.nodeStore.Add(node2) + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) + // we do not expect any event for insufficient free resource + if len(manager.fakeRecorder.Events) != 0 { + t.Fatalf("unexpected events, got %v, expected %v: %+v", len(manager.fakeRecorder.Events), 0, manager.fakeRecorder.Events) } } -// When ScheduleDaemonSetPods is disabled, DaemonSetPods should launch onto node with terminated pods if there -// are sufficient resources. -func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)() - - validate := func(strategy *apps.DaemonSetUpdateStrategy, expectedEvents int) { - podSpec := resourcePodSpec("too-much-mem", "75M", "75m") - ds := newDaemonSet("foo") +// DaemonSet should launch a pod on a node with the NetworkUnavailable condition. 
+func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) { + for _, strategy := range updateStrategies() { + ds := newDaemonSet("simple") ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec = podSpec manager, podControl, _, err := newTestController(ds) if err != nil { t.Fatalf("error creating DaemonSets controller: %v", err) } - node := newNode("too-much-mem", nil) - node.Status.Allocatable = allocatableResources("100M", "200m") + + node := newNode("network-unavailable", nil) + node.Status.Conditions = []v1.NodeCondition{ + {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}, + } manager.nodeStore.Add(node) - manager.podStore.Add(&v1.Pod{ - Spec: podSpec, - Status: v1.PodStatus{Phase: v1.PodSucceeded}, - }) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, expectedEvents) - } - tests := []struct { - strategy *apps.DaemonSetUpdateStrategy - expectedEvents int - }{ - { - strategy: newOnDeleteStrategy(), - expectedEvents: 1, - }, - { - strategy: newRollbackStrategy(), - expectedEvents: 2, - }, - } - - for _, t := range tests { - validate(t.strategy, t.expectedEvents) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } -// When ScheduleDaemonSetPods is disabled, DaemonSets should place onto nodes with sufficient free resources. -func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)() - - validate := func(strategy *apps.DaemonSetUpdateStrategy, expectedEvents int) { + +// DaemonSets should not take any actions when being deleted +func TestDontDoAnythingIfBeingDeleted(t *testing.T) { + for _, strategy := range updateStrategies() { podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m") ds := newDaemonSet("foo") ds.Spec.UpdateStrategy = *strategy ds.Spec.Template.Spec = podSpec + now := metav1.Now() + ds.DeletionTimestamp = &now manager, podControl, _, err := newTestController(ds) if err != nil { t.Fatalf("error creating DaemonSets controller: %v", err) @@ -906,111 +807,41 @@ func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) { Spec: podSpec, }) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, expectedEvents) - } - - tests := []struct { - strategy *apps.DaemonSetUpdateStrategy - expectedEvents int - }{ - { - strategy: newOnDeleteStrategy(), - expectedEvents: 1, - }, - { - strategy: newRollbackStrategy(), - expectedEvents: 2, - }, - } - - for _, t := range tests { - validate(t.strategy, t.expectedEvents) - } -} - -// DaemonSet should launch a pod on a node with taint NetworkUnavailable condition. 
-func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("simple") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("network-unavailable", nil) - node.Status.Conditions = []v1.NodeCondition{ - {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}, - } - manager.nodeStore.Add(node) - manager.dsStore.Add(ds) - - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) - } - } -} - -// DaemonSets not take any actions when being deleted -func TestDontDoAnythingIfBeingDeleted(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m") - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec = podSpec - now := metav1.Now() - ds.DeletionTimestamp = &now - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - node := newNode("not-too-much-mem", nil) - node.Status.Allocatable = allocatableResources("200M", "200m") - manager.nodeStore.Add(node) - manager.podStore.Add(&v1.Pod{ - Spec: podSpec, - }) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) - } + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - // Bare client says it IS deleted. - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - now := metav1.Now() - ds.DeletionTimestamp = &now - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 5, nil) - - // Lister (cache) says it's NOT deleted. - ds2 := *ds - ds2.DeletionTimestamp = nil - manager.dsStore.Add(&ds2) - - // The existence of a matching orphan should block all actions in this state. - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) - manager.podStore.Add(pod) - - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + // Bare client says it IS deleted. + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + now := metav1.Now() + ds.DeletionTimestamp = &now + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 5, nil) + + // Lister (cache) says it's NOT deleted. + ds2 := *ds + ds2.DeletionTimestamp = nil + manager.dsStore.Add(&ds2) + + // The existence of a matching orphan should block all actions in this state. 
+ pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) + manager.podStore.Add(pod) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } -// When ScheduleDaemonSetPods is disabled, DaemonSets should not place onto nodes that would cause port conflicts. -func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)() +// Test that if the node is already scheduled with a pod using a host port +// but belonging to the same daemonset, we don't delete that pod +// +// Issue: https://github.com/kubernetes/kubernetes/issues/22309 +func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) { for _, strategy := range updateStrategies() { podSpec := v1.PodSpec{ NodeName: "port-conflict", @@ -1026,87 +857,49 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) { } node := newNode("port-conflict", nil) manager.nodeStore.Add(node) - manager.podStore.Add(&v1.Pod{ - Spec: podSpec, - }) - ds := newDaemonSet("foo") ds.Spec.UpdateStrategy = *strategy ds.Spec.Template.Spec = podSpec manager.dsStore.Add(ds) + pod := newPod(ds.Name+"-", node.Name, simpleDaemonSetLabel, ds) + manager.podStore.Add(pod) syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } -// Test that if the node is already scheduled with a pod using a host port -// but belonging to the same daemonset, we don't delete that pod -// -// Issue: https://github.com/kubernetes/kubernetes/issues/22309 -func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - podSpec := v1.PodSpec{ - NodeName: "port-conflict", - Containers: []v1.Container{{ - Ports: []v1.ContainerPort{{ - HostPort: 666, - }}, - }}, - } - manager, podControl, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - node := newNode("port-conflict", nil) - manager.nodeStore.Add(node) - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec = podSpec - manager.dsStore.Add(ds) - pod := newPod(ds.Name+"-", node.Name, simpleDaemonSetLabel, ds) - manager.podStore.Add(pod) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) - } - } -} - // DaemonSets should place onto nodes that would not cause port conflicts func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - podSpec1 := v1.PodSpec{ - NodeName: "no-port-conflict", - Containers: []v1.Container{{ - Ports: []v1.ContainerPort{{ - HostPort: 6661, - }}, + for _, strategy := range updateStrategies() { + podSpec1 := v1.PodSpec{ + NodeName: "no-port-conflict", + Containers: []v1.Container{{ + Ports: []v1.ContainerPort{{ + HostPort: 6661, }}, - } - podSpec2 := v1.PodSpec{ - NodeName: "no-port-conflict", - Containers: []v1.Container{{ - Ports: []v1.ContainerPort{{ - HostPort: 6662, - }}, - }}, - } - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec = podSpec2 - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", 
err) - } - node := newNode("no-port-conflict", nil) - manager.nodeStore.Add(node) - manager.podStore.Add(&v1.Pod{ - Spec: podSpec1, - }) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) + }}, } + podSpec2 := v1.PodSpec{ + NodeName: "no-port-conflict", + Containers: []v1.Container{{ + Ports: []v1.ContainerPort{{ + HostPort: 6662, + }}, + }}, + } + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec = podSpec2 + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + node := newNode("no-port-conflict", nil) + manager.nodeStore.Add(node) + manager.podStore.Add(&v1.Pod{ + Spec: podSpec1, + }) + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -1123,237 +916,172 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) { // this case even though it's empty pod selector matches all pods. The DaemonSetController // should detect this misconfiguration and choose not to sync the DaemonSet. We should // not observe a deletion of the pod on node1. - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ls := metav1.LabelSelector{} - ds.Spec.Selector = &ls - ds.Spec.Template.Spec.NodeSelector = map[string]string{"foo": "bar"} + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ls := metav1.LabelSelector{} + ds.Spec.Selector = &ls + ds.Spec.Template.Spec.NodeSelector = map[string]string{"foo": "bar"} - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - manager.nodeStore.Add(newNode("node1", nil)) - // Create pod not controlled by a daemonset. - manager.podStore.Add(&v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"bang": "boom"}, - Namespace: metav1.NamespaceDefault, - }, - Spec: v1.PodSpec{ - NodeName: "node1", - }, - }) - manager.dsStore.Add(ds) - - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 1) + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + manager.nodeStore.Add(newNode("node1", nil)) + // Create pod not controlled by a daemonset. + manager.podStore.Add(&v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"bang": "boom"}, + Namespace: metav1.NamespaceDefault, + }, + Spec: v1.PodSpec{ + NodeName: "node1", + }, + }) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 1) } } // Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods. 
func TestDealsWithExistingPods(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - manager.dsStore.Add(ds) - addNodes(manager.nodeStore, 0, 5, nil) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) - addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 2) - addPods(manager.podStore, "node-3", simpleDaemonSetLabel, ds, 5) - addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2) - syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + manager.dsStore.Add(ds) + addNodes(manager.nodeStore, 0, 5, nil) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) + addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 2) + addPods(manager.podStore, "node-3", simpleDaemonSetLabel, ds, 5) + addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2) + syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5, 0) } } // Daemon with node selector should launch pods on nodes matching selector. func TestSelectorDaemonLaunchesPods(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - daemon := newDaemonSet("foo") - daemon.Spec.UpdateStrategy = *strategy - daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel - manager, podControl, _, err := newTestController(daemon) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 4, nil) - addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) - manager.dsStore.Add(daemon) - syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0, 0) + for _, strategy := range updateStrategies() { + daemon := newDaemonSet("foo") + daemon.Spec.UpdateStrategy = *strategy + daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager, podControl, _, err := newTestController(daemon) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) + manager.dsStore.Add(daemon) + syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0, 0) } } // Daemon with node selector should delete pods from nodes that do not satisfy selector. 
func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - manager.dsStore.Add(ds) - addNodes(manager.nodeStore, 0, 5, nil) - addNodes(manager.nodeStore, 5, 5, simpleNodeLabel) - addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, ds, 2) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 1) - addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1) - syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 4, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + manager.dsStore.Add(ds) + addNodes(manager.nodeStore, 0, 5, nil) + addNodes(manager.nodeStore, 5, 5, simpleNodeLabel) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, ds, 2) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 1) + addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1) + syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 4, 0) } } // DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes. 
func TestSelectorDaemonDealsWithExistingPods(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - manager.dsStore.Add(ds) - addNodes(manager.nodeStore, 0, 5, nil) - addNodes(manager.nodeStore, 5, 5, simpleNodeLabel) - addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 2) - addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 4) - addPods(manager.podStore, "node-6", simpleDaemonSetLabel, ds, 13) - addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, ds, 4) - addPods(manager.podStore, "node-9", simpleDaemonSetLabel, ds, 1) - addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, ds, 1) - syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + manager.dsStore.Add(ds) + addNodes(manager.nodeStore, 0, 5, nil) + addNodes(manager.nodeStore, 5, 5, simpleNodeLabel) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 2) + addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 4) + addPods(manager.podStore, "node-6", simpleDaemonSetLabel, ds, 13) + addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, ds, 4) + addPods(manager.podStore, "node-9", simpleDaemonSetLabel, ds, 1) + addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, ds, 1) + syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20, 0) } } // DaemonSet with node selector which does not match any node labels should not launch pods. 
func TestBadSelectorDaemonDoesNothing(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - manager, podControl, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 4, nil) - addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2 - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + manager, podControl, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2 + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } // DaemonSet with node name should launch pod on node with corresponding name. func TestNameDaemonSetLaunchesPods(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeName = "node-0" - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 5, nil) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeName = "node-0" + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 5, nil) + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSet with node name that does not exist should not launch pods. 
func TestBadNameDaemonSetDoesNothing(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeName = "node-10" - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 5, nil) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeName = "node-10" + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 5, nil) + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } // DaemonSet with node selector, and node name, matching a node, should launch a pod on the node. func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel - ds.Spec.Template.Spec.NodeName = "node-6" - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 4, nil) - addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) - } - } -} - -// DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing. -func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel - ds.Spec.Template.Spec.NodeName = "node-0" - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 4, nil) - addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) - } - } -} - -// DaemonSet with node selector, matching some nodes, should launch pods on all the nodes. 
-func TestSelectorDaemonSetLaunchesPods(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() + for _, strategy := range updateStrategies() { ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + ds.Spec.Template.Spec.NodeName = "node-6" manager, podControl, _, err := newTestController(ds) if err != nil { t.Fatalf("error creating DaemonSets controller: %v", err) @@ -1361,122 +1089,145 @@ func TestSelectorDaemonSetLaunchesPods(t *testing.T) { addNodes(manager.nodeStore, 0, 4, nil) addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } +// DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing. +func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) { + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + ds.Spec.Template.Spec.NodeName = "node-0" + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + } +} + +// DaemonSet with node selector, matching some nodes, should launch pods on all the nodes. +func TestSelectorDaemonSetLaunchesPods(t *testing.T) { + ds := newDaemonSet("foo") + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 0, 0) +} + // Daemon with node affinity should launch pods on nodes matching affinity. 
func TestNodeAffinityDaemonLaunchesPods(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - daemon := newDaemonSet("foo") - daemon.Spec.UpdateStrategy = *strategy - daemon.Spec.Template.Spec.Affinity = &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "color", - Operator: v1.NodeSelectorOpIn, - Values: []string{simpleNodeLabel["color"]}, - }, + for _, strategy := range updateStrategies() { + daemon := newDaemonSet("foo") + daemon.Spec.UpdateStrategy = *strategy + daemon.Spec.Template.Spec.Affinity = &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "color", + Operator: v1.NodeSelectorOpIn, + Values: []string{simpleNodeLabel["color"]}, }, }, }, }, }, - } - - manager, podControl, _, err := newTestController(daemon) - if err != nil { - t.Fatalf("rrror creating DaemonSetsController: %v", err) - } - addNodes(manager.nodeStore, 0, 4, nil) - addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) - manager.dsStore.Add(daemon) - syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0, 0) } + } + + manager, podControl, _, err := newTestController(daemon) + if err != nil { + t.Fatalf("error creating DaemonSetsController: %v", err) + } + addNodes(manager.nodeStore, 0, 4, nil) + addNodes(manager.nodeStore, 4, 3, simpleNodeLabel) + manager.dsStore.Add(daemon) + syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0, 0) } } func TestNumberReadyStatus(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, clientset, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - var updated *apps.DaemonSet - clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { - if action.GetSubresource() != "status" { - return false, nil, nil - } - if u, ok := action.(core.UpdateAction); ok { - updated = u.GetObject().(*apps.DaemonSet) - } + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, clientset, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + var updated *apps.DaemonSet + clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + if action.GetSubresource() != "status" { return false, nil, nil - }) - addNodes(manager.nodeStore, 0, 2, simpleNodeLabel) - addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) - manager.dsStore.Add(ds) - - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) - if updated.Status.NumberReady != 0 { - t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status) - } - - selector, _ := 
metav1.LabelSelectorAsSelector(ds.Spec.Selector) - daemonPods, _ := manager.podLister.Pods(ds.Namespace).List(selector) - for _, pod := range daemonPods { - condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} - pod.Status.Conditions = append(pod.Status.Conditions, condition) + if u, ok := action.(core.UpdateAction); ok { + updated = u.GetObject().(*apps.DaemonSet) } + return false, nil, nil + }) + addNodes(manager.nodeStore, 0, 2, simpleNodeLabel) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) + manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) - if updated.Status.NumberReady != 2 { - t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status) - } + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + if updated.Status.NumberReady != 0 { + t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status) + } + + selector, _ := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + daemonPods, _ := manager.podLister.Pods(ds.Namespace).List(selector) + for _, pod := range daemonPods { + condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue} + pod.Status.Conditions = append(pod.Status.Conditions, condition) + } + + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + if updated.Status.NumberReady != 2 { + t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status) } } } func TestObservedGeneration(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds.Generation = 1 - manager, podControl, clientset, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - var updated *apps.DaemonSet - clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { - if action.GetSubresource() != "status" { - return false, nil, nil - } - if u, ok := action.(core.UpdateAction); ok { - updated = u.GetObject().(*apps.DaemonSet) - } + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds.Generation = 1 + manager, podControl, clientset, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + var updated *apps.DaemonSet + clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + if action.GetSubresource() != "status" { return false, nil, nil - }) - - addNodes(manager.nodeStore, 0, 1, simpleNodeLabel) - addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) - manager.dsStore.Add(ds) - - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) - if updated.Status.ObservedGeneration != ds.Generation { - t.Errorf("Wrong ObservedGeneration for daemon %s in status. 
Expected %d, got %d", updated.Name, ds.Generation, updated.Status.ObservedGeneration) } + if u, ok := action.(core.UpdateAction); ok { + updated = u.GetObject().(*apps.DaemonSet) + } + return false, nil, nil + }) + + addNodes(manager.nodeStore, 0, 1, simpleNodeLabel) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + if updated.Status.ObservedGeneration != ds.Generation { + t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, ds.Generation, updated.Status.ObservedGeneration) } } } @@ -1495,21 +1246,18 @@ func TestDaemonKillFailedPods(t *testing.T) { for _, test := range tests { t.Run(test.test, func(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - manager.dsStore.Add(ds) - addNodes(manager.nodeStore, 0, 1, nil) - addFailedPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numFailedPods) - addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numNormalPods) - syncAndValidateDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes, test.expectedEvents) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + manager.dsStore.Add(ds) + addNodes(manager.nodeStore, 0, 1, nil) + addFailedPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numFailedPods) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numNormalPods) + syncAndValidateDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes, test.expectedEvents) } }) } @@ -1517,256 +1265,229 @@ func TestDaemonKillFailedPods(t *testing.T) { // DaemonSet controller needs to backoff when killing failed pods to avoid hot looping and fighting with kubelet. 
func TestDaemonKillFailedPodsBackoff(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - t.Run(string(strategy.Type), func(t *testing.T) { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy + for _, strategy := range updateStrategies() { + t.Run(string(strategy.Type), func(t *testing.T) { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } - manager.dsStore.Add(ds) - addNodes(manager.nodeStore, 0, 1, nil) + manager.dsStore.Add(ds) + addNodes(manager.nodeStore, 0, 1, nil) - nodeName := "node-0" - pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, simpleDaemonSetLabel, ds) + nodeName := "node-0" + pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, simpleDaemonSetLabel, ds) - // Add a failed Pod - pod.Status.Phase = v1.PodFailed - err = manager.podStore.Add(pod) - if err != nil { - t.Fatal(err) - } + // Add a failed Pod + pod.Status.Phase = v1.PodFailed + err = manager.podStore.Add(pod) + if err != nil { + t.Fatal(err) + } - backoffKey := failedPodsBackoffKey(ds, nodeName) + backoffKey := failedPodsBackoffKey(ds, nodeName) - // First sync will delete the pod, initializing backoff - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 1) - initialDelay := manager.failedPodsBackoff.Get(backoffKey) - if initialDelay <= 0 { - t.Fatal("Initial delay is expected to be set.") - } + // First sync will delete the pod, initializing backoff + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 1) + initialDelay := manager.failedPodsBackoff.Get(backoffKey) + if initialDelay <= 0 { + t.Fatal("Initial delay is expected to be set.") + } - resetCounters(manager) + resetCounters(manager) - // Immediate (second) sync gets limited by the backoff - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) - delay := manager.failedPodsBackoff.Get(backoffKey) - if delay != initialDelay { - t.Fatal("Backoff delay shouldn't be raised while waiting.") - } + // Immediate (second) sync gets limited by the backoff + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + delay := manager.failedPodsBackoff.Get(backoffKey) + if delay != initialDelay { + t.Fatal("Backoff delay shouldn't be raised while waiting.") + } - resetCounters(manager) + resetCounters(manager) - // Sleep to wait out backoff - fakeClock := manager.failedPodsBackoff.Clock + // Sleep to wait out backoff + fakeClock := manager.failedPodsBackoff.Clock - // Move just before the backoff end time - fakeClock.Sleep(delay - 1*time.Nanosecond) - if !manager.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, fakeClock.Now()) { - t.Errorf("Backoff delay didn't last the whole waitout period.") - } + // Move just before the backoff end time + fakeClock.Sleep(delay - 1*time.Nanosecond) + if !manager.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, fakeClock.Now()) { + t.Errorf("Backoff delay didn't last the whole waitout period.") + } - // Move to the backoff end time - fakeClock.Sleep(1 * time.Nanosecond) - if manager.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, fakeClock.Now()) { - t.Fatal("Backoff delay hasn't been reset after the period has 
passed.") - } + // Move to the backoff end time + fakeClock.Sleep(1 * time.Nanosecond) + if manager.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, fakeClock.Now()) { + t.Fatal("Backoff delay hasn't been reset after the period has passed.") + } - // After backoff time, it will delete the failed pod - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 1) - }) - } + // After backoff time, it will delete the failed pod + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 1) + }) } } // Daemonset should not remove a running pod from a node if the pod doesn't // tolerate the nodes NoSchedule taint func TestNoScheduleTaintedDoesntEvicitRunningIntolerantPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("intolerant") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("tainted", nil) - manager.nodeStore.Add(node) - setNodeTaint(node, noScheduleTaints) - manager.podStore.Add(newPod("keep-running-me", "tainted", simpleDaemonSetLabel, ds)) - manager.dsStore.Add(ds) - - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("intolerant") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("tainted", nil) + manager.nodeStore.Add(node) + setNodeTaint(node, noScheduleTaints) + manager.podStore.Add(newPod("keep-running-me", "tainted", simpleDaemonSetLabel, ds)) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } // Daemonset should remove a running pod from a node if the pod doesn't // tolerate the nodes NoExecute taint func TestNoExecuteTaintedDoesEvicitRunningIntolerantPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("intolerant") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("tainted", nil) - manager.nodeStore.Add(node) - setNodeTaint(node, noExecuteTaints) - manager.podStore.Add(newPod("stop-running-me", "tainted", simpleDaemonSetLabel, ds)) - manager.dsStore.Add(ds) - - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("intolerant") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("tainted", nil) + manager.nodeStore.Add(node) + setNodeTaint(node, noExecuteTaints) + manager.podStore.Add(newPod("stop-running-me", "tainted", simpleDaemonSetLabel, ds)) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0) } } // DaemonSet should not launch a pod on a tainted node when the pod doesn't tolerate that taint. 
func TestTaintedNodeDaemonDoesNotLaunchIntolerantPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("intolerant") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("tainted", nil) - setNodeTaint(node, noScheduleTaints) - manager.nodeStore.Add(node) - manager.dsStore.Add(ds) - - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("intolerant") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("tainted", nil) + setNodeTaint(node, noScheduleTaints) + manager.nodeStore.Add(node) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } // DaemonSet should launch a pod on a tainted node when the pod can tolerate that taint. func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("tolerate") - ds.Spec.UpdateStrategy = *strategy - setDaemonSetToleration(ds, noScheduleTolerations) - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("tainted", nil) - setNodeTaint(node, noScheduleTaints) - manager.nodeStore.Add(node) - manager.dsStore.Add(ds) - - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("tolerate") + ds.Spec.UpdateStrategy = *strategy + setDaemonSetToleration(ds, noScheduleTolerations) + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("tainted", nil) + setNodeTaint(node, noScheduleTaints) + manager.nodeStore.Add(node) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSet should launch a pod on a not ready node with taint notReady:NoExecute. 
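+// (Daemon pods are created with NoExecute tolerations for the not-ready and unreachable
+// node taints, so the controller still schedules onto such nodes and taint-based
+// eviction does not remove the pods afterwards.)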
func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("simple") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("tainted", nil) - setNodeTaint(node, nodeNotReady) - node.Status.Conditions = []v1.NodeCondition{ - {Type: v1.NodeReady, Status: v1.ConditionFalse}, - } - manager.nodeStore.Add(node) - manager.dsStore.Add(ds) - - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("simple") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("tainted", nil) + setNodeTaint(node, nodeNotReady) + node.Status.Conditions = []v1.NodeCondition{ + {Type: v1.NodeReady, Status: v1.ConditionFalse}, + } + manager.nodeStore.Add(node) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSet should launch a pod on an unreachable node with taint unreachable:NoExecute. func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("simple") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("tainted", nil) - setNodeTaint(node, nodeUnreachable) - node.Status.Conditions = []v1.NodeCondition{ - {Type: v1.NodeReady, Status: v1.ConditionUnknown}, - } - manager.nodeStore.Add(node) - manager.dsStore.Add(ds) - - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("simple") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + node := newNode("tainted", nil) + setNodeTaint(node, nodeUnreachable) + node.Status.Conditions = []v1.NodeCondition{ + {Type: v1.NodeReady, Status: v1.ConditionUnknown}, + } + manager.nodeStore.Add(node) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSet should launch a pod on an untainted node when the pod has tolerations. 
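+// (Tolerations are only consulted when a matching taint exists; on an untainted node
+// they are inert and must not block scheduling.)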
func TestNodeDaemonLaunchesToleratePod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("tolerate") - ds.Spec.UpdateStrategy = *strategy - setDaemonSetToleration(ds, noScheduleTolerations) - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - addNodes(manager.nodeStore, 0, 1, nil) - manager.dsStore.Add(ds) - - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("tolerate") + ds.Spec.UpdateStrategy = *strategy + setDaemonSetToleration(ds, noScheduleTolerations) + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + addNodes(manager.nodeStore, 0, 1, nil) + manager.dsStore.Add(ds) + + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } // DaemonSet should not launch a replacement pod on a node while the daemon pod there is still terminating. func TestDaemonSetRespectsTermination(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - addNodes(manager.nodeStore, 0, 1, simpleNodeLabel) - pod := newPod(fmt.Sprintf("%s-", "node-0"), "node-0", simpleDaemonSetLabel, ds) - dt := metav1.Now() - pod.DeletionTimestamp = &dt - manager.podStore.Add(pod) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) } + + addNodes(manager.nodeStore, 0, 1, simpleNodeLabel) + pod := newPod(fmt.Sprintf("%s-", "node-0"), "node-0", simpleDaemonSetLabel, ds) + dt := metav1.Now() + pod.DeletionTimestamp = &dt + manager.podStore.Add(pod) + manager.dsStore.Add(ds) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) } } @@ -1780,126 +1501,32 @@ func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) { // DaemonSet should launch a pod even when the node has MemoryPressure/DiskPressure/PIDPressure taints. 
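+// (The daemon pods created by the controller tolerate the disk-, memory- and
+// PID-pressure NoSchedule taints, so node pressure conditions do not block the
+// DaemonSet controller from launching a pod here.)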
func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("critical") - ds.Spec.UpdateStrategy = *strategy - setDaemonSetCritical(ds) - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - - node := newNode("resources-pressure", nil) - node.Status.Conditions = []v1.NodeCondition{ - {Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}, - {Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue}, - {Type: v1.NodePIDPressure, Status: v1.ConditionTrue}, - } - node.Spec.Taints = []v1.Taint{ - {Key: schedulerapi.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule}, - {Key: schedulerapi.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule}, - {Key: schedulerapi.TaintNodePIDPressure, Effect: v1.TaintEffectNoSchedule}, - } - manager.nodeStore.Add(node) - - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) - } - } -} - -// When ScheduleDaemonSetPods is disabled, DaemonSet should launch a critical pod even when the node has insufficient free resource. -func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)() for _, strategy := range updateStrategies() { - podSpec := resourcePodSpec("too-much-mem", "75M", "75m") ds := newDaemonSet("critical") ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec = podSpec - + setDaemonSetCritical(ds) manager, podControl, _, err := newTestController(ds) if err != nil { t.Fatalf("error creating DaemonSets controller: %v", err) } - node := newNode("too-much-mem", nil) - node.Status.Allocatable = allocatableResources("100M", "200m") + + node := newNode("resources-pressure", nil) + node.Status.Conditions = []v1.NodeCondition{ + {Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}, + {Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue}, + {Type: v1.NodePIDPressure, Status: v1.ConditionTrue}, + } + node.Spec.Taints = []v1.Taint{ + {Key: schedulerapi.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule}, + {Key: schedulerapi.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule}, + {Key: schedulerapi.TaintNodePIDPressure, Effect: v1.TaintEffectNoSchedule}, + } manager.nodeStore.Add(node) - manager.podStore.Add(&v1.Pod{ - Spec: podSpec, - }) + // Enabling critical pod and taint nodes by condition feature gate should create critical pod + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)() manager.dsStore.Add(ds) - switch strategy.Type { - case apps.OnDeleteDaemonSetStrategyType: - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2) - case apps.RollingUpdateDaemonSetStrategyType: - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3) - default: - t.Fatalf("unexpected UpdateStrategy %+v", strategy) - } - } - - for _, strategy := range updateStrategies() { - podSpec := resourcePodSpec("too-much-mem", "75M", "75m") - ds := newDaemonSet("critical") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec = podSpec - setDaemonSetCritical(ds) - - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: 
%v", err) - } - node := newNode("too-much-mem", nil) - node.Status.Allocatable = allocatableResources("100M", "200m") - manager.nodeStore.Add(node) - manager.podStore.Add(&v1.Pod{ - Spec: podSpec, - }) - - manager.dsStore.Add(ds) - - switch strategy.Type { - case apps.OnDeleteDaemonSetStrategyType: - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) - case apps.RollingUpdateDaemonSetStrategyType: - syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) - default: - t.Fatalf("unexpected UpdateStrategy %+v", strategy) - } - } -} - -// When ScheduleDaemonSetPods is disabled, DaemonSets should NOT launch a critical pod when there are port conflicts. -func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)() - for _, strategy := range updateStrategies() { - podSpec := v1.PodSpec{ - NodeName: "port-conflict", - Containers: []v1.Container{{ - Ports: []v1.ContainerPort{{ - HostPort: 666, - }}, - }}, - } - manager, podControl, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - node := newNode("port-conflict", nil) - manager.nodeStore.Add(node) - manager.podStore.Add(&v1.Pod{ - Spec: podSpec, - }) - - ds := newDaemonSet("critical") - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec = podSpec - setDaemonSetCritical(ds) - manager.dsStore.Add(ds) - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) + syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) } } @@ -1913,78 +1540,90 @@ func setDaemonSetCritical(ds *apps.DaemonSet) { } func TestNodeShouldRunDaemonPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - var shouldCreate, wantToRun, shouldContinueRunning bool - if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - shouldCreate = true - wantToRun = true - shouldContinueRunning = true - } - cases := []struct { - predicateName string - podsOnNode []*v1.Pod - nodeCondition []v1.NodeCondition - nodeUnschedulable bool - ds *apps.DaemonSet - wantToRun, shouldCreate, shouldContinueRunning bool - err error - }{ - { - predicateName: "ShouldRunDaemonPod", - ds: &apps.DaemonSet{ - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: simpleDaemonSetLabel, - }, - Spec: resourcePodSpec("", "50M", "0.5"), + var shouldCreate, wantToRun, shouldContinueRunning bool + shouldCreate = true + wantToRun = true + shouldContinueRunning = true + cases := []struct { + predicateName string + podsOnNode []*v1.Pod + nodeCondition []v1.NodeCondition + nodeUnschedulable bool + ds *apps.DaemonSet + wantToRun, shouldCreate, shouldContinueRunning bool + err error + }{ + { + predicateName: "ShouldRunDaemonPod", + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, }, + Spec: resourcePodSpec("", "50M", "0.5"), }, }, - wantToRun: true, - shouldCreate: true, - shouldContinueRunning: true, }, - { - predicateName: "InsufficientResourceError", - ds: &apps.DaemonSet{ - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{MatchLabels: 
simpleDaemonSetLabel}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: simpleDaemonSetLabel, - }, - Spec: resourcePodSpec("", "200M", "0.5"), + wantToRun: true, + shouldCreate: true, + shouldContinueRunning: true, + }, + { + predicateName: "InsufficientResourceError", + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, }, + Spec: resourcePodSpec("", "200M", "0.5"), }, }, - wantToRun: true, - shouldCreate: shouldCreate, - shouldContinueRunning: true, }, - { - predicateName: "ErrPodNotMatchHostName", - ds: &apps.DaemonSet{ - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: simpleDaemonSetLabel, - }, - Spec: resourcePodSpec("other-node", "50M", "0.5"), + wantToRun: true, + shouldCreate: shouldCreate, + shouldContinueRunning: true, + }, + { + predicateName: "ErrPodNotMatchHostName", + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, }, + Spec: resourcePodSpec("other-node", "50M", "0.5"), }, }, - wantToRun: false, - shouldCreate: false, - shouldContinueRunning: false, }, - { - predicateName: "ErrPodNotFitsHostPorts", - podsOnNode: []*v1.Pod{ - { + wantToRun: false, + shouldCreate: false, + shouldContinueRunning: false, + }, + { + predicateName: "ErrPodNotFitsHostPorts", + podsOnNode: []*v1.Pod{ + { + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Ports: []v1.ContainerPort{{ + HostPort: 666, + }}, + }}, + }, + }, + }, + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, Spec: v1.PodSpec{ Containers: []v1.Container{{ Ports: []v1.ContainerPort{{ @@ -1994,144 +1633,127 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { }, }, }, - ds: &apps.DaemonSet{ - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: simpleDaemonSetLabel, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{{ - Ports: []v1.ContainerPort{{ - HostPort: 666, - }}, - }}, - }, - }, - }, - }, - wantToRun: wantToRun, - shouldCreate: shouldCreate, - shouldContinueRunning: shouldContinueRunning, }, - { - predicateName: "InsufficientResourceError", - podsOnNode: []*v1.Pod{ - { - Spec: v1.PodSpec{ - Containers: []v1.Container{{ - Ports: []v1.ContainerPort{{ - HostPort: 666, - }}, - Resources: resourceContainerSpec("50M", "0.5"), + wantToRun: wantToRun, + shouldCreate: shouldCreate, + shouldContinueRunning: shouldContinueRunning, + }, + { + predicateName: "InsufficientResourceError", + podsOnNode: []*v1.Pod{ + { + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Ports: []v1.ContainerPort{{ + HostPort: 666, }}, - }, + Resources: resourceContainerSpec("50M", "0.5"), + }}, }, }, - ds: &apps.DaemonSet{ - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: simpleDaemonSetLabel, - }, - Spec: resourcePodSpec("", "100M", "0.5"), - }, - }, - }, - wantToRun: true, - 
shouldCreate: shouldCreate, // This is because we don't care about the resource constraints any more and let default scheduler handle it. - shouldContinueRunning: true, }, - { - predicateName: "ShouldRunDaemonPod", - podsOnNode: []*v1.Pod{ - { - Spec: v1.PodSpec{ - Containers: []v1.Container{{ - Ports: []v1.ContainerPort{{ - HostPort: 666, - }}, - Resources: resourceContainerSpec("50M", "0.5"), + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: resourcePodSpec("", "100M", "0.5"), + }, + }, + }, + wantToRun: true, + shouldCreate: shouldCreate, // This is because we don't care about the resource constraints any more and let default scheduler handle it. + shouldContinueRunning: true, + }, + { + predicateName: "ShouldRunDaemonPod", + podsOnNode: []*v1.Pod{ + { + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Ports: []v1.ContainerPort{{ + HostPort: 666, }}, - }, + Resources: resourceContainerSpec("50M", "0.5"), + }}, }, }, - ds: &apps.DaemonSet{ - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: simpleDaemonSetLabel, - }, - Spec: resourcePodSpec("", "50M", "0.5"), - }, - }, - }, - wantToRun: true, - shouldCreate: true, - shouldContinueRunning: true, }, - { - predicateName: "ErrNodeSelectorNotMatch", - ds: &apps.DaemonSet{ - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: simpleDaemonSetLabel, - }, - Spec: v1.PodSpec{ - NodeSelector: simpleDaemonSetLabel2, - }, + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: resourcePodSpec("", "50M", "0.5"), + }, + }, + }, + wantToRun: true, + shouldCreate: true, + shouldContinueRunning: true, + }, + { + predicateName: "ErrNodeSelectorNotMatch", + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: v1.PodSpec{ + NodeSelector: simpleDaemonSetLabel2, }, }, }, - wantToRun: false, - shouldCreate: false, - shouldContinueRunning: false, }, - { - predicateName: "ShouldRunDaemonPod", - ds: &apps.DaemonSet{ - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: simpleDaemonSetLabel, - }, - Spec: v1.PodSpec{ - NodeSelector: simpleDaemonSetLabel, - }, + wantToRun: false, + shouldCreate: false, + shouldContinueRunning: false, + }, + { + predicateName: "ShouldRunDaemonPod", + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: v1.PodSpec{ + NodeSelector: simpleDaemonSetLabel, }, }, }, - wantToRun: true, - shouldCreate: true, - shouldContinueRunning: true, }, - { - predicateName: "ErrPodAffinityNotMatch", - ds: &apps.DaemonSet{ - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{MatchLabels: 
simpleDaemonSetLabel}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: simpleDaemonSetLabel, - }, - Spec: v1.PodSpec{ - Affinity: &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "type", - Operator: v1.NodeSelectorOpIn, - Values: []string{"test"}, - }, + wantToRun: true, + shouldCreate: true, + shouldContinueRunning: true, + }, + { + predicateName: "ErrPodAffinityNotMatch", + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "type", + Operator: v1.NodeSelectorOpIn, + Values: []string{"test"}, }, }, }, @@ -2142,31 +1764,31 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { }, }, }, - wantToRun: false, - shouldCreate: false, - shouldContinueRunning: false, }, - { - predicateName: "ShouldRunDaemonPod", - ds: &apps.DaemonSet{ - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: simpleDaemonSetLabel, - }, - Spec: v1.PodSpec{ - Affinity: &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "type", - Operator: v1.NodeSelectorOpIn, - Values: []string{"production"}, - }, + wantToRun: false, + shouldCreate: false, + shouldContinueRunning: false, + }, + { + predicateName: "ShouldRunDaemonPod", + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, + }, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "type", + Operator: v1.NodeSelectorOpIn, + Values: []string{"production"}, }, }, }, @@ -2177,61 +1799,61 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { }, }, }, - wantToRun: true, - shouldCreate: true, - shouldContinueRunning: true, }, - { - predicateName: "ShouldRunDaemonPodOnUnscheduableNode", - ds: &apps.DaemonSet{ - Spec: apps.DaemonSetSpec{ - Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: simpleDaemonSetLabel, - }, - Spec: resourcePodSpec("", "50M", "0.5"), + wantToRun: true, + shouldCreate: true, + shouldContinueRunning: true, + }, + { + predicateName: "ShouldRunDaemonPodOnUnscheduableNode", + ds: &apps.DaemonSet{ + Spec: apps.DaemonSetSpec{ + Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel}, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: simpleDaemonSetLabel, }, + Spec: resourcePodSpec("", "50M", "0.5"), }, }, - nodeUnschedulable: true, - wantToRun: true, - shouldCreate: true, - 
shouldContinueRunning: true, }, - } + nodeUnschedulable: true, + wantToRun: true, + shouldCreate: true, + shouldContinueRunning: true, + }, + } - for i, c := range cases { - for _, strategy := range updateStrategies() { - node := newNode("test-node", simpleDaemonSetLabel) - node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...) - node.Status.Allocatable = allocatableResources("100M", "1") - node.Spec.Unschedulable = c.nodeUnschedulable - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - manager.nodeStore.Add(node) - for _, p := range c.podsOnNode { - manager.podStore.Add(p) - p.Spec.NodeName = "test-node" - manager.podNodeIndex.Add(p) - } - c.ds.Spec.UpdateStrategy = *strategy - wantToRun, shouldRun, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds) + for i, c := range cases { + for _, strategy := range updateStrategies() { + node := newNode("test-node", simpleDaemonSetLabel) + node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...) + node.Status.Allocatable = allocatableResources("100M", "1") + node.Spec.Unschedulable = c.nodeUnschedulable + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + manager.nodeStore.Add(node) + for _, p := range c.podsOnNode { + manager.podStore.Add(p) + p.Spec.NodeName = "test-node" + manager.podNodeIndex.Add(p) + } + c.ds.Spec.UpdateStrategy = *strategy + wantToRun, shouldRun, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds) - if wantToRun != c.wantToRun { - t.Errorf("[%v] strategy: %v, predicateName: %v expected wantToRun: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.wantToRun, wantToRun) - } - if shouldRun != c.shouldCreate { - t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldRun: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldCreate, shouldRun) - } - if shouldContinueRunning != c.shouldContinueRunning { - t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldContinueRunning: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldContinueRunning, shouldContinueRunning) - } - if err != c.err { - t.Errorf("[%v] strategy: %v, predicateName: %v expected err: %v, got: %v", i, c.predicateName, c.ds.Spec.UpdateStrategy.Type, c.err, err) - } + if wantToRun != c.wantToRun { + t.Errorf("[%v] strategy: %v, predicateName: %v expected wantToRun: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.wantToRun, wantToRun) + } + if shouldRun != c.shouldCreate { + t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldRun: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldCreate, shouldRun) + } + if shouldContinueRunning != c.shouldContinueRunning { + t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldContinueRunning: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldContinueRunning, shouldContinueRunning) + } + if err != c.err { + t.Errorf("[%v] strategy: %v, predicateName: %v expected err: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.err, err) } } } @@ -2240,124 +1862,111 @@ func TestNodeShouldRunDaemonPod(t *testing.T) { // DaemonSets should be resynced when node labels or taints changed func TestUpdateNode(t *testing.T) { var enqueued bool - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, 
utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - cases := []struct { - test string - newNode *v1.Node - oldNode *v1.Node - ds *apps.DaemonSet - expectedEventsFunc func(strategyType apps.DaemonSetUpdateStrategyType) int - shouldEnqueue bool - expectedCreates func() int - }{ - { - test: "Nothing changed, should not enqueue", - oldNode: newNode("node1", nil), - newNode: newNode("node1", nil), - ds: func() *apps.DaemonSet { - ds := newDaemonSet("ds") - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel - return ds - }(), - shouldEnqueue: false, - expectedCreates: func() int { return 0 }, - }, - { - test: "Node labels changed", - oldNode: newNode("node1", nil), - newNode: newNode("node1", simpleNodeLabel), - ds: func() *apps.DaemonSet { - ds := newDaemonSet("ds") - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel - return ds - }(), - shouldEnqueue: true, - expectedCreates: func() int { return 0 }, - }, - { - test: "Node taints changed", - oldNode: func() *v1.Node { - node := newNode("node1", nil) - setNodeTaint(node, noScheduleTaints) - return node - }(), - newNode: newNode("node1", nil), - ds: newDaemonSet("ds"), - shouldEnqueue: true, - expectedCreates: func() int { return 0 }, - }, - { - test: "Node Allocatable changed", - oldNode: newNode("node1", nil), - newNode: func() *v1.Node { - node := newNode("node1", nil) - node.Status.Allocatable = allocatableResources("200M", "200m") - return node - }(), - ds: func() *apps.DaemonSet { - ds := newDaemonSet("ds") - ds.Spec.Template.Spec = resourcePodSpecWithoutNodeName("200M", "200m") - return ds - }(), - expectedEventsFunc: func(strategyType apps.DaemonSetUpdateStrategyType) int { - switch strategyType { - case apps.OnDeleteDaemonSetStrategyType: - if !utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - return 2 - } - return 0 - case apps.RollingUpdateDaemonSetStrategyType: - if !utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - return 3 - } - return 0 - default: - t.Fatalf("unexpected UpdateStrategy %+v", strategyType) - } + cases := []struct { + test string + newNode *v1.Node + oldNode *v1.Node + ds *apps.DaemonSet + expectedEventsFunc func(strategyType apps.DaemonSetUpdateStrategyType) int + shouldEnqueue bool + expectedCreates func() int + }{ + { + test: "Nothing changed, should not enqueue", + oldNode: newNode("node1", nil), + newNode: newNode("node1", nil), + ds: func() *apps.DaemonSet { + ds := newDaemonSet("ds") + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + return ds + }(), + shouldEnqueue: false, + expectedCreates: func() int { return 0 }, + }, + { + test: "Node labels changed", + oldNode: newNode("node1", nil), + newNode: newNode("node1", simpleNodeLabel), + ds: func() *apps.DaemonSet { + ds := newDaemonSet("ds") + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + return ds + }(), + shouldEnqueue: true, + expectedCreates: func() int { return 0 }, + }, + { + test: "Node taints changed", + oldNode: func() *v1.Node { + node := newNode("node1", nil) + setNodeTaint(node, noScheduleTaints) + return node + }(), + newNode: newNode("node1", nil), + ds: newDaemonSet("ds"), + shouldEnqueue: true, + expectedCreates: func() int { return 0 }, + }, + { + test: "Node Allocatable changed", + oldNode: newNode("node1", nil), + newNode: func() *v1.Node { + node := newNode("node1", nil) + node.Status.Allocatable = allocatableResources("200M", "200m") + return node + }(), + ds: func() *apps.DaemonSet { + ds := newDaemonSet("ds") + ds.Spec.Template.Spec = 
resourcePodSpecWithoutNodeName("200M", "200m") + return ds + }(), + expectedEventsFunc: func(strategyType apps.DaemonSetUpdateStrategyType) int { + switch strategyType { + case apps.OnDeleteDaemonSetStrategyType: return 0 - }, - shouldEnqueue: !utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods), - expectedCreates: func() int { - if !utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - return 0 - } else { - return 1 - } - }, + case apps.RollingUpdateDaemonSetStrategyType: + return 0 + default: + t.Fatalf("unexpected UpdateStrategy %+v", strategyType) + } + return 0 }, - } - for _, c := range cases { - for _, strategy := range updateStrategies() { - manager, podControl, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - manager.nodeStore.Add(c.oldNode) - c.ds.Spec.UpdateStrategy = *strategy - manager.dsStore.Add(c.ds) + shouldEnqueue: false, + expectedCreates: func() int { + return 1 + }, + }, + } + for _, c := range cases { + for _, strategy := range updateStrategies() { + manager, podControl, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + manager.nodeStore.Add(c.oldNode) + c.ds.Spec.UpdateStrategy = *strategy + manager.dsStore.Add(c.ds) - expectedEvents := 0 - if c.expectedEventsFunc != nil { - expectedEvents = c.expectedEventsFunc(strategy.Type) - } - expectedCreates := 0 - if c.expectedCreates != nil { - expectedCreates = c.expectedCreates() - } - syncAndValidateDaemonSets(t, manager, c.ds, podControl, expectedCreates, 0, expectedEvents) + expectedEvents := 0 + if c.expectedEventsFunc != nil { + expectedEvents = c.expectedEventsFunc(strategy.Type) + } + expectedCreates := 0 + if c.expectedCreates != nil { + expectedCreates = c.expectedCreates() + } + syncAndValidateDaemonSets(t, manager, c.ds, podControl, expectedCreates, 0, expectedEvents) - manager.enqueueDaemonSet = func(ds *apps.DaemonSet) { - if ds.Name == "ds" { - enqueued = true - } + manager.enqueueDaemonSet = func(ds *apps.DaemonSet) { + if ds.Name == "ds" { + enqueued = true } + } - enqueued = false - manager.updateNode(c.oldNode, c.newNode) - if enqueued != c.shouldEnqueue { - t.Errorf("Test case: '%s', expected: %t, got: %t", c.test, c.shouldEnqueue, enqueued) - } + enqueued = false + manager.updateNode(c.oldNode, c.newNode) + if enqueued != c.shouldEnqueue { + t.Errorf("Test case: '%s', expected: %t, got: %t", c.test, c.shouldEnqueue, enqueued) } } } @@ -2413,7 +2022,7 @@ func TestDeleteNoDaemonPod(t *testing.T) { ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m") return ds }(), - shouldEnqueue: !utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods), + shouldEnqueue: false, }, { test: "Deleted non-daemon pods (with controller) to release resources", @@ -2458,7 +2067,7 @@ func TestDeleteNoDaemonPod(t *testing.T) { ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m") return ds }(), - shouldEnqueue: !utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods), + shouldEnqueue: false, }, { test: "Deleted no scheduled pods", @@ -2517,18 +2126,8 @@ func TestDeleteNoDaemonPod(t *testing.T) { manager.podStore.Add(pod) } switch strategy.Type { - case apps.OnDeleteDaemonSetStrategyType: - if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - syncAndValidateDaemonSets(t, manager, c.ds, podControl, 1, 0, 0) - } else { - syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 2) - } - case 
apps.RollingUpdateDaemonSetStrategyType: - if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - syncAndValidateDaemonSets(t, manager, c.ds, podControl, 1, 0, 0) - } else { - syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 3) - } + case apps.OnDeleteDaemonSetStrategyType, apps.RollingUpdateDaemonSetStrategyType: + syncAndValidateDaemonSets(t, manager, c.ds, podControl, 1, 0, 0) default: t.Fatalf("unexpected UpdateStrategy %+v", strategy) } @@ -2549,457 +2148,411 @@ func TestDeleteNoDaemonPod(t *testing.T) { } func TestDeleteUnscheduledPodForNotExistingNode(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, podControl, _, err := newTestController(ds) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - manager.dsStore.Add(ds) - addNodes(manager.nodeStore, 0, 1, nil) - addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) - addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + manager.dsStore.Add(ds) + addNodes(manager.nodeStore, 0, 1, nil) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) - podScheduledUsingAffinity := newPod("pod1-node-3", "", simpleDaemonSetLabel, ds) - podScheduledUsingAffinity.Spec.Affinity = &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchFields: []v1.NodeSelectorRequirement{ - { - Key: schedulerapi.NodeFieldSelectorKeyNodeName, - Operator: v1.NodeSelectorOpIn, - Values: []string{"node-2"}, - }, + podScheduledUsingAffinity := newPod("pod1-node-3", "", simpleDaemonSetLabel, ds) + podScheduledUsingAffinity.Spec.Affinity = &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchFields: []v1.NodeSelectorRequirement{ + { + Key: schedulerapi.NodeFieldSelectorKeyNodeName, + Operator: v1.NodeSelectorOpIn, + Values: []string{"node-2"}, }, }, }, }, }, - } - manager.podStore.Add(podScheduledUsingAffinity) - if f { - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0) - } else { - syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0) - } + }, } + manager.podStore.Add(podScheduledUsingAffinity) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0) } } func TestGetNodesToDaemonPods(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - manager, _, _, err := newTestController(ds, ds2) - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - manager.dsStore.Add(ds) - manager.dsStore.Add(ds2) - addNodes(manager.nodeStore, 
0, 2, nil) + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + manager, _, _, err := newTestController(ds, ds2) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + manager.dsStore.Add(ds) + manager.dsStore.Add(ds2) + addNodes(manager.nodeStore, 0, 2, nil) - // These pods should be returned. - wantedPods := []*v1.Pod{ - newPod("matching-owned-0-", "node-0", simpleDaemonSetLabel, ds), - newPod("matching-orphan-0-", "node-0", simpleDaemonSetLabel, nil), - newPod("matching-owned-1-", "node-1", simpleDaemonSetLabel, ds), - newPod("matching-orphan-1-", "node-1", simpleDaemonSetLabel, nil), - } - failedPod := newPod("matching-owned-failed-pod-1-", "node-1", simpleDaemonSetLabel, ds) - failedPod.Status = v1.PodStatus{Phase: v1.PodFailed} - wantedPods = append(wantedPods, failedPod) - for _, pod := range wantedPods { - manager.podStore.Add(pod) - } + // These pods should be returned. + wantedPods := []*v1.Pod{ + newPod("matching-owned-0-", "node-0", simpleDaemonSetLabel, ds), + newPod("matching-orphan-0-", "node-0", simpleDaemonSetLabel, nil), + newPod("matching-owned-1-", "node-1", simpleDaemonSetLabel, ds), + newPod("matching-orphan-1-", "node-1", simpleDaemonSetLabel, nil), + } + failedPod := newPod("matching-owned-failed-pod-1-", "node-1", simpleDaemonSetLabel, ds) + failedPod.Status = v1.PodStatus{Phase: v1.PodFailed} + wantedPods = append(wantedPods, failedPod) + for _, pod := range wantedPods { + manager.podStore.Add(pod) + } - // These pods should be ignored. - ignoredPods := []*v1.Pod{ - newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds), - newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil), - newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2), - } - for _, pod := range ignoredPods { - manager.podStore.Add(pod) - } + // These pods should be ignored. 
+ ignoredPods := []*v1.Pod{ + newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds), + newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil), + newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2), + } + for _, pod := range ignoredPods { + manager.podStore.Add(pod) + } - nodesToDaemonPods, err := manager.getNodesToDaemonPods(ds) - if err != nil { - t.Fatalf("getNodesToDaemonPods() error: %v", err) - } - gotPods := map[string]bool{} - for node, pods := range nodesToDaemonPods { - for _, pod := range pods { - if pod.Spec.NodeName != node { - t.Errorf("pod %v grouped into %v but belongs in %v", pod.Name, node, pod.Spec.NodeName) - } - gotPods[pod.Name] = true + nodesToDaemonPods, err := manager.getNodesToDaemonPods(ds) + if err != nil { + t.Fatalf("getNodesToDaemonPods() error: %v", err) + } + gotPods := map[string]bool{} + for node, pods := range nodesToDaemonPods { + for _, pod := range pods { + if pod.Spec.NodeName != node { + t.Errorf("pod %v grouped into %v but belongs in %v", pod.Name, node, pod.Spec.NodeName) } + gotPods[pod.Name] = true } - for _, pod := range wantedPods { - if !gotPods[pod.Name] { - t.Errorf("expected pod %v but didn't get it", pod.Name) - } - delete(gotPods, pod.Name) - } - for podName := range gotPods { - t.Errorf("unexpected pod %v was returned", podName) + } + for _, pod := range wantedPods { + if !gotPods[pod.Name] { + t.Errorf("expected pod %v but didn't get it", pod.Name) } + delete(gotPods, pod.Name) + } + for podName := range gotPods { + t.Errorf("unexpected pod %v was returned", podName) } } } func TestAddNode(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + node1 := newNode("node1", nil) + ds := newDaemonSet("ds") + ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel + manager.dsStore.Add(ds) + + manager.addNode(node1) + if got, want := manager.queue.Len(), 0; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + + node2 := newNode("node2", simpleNodeLabel) + manager.addNode(node2) + if got, want := manager.queue.Len(), 1; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + key, done := manager.queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller for node %v", node2.Name) + } +} + +func TestAddPod(t *testing.T) { + for _, strategy := range updateStrategies() { manager, _, _, err := newTestController() if err != nil { t.Fatalf("error creating DaemonSets controller: %v", err) } - node1 := newNode("node1", nil) - ds := newDaemonSet("ds") - ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel - manager.dsStore.Add(ds) + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + manager.dsStore.Add(ds1) + manager.dsStore.Add(ds2) - manager.addNode(node1) - if got, want := manager.queue.Len(), 0; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - - node2 := newNode("node2", simpleNodeLabel) - manager.addNode(node2) + pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) + manager.addPod(pod1) if got, want := manager.queue.Len(), 1; got != want { t.Fatalf("queue.Len() = %v, want %v", got, want) } key, done := manager.queue.Get() if key == nil || done { - t.Fatalf("failed to enqueue 
controller for node %v", node2.Name) + t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) + } + expectedKey, _ := controller.KeyFunc(ds1) + if got, want := key.(string), expectedKey; got != want { + t.Errorf("queue.Get() = %v, want %v", got, want) } - } -} -func TestAddPod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - manager.dsStore.Add(ds1) - manager.dsStore.Add(ds2) - - pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) - manager.addPod(pod1) - if got, want := manager.queue.Len(), 1; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - key, done := manager.queue.Get() - if key == nil || done { - t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) - } - expectedKey, _ := controller.KeyFunc(ds1) - if got, want := key.(string), expectedKey; got != want { - t.Errorf("queue.Get() = %v, want %v", got, want) - } - - pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2) - manager.addPod(pod2) - if got, want := manager.queue.Len(), 1; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - key, done = manager.queue.Get() - if key == nil || done { - t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) - } - expectedKey, _ = controller.KeyFunc(ds2) - if got, want := key.(string), expectedKey; got != want { - t.Errorf("queue.Get() = %v, want %v", got, want) - } + pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2) + manager.addPod(pod2) + if got, want := manager.queue.Len(), 1; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + key, done = manager.queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) + } + expectedKey, _ = controller.KeyFunc(ds2) + if got, want := key.(string), expectedKey; got != want { + t.Errorf("queue.Get() = %v, want %v", got, want) } } } func TestAddPodOrphan(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - ds3 := newDaemonSet("foo3") - ds3.Spec.UpdateStrategy = *strategy - ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2 - manager.dsStore.Add(ds1) - manager.dsStore.Add(ds2) - manager.dsStore.Add(ds3) + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + ds3 := newDaemonSet("foo3") + ds3.Spec.UpdateStrategy = *strategy + ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2 + manager.dsStore.Add(ds1) + manager.dsStore.Add(ds2) + manager.dsStore.Add(ds3) - // Make pod an orphan. 
Expect matching sets to be queued. - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) - manager.addPod(pod) - if got, want := manager.queue.Len(), 2; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) { - t.Errorf("getQueuedKeys() = %v, want %v", got, want) - } + // Make pod an orphan. Expect matching sets to be queued. + pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) + manager.addPod(pod) + if got, want := manager.queue.Len(), 2; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) { + t.Errorf("getQueuedKeys() = %v, want %v", got, want) } } } func TestUpdatePod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - manager.dsStore.Add(ds1) - manager.dsStore.Add(ds2) + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + manager.dsStore.Add(ds1) + manager.dsStore.Add(ds2) - pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) - prev := *pod1 - bumpResourceVersion(pod1) - manager.updatePod(&prev, pod1) - if got, want := manager.queue.Len(), 1; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - key, done := manager.queue.Get() - if key == nil || done { - t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) - } - expectedKey, _ := controller.KeyFunc(ds1) - if got, want := key.(string), expectedKey; got != want { - t.Errorf("queue.Get() = %v, want %v", got, want) - } + pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) + prev := *pod1 + bumpResourceVersion(pod1) + manager.updatePod(&prev, pod1) + if got, want := manager.queue.Len(), 1; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + key, done := manager.queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) + } + expectedKey, _ := controller.KeyFunc(ds1) + if got, want := key.(string), expectedKey; got != want { + t.Errorf("queue.Get() = %v, want %v", got, want) + } - pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2) - prev = *pod2 - bumpResourceVersion(pod2) - manager.updatePod(&prev, pod2) - if got, want := manager.queue.Len(), 1; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - key, done = manager.queue.Get() - if key == nil || done { - t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) - } - expectedKey, _ = controller.KeyFunc(ds2) - if got, want := key.(string), expectedKey; got != want { - t.Errorf("queue.Get() = %v, want %v", got, want) - } + pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2) + prev = *pod2 + bumpResourceVersion(pod2) + manager.updatePod(&prev, pod2) + if got, want := 
manager.queue.Len(), 1; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + key, done = manager.queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) + } + expectedKey, _ = controller.KeyFunc(ds2) + if got, want := key.(string), expectedKey; got != want { + t.Errorf("queue.Get() = %v, want %v", got, want) } } } func TestUpdatePodOrphanSameLabels(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + manager.dsStore.Add(ds1) + manager.dsStore.Add(ds2) - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - manager.dsStore.Add(ds1) - manager.dsStore.Add(ds2) - - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) - prev := *pod - bumpResourceVersion(pod) - manager.updatePod(&prev, pod) - if got, want := manager.queue.Len(), 0; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } + pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) + prev := *pod + bumpResourceVersion(pod) + manager.updatePod(&prev, pod) + if got, want := manager.queue.Len(), 0; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) } } } func TestUpdatePodOrphanWithNewLabels(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + manager.dsStore.Add(ds1) + manager.dsStore.Add(ds2) - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - manager.dsStore.Add(ds1) - manager.dsStore.Add(ds2) - - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) - prev := *pod - prev.Labels = map[string]string{"foo2": "bar2"} - bumpResourceVersion(pod) - manager.updatePod(&prev, pod) - if got, want := manager.queue.Len(), 2; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) { - t.Errorf("getQueuedKeys() = %v, want %v", got, want) - } + pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) + prev := *pod + prev.Labels = map[string]string{"foo2": "bar2"} + bumpResourceVersion(pod) + manager.updatePod(&prev, pod) + if got, want := manager.queue.Len(), 2; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + if 
got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) { + t.Errorf("getQueuedKeys() = %v, want %v", got, want) } } } func TestUpdatePodChangeControllerRef(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds2 := newDaemonSet("foo2") + manager.dsStore.Add(ds1) + manager.dsStore.Add(ds2) - for _, strategy := range updateStrategies() { - ds := newDaemonSet("foo") - ds.Spec.UpdateStrategy = *strategy - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds2 := newDaemonSet("foo2") - manager.dsStore.Add(ds1) - manager.dsStore.Add(ds2) - - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) - prev := *pod - prev.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(ds2, controllerKind)} - bumpResourceVersion(pod) - manager.updatePod(&prev, pod) - if got, want := manager.queue.Len(), 2; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } + pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) + prev := *pod + prev.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(ds2, controllerKind)} + bumpResourceVersion(pod) + manager.updatePod(&prev, pod) + if got, want := manager.queue.Len(), 2; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) } } } func TestUpdatePodControllerRefRemoved(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + manager.dsStore.Add(ds1) + manager.dsStore.Add(ds2) - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - manager.dsStore.Add(ds1) - manager.dsStore.Add(ds2) - - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) - prev := *pod - pod.OwnerReferences = nil - bumpResourceVersion(pod) - manager.updatePod(&prev, pod) - if got, want := manager.queue.Len(), 2; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } + pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) + prev := *pod + pod.OwnerReferences = nil + bumpResourceVersion(pod) + manager.updatePod(&prev, pod) + if got, want := manager.queue.Len(), 2; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) } } } func TestDeletePod(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() + for _, strategy := range updateStrategies() { + 
manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + manager.dsStore.Add(ds1) + manager.dsStore.Add(ds2) - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - manager.dsStore.Add(ds1) - manager.dsStore.Add(ds2) + pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) + manager.deletePod(pod1) + if got, want := manager.queue.Len(), 1; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + key, done := manager.queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) + } + expectedKey, _ := controller.KeyFunc(ds1) + if got, want := key.(string), expectedKey; got != want { + t.Errorf("queue.Get() = %v, want %v", got, want) + } - pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1) - manager.deletePod(pod1) - if got, want := manager.queue.Len(), 1; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - key, done := manager.queue.Get() - if key == nil || done { - t.Fatalf("failed to enqueue controller for pod %v", pod1.Name) - } - expectedKey, _ := controller.KeyFunc(ds1) - if got, want := key.(string), expectedKey; got != want { - t.Errorf("queue.Get() = %v, want %v", got, want) - } - - pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2) - manager.deletePod(pod2) - if got, want := manager.queue.Len(), 1; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } - key, done = manager.queue.Get() - if key == nil || done { - t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) - } - expectedKey, _ = controller.KeyFunc(ds2) - if got, want := key.(string), expectedKey; got != want { - t.Errorf("queue.Get() = %v, want %v", got, want) - } + pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2) + manager.deletePod(pod2) + if got, want := manager.queue.Len(), 1; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) + } + key, done = manager.queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller for pod %v", pod2.Name) + } + expectedKey, _ = controller.KeyFunc(ds2) + if got, want := key.(string), expectedKey; got != want { + t.Errorf("queue.Get() = %v, want %v", got, want) } } } func TestDeletePodOrphan(t *testing.T) { - for _, f := range []bool{true, false} { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() + for _, strategy := range updateStrategies() { + manager, _, _, err := newTestController() + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + ds1 := newDaemonSet("foo1") + ds1.Spec.UpdateStrategy = *strategy + ds2 := newDaemonSet("foo2") + ds2.Spec.UpdateStrategy = *strategy + ds3 := newDaemonSet("foo3") + ds3.Spec.UpdateStrategy = *strategy + ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2 + manager.dsStore.Add(ds1) + manager.dsStore.Add(ds2) + manager.dsStore.Add(ds3) - for _, strategy := range updateStrategies() { - manager, _, _, err := newTestController() - if err != nil { - t.Fatalf("error creating DaemonSets controller: %v", err) - } - ds1 := 
newDaemonSet("foo1") - ds1.Spec.UpdateStrategy = *strategy - ds2 := newDaemonSet("foo2") - ds2.Spec.UpdateStrategy = *strategy - ds3 := newDaemonSet("foo3") - ds3.Spec.UpdateStrategy = *strategy - ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2 - manager.dsStore.Add(ds1) - manager.dsStore.Add(ds2) - manager.dsStore.Add(ds3) - - pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) - manager.deletePod(pod) - if got, want := manager.queue.Len(), 0; got != want { - t.Fatalf("queue.Len() = %v, want %v", got, want) - } + pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil) + manager.deletePod(pod) + if got, want := manager.queue.Len(), 0; got != want { + t.Fatalf("queue.Len() = %v, want %v", got, want) } } } diff --git a/pkg/controller/daemon/util/BUILD b/pkg/controller/daemon/util/BUILD index c0a40dd7c14..6e4ca9df5a0 100644 --- a/pkg/controller/daemon/util/BUILD +++ b/pkg/controller/daemon/util/BUILD @@ -39,7 +39,6 @@ go_test( srcs = ["daemonset_util_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/features:go_default_library", "//pkg/scheduler/api:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", diff --git a/pkg/controller/daemon/util/daemonset_util.go b/pkg/controller/daemon/util/daemonset_util.go index 264e1940f2f..fb7ec26f3c2 100644 --- a/pkg/controller/daemon/util/daemonset_util.go +++ b/pkg/controller/daemon/util/daemonset_util.go @@ -21,7 +21,7 @@ import ( "strconv" apps "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" podutil "k8s.io/kubernetes/pkg/api/v1/pod" @@ -204,7 +204,7 @@ func GetTargetNodeName(pod *v1.Pod) (string, error) { return pod.Spec.NodeName, nil } - // If ScheduleDaemonSetPods was enabled before, retrieve node name of unscheduled pods from NodeAffinity + // Retrieve node name of unscheduled pods from NodeAffinity if pod.Spec.Affinity == nil || pod.Spec.Affinity.NodeAffinity == nil || pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil { diff --git a/pkg/controller/daemon/util/daemonset_util_test.go b/pkg/controller/daemon/util/daemonset_util_test.go index 9a86b801e2f..7291ee504df 100644 --- a/pkg/controller/daemon/util/daemonset_util_test.go +++ b/pkg/controller/daemon/util/daemonset_util_test.go @@ -21,13 +21,12 @@ import ( "reflect" "testing" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/component-base/featuregate" featuregatetesting "k8s.io/component-base/featuregate/testing" - "k8s.io/kubernetes/pkg/features" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" utilpointer "k8s.io/utils/pointer" ) @@ -586,5 +585,5 @@ func TestGetTargetNodeName(t *testing.T) { } } - forEachFeatureGate(t, testFun, features.ScheduleDaemonSetPods) + forEachFeatureGate(t, testFun) } diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index d7bfe2147cc..820239a2c49 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -241,6 +241,7 @@ const ( // owner: @k82cn // beta: v1.12 + // GA: v1.17 // // Schedule DaemonSet Pods by default scheduler instead of DaemonSet controller ScheduleDaemonSetPods featuregate.Feature = "ScheduleDaemonSetPods" @@ -534,7 +535,7 @@ var defaultKubernetesFeatureGates = 
map[featuregate.Feature]featuregate.FeatureS SupportPodPidsLimit: {Default: true, PreRelease: featuregate.Beta}, SupportNodePidsLimit: {Default: true, PreRelease: featuregate.Beta}, HyperVContainer: {Default: false, PreRelease: featuregate.Alpha}, - ScheduleDaemonSetPods: {Default: true, PreRelease: featuregate.Beta}, + ScheduleDaemonSetPods: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.18 TokenRequest: {Default: true, PreRelease: featuregate.Beta}, TokenRequestProjection: {Default: true, PreRelease: featuregate.Beta}, BoundServiceAccountTokenVolume: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/test/integration/daemonset/BUILD b/test/integration/daemonset/BUILD index 2c289702393..4cd7bf6c7ab 100644 --- a/test/integration/daemonset/BUILD +++ b/test/integration/daemonset/BUILD @@ -18,7 +18,6 @@ go_test( "//pkg/api/v1/pod:go_default_library", "//pkg/controller:go_default_library", "//pkg/controller/daemon:go_default_library", - "//pkg/features:go_default_library", "//pkg/scheduler:go_default_library", "//pkg/scheduler/algorithmprovider:go_default_library", "//pkg/scheduler/api:go_default_library", @@ -32,7 +31,6 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", @@ -42,8 +40,6 @@ go_test( "//staging/src/k8s.io/client-go/tools/events:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", - "//staging/src/k8s.io/component-base/featuregate:go_default_library", - "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", "//test/integration/framework:go_default_library", ], ) diff --git a/test/integration/daemonset/daemonset_test.go b/test/integration/daemonset/daemonset_test.go index 5dffcfbe871..a9424a13b9d 100644 --- a/test/integration/daemonset/daemonset_test.go +++ b/test/integration/daemonset/daemonset_test.go @@ -31,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" appstyped "k8s.io/client-go/kubernetes/typed/apps/v1" @@ -41,13 +40,10 @@ import ( "k8s.io/client-go/tools/events" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/retry" - "k8s.io/component-base/featuregate" - featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/kubernetes/pkg/api/legacyscheme" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/daemon" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler" "k8s.io/kubernetes/pkg/scheduler/algorithmprovider" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" @@ -90,12 +86,6 @@ func setupScheduler( cs clientset.Interface, informerFactory informers.SharedInformerFactory, ) (restoreFeatureGates func()) { - restoreFeatureGates = func() {} - // If ScheduleDaemonSetPods is disabled, do not start scheduler. 
- if !utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { - return - } - // Enable Features. restoreFeatureGates = algorithmprovider.ApplyFeatureGates() @@ -221,12 +211,6 @@ func updateStrategies() []*apps.DaemonSetUpdateStrategy { return []*apps.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()} } -func featureGates() []featuregate.Feature { - return []featuregate.Feature{ - features.ScheduleDaemonSetPods, - } -} - func allocatableResources(memory, cpu string) v1.ResourceList { return v1.ResourceList{ v1.ResourceMemory: resource.MustParse(memory), @@ -427,31 +411,6 @@ func validateDaemonSetStatus( } } -func validateFailedPlacementEvent(eventClient corev1client.EventInterface, t *testing.T) { - if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { - eventList, err := eventClient.List(metav1.ListOptions{}) - if err != nil { - return false, err - } - if len(eventList.Items) == 0 { - return false, nil - } - if len(eventList.Items) > 1 { - t.Errorf("Expected 1 event got %d", len(eventList.Items)) - } - event := eventList.Items[0] - if event.Type != v1.EventTypeWarning { - t.Errorf("Event type expected %s got %s", v1.EventTypeWarning, event.Type) - } - if event.Reason != daemon.FailedPlacementReason { - t.Errorf("Event reason expected %s got %s", daemon.FailedPlacementReason, event.Reason) - } - return true, nil - }); err != nil { - t.Fatal(err) - } -} - func updateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string, updateFunc func(*apps.DaemonSet)) *apps.DaemonSet { var ds *apps.DaemonSet if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { @@ -468,17 +427,6 @@ func updateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string return ds } -func forEachFeatureGate(t *testing.T, tf func(t *testing.T)) { - for _, fg := range featureGates() { - for _, f := range []bool{true, false} { - func() { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, fg, f)() - t.Run(fmt.Sprintf("%v (%t)", fg, f), tf) - }() - } - } -} - func forEachStrategy(t *testing.T, tf func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy)) { for _, strategy := range updateStrategies() { t.Run(fmt.Sprintf("%s (%v)", t.Name(), strategy), @@ -487,152 +435,146 @@ func forEachStrategy(t *testing.T, tf func(t *testing.T, strategy *apps.DaemonSe } func TestOneNodeDaemonLaunchesPod(t *testing.T) { - forEachFeatureGate(t, func(t *testing.T) { - forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { - server, closeFn, dc, informers, clientset := setup(t) - defer closeFn() - ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t) - defer framework.DeleteTestingNamespace(ns, server, t) + forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { + server, closeFn, dc, informers, clientset := setup(t) + defer closeFn() + ns := framework.CreateTestingNamespace("one-node-daemonset-test", server, t) + defer framework.DeleteTestingNamespace(ns, server, t) - dsClient := clientset.AppsV1().DaemonSets(ns.Name) - podClient := clientset.CoreV1().Pods(ns.Name) - nodeClient := clientset.CoreV1().Nodes() - podInformer := informers.Core().V1().Pods().Informer() + dsClient := clientset.AppsV1().DaemonSets(ns.Name) + podClient := clientset.CoreV1().Pods(ns.Name) + nodeClient := clientset.CoreV1().Nodes() + podInformer := informers.Core().V1().Pods().Informer() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, 
cancel := context.WithCancel(context.Background()) + defer cancel() - // Start Scheduler - defer setupScheduler(ctx, t, clientset, informers)() + // Start Scheduler + defer setupScheduler(ctx, t, clientset, informers)() - informers.Start(ctx.Done()) - go dc.Run(5, ctx.Done()) + informers.Start(ctx.Done()) + go dc.Run(5, ctx.Done()) - ds := newDaemonSet("foo", ns.Name) - ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(ds) - if err != nil { - t.Fatalf("Failed to create DaemonSet: %v", err) - } - defer cleanupDaemonSets(t, clientset, ds) + ds := newDaemonSet("foo", ns.Name) + ds.Spec.UpdateStrategy = *strategy + _, err := dsClient.Create(ds) + if err != nil { + t.Fatalf("Failed to create DaemonSet: %v", err) + } + defer cleanupDaemonSets(t, clientset, ds) - _, err = nodeClient.Create(newNode("single-node", nil)) - if err != nil { - t.Fatalf("Failed to create node: %v", err) - } + _, err = nodeClient.Create(newNode("single-node", nil)) + if err != nil { + t.Fatalf("Failed to create node: %v", err) + } - validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t) - validateDaemonSetStatus(dsClient, ds.Name, 1, t) - }) + validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t) + validateDaemonSetStatus(dsClient, ds.Name, 1, t) }) } func TestSimpleDaemonSetLaunchesPods(t *testing.T) { - forEachFeatureGate(t, func(t *testing.T) { - forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { - server, closeFn, dc, informers, clientset := setup(t) - defer closeFn() - ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t) - defer framework.DeleteTestingNamespace(ns, server, t) + forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { + server, closeFn, dc, informers, clientset := setup(t) + defer closeFn() + ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t) + defer framework.DeleteTestingNamespace(ns, server, t) - dsClient := clientset.AppsV1().DaemonSets(ns.Name) - podClient := clientset.CoreV1().Pods(ns.Name) - nodeClient := clientset.CoreV1().Nodes() - podInformer := informers.Core().V1().Pods().Informer() + dsClient := clientset.AppsV1().DaemonSets(ns.Name) + podClient := clientset.CoreV1().Pods(ns.Name) + nodeClient := clientset.CoreV1().Nodes() + podInformer := informers.Core().V1().Pods().Informer() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - informers.Start(ctx.Done()) - go dc.Run(5, ctx.Done()) + informers.Start(ctx.Done()) + go dc.Run(5, ctx.Done()) - // Start Scheduler - defer setupScheduler(ctx, t, clientset, informers)() + // Start Scheduler + defer setupScheduler(ctx, t, clientset, informers)() - ds := newDaemonSet("foo", ns.Name) - ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(ds) - if err != nil { - t.Fatalf("Failed to create DaemonSet: %v", err) - } - defer cleanupDaemonSets(t, clientset, ds) + ds := newDaemonSet("foo", ns.Name) + ds.Spec.UpdateStrategy = *strategy + _, err := dsClient.Create(ds) + if err != nil { + t.Fatalf("Failed to create DaemonSet: %v", err) + } + defer cleanupDaemonSets(t, clientset, ds) - addNodes(nodeClient, 0, 5, nil, t) + addNodes(nodeClient, 0, 5, nil, t) - validateDaemonSetPodsAndMarkReady(podClient, podInformer, 5, t) - validateDaemonSetStatus(dsClient, ds.Name, 5, t) - }) + validateDaemonSetPodsAndMarkReady(podClient, podInformer, 5, t) + validateDaemonSetStatus(dsClient, ds.Name, 5, t) }) } func 
TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) { - forEachFeatureGate(t, func(t *testing.T) { - forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { - server, closeFn, dc, informers, clientset := setup(t) - defer closeFn() - ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t) - defer framework.DeleteTestingNamespace(ns, server, t) + forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { + server, closeFn, dc, informers, clientset := setup(t) + defer closeFn() + ns := framework.CreateTestingNamespace("simple-daemonset-test", server, t) + defer framework.DeleteTestingNamespace(ns, server, t) - dsClient := clientset.AppsV1().DaemonSets(ns.Name) - podClient := clientset.CoreV1().Pods(ns.Name) - nodeClient := clientset.CoreV1().Nodes() - podInformer := informers.Core().V1().Pods().Informer() + dsClient := clientset.AppsV1().DaemonSets(ns.Name) + podClient := clientset.CoreV1().Pods(ns.Name) + nodeClient := clientset.CoreV1().Nodes() + podInformer := informers.Core().V1().Pods().Informer() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - informers.Start(ctx.Done()) - go dc.Run(5, ctx.Done()) + informers.Start(ctx.Done()) + go dc.Run(5, ctx.Done()) - // Start Scheduler - defer setupScheduler(ctx, t, clientset, informers)() + // Start Scheduler + defer setupScheduler(ctx, t, clientset, informers)() - ds := newDaemonSet("foo", ns.Name) - ds.Spec.UpdateStrategy = *strategy + ds := newDaemonSet("foo", ns.Name) + ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.Affinity = &v1.Affinity{ - NodeAffinity: &v1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "zone", - Operator: v1.NodeSelectorOpIn, - Values: []string{"test"}, - }, + ds.Spec.Template.Spec.Affinity = &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "zone", + Operator: v1.NodeSelectorOpIn, + Values: []string{"test"}, }, }, - { - MatchFields: []v1.NodeSelectorRequirement{ - { - Key: schedulerapi.NodeFieldSelectorKeyNodeName, - Operator: v1.NodeSelectorOpIn, - Values: []string{"node-1"}, - }, + }, + { + MatchFields: []v1.NodeSelectorRequirement{ + { + Key: schedulerapi.NodeFieldSelectorKeyNodeName, + Operator: v1.NodeSelectorOpIn, + Values: []string{"node-1"}, }, }, }, }, }, - } + }, + } - _, err := dsClient.Create(ds) - if err != nil { - t.Fatalf("Failed to create DaemonSet: %v", err) - } - defer cleanupDaemonSets(t, clientset, ds) + _, err := dsClient.Create(ds) + if err != nil { + t.Fatalf("Failed to create DaemonSet: %v", err) + } + defer cleanupDaemonSets(t, clientset, ds) - addNodes(nodeClient, 0, 2, nil, t) - // Two nodes with labels - addNodes(nodeClient, 2, 2, map[string]string{ - "zone": "test", - }, t) - addNodes(nodeClient, 4, 2, nil, t) + addNodes(nodeClient, 0, 2, nil, t) + // Two nodes with labels + addNodes(nodeClient, 2, 2, map[string]string{ + "zone": "test", + }, t) + addNodes(nodeClient, 4, 2, nil, t) - validateDaemonSetPodsAndMarkReady(podClient, podInformer, 3, t) - validateDaemonSetStatus(dsClient, ds.Name, 3, t) - }) + validateDaemonSetPodsAndMarkReady(podClient, podInformer, 3, t) + 
validateDaemonSetStatus(dsClient, ds.Name, 3, t) }) } @@ -680,52 +622,10 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) { }) } -// When ScheduleDaemonSetPods is disabled, DaemonSets should not launch onto nodes with insufficient capacity. -// Look for TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled, we don't need this test anymore. -func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, false)() - forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { - server, closeFn, dc, informers, clientset := setup(t) - defer closeFn() - ns := framework.CreateTestingNamespace("insufficient-capacity", server, t) - defer framework.DeleteTestingNamespace(ns, server, t) - - dsClient := clientset.AppsV1().DaemonSets(ns.Name) - nodeClient := clientset.CoreV1().Nodes() - eventClient := clientset.CoreV1().Events(ns.Namespace) - - stopCh := make(chan struct{}) - defer close(stopCh) - - informers.Start(stopCh) - go dc.Run(5, stopCh) - - ds := newDaemonSet("foo", ns.Name) - ds.Spec.Template.Spec = resourcePodSpec("node-with-limited-memory", "120M", "75m") - ds.Spec.UpdateStrategy = *strategy - _, err := dsClient.Create(ds) - if err != nil { - t.Fatalf("Failed to create DaemonSet: %v", err) - } - defer cleanupDaemonSets(t, clientset, ds) - - node := newNode("node-with-limited-memory", nil) - node.Status.Allocatable = allocatableResources("100M", "200m") - _, err = nodeClient.Create(node) - if err != nil { - t.Fatalf("Failed to create node: %v", err) - } - - validateFailedPlacementEvent(eventClient, t) - }) -} - -// TestInsufficientCapacityNodeDaemonSetCreateButNotLaunchPod tests that when "ScheduleDaemonSetPods" -// feature is enabled, the DaemonSet should create Pods for all the nodes regardless of available resource -// on the nodes, and kube-scheduler should not schedule Pods onto the nodes with insufficient resource. -func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, true)() - +// TestInsufficientCapacityNode tests that the DaemonSet should create Pods for all the nodes +// regardless of the available resources on those nodes, and that kube-scheduler should not +// schedule Pods onto the nodes with insufficient resources. +func TestInsufficientCapacityNode(t *testing.T) { forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { server, closeFn, dc, informers, clientset := setup(t) defer closeFn() @@ -782,8 +682,7 @@ func TestInsufficientCapacityNodeWhenScheduleDaemonSetPodsEnabled(t *testing.T) t.Fatalf("Failed to create node: %v", err) } - // When ScheduleDaemonSetPods enabled, 2 pods are created. But only one - // of two Pods is scheduled by default scheduler. + // Two Pods are created, but only one of them is scheduled by the default scheduler.
validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t) validateDaemonSetStatus(dsClient, ds.Name, 1, t) }) @@ -898,142 +797,137 @@ func TestLaunchWithHashCollision(t *testing.T) { validateDaemonSetCollisionCount(dsClient, ds.Name, orgCollisionCount+1, t) } -// TestTaintedNode tests that no matter "ScheduleDaemonSetPods" feature is enabled or not -// tainted node isn't expected to have pod scheduled +// TestTaintedNode tests that a tainted node isn't expected to have Pods scheduled func TestTaintedNode(t *testing.T) { - forEachFeatureGate(t, func(t *testing.T) { - forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { - server, closeFn, dc, informers, clientset := setup(t) - defer closeFn() - ns := framework.CreateTestingNamespace("tainted-node", server, t) - defer framework.DeleteTestingNamespace(ns, server, t) + forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { + server, closeFn, dc, informers, clientset := setup(t) + defer closeFn() + ns := framework.CreateTestingNamespace("tainted-node", server, t) + defer framework.DeleteTestingNamespace(ns, server, t) - dsClient := clientset.AppsV1().DaemonSets(ns.Name) - podClient := clientset.CoreV1().Pods(ns.Name) - podInformer := informers.Core().V1().Pods().Informer() - nodeClient := clientset.CoreV1().Nodes() + dsClient := clientset.AppsV1().DaemonSets(ns.Name) + podClient := clientset.CoreV1().Pods(ns.Name) + podInformer := informers.Core().V1().Pods().Informer() + nodeClient := clientset.CoreV1().Nodes() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - informers.Start(ctx.Done()) - go dc.Run(5, ctx.Done()) + informers.Start(ctx.Done()) + go dc.Run(5, ctx.Done()) - // Start Scheduler - defer setupScheduler(ctx, t, clientset, informers)() + // Start Scheduler + defer setupScheduler(ctx, t, clientset, informers)() - ds := newDaemonSet("foo", ns.Name) - ds.Spec.UpdateStrategy = *strategy - ds, err := dsClient.Create(ds) - if err != nil { - t.Fatalf("Failed to create DaemonSet: %v", err) - } + ds := newDaemonSet("foo", ns.Name) + ds.Spec.UpdateStrategy = *strategy + ds, err := dsClient.Create(ds) + if err != nil { + t.Fatalf("Failed to create DaemonSet: %v", err) + } - defer cleanupDaemonSets(t, clientset, ds) + defer cleanupDaemonSets(t, clientset, ds) - nodeWithTaint := newNode("node-with-taint", nil) - nodeWithTaint.Spec.Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule"}} - _, err = nodeClient.Create(nodeWithTaint) - if err != nil { - t.Fatalf("Failed to create nodeWithTaint: %v", err) - } + nodeWithTaint := newNode("node-with-taint", nil) + nodeWithTaint.Spec.Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule"}} + _, err = nodeClient.Create(nodeWithTaint) + if err != nil { + t.Fatalf("Failed to create nodeWithTaint: %v", err) + } - nodeWithoutTaint := newNode("node-without-taint", nil) - _, err = nodeClient.Create(nodeWithoutTaint) - if err != nil { - t.Fatalf("Failed to create nodeWithoutTaint: %v", err) - } + nodeWithoutTaint := newNode("node-without-taint", nil) + _, err = nodeClient.Create(nodeWithoutTaint) + if err != nil { + t.Fatalf("Failed to create nodeWithoutTaint: %v", err) + } - validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t) - validateDaemonSetStatus(dsClient, ds.Name, 1, t) + validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t) + validateDaemonSetStatus(dsClient, ds.Name, 1, t) - // remove taint from
nodeWithTaint - nodeWithTaint, err = nodeClient.Get("node-with-taint", metav1.GetOptions{}) - if err != nil { - t.Fatalf("Failed to retrieve nodeWithTaint: %v", err) - } - nodeWithTaintCopy := nodeWithTaint.DeepCopy() - nodeWithTaintCopy.Spec.Taints = []v1.Taint{} - _, err = nodeClient.Update(nodeWithTaintCopy) - if err != nil { - t.Fatalf("Failed to update nodeWithTaint: %v", err) - } + // remove taint from nodeWithTaint + nodeWithTaint, err = nodeClient.Get("node-with-taint", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to retrieve nodeWithTaint: %v", err) + } + nodeWithTaintCopy := nodeWithTaint.DeepCopy() + nodeWithTaintCopy.Spec.Taints = []v1.Taint{} + _, err = nodeClient.Update(nodeWithTaintCopy) + if err != nil { + t.Fatalf("Failed to update nodeWithTaint: %v", err) + } - validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t) - validateDaemonSetStatus(dsClient, ds.Name, 2, t) - }) + validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t) + validateDaemonSetStatus(dsClient, ds.Name, 2, t) }) } // TestUnschedulableNodeDaemonDoesLaunchPod tests that the DaemonSet Pods can still be scheduled // to the Unschedulable nodes. func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) { - forEachFeatureGate(t, func(t *testing.T) { - forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { - server, closeFn, dc, informers, clientset := setup(t) - defer closeFn() - ns := framework.CreateTestingNamespace("daemonset-unschedulable-test", server, t) - defer framework.DeleteTestingNamespace(ns, server, t) + forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) { + server, closeFn, dc, informers, clientset := setup(t) + defer closeFn() + ns := framework.CreateTestingNamespace("daemonset-unschedulable-test", server, t) + defer framework.DeleteTestingNamespace(ns, server, t) - dsClient := clientset.AppsV1().DaemonSets(ns.Name) - podClient := clientset.CoreV1().Pods(ns.Name) - nodeClient := clientset.CoreV1().Nodes() - podInformer := informers.Core().V1().Pods().Informer() + dsClient := clientset.AppsV1().DaemonSets(ns.Name) + podClient := clientset.CoreV1().Pods(ns.Name) + nodeClient := clientset.CoreV1().Nodes() + podInformer := informers.Core().V1().Pods().Informer() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - informers.Start(ctx.Done()) - go dc.Run(5, ctx.Done()) + informers.Start(ctx.Done()) + go dc.Run(5, ctx.Done()) - // Start Scheduler - defer setupScheduler(ctx, t, clientset, informers)() + // Start Scheduler + defer setupScheduler(ctx, t, clientset, informers)() - ds := newDaemonSet("foo", ns.Name) - ds.Spec.UpdateStrategy = *strategy - ds.Spec.Template.Spec.HostNetwork = true - _, err := dsClient.Create(ds) - if err != nil { - t.Fatalf("Failed to create DaemonSet: %v", err) - } + ds := newDaemonSet("foo", ns.Name) + ds.Spec.UpdateStrategy = *strategy + ds.Spec.Template.Spec.HostNetwork = true + _, err := dsClient.Create(ds) + if err != nil { + t.Fatalf("Failed to create DaemonSet: %v", err) + } - defer cleanupDaemonSets(t, clientset, ds) + defer cleanupDaemonSets(t, clientset, ds) - // Creates unschedulable node. - node := newNode("unschedulable-node", nil) - node.Spec.Unschedulable = true - node.Spec.Taints = []v1.Taint{ - { - Key: schedulerapi.TaintNodeUnschedulable, - Effect: v1.TaintEffectNoSchedule, - }, - } + // Creates unschedulable node. 
+ node := newNode("unschedulable-node", nil) + node.Spec.Unschedulable = true + node.Spec.Taints = []v1.Taint{ + { + Key: schedulerapi.TaintNodeUnschedulable, + Effect: v1.TaintEffectNoSchedule, + }, + } - _, err = nodeClient.Create(node) - if err != nil { - t.Fatalf("Failed to create node: %v", err) - } + _, err = nodeClient.Create(node) + if err != nil { + t.Fatalf("Failed to create node: %v", err) + } - // Creates network-unavailable node. - nodeNU := newNode("network-unavailable-node", nil) - nodeNU.Status.Conditions = []v1.NodeCondition{ - {Type: v1.NodeReady, Status: v1.ConditionFalse}, - {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}, - } - nodeNU.Spec.Taints = []v1.Taint{ - { - Key: schedulerapi.TaintNodeNetworkUnavailable, - Effect: v1.TaintEffectNoSchedule, - }, - } + // Creates network-unavailable node. + nodeNU := newNode("network-unavailable-node", nil) + nodeNU.Status.Conditions = []v1.NodeCondition{ + {Type: v1.NodeReady, Status: v1.ConditionFalse}, + {Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue}, + } + nodeNU.Spec.Taints = []v1.Taint{ + { + Key: schedulerapi.TaintNodeNetworkUnavailable, + Effect: v1.TaintEffectNoSchedule, + }, + } - _, err = nodeClient.Create(nodeNU) - if err != nil { - t.Fatalf("Failed to create node: %v", err) - } + _, err = nodeClient.Create(nodeNU) + if err != nil { + t.Fatalf("Failed to create node: %v", err) + } - validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t) - validateDaemonSetStatus(dsClient, ds.Name, 2, t) - }) + validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t) + validateDaemonSetStatus(dsClient, ds.Name, 2, t) }) }