Test both strategies in all daemonSet controller unit tests

Janet Kuo 2017-05-19 17:34:04 -07:00
parent d02f40a5e7
commit d2cf00fcd6

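The change below is mechanical and uniform: every test body is wrapped in a for _, strategy := range updateStrategies() loop and the DaemonSet under test is stamped with ds.Spec.UpdateStrategy = *strategy, so each existing assertion now runs under both the OnDelete and RollingUpdate update strategies.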

@@ -27,6 +27,7 @@ import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apiserver/pkg/storage/names"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -119,6 +120,24 @@ func newDaemonSet(name string) *extensions.DaemonSet {
	}
}
func newRollbackStrategy() *extensions.DaemonSetUpdateStrategy {
	one := intstr.FromInt(1)
	return &extensions.DaemonSetUpdateStrategy{
		Type:          extensions.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &extensions.RollingUpdateDaemonSet{MaxUnavailable: &one},
	}
}

func newOnDeleteStrategy() *extensions.DaemonSetUpdateStrategy {
	return &extensions.DaemonSetUpdateStrategy{
		Type: extensions.OnDeleteDaemonSetStrategyType,
	}
}

func updateStrategies() []*extensions.DaemonSetUpdateStrategy {
	return []*extensions.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
}

func newNode(name string, label map[string]string) *v1.Node {
	return &v1.Node{
		TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
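The helpers above drive the pattern repeated through the rest of this diff. A minimal sketch of the rewritten test shape (the test name is hypothetical; updateStrategies, newDaemonSet, and the other helpers are the ones defined in this file):

func TestSomeDaemonSetBehavior(t *testing.T) { // hypothetical name
	for _, strategy := range updateStrategies() { // OnDelete, then RollingUpdate
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy // pin the strategy under test
		// ... unchanged test body: build a controller, add nodes and pods,
		// sync, and assert on the expected creates/deletes ...
	}
}

The RollingUpdate case is built with a MaxUnavailable of 1 via intstr.FromInt(1), which is why the intstr import is added above.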
@@ -334,9 +353,11 @@ func clearExpectations(t *testing.T, manager *daemonSetsController, ds *extensio
}

func TestDeleteFinalStateUnknown(t *testing.T) {
	for _, strategy := range updateStrategies() {
		manager, _, _ := newTestController()
		addNodes(manager.nodeStore, 0, 1, nil)
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		// DeletedFinalStateUnknown should queue the embedded DS if found.
		manager.deleteDaemonset(cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds})
		enqueuedKey, _ := manager.queue.Get()
@@ -344,6 +365,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
			t.Errorf("expected delete of DeletedFinalStateUnknown to enqueue the daemonset but found: %#v", enqueuedKey)
		}
	}
}

func markPodsReady(store cache.Store) {
	// mark pods as ready
@@ -360,15 +382,20 @@ func markPodReady(pod *v1.Pod) {
// DaemonSets without node selectors should launch pods on every node.
func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, _ := newTestController(ds)
		addNodes(manager.nodeStore, 0, 5, nil)
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
	}
}

func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, clientset := newTestController(ds)
		var updated *extensions.DaemonSet
@@ -392,28 +419,37 @@ func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
			t.Errorf("Status.CurrentNumberScheduled = %v, want %v", got, want)
		}
	}
}
// DaemonSets should do nothing if there aren't any nodes
func TestNoNodesDoesNothing(t *testing.T) {
	for _, strategy := range updateStrategies() {
		manager, podControl, _ := newTestController()
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}

// DaemonSets without node selectors should launch on a single node in a
// single node cluster.
func TestOneNodeDaemonLaunchesPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, _ := newTestController(ds)
		manager.nodeStore.Add(newNode("only-node", nil))
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}
// DaemonSets should place onto NotReady nodes
func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, _ := newTestController(ds)
		node := newNode("not-ready", nil)
		node.Status.Conditions = []v1.NodeCondition{
@@ -423,10 +459,13 @@ func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) {
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}

// DaemonSets should not place onto OutOfDisk nodes
func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, _ := newTestController(ds)
		node := newNode("not-enough-disk", nil)
		node.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
@@ -434,6 +473,7 @@ func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) {
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}
func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
	return v1.PodSpec{
@@ -456,8 +496,10 @@ func allocatableResources(memory, cpu string) v1.ResourceList {
// DaemonSets should not place onto nodes with insufficient free resource
func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec = podSpec
		manager, podControl, _ := newTestController(ds)
		node := newNode("too-much-mem", nil)
@@ -469,12 +511,15 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}
// DaemonSets should not unschedule a daemonset pod from a node with insufficient free resource
func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
		podSpec.NodeName = "too-much-mem"
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec = podSpec
		manager, podControl, _ := newTestController(ds)
		node := newNode("too-much-mem", nil)
@@ -486,10 +531,13 @@ func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T)
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}

func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec = podSpec
		manager, podControl, _ := newTestController(ds)
		node := newNode("too-much-mem", nil)
@@ -502,11 +550,14 @@ func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}
// DaemonSets should place onto nodes with sufficient free resource
func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec = podSpec
		manager, podControl, _ := newTestController(ds)
		node := newNode("not-too-much-mem", nil)
@@ -518,10 +569,13 @@ func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}

// DaemonSet should launch a pod on a node with taint NetworkUnavailable condition.
func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("simple")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, _ := newTestController(ds)
		node := newNode("network-unavailable", nil)
@@ -533,11 +587,14 @@ func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) {
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}
// DaemonSets not take any actions when being deleted
func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
	for _, strategy := range updateStrategies() {
		podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec = podSpec
		now := metav1.Now()
		ds.DeletionTimestamp = &now
@@ -551,10 +608,13 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}

func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {
	for _, strategy := range updateStrategies() {
		// Bare client says it IS deleted.
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		now := metav1.Now()
		ds.DeletionTimestamp = &now
		manager, podControl, _ := newTestController(ds)
@@ -571,9 +631,11 @@ func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}
// DaemonSets should not place onto nodes that would cause port conflicts
func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		podSpec := v1.PodSpec{
			NodeName: "port-conflict",
			Containers: []v1.Container{{
@@ -590,16 +652,19 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
		})
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec = podSpec
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}

// Test that if the node is already scheduled with a pod using a host port
// but belonging to the same daemonset, we don't delete that pod
//
// Issue: https://github.com/kubernetes/kubernetes/issues/22309
func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		podSpec := v1.PodSpec{
			NodeName: "port-conflict",
			Containers: []v1.Container{{
@@ -612,6 +677,7 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
		node := newNode("port-conflict", nil)
		manager.nodeStore.Add(node)
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec = podSpec
		manager.dsStore.Add(ds)
		manager.podStore.Add(&v1.Pod{
@@ -624,9 +690,11 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
		})
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}
// DaemonSets should place onto nodes that would not cause port conflicts
func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		podSpec1 := v1.PodSpec{
			NodeName: "no-port-conflict",
			Containers: []v1.Container{{
@@ -644,6 +712,7 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
			}},
		}
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec = podSpec2
		manager, podControl, _ := newTestController(ds)
		node := newNode("no-port-conflict", nil)
@@ -654,6 +723,7 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}
// DaemonSetController should not sync DaemonSets with empty pod selectors.
//
@@ -668,7 +738,9 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
	// this case even though it's empty pod selector matches all pods. The DaemonSetController
	// should detect this misconfiguration and choose not to sync the DaemonSet. We should
	// not observe a deletion of the pod on node1.
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ls := metav1.LabelSelector{}
		ds.Spec.Selector = &ls
		ds.Spec.Template.Spec.NodeSelector = map[string]string{"foo": "bar"}
@@ -689,10 +761,13 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}
// Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods.
func TestDealsWithExistingPods(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, _ := newTestController(ds)
		manager.dsStore.Add(ds)
		addNodes(manager.nodeStore, 0, 5, nil)
@@ -702,10 +777,13 @@ func TestDealsWithExistingPods(t *testing.T) {
		addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5)
	}
}

// Daemon with node selector should launch pods on nodes matching selector.
func TestSelectorDaemonLaunchesPods(t *testing.T) {
	for _, strategy := range updateStrategies() {
		daemon := newDaemonSet("foo")
		daemon.Spec.UpdateStrategy = *strategy
		daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
		manager, podControl, _ := newTestController(daemon)
		addNodes(manager.nodeStore, 0, 4, nil)
@@ -713,10 +791,13 @@ func TestSelectorDaemonLaunchesPods(t *testing.T) {
		manager.dsStore.Add(daemon)
		syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0)
	}
}
// Daemon with node selector should delete pods from nodes that do not satisfy selector.
func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
		manager, podControl, _ := newTestController(ds)
		manager.dsStore.Add(ds)
@@ -728,10 +809,13 @@ func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
		addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 4)
	}
}

// DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes.
func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
		manager, podControl, _ := newTestController(ds)
		manager.dsStore.Add(ds)
@@ -747,41 +831,53 @@ func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
		addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, ds, 1)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20)
	}
}
// DaemonSet with node selector which does not match any node labels should not launch pods.
func TestBadSelectorDaemonDoesNothing(t *testing.T) {
	for _, strategy := range updateStrategies() {
		manager, podControl, _ := newTestController()
		addNodes(manager.nodeStore, 0, 4, nil)
		addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}

// DaemonSet with node name should launch pod on node with corresponding name.
func TestNameDaemonSetLaunchesPods(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec.NodeName = "node-0"
		manager, podControl, _ := newTestController(ds)
		addNodes(manager.nodeStore, 0, 5, nil)
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}
// DaemonSet with node name that does not exist should not launch pods.
func TestBadNameDaemonSetDoesNothing(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec.NodeName = "node-10"
		manager, podControl, _ := newTestController(ds)
		addNodes(manager.nodeStore, 0, 5, nil)
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}

// DaemonSet with node selector, and node name, matching a node, should launch a pod on the node.
func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
		ds.Spec.Template.Spec.NodeName = "node-6"
		manager, podControl, _ := newTestController(ds)
@@ -790,10 +886,13 @@ func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}
// DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing.
func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
		ds.Spec.Template.Spec.NodeName = "node-0"
		manager, podControl, _ := newTestController(ds)
@@ -802,6 +901,7 @@ func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}

// DaemonSet with node selector, matching some nodes, should launch pods on all the nodes.
func TestSelectorDaemonSetLaunchesPods(t *testing.T) {
@@ -816,7 +916,9 @@ func TestSelectorDaemonSetLaunchesPods(t *testing.T) {
// Daemon with node affinity should launch pods on nodes matching affinity.
func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
	for _, strategy := range updateStrategies() {
		daemon := newDaemonSet("foo")
		daemon.Spec.UpdateStrategy = *strategy
		daemon.Spec.Template.Spec.Affinity = &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
@@ -841,9 +943,12 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
		manager.dsStore.Add(daemon)
		syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0)
	}
}

func TestNumberReadyStatus(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, clientset := newTestController(ds)
		var updated *extensions.DaemonSet
		clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@@ -877,9 +982,12 @@ func TestNumberReadyStatus(t *testing.T) {
			t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
		}
	}
}
func TestObservedGeneration(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds.Generation = 1
		manager, podControl, clientset := newTestController(ds)
		var updated *extensions.DaemonSet
@@ -902,6 +1010,7 @@ func TestObservedGeneration(t *testing.T) {
			t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, ds.Generation, updated.Status.ObservedGeneration)
		}
	}
}
// DaemonSet controller should kill all failed pods and create at most 1 pod on every node.
func TestDaemonKillFailedPods(t *testing.T) {
@@ -918,7 +1027,9 @@ func TestDaemonKillFailedPods(t *testing.T) {
	for _, test := range tests {
		t.Logf("test case: %s\n", test.test)
		for _, strategy := range updateStrategies() {
			ds := newDaemonSet("foo")
			ds.Spec.UpdateStrategy = *strategy
			manager, podControl, _ := newTestController(ds)
			manager.dsStore.Add(ds)
			addNodes(manager.nodeStore, 0, 1, nil)
@@ -927,10 +1038,13 @@ func TestDaemonKillFailedPods(t *testing.T) {
			syncAndValidateDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes)
		}
	}
}
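For table-driven tests such as TestDaemonKillFailedPods above (and TestNodeShouldRunDaemonPod and TestUpdateNode below), the strategy loop nests inside the existing case loop, so every fixture runs under both strategies. A sketch of that nesting, with the per-case body elided:

for _, test := range tests {
	t.Logf("test case: %s\n", test.test)
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy // re-run this case under each strategy
		// ... case body as before ...
	}
}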
// DaemonSet should not launch a pod on a tainted node when the pod doesn't tolerate that taint.
func TestTaintedNodeDaemonDoesNotLaunchUntoleratePod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("untolerate")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, _ := newTestController(ds)
		node := newNode("tainted", nil)
@@ -940,10 +1054,13 @@ func TestTaintedNodeDaemonDoesNotLaunchUntoleratePod(t *testing.T) {
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}

// DaemonSet should launch a pod on a tainted node when the pod can tolerate that taint.
func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("tolerate")
		ds.Spec.UpdateStrategy = *strategy
		setDaemonSetToleration(ds, noScheduleTolerations)
		manager, podControl, _ := newTestController(ds)
@@ -954,10 +1071,13 @@ func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) {
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}
// DaemonSet should launch a pod on a not ready node with taint notReady:NoExecute.
func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("simple")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, _ := newTestController(ds)
		node := newNode("tainted", nil)
@@ -970,10 +1090,13 @@ func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}

// DaemonSet should launch a pod on an unreachable node with taint unreachable:NoExecute.
func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("simple")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, _ := newTestController(ds)
		node := newNode("tainted", nil)
@@ -986,10 +1109,13 @@ func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) {
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}
// DaemonSet should launch a pod on an untainted node when the pod has tolerations.
func TestNodeDaemonLaunchesToleratePod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("tolerate")
		ds.Spec.UpdateStrategy = *strategy
		setDaemonSetToleration(ds, noScheduleTolerations)
		manager, podControl, _ := newTestController(ds)
@@ -999,6 +1125,7 @@ func TestNodeDaemonLaunchesToleratePod(t *testing.T) {
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}

func setNodeTaint(node *v1.Node, taints []v1.Taint) {
	node.Spec.Taints = taints
@@ -1010,7 +1137,9 @@ func setDaemonSetToleration(ds *extensions.DaemonSet, tolerations []v1.Toleratio
// DaemonSet should launch a critical pod even when the node is OutOfDisk.
func TestOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("critical")
		ds.Spec.UpdateStrategy = *strategy
		setDaemonSetCritical(ds)
		manager, podControl, _ := newTestController(ds)
@@ -1027,11 +1156,14 @@ func TestOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
		utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}

// DaemonSet should launch a critical pod even when the node has insufficient free resource.
func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
		ds := newDaemonSet("critical")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec = podSpec
		setDaemonSetCritical(ds)
@@ -1052,9 +1184,11 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
		utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
	}
}
// DaemonSets should NOT launch a critical pod when there are port conflicts.
func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		podSpec := v1.PodSpec{
			NodeName: "port-conflict",
			Containers: []v1.Container{{
@@ -1072,11 +1206,13 @@ func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
		utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
		ds := newDaemonSet("critical")
		ds.Spec.UpdateStrategy = *strategy
		ds.Spec.Template.Spec = podSpec
		setDaemonSetCritical(ds)
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
	}
}

func setDaemonSetCritical(ds *extensions.DaemonSet) {
	ds.Namespace = api.NamespaceSystem
@@ -1177,6 +1313,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
	}
	for i, c := range cases {
		for _, strategy := range updateStrategies() {
			node := newNode("test-node", nil)
			node.Status.Allocatable = allocatableResources("100M", "1")
			manager, _, _ := newTestController()
@@ -1185,6 +1322,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
				manager.podStore.Add(p)
				p.Spec.NodeName = "test-node"
			}
			c.ds.Spec.UpdateStrategy = *strategy
			wantToRun, shouldSchedule, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds)
			if wantToRun != c.wantToRun {
@@ -1201,6 +1339,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
			}
		}
	}
}
// DaemonSets should be resynced when node labels or taints changed
func TestUpdateNode(t *testing.T) {
@@ -1285,8 +1424,10 @@ func TestUpdateNode(t *testing.T) {
		},
	}
	for _, c := range cases {
		for _, strategy := range updateStrategies() {
			manager, podControl, _ := newTestController()
			manager.nodeStore.Add(c.oldNode)
			c.ds.Spec.UpdateStrategy = *strategy
			manager.dsStore.Add(c.ds)
			syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0)
@@ -1303,10 +1444,14 @@ func TestUpdateNode(t *testing.T) {
			}
		}
	}
}
func TestGetNodesToDaemonPods(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		ds2 := newDaemonSet("foo2")
		ds2.Spec.UpdateStrategy = *strategy
		manager, _, _ := newTestController(ds, ds2)
		manager.dsStore.Add(ds)
		manager.dsStore.Add(ds2)
@@ -1365,6 +1510,7 @@ func TestGetNodesToDaemonPods(t *testing.T) {
			t.Errorf("unexpected pod %v was returned", podName)
		}
	}
}
func TestAddNode(t *testing.T) {
	manager, _, _ := newTestController()
@@ -1390,9 +1536,12 @@ func TestAddNode(t *testing.T) {
}

func TestAddPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		manager, _, _ := newTestController()
		ds1 := newDaemonSet("foo1")
		ds1.Spec.UpdateStrategy = *strategy
		ds2 := newDaemonSet("foo2")
		ds2.Spec.UpdateStrategy = *strategy
		manager.dsStore.Add(ds1)
		manager.dsStore.Add(ds2)
@@ -1424,12 +1573,17 @@ func TestAddPod(t *testing.T) {
			t.Errorf("queue.Get() = %v, want %v", got, want)
		}
	}
}
func TestAddPodOrphan(t *testing.T) {
	for _, strategy := range updateStrategies() {
		manager, _, _ := newTestController()
		ds1 := newDaemonSet("foo1")
		ds1.Spec.UpdateStrategy = *strategy
		ds2 := newDaemonSet("foo2")
		ds2.Spec.UpdateStrategy = *strategy
		ds3 := newDaemonSet("foo3")
		ds3.Spec.UpdateStrategy = *strategy
		ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2
		manager.dsStore.Add(ds1)
		manager.dsStore.Add(ds2)
@@ -1445,11 +1599,15 @@ func TestAddPodOrphan(t *testing.T) {
			t.Errorf("getQueuedKeys() = %v, want %v", got, want)
		}
	}
}
func TestUpdatePod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		manager, _, _ := newTestController()
		ds1 := newDaemonSet("foo1")
		ds1.Spec.UpdateStrategy = *strategy
		ds2 := newDaemonSet("foo2")
		ds2.Spec.UpdateStrategy = *strategy
		manager.dsStore.Add(ds1)
		manager.dsStore.Add(ds2)
@@ -1485,11 +1643,15 @@ func TestUpdatePod(t *testing.T) {
			t.Errorf("queue.Get() = %v, want %v", got, want)
		}
	}
}

func TestUpdatePodOrphanSameLabels(t *testing.T) {
	for _, strategy := range updateStrategies() {
		manager, _, _ := newTestController()
		ds1 := newDaemonSet("foo1")
		ds1.Spec.UpdateStrategy = *strategy
		ds2 := newDaemonSet("foo2")
		ds2.Spec.UpdateStrategy = *strategy
		manager.dsStore.Add(ds1)
		manager.dsStore.Add(ds2)
@@ -1501,11 +1663,15 @@ func TestUpdatePodOrphanSameLabels(t *testing.T) {
			t.Fatalf("queue.Len() = %v, want %v", got, want)
		}
	}
}
func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
	for _, strategy := range updateStrategies() {
		manager, _, _ := newTestController()
		ds1 := newDaemonSet("foo1")
		ds1.Spec.UpdateStrategy = *strategy
		ds2 := newDaemonSet("foo2")
		ds2.Spec.UpdateStrategy = *strategy
		manager.dsStore.Add(ds1)
		manager.dsStore.Add(ds2)
@@ -1521,8 +1687,12 @@ func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
			t.Errorf("getQueuedKeys() = %v, want %v", got, want)
		}
	}
}

func TestUpdatePodChangeControllerRef(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		manager, _, _ := newTestController()
		ds1 := newDaemonSet("foo1")
		ds2 := newDaemonSet("foo2")
@@ -1538,11 +1708,15 @@ func TestUpdatePodChangeControllerRef(t *testing.T) {
			t.Fatalf("queue.Len() = %v, want %v", got, want)
		}
	}
}
func TestUpdatePodControllerRefRemoved(t *testing.T) {
	for _, strategy := range updateStrategies() {
		manager, _, _ := newTestController()
		ds1 := newDaemonSet("foo1")
		ds1.Spec.UpdateStrategy = *strategy
		ds2 := newDaemonSet("foo2")
		ds2.Spec.UpdateStrategy = *strategy
		manager.dsStore.Add(ds1)
		manager.dsStore.Add(ds2)
@@ -1555,11 +1729,15 @@ func TestUpdatePodControllerRefRemoved(t *testing.T) {
			t.Fatalf("queue.Len() = %v, want %v", got, want)
		}
	}
}

func TestDeletePod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		manager, _, _ := newTestController()
		ds1 := newDaemonSet("foo1")
		ds1.Spec.UpdateStrategy = *strategy
		ds2 := newDaemonSet("foo2")
		ds2.Spec.UpdateStrategy = *strategy
		manager.dsStore.Add(ds1)
		manager.dsStore.Add(ds2)
@@ -1591,12 +1769,17 @@ func TestDeletePod(t *testing.T) {
			t.Errorf("queue.Get() = %v, want %v", got, want)
		}
	}
}
func TestDeletePodOrphan(t *testing.T) {
	for _, strategy := range updateStrategies() {
		manager, _, _ := newTestController()
		ds1 := newDaemonSet("foo1")
		ds1.Spec.UpdateStrategy = *strategy
		ds2 := newDaemonSet("foo2")
		ds2.Spec.UpdateStrategy = *strategy
		ds3 := newDaemonSet("foo3")
		ds3.Spec.UpdateStrategy = *strategy
		ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2
		manager.dsStore.Add(ds1)
		manager.dsStore.Add(ds2)
@@ -1608,6 +1791,7 @@ func TestDeletePodOrphan(t *testing.T) {
			t.Fatalf("queue.Len() = %v, want %v", got, want)
		}
	}
}

func bumpResourceVersion(obj metav1.Object) {
	ver, _ := strconv.ParseInt(obj.GetResourceVersion(), 10, 32)