Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-03 17:30:00 +00:00)

Commit d2cf00fcd6 (parent d02f40a5e7)

Test both strategies in all daemonSet controller unit tests
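In short, the change adds newRollbackStrategy and newOnDeleteStrategy constructors plus an updateStrategies() helper, and wraps each daemonSet controller unit test body in a loop over the returned strategies, setting ds.Spec.UpdateStrategy before syncing. A condensed sketch of the pattern, excerpted and simplified from the diff below (TestSimpleDaemonSetLaunchesPods stands in for the other tests; helpers such as newDaemonSet, newTestController, addNodes, and syncAndValidateDaemonSets are defined elsewhere in the same test file):

func updateStrategies() []*extensions.DaemonSetUpdateStrategy {
	return []*extensions.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
}

func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
	// Each test body now runs once per update strategy (OnDelete and RollingUpdate).
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, _ := newTestController(ds)
		addNodes(manager.nodeStore, 0, 5, nil)
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
	}
}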
@@ -27,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apiserver/pkg/storage/names"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -119,6 +120,24 @@ func newDaemonSet(name string) *extensions.DaemonSet {
 	}
 }
 
+func newRollbackStrategy() *extensions.DaemonSetUpdateStrategy {
+	one := intstr.FromInt(1)
+	return &extensions.DaemonSetUpdateStrategy{
+		Type:          extensions.RollingUpdateDaemonSetStrategyType,
+		RollingUpdate: &extensions.RollingUpdateDaemonSet{MaxUnavailable: &one},
+	}
+}
+
+func newOnDeleteStrategy() *extensions.DaemonSetUpdateStrategy {
+	return &extensions.DaemonSetUpdateStrategy{
+		Type: extensions.OnDeleteDaemonSetStrategyType,
+	}
+}
+
+func updateStrategies() []*extensions.DaemonSetUpdateStrategy {
+	return []*extensions.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
+}
+
 func newNode(name string, label map[string]string) *v1.Node {
 	return &v1.Node{
 		TypeMeta: metav1.TypeMeta{APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String()},
@@ -334,15 +353,18 @@ func clearExpectations(t *testing.T, manager *daemonSetsController, ds *extensio
 }
 
 func TestDeleteFinalStateUnknown(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	manager, _, _ := newTestController()
 	addNodes(manager.nodeStore, 0, 1, nil)
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	// DeletedFinalStateUnknown should queue the embedded DS if found.
 	manager.deleteDaemonset(cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds})
 	enqueuedKey, _ := manager.queue.Get()
 	if enqueuedKey.(string) != "default/foo" {
 		t.Errorf("expected delete of DeletedFinalStateUnknown to enqueue the daemonset but found: %#v", enqueuedKey)
 	}
+	}
 }
 
 func markPodsReady(store cache.Store) {
@@ -360,15 +382,20 @@ func markPodReady(pod *v1.Pod) {
 
 // DaemonSets without node selectors should launch pods on every node.
 func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	manager, podControl, _ := newTestController(ds)
 	addNodes(manager.nodeStore, 0, 5, nil)
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0)
+	}
 }
 
 func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	manager, podControl, clientset := newTestController(ds)
 
 	var updated *extensions.DaemonSet
@@ -391,29 +418,38 @@ func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
 	if got, want := updated.Status.CurrentNumberScheduled, int32(5); got != want {
 		t.Errorf("Status.CurrentNumberScheduled = %v, want %v", got, want)
 	}
+	}
 }
 
 // DaemonSets should do nothing if there aren't any nodes
 func TestNoNodesDoesNothing(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	manager, podControl, _ := newTestController()
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 // DaemonSets without node selectors should launch on a single node in a
 // single node cluster.
 func TestOneNodeDaemonLaunchesPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	manager, podControl, _ := newTestController(ds)
 	manager.nodeStore.Add(newNode("only-node", nil))
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSets should place onto NotReady nodes
 func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	manager, podControl, _ := newTestController(ds)
 	node := newNode("not-ready", nil)
 	node.Status.Conditions = []v1.NodeCondition{
@@ -422,17 +458,21 @@ func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	manager.nodeStore.Add(node)
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSets should not place onto OutOfDisk nodes
 func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	manager, podControl, _ := newTestController(ds)
 	node := newNode("not-enough-disk", nil)
 	node.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
 	manager.nodeStore.Add(node)
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
@@ -456,8 +496,10 @@ func allocatableResources(memory, cpu string) v1.ResourceList {
 
 // DaemonSets should not place onto nodes with insufficient free resource
 func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec = podSpec
 	manager, podControl, _ := newTestController(ds)
 	node := newNode("too-much-mem", nil)
@@ -468,13 +510,16 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	})
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 // DaemonSets should not unschedule a daemonset pod from a node with insufficient free resource
 func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
 	podSpec.NodeName = "too-much-mem"
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec = podSpec
 	manager, podControl, _ := newTestController(ds)
 	node := newNode("too-much-mem", nil)
@@ -485,11 +530,14 @@ func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T)
 	})
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec = podSpec
 	manager, podControl, _ := newTestController(ds)
 	node := newNode("too-much-mem", nil)
@@ -501,12 +549,15 @@ func TestSufficientCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
 	})
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSets should place onto nodes with sufficient free resource
 func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec = podSpec
 	manager, podControl, _ := newTestController(ds)
 	node := newNode("not-too-much-mem", nil)
@@ -517,11 +568,14 @@ func TestSufficientCapacityNodeDaemonLaunchesPod(t *testing.T) {
 	})
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSet should launch a pod on a node with taint NetworkUnavailable condition.
 func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("simple")
+	ds.Spec.UpdateStrategy = *strategy
 	manager, podControl, _ := newTestController(ds)
 
 	node := newNode("network-unavailable", nil)
@@ -532,12 +586,15 @@ func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) {
 	manager.dsStore.Add(ds)
 
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSets not take any actions when being deleted
 func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec = podSpec
 	now := metav1.Now()
 	ds.DeletionTimestamp = &now
@@ -550,11 +607,14 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
 	})
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	// Bare client says it IS deleted.
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	now := metav1.Now()
 	ds.DeletionTimestamp = &now
 	manager, podControl, _ := newTestController(ds)
@@ -570,10 +630,12 @@ func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {
 	manager.podStore.Add(pod)
 
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 // DaemonSets should not place onto nodes that would cause port conflicts
 func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	podSpec := v1.PodSpec{
 		NodeName: "port-conflict",
 		Containers: []v1.Container{{
@@ -590,9 +652,11 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	})
 
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec = podSpec
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 // Test that if the node is already scheduled with a pod using a host port
@@ -600,6 +664,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
 //
 // Issue: https://github.com/kubernetes/kubernetes/issues/22309
 func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	podSpec := v1.PodSpec{
 		NodeName: "port-conflict",
 		Containers: []v1.Container{{
@@ -612,6 +677,7 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
 	node := newNode("port-conflict", nil)
 	manager.nodeStore.Add(node)
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec = podSpec
 	manager.dsStore.Add(ds)
 	manager.podStore.Add(&v1.Pod{
@@ -623,10 +689,12 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
 		Spec: podSpec,
 	})
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 // DaemonSets should place onto nodes that would not cause port conflicts
 func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	podSpec1 := v1.PodSpec{
 		NodeName: "no-port-conflict",
 		Containers: []v1.Container{{
@@ -644,6 +712,7 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
 		}},
 	}
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec = podSpec2
 	manager, podControl, _ := newTestController(ds)
 	node := newNode("no-port-conflict", nil)
@@ -653,6 +722,7 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
 	})
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSetController should not sync DaemonSets with empty pod selectors.
@@ -668,7 +738,9 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 	// this case even though it's empty pod selector matches all pods. The DaemonSetController
 	// should detect this misconfiguration and choose not to sync the DaemonSet. We should
 	// not observe a deletion of the pod on node1.
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ls := metav1.LabelSelector{}
 	ds.Spec.Selector = &ls
 	ds.Spec.Template.Spec.NodeSelector = map[string]string{"foo": "bar"}
@@ -688,11 +760,14 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
 	manager.dsStore.Add(ds)
 
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 // Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods.
 func TestDealsWithExistingPods(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	manager, podControl, _ := newTestController(ds)
 	manager.dsStore.Add(ds)
 	addNodes(manager.nodeStore, 0, 5, nil)
@@ -701,22 +776,28 @@ func TestDealsWithExistingPods(t *testing.T) {
 	addPods(manager.podStore, "node-3", simpleDaemonSetLabel, ds, 5)
 	addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 2, 5)
+	}
 }
 
 // Daemon with node selector should launch pods on nodes matching selector.
 func TestSelectorDaemonLaunchesPods(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	daemon := newDaemonSet("foo")
+	daemon.Spec.UpdateStrategy = *strategy
 	daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 	manager, podControl, _ := newTestController(daemon)
 	addNodes(manager.nodeStore, 0, 4, nil)
 	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
 	manager.dsStore.Add(daemon)
 	syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0)
+	}
 }
 
 // Daemon with node selector should delete pods from nodes that do not satisfy selector.
 func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 	manager, podControl, _ := newTestController(ds)
 	manager.dsStore.Add(ds)
@@ -727,11 +808,14 @@ func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
 	addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 1)
 	addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 4)
+	}
 }
 
 // DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes.
 func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 	manager, podControl, _ := newTestController(ds)
 	manager.dsStore.Add(ds)
@@ -746,42 +830,54 @@ func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
 	addPods(manager.podStore, "node-9", simpleDaemonSetLabel, ds, 1)
 	addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, ds, 1)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 3, 20)
+	}
 }
 
 // DaemonSet with node selector which does not match any node labels should not launch pods.
 func TestBadSelectorDaemonDoesNothing(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	manager, podControl, _ := newTestController()
 	addNodes(manager.nodeStore, 0, 4, nil)
 	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 // DaemonSet with node name should launch pod on node with corresponding name.
 func TestNameDaemonSetLaunchesPods(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec.NodeName = "node-0"
 	manager, podControl, _ := newTestController(ds)
 	addNodes(manager.nodeStore, 0, 5, nil)
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSet with node name that does not exist should not launch pods.
 func TestBadNameDaemonSetDoesNothing(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec.NodeName = "node-10"
 	manager, podControl, _ := newTestController(ds)
 	addNodes(manager.nodeStore, 0, 5, nil)
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 // DaemonSet with node selector, and node name, matching a node, should launch a pod on the node.
 func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 	ds.Spec.Template.Spec.NodeName = "node-6"
 	manager, podControl, _ := newTestController(ds)
@@ -789,11 +885,14 @@ func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
 	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing.
 func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
 	ds.Spec.Template.Spec.NodeName = "node-0"
 	manager, podControl, _ := newTestController(ds)
@@ -801,6 +900,7 @@ func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
 	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 // DaemonSet with node selector, matching some nodes, should launch pods on all the nodes.
@@ -816,7 +916,9 @@ func TestSelectorDaemonSetLaunchesPods(t *testing.T) {
 
 // Daemon with node affinity should launch pods on nodes matching affinity.
 func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	daemon := newDaemonSet("foo")
+	daemon.Spec.UpdateStrategy = *strategy
 	daemon.Spec.Template.Spec.Affinity = &v1.Affinity{
 		NodeAffinity: &v1.NodeAffinity{
 			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
@@ -840,10 +942,13 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
 	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
 	manager.dsStore.Add(daemon)
 	syncAndValidateDaemonSets(t, manager, daemon, podControl, 3, 0)
+	}
 }
 
 func TestNumberReadyStatus(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	manager, podControl, clientset := newTestController(ds)
 	var updated *extensions.DaemonSet
 	clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@@ -876,10 +981,13 @@ func TestNumberReadyStatus(t *testing.T) {
 	if updated.Status.NumberReady != 2 {
 		t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
 	}
+	}
 }
 
 func TestObservedGeneration(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Generation = 1
 	manager, podControl, clientset := newTestController(ds)
 	var updated *extensions.DaemonSet
@@ -901,6 +1009,7 @@ func TestObservedGeneration(t *testing.T) {
 	if updated.Status.ObservedGeneration != ds.Generation {
 		t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, ds.Generation, updated.Status.ObservedGeneration)
 	}
+	}
 }
 
 // DaemonSet controller should kill all failed pods and create at most 1 pod on every node.
@@ -918,7 +1027,9 @@ func TestDaemonKillFailedPods(t *testing.T) {
 
 	for _, test := range tests {
 		t.Logf("test case: %s\n", test.test)
+		for _, strategy := range updateStrategies() {
 		ds := newDaemonSet("foo")
+		ds.Spec.UpdateStrategy = *strategy
 		manager, podControl, _ := newTestController(ds)
 		manager.dsStore.Add(ds)
 		addNodes(manager.nodeStore, 0, 1, nil)
@@ -926,11 +1037,14 @@ func TestDaemonKillFailedPods(t *testing.T) {
 		addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numNormalPods)
 		syncAndValidateDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes)
 	}
+	}
 }
 
 // DaemonSet should not launch a pod on a tainted node when the pod doesn't tolerate that taint.
 func TestTaintedNodeDaemonDoesNotLaunchUntoleratePod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("untolerate")
+	ds.Spec.UpdateStrategy = *strategy
 	manager, podControl, _ := newTestController(ds)
 
 	node := newNode("tainted", nil)
@@ -939,11 +1053,14 @@ func TestTaintedNodeDaemonDoesNotLaunchUntoleratePod(t *testing.T) {
 	manager.dsStore.Add(ds)
 
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 // DaemonSet should launch a pod on a tainted node when the pod can tolerate that taint.
 func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("tolerate")
+	ds.Spec.UpdateStrategy = *strategy
 	setDaemonSetToleration(ds, noScheduleTolerations)
 	manager, podControl, _ := newTestController(ds)
 
@@ -953,11 +1070,14 @@ func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) {
 	manager.dsStore.Add(ds)
 
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSet should launch a pod on a not ready node with taint notReady:NoExecute.
 func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("simple")
+	ds.Spec.UpdateStrategy = *strategy
 	manager, podControl, _ := newTestController(ds)
 
 	node := newNode("tainted", nil)
@@ -969,11 +1089,14 @@ func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
 	manager.dsStore.Add(ds)
 
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSet should launch a pod on an unreachable node with taint unreachable:NoExecute.
 func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("simple")
+	ds.Spec.UpdateStrategy = *strategy
 	manager, podControl, _ := newTestController(ds)
 
 	node := newNode("tainted", nil)
@@ -985,11 +1108,14 @@ func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) {
 	manager.dsStore.Add(ds)
 
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSet should launch a pod on an untainted node when the pod has tolerations.
 func TestNodeDaemonLaunchesToleratePod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("tolerate")
+	ds.Spec.UpdateStrategy = *strategy
 	setDaemonSetToleration(ds, noScheduleTolerations)
 	manager, podControl, _ := newTestController(ds)
 
@@ -998,6 +1124,7 @@ func TestNodeDaemonLaunchesToleratePod(t *testing.T) {
 	manager.dsStore.Add(ds)
 
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 func setNodeTaint(node *v1.Node, taints []v1.Taint) {
@@ -1010,7 +1137,9 @@ func setDaemonSetToleration(ds *extensions.DaemonSet, tolerations []v1.Toleratio
 
 // DaemonSet should launch a critical pod even when the node is OutOfDisk.
 func TestOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("critical")
+	ds.Spec.UpdateStrategy = *strategy
 	setDaemonSetCritical(ds)
 	manager, podControl, _ := newTestController(ds)
 
@@ -1026,12 +1155,15 @@ func TestOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
 	// Enabling critical pod annotation feature gate should create critical pod
 	utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSet should launch a critical pod even when the node has insufficient free resource.
 func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
 	ds := newDaemonSet("critical")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec = podSpec
 	setDaemonSetCritical(ds)
 
@@ -1051,10 +1183,12 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
 	// Enabling critical pod annotation feature gate should create critical pod
 	utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+	}
 }
 
 // DaemonSets should NOT launch a critical pod when there are port conflicts.
 func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	podSpec := v1.PodSpec{
 		NodeName: "port-conflict",
 		Containers: []v1.Container{{
@@ -1072,10 +1206,12 @@ func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
 
 	utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
 	ds := newDaemonSet("critical")
+	ds.Spec.UpdateStrategy = *strategy
 	ds.Spec.Template.Spec = podSpec
 	setDaemonSetCritical(ds)
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+	}
 }
 
 func setDaemonSetCritical(ds *extensions.DaemonSet) {
@@ -1177,6 +1313,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 	}
 
 	for i, c := range cases {
+		for _, strategy := range updateStrategies() {
 		node := newNode("test-node", nil)
 		node.Status.Allocatable = allocatableResources("100M", "1")
 		manager, _, _ := newTestController()
@@ -1185,6 +1322,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 			manager.podStore.Add(p)
 			p.Spec.NodeName = "test-node"
 		}
+		c.ds.Spec.UpdateStrategy = *strategy
 		wantToRun, shouldSchedule, shouldContinueRunning, err := manager.nodeShouldRunDaemonPod(node, c.ds)
 
 		if wantToRun != c.wantToRun {
@@ -1200,6 +1338,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 			t.Errorf("[%v] expected err: %v, got: %v", i, c.err, err)
 		}
 	}
+	}
 }
 
 // DaemonSets should be resynced when node labels or taints changed
@@ -1285,8 +1424,10 @@ func TestUpdateNode(t *testing.T) {
 		},
 	}
 	for _, c := range cases {
+		for _, strategy := range updateStrategies() {
 		manager, podControl, _ := newTestController()
 		manager.nodeStore.Add(c.oldNode)
+		c.ds.Spec.UpdateStrategy = *strategy
 		manager.dsStore.Add(c.ds)
 		syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0)
 
@@ -1302,11 +1443,15 @@ func TestUpdateNode(t *testing.T) {
 			t.Errorf("Test case: '%s', expected: %t, got: %t", c.test, c.shouldEnqueue, enqueued)
 		}
 	}
+	}
 }
 
 func TestGetNodesToDaemonPods(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	ds2 := newDaemonSet("foo2")
+	ds2.Spec.UpdateStrategy = *strategy
 	manager, _, _ := newTestController(ds, ds2)
 	manager.dsStore.Add(ds)
 	manager.dsStore.Add(ds2)
@@ -1364,6 +1509,7 @@ func TestGetNodesToDaemonPods(t *testing.T) {
 	for podName := range gotPods {
 		t.Errorf("unexpected pod %v was returned", podName)
 	}
+	}
 }
 
 func TestAddNode(t *testing.T) {
@@ -1390,9 +1536,12 @@ func TestAddNode(t *testing.T) {
 }
 
 func TestAddPod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	manager, _, _ := newTestController()
 	ds1 := newDaemonSet("foo1")
+	ds1.Spec.UpdateStrategy = *strategy
 	ds2 := newDaemonSet("foo2")
+	ds2.Spec.UpdateStrategy = *strategy
 	manager.dsStore.Add(ds1)
 	manager.dsStore.Add(ds2)
 
@@ -1423,13 +1572,18 @@ func TestAddPod(t *testing.T) {
 	if got, want := key.(string), expectedKey; got != want {
 		t.Errorf("queue.Get() = %v, want %v", got, want)
 	}
+	}
 }
 
 func TestAddPodOrphan(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	manager, _, _ := newTestController()
 	ds1 := newDaemonSet("foo1")
+	ds1.Spec.UpdateStrategy = *strategy
 	ds2 := newDaemonSet("foo2")
+	ds2.Spec.UpdateStrategy = *strategy
 	ds3 := newDaemonSet("foo3")
+	ds3.Spec.UpdateStrategy = *strategy
 	ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2
 	manager.dsStore.Add(ds1)
 	manager.dsStore.Add(ds2)
@@ -1444,12 +1598,16 @@ func TestAddPodOrphan(t *testing.T) {
 	if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) {
 		t.Errorf("getQueuedKeys() = %v, want %v", got, want)
 	}
+	}
 }
 
 func TestUpdatePod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	manager, _, _ := newTestController()
 	ds1 := newDaemonSet("foo1")
+	ds1.Spec.UpdateStrategy = *strategy
 	ds2 := newDaemonSet("foo2")
+	ds2.Spec.UpdateStrategy = *strategy
 	manager.dsStore.Add(ds1)
 	manager.dsStore.Add(ds2)
 
@@ -1484,12 +1642,16 @@ func TestUpdatePod(t *testing.T) {
 	if got, want := key.(string), expectedKey; got != want {
 		t.Errorf("queue.Get() = %v, want %v", got, want)
 	}
+	}
 }
 
 func TestUpdatePodOrphanSameLabels(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	manager, _, _ := newTestController()
 	ds1 := newDaemonSet("foo1")
+	ds1.Spec.UpdateStrategy = *strategy
 	ds2 := newDaemonSet("foo2")
+	ds2.Spec.UpdateStrategy = *strategy
 	manager.dsStore.Add(ds1)
 	manager.dsStore.Add(ds2)
 
@@ -1500,12 +1662,16 @@ func TestUpdatePodOrphanSameLabels(t *testing.T) {
 	if got, want := manager.queue.Len(), 0; got != want {
 		t.Fatalf("queue.Len() = %v, want %v", got, want)
 	}
+	}
 }
 
 func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	manager, _, _ := newTestController()
 	ds1 := newDaemonSet("foo1")
+	ds1.Spec.UpdateStrategy = *strategy
 	ds2 := newDaemonSet("foo2")
+	ds2.Spec.UpdateStrategy = *strategy
 	manager.dsStore.Add(ds1)
 	manager.dsStore.Add(ds2)
 
@@ -1520,9 +1686,13 @@ func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
 	if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) {
 		t.Errorf("getQueuedKeys() = %v, want %v", got, want)
 	}
+	}
 }
 
 func TestUpdatePodChangeControllerRef(t *testing.T) {
+	for _, strategy := range updateStrategies() {
+	ds := newDaemonSet("foo")
+	ds.Spec.UpdateStrategy = *strategy
 	manager, _, _ := newTestController()
 	ds1 := newDaemonSet("foo1")
 	ds2 := newDaemonSet("foo2")
@@ -1537,12 +1707,16 @@ func TestUpdatePodChangeControllerRef(t *testing.T) {
 	if got, want := manager.queue.Len(), 2; got != want {
 		t.Fatalf("queue.Len() = %v, want %v", got, want)
 	}
+	}
 }
 
 func TestUpdatePodControllerRefRemoved(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	manager, _, _ := newTestController()
 	ds1 := newDaemonSet("foo1")
+	ds1.Spec.UpdateStrategy = *strategy
 	ds2 := newDaemonSet("foo2")
+	ds2.Spec.UpdateStrategy = *strategy
 	manager.dsStore.Add(ds1)
 	manager.dsStore.Add(ds2)
 
@@ -1554,12 +1728,16 @@ func TestUpdatePodControllerRefRemoved(t *testing.T) {
 	if got, want := manager.queue.Len(), 2; got != want {
 		t.Fatalf("queue.Len() = %v, want %v", got, want)
 	}
+	}
 }
 
 func TestDeletePod(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	manager, _, _ := newTestController()
 	ds1 := newDaemonSet("foo1")
+	ds1.Spec.UpdateStrategy = *strategy
 	ds2 := newDaemonSet("foo2")
+	ds2.Spec.UpdateStrategy = *strategy
 	manager.dsStore.Add(ds1)
 	manager.dsStore.Add(ds2)
 
@@ -1590,13 +1768,18 @@ func TestDeletePod(t *testing.T) {
 	if got, want := key.(string), expectedKey; got != want {
 		t.Errorf("queue.Get() = %v, want %v", got, want)
 	}
+	}
 }
 
 func TestDeletePodOrphan(t *testing.T) {
+	for _, strategy := range updateStrategies() {
 	manager, _, _ := newTestController()
 	ds1 := newDaemonSet("foo1")
+	ds1.Spec.UpdateStrategy = *strategy
 	ds2 := newDaemonSet("foo2")
+	ds2.Spec.UpdateStrategy = *strategy
 	ds3 := newDaemonSet("foo3")
+	ds3.Spec.UpdateStrategy = *strategy
 	ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2
 	manager.dsStore.Add(ds1)
 	manager.dsStore.Add(ds2)
@@ -1607,6 +1790,7 @@ func TestDeletePodOrphan(t *testing.T) {
 	if got, want := manager.queue.Len(), 0; got != want {
 		t.Fatalf("queue.Len() = %v, want %v", got, want)
 	}
+	}
 }
 
 func bumpResourceVersion(obj metav1.Object) {