Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-10-08 14:29:45 +00:00)
daemonset: apply predicates when placing daemon pods
@@ -22,6 +22,7 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/apis/extensions"
@@ -200,17 +201,127 @@ func TestNotReadyNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
 }
 
-// DaemonSets should not place onto Unschedulable nodes
-func TestUnschedulableNodeDaemonDoesNotLaunchPod(t *testing.T) {
+// DaemonSets should not place onto OutOfDisk nodes
+func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	manager, podControl := newTestController()
-	node := newNode("not-ready", nil)
-	node.Spec.Unschedulable = true
+	node := newNode("not-enough-disk", nil)
+	node.Status.Conditions = []api.NodeCondition{{Type: api.NodeOutOfDisk, Status: api.ConditionTrue}}
 	manager.nodeStore.Add(node)
 	ds := newDaemonSet("foo")
 	manager.dsStore.Add(ds)
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
 }
+
+// DaemonSets should not place onto nodes with insufficient free resource
+func TestInsufficentCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
+	podSpec := api.PodSpec{
+		NodeName: "too-much-mem",
+		Containers: []api.Container{{
+			Resources: api.ResourceRequirements{
+				Requests: api.ResourceList{
+					api.ResourceMemory: resource.MustParse("75M"),
+					api.ResourceCPU:    resource.MustParse("75m"),
+				},
+			},
+		}},
+	}
+	manager, podControl := newTestController()
+	node := newNode("too-much-mem", nil)
+	node.Status.Allocatable = api.ResourceList{
+		api.ResourceMemory: resource.MustParse("100M"),
+		api.ResourceCPU:    resource.MustParse("200m"),
+	}
+	manager.nodeStore.Add(node)
+	manager.podStore.Add(&api.Pod{
+		Spec: podSpec,
+	})
+	ds := newDaemonSet("foo")
+	ds.Spec.Template.Spec = podSpec
+	manager.dsStore.Add(ds)
+	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+}
+
+// DaemonSets should place onto nodes with sufficient free resource
+func TestSufficentCapacityNodeDaemonLaunchesPod(t *testing.T) {
+	podSpec := api.PodSpec{
+		NodeName: "not-too-much-mem",
+		Containers: []api.Container{{
+			Resources: api.ResourceRequirements{
+				Requests: api.ResourceList{
+					api.ResourceMemory: resource.MustParse("75M"),
+					api.ResourceCPU:    resource.MustParse("75m"),
+				},
+			},
+		}},
+	}
+	manager, podControl := newTestController()
+	node := newNode("not-too-much-mem", nil)
+	node.Status.Allocatable = api.ResourceList{
+		api.ResourceMemory: resource.MustParse("200M"),
+		api.ResourceCPU:    resource.MustParse("200m"),
+	}
+	manager.nodeStore.Add(node)
+	manager.podStore.Add(&api.Pod{
+		Spec: podSpec,
+	})
+	ds := newDaemonSet("foo")
+	ds.Spec.Template.Spec = podSpec
+	manager.dsStore.Add(ds)
+	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+}
+
+// DaemonSets should not place onto nodes that would cause port conflicts
+func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
+	podSpec := api.PodSpec{
+		NodeName: "port-conflict",
+		Containers: []api.Container{{
+			Ports: []api.ContainerPort{{
+				HostPort: 666,
+			}},
+		}},
+	}
+	manager, podControl := newTestController()
+	node := newNode("port-conflict", nil)
+	manager.nodeStore.Add(node)
+	manager.podStore.Add(&api.Pod{
+		Spec: podSpec,
+	})
+	ds := newDaemonSet("foo")
+	ds.Spec.Template.Spec = podSpec
+	manager.dsStore.Add(ds)
+	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+}
+
+// DaemonSets should place onto nodes that would not cause port conflicts
+func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
+	podSpec1 := api.PodSpec{
+		NodeName: "no-port-conflict",
+		Containers: []api.Container{{
+			Ports: []api.ContainerPort{{
+				HostPort: 6661,
+			}},
+		}},
+	}
+	podSpec2 := api.PodSpec{
+		NodeName: "no-port-conflict",
+		Containers: []api.Container{{
+			Ports: []api.ContainerPort{{
+				HostPort: 6662,
+			}},
+		}},
+	}
+	manager, podControl := newTestController()
+	node := newNode("no-port-conflict", nil)
+	manager.nodeStore.Add(node)
+	manager.podStore.Add(&api.Pod{
+		Spec: podSpec1,
+	})
+	ds := newDaemonSet("foo")
+	ds.Spec.Template.Spec = podSpec2
+	manager.dsStore.Add(ds)
+	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+}
 
 // Controller should not create pods on nodes which have daemon pods, and should remove excess pods from nodes that have extra pods.
 func TestDealsWithExistingPods(t *testing.T) {
 	manager, podControl := newTestController()
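For context: the tests added here exercise the scheduler-style placement predicates (resource fit and host-port conflicts) that the DaemonSet controller now applies before creating a daemon pod on a node. Below is a minimal, self-contained Go sketch of the two rules the tests assert. The types and helpers in it (Resources, Pod, Node, fitsResources, noPortConflict) are simplified stand-ins invented for illustration, not the controller's actual API, which works with api.PodSpec, api.ResourceList, and friends.

package main

import "fmt"

// Simplified stand-ins for the api types used in the tests above.
type Resources struct {
	MilliCPU int64 // CPU in millicores: "75m" -> 75
	Memory   int64 // memory in bytes: "75M" -> 75000000
}

type Pod struct {
	Requests  Resources
	HostPorts []int
}

type Node struct {
	Allocatable Resources
}

// fitsResources mirrors the Insufficent/SufficentCapacity tests: the daemon
// pod fits only if its requests, plus the requests of the pods already on
// the node, stay within the node's allocatable capacity.
func fitsResources(daemon Pod, existing []Pod, node Node) bool {
	used := daemon.Requests
	for _, p := range existing {
		used.MilliCPU += p.Requests.MilliCPU
		used.Memory += p.Requests.Memory
	}
	return used.MilliCPU <= node.Allocatable.MilliCPU &&
		used.Memory <= node.Allocatable.Memory
}

// noPortConflict mirrors the PortConflict/NoPortConflict tests: placement
// fails if any host port the daemon pod wants is already taken on the node.
func noPortConflict(daemon Pod, existing []Pod) bool {
	taken := map[int]bool{}
	for _, p := range existing {
		for _, port := range p.HostPorts {
			taken[port] = true
		}
	}
	for _, port := range daemon.HostPorts {
		if taken[port] {
			return false
		}
	}
	return true
}

func main() {
	// TestInsufficentCapacityNodeDaemonDoesNotLaunchPod in miniature:
	// 75M + 75M requested against 100M allocatable -> no placement.
	node := Node{Allocatable: Resources{MilliCPU: 200, Memory: 100000000}}
	existing := []Pod{{Requests: Resources{MilliCPU: 75, Memory: 75000000}}}
	daemon := Pod{Requests: Resources{MilliCPU: 75, Memory: 75000000}}
	fmt.Println(fitsResources(daemon, existing, node)) // false

	// TestNoPortConflictNodeDaemonLaunchesPod in miniature:
	// host ports 6661 and 6662 do not collide -> placement allowed.
	fmt.Println(noPortConflict(
		Pod{HostPorts: []int{6662}},
		[]Pod{{HostPorts: []int{6661}}},
	)) // true
}

The last two arguments to syncAndValidateDaemonSets in the tests appear to be the expected pod-create and pod-delete counts, so 0, 0 asserts that no daemon pod is placed and 1, 0 asserts that exactly one is.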