Update the scheduler to handle init containers

Clayton Coleman 2016-04-08 11:20:24 -04:00
parent 205a8b4574
commit 1b6591312d
3 changed files with 82 additions and 3 deletions


@@ -21,7 +21,7 @@ import "fmt"
const (
podCountResourceName string = "PodCount"
cpuResourceName string = "CPU"
- memoryResoureceName string = "Memory"
+ memoryResourceName string = "Memory"
nvidiaGpuResourceName string = "NvidiaGpu"
)


@@ -359,6 +359,16 @@ func getResourceRequest(pod *api.Pod) resourceRequest {
result.milliCPU += requests.Cpu().MilliValue()
result.nvidiaGPU += requests.NvidiaGPU().Value()
}
// take max_resource(sum_pod, any_init_container)
for _, container := range pod.Spec.InitContainers {
requests := container.Resources.Requests
if mem := requests.Memory().Value(); mem > result.memory {
result.memory = mem
}
if cpu := requests.Cpu().MilliValue(); cpu > result.milliCPU {
result.milliCPU = cpu
}
}
return result
}
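The rule the new loop implements can be sketched on its own: the effective request of a pod is the larger of (a) the sum of its regular containers and (b) the single largest init container, because init containers run one at a time and finish before the regular containers start. The following is a minimal, self-contained sketch; podRequest and effectiveRequest are illustrative names standing in for the scheduler's resourceRequest and getResourceRequest, and the numbers in main mirror the "highest init container cpu" test case below.

package main

import "fmt"

// podRequest is a simplified stand-in for the scheduler's resourceRequest
// struct: CPU in millicores, memory in bytes.
type podRequest struct {
	milliCPU int64
	memory   int64
}

// effectiveRequest mirrors the "take max_resource(sum_pod, any_init_container)"
// rule: sum the regular containers, then let any single init container raise
// the result, since init containers run sequentially before the others.
func effectiveRequest(containers, initContainers []podRequest) podRequest {
	var result podRequest
	for _, c := range containers {
		result.milliCPU += c.milliCPU
		result.memory += c.memory
	}
	for _, c := range initContainers {
		if c.milliCPU > result.milliCPU {
			result.milliCPU = c.milliCPU
		}
		if c.memory > result.memory {
			result.memory = c.memory
		}
	}
	return result
}

func main() {
	// Regular container 1m CPU / 1 memory; init containers 3m/1 and 2m/1.
	// Effective request: max(1, 3, 2) = 3m CPU and max(1, 1, 1) = 1 memory.
	fmt.Println(effectiveRequest(
		[]podRequest{{milliCPU: 1, memory: 1}},
		[]podRequest{{milliCPU: 3, memory: 1}, {milliCPU: 2, memory: 1}},
	))
}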
@@ -428,7 +438,7 @@ func PodFitsResources(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
}
if totalMemory < podRequest.memory+nodeInfo.RequestedResource().Memory {
return false,
- newInsufficientResourceError(memoryResoureceName, podRequest.memory, nodeInfo.RequestedResource().Memory, totalMemory)
+ newInsufficientResourceError(memoryResourceName, podRequest.memory, nodeInfo.RequestedResource().Memory, totalMemory)
}
if totalNvidiaGPU < podRequest.nvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU {
return false,

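Apart from the renamed constant, the fitness check itself is unchanged: a pod fits when its effective request plus what the node's existing pods already request stays within the node's allocatable total, and the error carries (requested, used, capacity) in that order. Below is a rough standalone sketch of that comparison under those assumptions; the type and function names are illustrative, not the scheduler's actual ones.

package main

import "fmt"

// insufficientResourceError is an illustrative stand-in for the error built by
// newInsufficientResourceError above, with the same argument ordering.
type insufficientResourceError struct {
	resource                  string
	requested, used, capacity int64
}

func (e *insufficientResourceError) Error() string {
	return fmt.Sprintf("insufficient %s: requested %d, used %d, capacity %d",
		e.resource, e.requested, e.used, e.capacity)
}

// fitsResource applies the same comparison as the checks above: the pod fits
// only if what is already requested plus the new request stays within capacity.
func fitsResource(resource string, requested, used, capacity int64) error {
	if capacity < requested+used {
		return &insufficientResourceError{resource, requested, used, capacity}
	}
	return nil
}

func main() {
	// Same numbers as the "init container cpu" test case below: 10 millicores
	// allocatable, 8 already requested, effective request 3, so 8+3 > 10 and
	// the pod is rejected with (3, 8, 10).
	fmt.Println(fitsResource("CPU", 3, 8, 10))
}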

@@ -111,6 +111,11 @@ func newResourcePod(usage ...resourceRequest) *api.Pod {
}
}
func newResourceInitPod(pod *api.Pod, usage ...resourceRequest) *api.Pod {
pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
return pod
}
func TestPodFitsResources(t *testing.T) {
enoughPodsTests := []struct {
pod *api.Pod
@@ -135,6 +140,54 @@ func TestPodFitsResources(t *testing.T) {
test: "too many resources fails",
wErr: newInsufficientResourceError(cpuResourceName, 1, 10, 10),
},
{
pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(resourceRequest{milliCPU: 8, memory: 19})),
fits: false,
test: "too many resources fails due to init container cpu",
wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
},
{
pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}, resourceRequest{milliCPU: 2, memory: 1}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(resourceRequest{milliCPU: 8, memory: 19})),
fits: false,
test: "too many resources fails due to highest init container cpu",
wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
},
{
pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
fits: false,
test: "too many resources fails due to init container memory",
wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
},
{
pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}, resourceRequest{milliCPU: 1, memory: 2}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
fits: false,
test: "too many resources fails due to highest init container memory",
wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
},
{
pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
fits: true,
test: "init container fits because it's the max, not sum, of containers and init containers",
wErr: nil,
},
{
pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}, resourceRequest{milliCPU: 1, memory: 1}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
fits: true,
test: "multiple init containers fit because it's the max, not sum, of containers and init containers",
wErr: nil,
},
{
pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
nodeInfo: schedulercache.NewNodeInfo(
@@ -149,7 +202,7 @@ func TestPodFitsResources(t *testing.T) {
newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
fits: false,
test: "one resources fits",
- wErr: newInsufficientResourceError(memoryResoureceName, 2, 19, 20),
+ wErr: newInsufficientResourceError(memoryResourceName, 2, 19, 20),
},
{
pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
@@ -159,6 +212,14 @@ func TestPodFitsResources(t *testing.T) {
test: "equal edge case",
wErr: nil,
},
{
pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 4, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
fits: true,
test: "equal edge case for init container",
wErr: nil,
},
}
for _, test := range enoughPodsTests {
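Worked numbers for the two "max, not sum" cases above: the expected errors in this table imply 10 millicores of CPU and 20 units of memory allocatable, and the existing pod on the node requests 9m CPU and 19 memory. The new pod's regular containers sum to 1m/1 and each init container asks for 1m/1, so the effective request is max(1, 1) = 1m CPU and 1 memory; 9 + 1 <= 10 and 19 + 1 <= 20, so the pod fits. Summing init containers into the request instead would give 2m/2 (or 3m/3 with two init containers), and 9 + 2 > 10 would wrongly reject the pod on CPU.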
@@ -205,6 +266,14 @@ func TestPodFitsResources(t *testing.T) {
test: "even for equal edge case predicate fails when there's no space for additional pod",
wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
},
{
pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 5, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
fits: false,
test: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
},
}
for _, test := range notEnoughPodsTests {
node := api.Node{Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 0, 1)}}
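The new case in this second table checks that init containers do not bypass the pod-count limit: the last argument to makeAllocatableResources is the allowable pod count, here 1 (as the expected (1, 1, 1) pod-count errors confirm), and the node already holds one pod, so the predicate fails on pod count even though the effective CPU request of max(5, 5) = 5m and memory of max(1, 1) = 1 would fit exactly. A rough sketch of that count check, with illustrative names rather than the scheduler's actual fields:

package main

import "fmt"

// fitsPodCount sketches the pod-count portion of the fit check.
func fitsPodCount(allowedPods, existingPods int64) error {
	// In the test above the node allows 1 pod and already holds 1, so
	// 1+1 > 1 and the pod is rejected with requested=1, used=1, capacity=1.
	if existingPods+1 > allowedPods {
		return fmt.Errorf("insufficient PodCount: requested 1, used %d, capacity %d",
			existingPods, allowedPods)
	}
	return nil
}

func main() {
	fmt.Println(fitsPodCount(1, 1))
}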