From 1b6591312df88a1806d734d191ab84056a92c18f Mon Sep 17 00:00:00 2001
From: Clayton Coleman
Date: Fri, 8 Apr 2016 11:20:24 -0400
Subject: [PATCH] Update the scheduler to handle init containers

---
 .../scheduler/algorithm/predicates/error.go   |  2 +-
 .../algorithm/predicates/predicates.go        | 12 +++-
 .../algorithm/predicates/predicates_test.go   | 71 ++++++++++++++++++-
 3 files changed, 82 insertions(+), 3 deletions(-)

diff --git a/plugin/pkg/scheduler/algorithm/predicates/error.go b/plugin/pkg/scheduler/algorithm/predicates/error.go
index 9f6a0d1bb39..b95cc4eb2f7 100644
--- a/plugin/pkg/scheduler/algorithm/predicates/error.go
+++ b/plugin/pkg/scheduler/algorithm/predicates/error.go
@@ -21,7 +21,7 @@ import "fmt"
 const (
 	podCountResourceName  string = "PodCount"
 	cpuResourceName       string = "CPU"
-	memoryResoureceName   string = "Memory"
+	memoryResourceName    string = "Memory"
 	nvidiaGpuResourceName string = "NvidiaGpu"
 )
 
diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go
index 4b1c8c9b5d9..a01876c3fca 100644
--- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go
+++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go
@@ -359,6 +359,16 @@ func getResourceRequest(pod *api.Pod) resourceRequest {
 		result.milliCPU += requests.Cpu().MilliValue()
 		result.nvidiaGPU += requests.NvidiaGPU().Value()
 	}
+	// take max_resource(sum_pod, any_init_container)
+	for _, container := range pod.Spec.InitContainers {
+		requests := container.Resources.Requests
+		if mem := requests.Memory().Value(); mem > result.memory {
+			result.memory = mem
+		}
+		if cpu := requests.Cpu().MilliValue(); cpu > result.milliCPU {
+			result.milliCPU = cpu
+		}
+	}
 	return result
 }
 
@@ -428,7 +438,7 @@ func PodFitsResources(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, er
 	}
 	if totalMemory < podRequest.memory+nodeInfo.RequestedResource().Memory {
 		return false,
-			newInsufficientResourceError(memoryResoureceName, podRequest.memory, nodeInfo.RequestedResource().Memory, totalMemory)
+			newInsufficientResourceError(memoryResourceName, podRequest.memory, nodeInfo.RequestedResource().Memory, totalMemory)
 	}
 	if totalNvidiaGPU < podRequest.nvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU {
 		return false,
diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go
index a2e7e691c37..358a483909e 100644
--- a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go
+++ b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go
@@ -111,6 +111,11 @@ func newResourcePod(usage ...resourceRequest) *api.Pod {
 	}
 }
 
+func newResourceInitPod(pod *api.Pod, usage ...resourceRequest) *api.Pod {
+	pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
+	return pod
+}
+
 func TestPodFitsResources(t *testing.T) {
 	enoughPodsTests := []struct {
 		pod      *api.Pod
@@ -135,6 +140,54 @@ func TestPodFitsResources(t *testing.T) {
 			test: "too many resources fails",
 			wErr: newInsufficientResourceError(cpuResourceName, 1, 10, 10),
 		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 8, memory: 19})),
+			fits: false,
+			test: "too many resources fails due to init container cpu",
+			wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}, resourceRequest{milliCPU: 2, memory: 1}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 8, memory: 19})),
+			fits: false,
+			test: "too many resources fails due to highest init container cpu",
+			wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+			fits: false,
+			test: "too many resources fails due to init container memory",
+			wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}, resourceRequest{milliCPU: 1, memory: 2}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+			fits: false,
+			test: "too many resources fails due to highest init container memory",
+			wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+			fits: true,
+			test: "init container fits because it's the max, not sum, of containers and init containers",
+			wErr: nil,
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}, resourceRequest{milliCPU: 1, memory: 1}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+			fits: true,
+			test: "multiple init containers fit because it's the max, not sum, of containers and init containers",
+			wErr: nil,
+		},
 		{
 			pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
@@ -149,7 +202,7 @@ func TestPodFitsResources(t *testing.T) {
 				newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
 			fits: false,
 			test: "one resources fits",
-			wErr: newInsufficientResourceError(memoryResoureceName, 2, 19, 20),
+			wErr: newInsufficientResourceError(memoryResourceName, 2, 19, 20),
 		},
 		{
 			pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
@@ -159,6 +212,14 @@ func TestPodFitsResources(t *testing.T) {
 			test: "equal edge case",
 			wErr: nil,
 		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 4, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+			fits: true,
+			test: "equal edge case for init container",
+			wErr: nil,
+		},
 	}
 
 	for _, test := range enoughPodsTests {
@@ -205,6 +266,14 @@ func TestPodFitsResources(t *testing.T) {
 			test: "even for equal edge case predicate fails when there's no space for additional pod",
 			wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
 		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 5, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+			fits: false,
+			test: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
+			wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
+		},
 	}
 	for _, test := range notEnoughPodsTests {
 		node := api.Node{Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 0, 1)}}
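
Note on the rule the patch implements: init containers run sequentially, one at a time, before any app container starts, so the pod's effective request is the maximum of (a) the sum of the app containers' requests and (b) the largest single init-container request, exactly as the getResourceRequest loop above computes. The following is a minimal standalone sketch of that rule, not code from the patch: it uses plain int64 milli-CPU values in place of the api.ResourceList types, and effectiveRequest is a hypothetical helper name.

	package main

	import "fmt"

	// effectiveRequest mirrors the max(sum_pod, any_init_container) rule:
	// app containers run concurrently, so their requests are summed;
	// init containers run one at a time, so only the largest one matters.
	func effectiveRequest(appMilliCPU, initMilliCPU []int64) int64 {
		var total int64
		for _, c := range appMilliCPU {
			total += c // sum over app containers
		}
		for _, c := range initMilliCPU {
			if c > total {
				total = c // a single init container may dominate the request
			}
		}
		return total
	}

	func main() {
		// Mirrors the "too many resources fails due to highest init container cpu"
		// test case: one app container requesting 1, init containers requesting 3 and 2.
		fmt.Println(effectiveRequest([]int64{1}, []int64{3, 2})) // prints 3, not 1+3+2
	}

With 8 milli-CPU already requested on a 10 milli-CPU node, that effective request of 3 cannot fit, which is exactly what the corresponding predicates_test.go case asserts via newInsufficientResourceError(cpuResourceName, 3, 8, 10).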