Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-03 01:06:27 +00:00)

Update the scheduler to handle init containers

parent 205a8b4574
commit 1b6591312d
@@ -21,7 +21,7 @@ import "fmt"
 const (
 	podCountResourceName  string = "PodCount"
 	cpuResourceName       string = "CPU"
-	memoryResoureceName   string = "Memory"
+	memoryResourceName    string = "Memory"
 	nvidiaGpuResourceName string = "NvidiaGpu"
 )
 
@@ -359,6 +359,16 @@ func getResourceRequest(pod *api.Pod) resourceRequest {
 		result.milliCPU += requests.Cpu().MilliValue()
 		result.nvidiaGPU += requests.NvidiaGPU().Value()
 	}
+	// take max_resource(sum_pod, any_init_container)
+	for _, container := range pod.Spec.InitContainers {
+		requests := container.Resources.Requests
+		if mem := requests.Memory().Value(); mem > result.memory {
+			result.memory = mem
+		}
+		if cpu := requests.Cpu().MilliValue(); cpu > result.milliCPU {
+			result.milliCPU = cpu
+		}
+	}
 	return result
 }
 
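The loop added above implements the effective-request rule for init containers: because init containers run one at a time, before any regular container starts, a pod's effective request is the maximum of (a) the sum over its regular containers and (b) the largest single init container. The following is a minimal, self-contained sketch of that rule for illustration only; it uses simplified stand-in types, not the real api.Pod or resourceRequest types from this commit.

// effective_request_sketch.go — illustrative only, not part of the commit.
package main

import "fmt"

// request is a simplified stand-in for the scheduler's resourceRequest type.
type request struct {
	milliCPU int64
	memory   int64
}

// effectiveRequest mirrors the rule in getResourceRequest above: sum the
// regular containers, then take the max against each init container.
func effectiveRequest(containers, initContainers []request) request {
	var result request
	for _, r := range containers {
		result.milliCPU += r.milliCPU
		result.memory += r.memory
	}
	for _, r := range initContainers {
		if r.memory > result.memory {
			result.memory = r.memory
		}
		if r.milliCPU > result.milliCPU {
			result.milliCPU = r.milliCPU
		}
	}
	return result
}

func main() {
	// One regular container (1m CPU) plus init containers of 3m and 2m:
	// the effective CPU request is 3m, matching the
	// "too many resources fails due to highest init container cpu" test case.
	fmt.Println(effectiveRequest(
		[]request{{milliCPU: 1, memory: 1}},
		[]request{{milliCPU: 3, memory: 1}, {milliCPU: 2, memory: 1}},
	))
}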
@@ -428,7 +438,7 @@ func PodFitsResources(pod *api.Pod, nodeInfo *schedulercache.NodeInfo) (bool, error) {
 	}
 	if totalMemory < podRequest.memory+nodeInfo.RequestedResource().Memory {
 		return false,
-			newInsufficientResourceError(memoryResoureceName, podRequest.memory, nodeInfo.RequestedResource().Memory, totalMemory)
+			newInsufficientResourceError(memoryResourceName, podRequest.memory, nodeInfo.RequestedResource().Memory, totalMemory)
 	}
 	if totalNvidiaGPU < podRequest.nvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU {
 		return false,
@@ -111,6 +111,11 @@ func newResourcePod(usage ...resourceRequest) *api.Pod {
 	}
 }
+
+func newResourceInitPod(pod *api.Pod, usage ...resourceRequest) *api.Pod {
+	pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
+	return pod
+}
 
 func TestPodFitsResources(t *testing.T) {
 	enoughPodsTests := []struct {
 		pod      *api.Pod
@@ -135,6 +140,54 @@ func TestPodFitsResources(t *testing.T) {
 			test: "too many resources fails",
 			wErr: newInsufficientResourceError(cpuResourceName, 1, 10, 10),
 		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 8, memory: 19})),
+			fits: false,
+			test: "too many resources fails due to init container cpu",
+			wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 3, memory: 1}, resourceRequest{milliCPU: 2, memory: 1}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 8, memory: 19})),
+			fits: false,
+			test: "too many resources fails due to highest init container cpu",
+			wErr: newInsufficientResourceError(cpuResourceName, 3, 8, 10),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+			fits: false,
+			test: "too many resources fails due to init container memory",
+			wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 3}, resourceRequest{milliCPU: 1, memory: 2}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+			fits: false,
+			test: "too many resources fails due to highest init container memory",
+			wErr: newInsufficientResourceError(memoryResourceName, 3, 19, 20),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+			fits: true,
+			test: "init container fits because it's the max, not sum, of containers and init containers",
+			wErr: nil,
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), resourceRequest{milliCPU: 1, memory: 1}, resourceRequest{milliCPU: 1, memory: 1}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 9, memory: 19})),
+			fits: true,
+			test: "multiple init containers fit because it's the max, not sum, of containers and init containers",
+			wErr: nil,
+		},
 		{
 			pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
 			nodeInfo: schedulercache.NewNodeInfo(
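To trace the expected errors in the new cases: in the "init container cpu" case the pod's effective CPU request is max(1, 3) = 3 milliCPU, the node already has 8 milliCPU requested, and the fixture's capacity is 10 milliCPU (the same total the existing cases report), so 8 + 3 exceeds 10 and the predicate returns newInsufficientResourceError(cpuResourceName, 3, 8, 10). The memory cases follow the same arithmetic against the 20-unit memory capacity: max(1, 3) = 3 requested, 19 already used, 19 + 3 > 20.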
@@ -149,7 +202,7 @@ func TestPodFitsResources(t *testing.T) {
 				newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
 			fits: false,
 			test: "one resources fits",
-			wErr: newInsufficientResourceError(memoryResoureceName, 2, 19, 20),
+			wErr: newInsufficientResourceError(memoryResourceName, 2, 19, 20),
 		},
 		{
 			pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
@@ -159,6 +212,14 @@ func TestPodFitsResources(t *testing.T) {
 			test: "equal edge case",
 			wErr: nil,
 		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 4, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+			fits: true,
+			test: "equal edge case for init container",
+			wErr: nil,
+		},
 	}
 
 	for _, test := range enoughPodsTests {
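The "equal edge case for init container" case fits because the init container's 5 milliCPU replaces, rather than adds to, the regular container's 4 milliCPU: an effective request of 5 plus the 5 milliCPU already requested on the node exactly matches the 10 milliCPU capacity these fixtures use, so the predicate passes.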
@@ -205,6 +266,14 @@ func TestPodFitsResources(t *testing.T) {
 			test: "even for equal edge case predicate fails when there's no space for additional pod",
 			wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
 		},
+		{
+			pod: newResourceInitPod(newResourcePod(resourceRequest{milliCPU: 5, memory: 1}), resourceRequest{milliCPU: 5, memory: 1}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(resourceRequest{milliCPU: 5, memory: 19})),
+			fits: false,
+			test: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
+			wErr: newInsufficientResourceError(podCountResourceName, 1, 1, 1),
+		},
 	}
 	for _, test := range notEnoughPodsTests {
 		node := api.Node{Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 0, 1)}}