mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-11 21:12:07 +00:00)
Merge pull request #127878 from AxeZhan/sidecar
[scheduler] calculate pod requests resources with sidecar containers
commit 95612e7b3b
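For context, the gist of this change: restartable init containers (sidecars) keep running alongside the regular containers for the pod's whole lifetime, so their requests have to be added to the regular containers' sum rather than only competing for the init-container maximum. The sketch below is an illustrative simplification of that aggregation rule (CPU only; pod overhead and in-place resize are ignored), not the code changed in this PR; podCPURequestMilli and isSidecar are hypothetical helper names.

package sketch

import (
	v1 "k8s.io/api/core/v1"
)

// isSidecar reports whether an init container is restartable, i.e. a sidecar.
func isSidecar(c v1.Container) bool {
	return c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways
}

// podCPURequestMilli sketches the sidecar-aware request calculation: sidecar
// requests are summed together with the regular containers, while each
// non-restartable init container only needs to fit next to the sidecars that
// were started before it.
func podCPURequestMilli(pod *v1.Pod) int64 {
	var containersSum, sidecarsSum, initMax int64
	for _, c := range pod.Spec.Containers {
		containersSum += c.Resources.Requests.Cpu().MilliValue()
	}
	for _, c := range pod.Spec.InitContainers {
		if isSidecar(c) {
			sidecarsSum += c.Resources.Requests.Cpu().MilliValue()
			continue
		}
		initMax = max(initMax, sidecarsSum+c.Resources.Requests.Cpu().MilliValue())
	}
	return max(initMax, containersSum+sidecarsSum)
}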
@@ -675,19 +675,29 @@ func TestRestartableInitContainers(t *testing.T) {
},
}
}
newPodWithRestartableInitContainers := func() *v1.Pod {
newPodWithRestartableInitContainers := func(request, sidecarRequest *v1.ResourceList) *v1.Pod {
restartPolicyAlways := v1.ContainerRestartPolicyAlways
return &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "regular"},
},
InitContainers: []v1.Container{
{
container := v1.Container{Name: "regular"}
if request != nil {
container.Resources = v1.ResourceRequirements{
Requests: *request,
}
}
sidecarContainer := v1.Container{
Name: "restartable-init",
RestartPolicy: &restartPolicyAlways,
},
},
}
if sidecarRequest != nil {
sidecarContainer.Resources = v1.ResourceRequirements{
Requests: *sidecarRequest,
}
}
return &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{container},
InitContainers: []v1.Container{sidecarContainer},
},
}
}

@@ -697,6 +707,7 @@ func TestRestartableInitContainers(t *testing.T) {
pod *v1.Pod
enableSidecarContainers bool
wantPreFilterStatus *framework.Status
wantFilterStatus *framework.Status
}{
{
name: "allow pod without restartable init containers if sidecar containers is disabled",

@@ -704,7 +715,7 @@ func TestRestartableInitContainers(t *testing.T) {
},
{
name: "not allow pod with restartable init containers if sidecar containers is disabled",
pod: newPodWithRestartableInitContainers(),
pod: newPodWithRestartableInitContainers(nil, nil),
wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "Pod has a restartable init container and the SidecarContainers feature is disabled"),
},
{

@@ -715,7 +726,24 @@ func TestRestartableInitContainers(t *testing.T) {
{
name: "allow pod with restartable init containers if sidecar containers is enabled",
enableSidecarContainers: true,
pod: newPodWithRestartableInitContainers(),
pod: newPodWithRestartableInitContainers(nil, nil),
},
{
name: "allow pod if the total requested resources do not exceed the node's allocatable resources",
enableSidecarContainers: true,
pod: newPodWithRestartableInitContainers(
&v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI)},
&v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI)},
),
},
{
name: "not allow pod if the total requested resources do exceed the node's allocatable resources",
enableSidecarContainers: true,
pod: newPodWithRestartableInitContainers(
&v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI)},
&v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(2, resource.DecimalSI)},
),
wantFilterStatus: framework.NewStatus(framework.Unschedulable, "Insufficient cpu"),
},
}

@@ -724,7 +752,7 @@ func TestRestartableInitContainers(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(0, 0, 1, 0, 0, 0)}}
node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(2, 0, 1, 0, 0, 0)}}
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(&node)

@@ -735,15 +763,15 @@ func TestRestartableInitContainers(t *testing.T) {
cycleState := framework.NewCycleState()
_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod)
if diff := cmp.Diff(test.wantPreFilterStatus, preFilterStatus); diff != "" {
t.Error("status does not match (-expected +actual):\n", diff)
t.Error("prefilter status does not match (-expected +actual):\n", diff)
}
if !preFilterStatus.IsSuccess() {
return
}
filterStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, nodeInfo)
if !filterStatus.IsSuccess() {
t.Error("status does not match (-expected +actual):\n- Success\n +\n", filterStatus.Code())
if diff := cmp.Diff(test.wantFilterStatus, filterStatus); diff != "" {
t.Error("filter status does not match (-expected +actual):\n", diff)
}
})
}
@@ -932,6 +960,52 @@ func TestFitScore(t *testing.T) {
},
runPreScore: false,
},
{
name: "test case for ScoringStrategy MostAllocated with sidecar container",
requestedPod: st.MakePod().
Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
Obj(),
nodes: []*v1.Node{
st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
},
existingPods: []*v1.Pod{
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
SidecarReq(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 45}},
nodeResourcesFitArgs: config.NodeResourcesFitArgs{
ScoringStrategy: &config.ScoringStrategy{
Type: config.MostAllocated,
Resources: defaultResources,
},
},
runPreScore: true,
},
{
name: "test case for ScoringStrategy LeastAllocated with sidecar container",
requestedPod: st.MakePod().
Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
Obj(),
nodes: []*v1.Node{
st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
},
existingPods: []*v1.Pod{
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
SidecarReq(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
},
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 55}},
nodeResourcesFitArgs: config.NodeResourcesFitArgs{
ScoringStrategy: &config.ScoringStrategy{
Type: config.LeastAllocated,
Resources: defaultResources,
},
},
runPreScore: true,
},
}

for _, test := range tests {
@@ -32,7 +32,7 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/apimachinery/pkg/api/resource"
resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
"k8s.io/kubernetes/pkg/features"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"

@@ -930,40 +930,24 @@ func (n *NodeInfo) update(pod *v1.Pod, sign int64) {
}

func calculateResource(pod *v1.Pod) (Resource, int64, int64) {
var non0InitCPU, non0InitMem int64
var non0CPU, non0Mem int64
requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
ContainerFn: func(requests v1.ResourceList, containerType podutil.ContainerType) {
non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&requests)
switch containerType {
case podutil.Containers:
non0CPU += non0CPUReq
non0Mem += non0MemReq
case podutil.InitContainers:
non0InitCPU = max(non0InitCPU, non0CPUReq)
non0InitMem = max(non0InitMem, non0MemReq)
}
})

non0Requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
NonMissingContainerRequests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI),
},
})

non0CPU = max(non0CPU, non0InitCPU)
non0Mem = max(non0Mem, non0InitMem)
non0CPU := non0Requests[v1.ResourceCPU]
non0Mem := non0Requests[v1.ResourceMemory]

// If Overhead is being utilized, add to the non-zero cpu/memory tracking for the pod. It has already been added
// into ScalarResources since it is part of requests
if pod.Spec.Overhead != nil {
if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
non0CPU += pod.Spec.Overhead.Cpu().MilliValue()
}
if _, found := pod.Spec.Overhead[v1.ResourceMemory]; found {
non0Mem += pod.Spec.Overhead.Memory().Value()
}
}
var res Resource
res.Add(requests)
return res, non0CPU, non0Mem
return res, non0CPU.MilliValue(), non0Mem.Value()
}

// updateUsedPorts updates the UsedPorts of NodeInfo.
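The second PodRequests call above drives the "non-zero" accounting used by scoring: NonMissingContainerRequests substitutes schedutil.DefaultMilliCPURequest and schedutil.DefaultMemoryRequest for containers that do not declare a CPU or memory request, so zero-request pods still register some load. A minimal sketch of that substitution step, assuming only CPU and memory are defaulted and that v1 is k8s.io/api/core/v1 as in the surrounding file; nonMissing is a hypothetical helper, not part of this change:

// nonMissing returns a copy of req in which every resource named in defaults
// that the container did not request is filled in with the default quantity;
// explicitly set values, including explicit zeros, are left untouched.
func nonMissing(req, defaults v1.ResourceList) v1.ResourceList {
	out := req.DeepCopy()
	if out == nil {
		out = v1.ResourceList{}
	}
	for name, d := range defaults {
		if _, ok := out[name]; !ok {
			out[name] = d
		}
	}
	return out
}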
@@ -1525,30 +1525,79 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
Name: "testpod",
UID: types.UID("testpod"),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "c1",
Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}},
},
},
},
Status: v1.PodStatus{
Phase: v1.PodRunning,
Resize: "",
ContainerStatuses: []v1.ContainerStatus{
{
},
}

restartAlways := v1.ContainerRestartPolicyAlways

preparePod := func(pod v1.Pod,
requests, allocatedResources,
initRequests, initAllocatedResources,
sidecarRequests, sidecarAllocatedResources *v1.ResourceList,
resizeStatus v1.PodResizeStatus) v1.Pod {

if requests != nil {
pod.Spec.Containers = append(pod.Spec.Containers,
v1.Container{
Name: "c1",
AllocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
},
Resources: v1.ResourceRequirements{Requests: *requests},
})
}
if allocatedResources != nil {
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses,
v1.ContainerStatus{
Name: "c1",
AllocatedResources: *allocatedResources,
})
}

if initRequests != nil {
pod.Spec.InitContainers = append(pod.Spec.InitContainers,
v1.Container{
Name: "i1",
Resources: v1.ResourceRequirements{Requests: *initRequests},
},
)
}
if initAllocatedResources != nil {
pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses,
v1.ContainerStatus{
Name: "i1",
AllocatedResources: *initAllocatedResources,
})
}

if sidecarRequests != nil {
pod.Spec.InitContainers = append(pod.Spec.InitContainers,
v1.Container{
Name: "s1",
Resources: v1.ResourceRequirements{Requests: *sidecarRequests},
RestartPolicy: &restartAlways,
},
)
}
if sidecarAllocatedResources != nil {
pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses,
v1.ContainerStatus{
Name: "s1",
AllocatedResources: *sidecarAllocatedResources,
})
}

pod.Status.Resize = resizeStatus
return pod
}

tests := []struct {
name string
requests v1.ResourceList
allocatedResources v1.ResourceList
initRequests *v1.ResourceList
initAllocatedResources *v1.ResourceList
sidecarRequests *v1.ResourceList
sidecarAllocatedResources *v1.ResourceList
resizeStatus v1.PodResizeStatus
expectedResource Resource
expectedNon0CPU int64
@@ -1590,16 +1639,44 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
},
{
name: "Pod with init container and no pending resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
resizeStatus: "",
expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
expectedNon0CPU: cpu700m.MilliValue(),
expectedNon0Mem: mem800M.Value(),
},
{
name: "Pod with sidecar container and no pending resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
sidecarRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
sidecarAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
resizeStatus: "",
expectedResource: Resource{
MilliCPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
Memory: mem500M.Value() + mem800M.Value(),
},
expectedNon0CPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
expectedNon0Mem: mem500M.Value() + mem800M.Value(),
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pod := testpod.DeepCopy()
pod.Spec.Containers[0].Resources.Requests = tt.requests
pod.Status.ContainerStatuses[0].AllocatedResources = tt.allocatedResources
pod.Status.Resize = tt.resizeStatus
pod := preparePod(*testpod.DeepCopy(),
&tt.requests, &tt.allocatedResources,
tt.initRequests, tt.initAllocatedResources,
tt.sidecarRequests, tt.sidecarAllocatedResources,
tt.resizeStatus)

res, non0CPU, non0Mem := calculateResource(pod)
res, non0CPU, non0Mem := calculateResource(&pod)
if !reflect.DeepEqual(tt.expectedResource, res) {
t.Errorf("Test: %s expected resource: %+v, got: %+v", tt.name, tt.expectedResource, res)
}
@@ -224,6 +224,12 @@ func (c *ContainerWrapper) ResourceLimits(limMap map[v1.ResourceName]string) *Co
return c
}

// RestartPolicy sets the container's restartPolicy to the given restartPolicy.
func (c *ContainerWrapper) RestartPolicy(restartPolicy v1.ContainerRestartPolicy) *ContainerWrapper {
c.Container.RestartPolicy = &restartPolicy
return c
}

// PodWrapper wraps a Pod inside.
type PodWrapper struct{ v1.Pod }
@@ -701,6 +707,17 @@ func (p *PodWrapper) InitReq(resMap map[v1.ResourceName]string) *PodWrapper {
return p
}

// SidecarReq adds a new sidecar container to the inner pod with given resource map.
func (p *PodWrapper) SidecarReq(resMap map[v1.ResourceName]string) *PodWrapper {
if len(resMap) == 0 {
return p
}

name := fmt.Sprintf("sidecar-con%d", len(p.Spec.InitContainers))
p.Spec.InitContainers = append(p.Spec.InitContainers, MakeContainer().Name(name).Image(imageutils.GetPauseImageName()).RestartPolicy(v1.ContainerRestartPolicyAlways).Resources(resMap).Obj())
return p
}

// PreemptionPolicy sets the given preemption policy to the inner pod.
func (p *PodWrapper) PreemptionPolicy(policy v1.PreemptionPolicy) *PodWrapper {
p.Spec.PreemptionPolicy = &policy
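With the wrapper above, a test can declare a pod that carries both regular and sidecar requests in a single builder chain; the score and integration tests in this PR use roughly this pattern:

pod := st.MakePod().Name("p").Node("node1").
	Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
	SidecarReq(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
	Obj()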
@@ -16,11 +16,6 @@ limitations under the License.

package util

import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)

// For each of these resources, a container that doesn't request the resource explicitly
// will be treated as having requested the amount indicated below, for the purpose
// of computing priority only. This ensures that when scheduling zero-request pods, such

@@ -35,41 +30,3 @@ const (
// DefaultMemoryRequest defines default memory request size.
DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB
)

// GetNonzeroRequests returns the default cpu in milli-cpu and memory in bytes resource requests if none is found or
// what is provided on the request.
func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) {
cpu := GetRequestForResource(v1.ResourceCPU, requests, true)
mem := GetRequestForResource(v1.ResourceMemory, requests, true)
return cpu.MilliValue(), mem.Value()
}

// GetRequestForResource returns the requested values unless nonZero is true and there is no defined request
// for CPU and memory.
// If nonZero is true and the resource has no defined request for CPU or memory, it returns a default value.
func GetRequestForResource(resourceName v1.ResourceName, requests *v1.ResourceList, nonZero bool) resource.Quantity {
if requests == nil {
return resource.Quantity{}
}
switch resourceName {
case v1.ResourceCPU:
// Override if un-set, but not if explicitly set to zero
if _, found := (*requests)[v1.ResourceCPU]; !found && nonZero {
return *resource.NewMilliQuantity(DefaultMilliCPURequest, resource.DecimalSI)
}
return requests.Cpu().DeepCopy()
case v1.ResourceMemory:
// Override if un-set, but not if explicitly set to zero
if _, found := (*requests)[v1.ResourceMemory]; !found && nonZero {
return *resource.NewQuantity(DefaultMemoryRequest, resource.DecimalSI)
}
return requests.Memory().DeepCopy()
default:
quantity, found := (*requests)[resourceName]
if !found {
return resource.Quantity{}
}
return quantity.DeepCopy()
}
}
@@ -1,177 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
"testing"

"github.com/stretchr/testify/assert"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)

func TestGetNonZeroRequest(t *testing.T) {
tests := []struct {
name string
requests v1.ResourceList
expectedCPU int64
expectedMemory int64
}{
{
"cpu_and_memory_not_found",
v1.ResourceList{},
DefaultMilliCPURequest,
DefaultMemoryRequest,
},
{
"only_cpu_exist",
v1.ResourceList{
v1.ResourceCPU: resource.MustParse("200m"),
},
200,
DefaultMemoryRequest,
},
{
"only_memory_exist",
v1.ResourceList{
v1.ResourceMemory: resource.MustParse("400Mi"),
},
DefaultMilliCPURequest,
400 * 1024 * 1024,
},
{
"cpu_memory_exist",
v1.ResourceList{
v1.ResourceCPU: resource.MustParse("200m"),
v1.ResourceMemory: resource.MustParse("400Mi"),
},
200,
400 * 1024 * 1024,
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
realCPU, realMemory := GetNonzeroRequests(&test.requests)
assert.EqualValuesf(t, test.expectedCPU, realCPU, "Failed to test: %s", test.name)
assert.EqualValuesf(t, test.expectedMemory, realMemory, "Failed to test: %s", test.name)
})
}
}

func TestGetRequestForResource(t *testing.T) {
tests := []struct {
name string
requests v1.ResourceList
resource v1.ResourceName
expectedQuantity int64
nonZero bool
}{
{
"extended_resource_not_found",
v1.ResourceList{},
v1.ResourceName("intel.com/foo"),
0,
true,
},
{
"extended_resource_found",
v1.ResourceList{
v1.ResourceName("intel.com/foo"): resource.MustParse("4"),
},
v1.ResourceName("intel.com/foo"),
4,
true,
},
{
"cpu_not_found",
v1.ResourceList{},
v1.ResourceCPU,
DefaultMilliCPURequest,
true,
},
{
"memory_not_found",
v1.ResourceList{},
v1.ResourceMemory,
DefaultMemoryRequest,
true,
},
{
"cpu_exist",
v1.ResourceList{
v1.ResourceCPU: resource.MustParse("200m"),
},
v1.ResourceCPU,
200,
true,
},
{
"memory_exist",
v1.ResourceList{
v1.ResourceMemory: resource.MustParse("400Mi"),
},
v1.ResourceMemory,
400 * 1024 * 1024,
true,
},
{
"ephemeralStorage_exist",
v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("400Mi"),
},
v1.ResourceEphemeralStorage,
400 * 1024 * 1024,
true,
},
{
"ephemeralStorage_not_found",
v1.ResourceList{},
v1.ResourceEphemeralStorage,
0,
true,
},
{
"cpu_not_found, useRequested is true",
v1.ResourceList{},
v1.ResourceCPU,
0,
false,
},
{
"memory_not_found, useRequested is true",
v1.ResourceList{},
v1.ResourceMemory,
0,
false,
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
realQuantity := GetRequestForResource(test.resource, &test.requests, test.nonZero)
var realQuantityI64 int64
if test.resource == v1.ResourceCPU {
realQuantityI64 = realQuantity.MilliValue()
} else {
realQuantityI64 = realQuantity.Value()
}
assert.EqualValuesf(t, test.expectedQuantity, realQuantityI64, "Failed to test: %s", test.name)
})
}
}
@@ -537,9 +537,10 @@ func getNonZeroRequests(pod *v1.Pod) Resource {
result := Resource{}
for i := range pod.Spec.Containers {
container := &pod.Spec.Containers[i]
cpu, memory := schedutil.GetNonzeroRequests(&container.Resources.Requests)
result.MilliCPU += cpu
result.Memory += memory
cpu := getNonZeroRequestForResource(v1.ResourceCPU, &container.Resources.Requests)
memory := getNonZeroRequestForResource(v1.ResourceMemory, &container.Resources.Requests)
result.MilliCPU += cpu.MilliValue()
result.Memory += memory.Value()
}
return result
}

@@ -556,3 +557,31 @@ func addTaintToNode(ctx context.Context, cs clientset.Interface, nodeName string
e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName, testTaint)
e2enode.ExpectNodeHasTaint(ctx, cs, nodeName, &testTaint)
}

// getNonZeroRequestForResource returns the requested values,
// if the resource has undefined request for CPU or memory, it returns a default value.
func getNonZeroRequestForResource(resourceName v1.ResourceName, requests *v1.ResourceList) resource.Quantity {
if requests == nil {
return resource.Quantity{}
}
switch resourceName {
case v1.ResourceCPU:
// Override if un-set, but not if explicitly set to zero
if _, found := (*requests)[v1.ResourceCPU]; !found {
return *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI)
}
return requests.Cpu().DeepCopy()
case v1.ResourceMemory:
// Override if un-set, but not if explicitly set to zero
if _, found := (*requests)[v1.ResourceMemory]; !found {
return *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI)
}
return requests.Memory().DeepCopy()
default:
quantity, found := (*requests)[resourceName]
if !found {
return resource.Quantity{}
}
return quantity.DeepCopy()
}
}
@@ -108,20 +108,17 @@ func initTestSchedulerForPriorityTest(t *testing.T, preScorePluginName, scorePlu
return testCtx
}

func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext {
func initTestSchedulerForNodeResourcesTest(t *testing.T, strategy configv1.ScoringStrategyType) *testutils.TestContext {
cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{
Profiles: []configv1.KubeSchedulerProfile{
{
SchedulerName: pointer.String(v1.DefaultSchedulerName),
},
{
SchedulerName: pointer.String("gpu-binpacking-scheduler"),
PluginConfig: []configv1.PluginConfig{
{
Name: noderesources.Name,
Args: runtime.RawExtension{Object: &configv1.NodeResourcesFitArgs{
ScoringStrategy: &configv1.ScoringStrategy{
Type: configv1.MostAllocated,
Type: strategy,
Resources: []configv1.ResourceSpec{
{Name: string(v1.ResourceCPU), Weight: 1},
{Name: string(v1.ResourceMemory), Weight: 1},
@@ -147,63 +144,146 @@ func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext
// TestNodeResourcesScoring verifies that scheduler's node resources priority function
// works correctly.
func TestNodeResourcesScoring(t *testing.T) {
testCtx := initTestSchedulerForNodeResourcesTest(t)
// Add a few nodes.
_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Capacity(
tests := []struct {
name string
pod func(testCtx *testutils.TestContext) *v1.Pod
existingPods func(testCtx *testutils.TestContext) []*v1.Pod
nodes []*v1.Node
strategy configv1.ScoringStrategyType
// expectedNodeName is the list of node names. The pod should be scheduled on either of them.
expectedNodeName []string
}{
{
name: "with least allocated strategy, take existing sidecars into consideration",
pod: func(testCtx *testutils.TestContext) *v1.Pod {
return st.MakePod().Namespace(testCtx.NS.Name).Name("pod").
Res(map[v1.ResourceName]string{
v1.ResourceCPU: "2",
v1.ResourceMemory: "4G",
resourceGPU: "1",
}).Obj()
},
existingPods: func(testCtx *testutils.TestContext) []*v1.Pod {
return []*v1.Pod{
st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod-1").Node("node-1").
Res(map[v1.ResourceName]string{
v1.ResourceCPU: "2",
v1.ResourceMemory: "4G",
resourceGPU: "1",
}).
SidecarReq(map[v1.ResourceName]string{
v1.ResourceCPU: "2",
v1.ResourceMemory: "2G",
}).
Obj(),
st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod-2").Node("node-2").
Res(map[v1.ResourceName]string{
v1.ResourceCPU: "2",
v1.ResourceMemory: "4G",
resourceGPU: "1",
}).Obj(),
}
},
nodes: []*v1.Node{
st.MakeNode().Name("node-1").Capacity(
map[v1.ResourceName]string{
v1.ResourceCPU: "8",
v1.ResourceMemory: "16G",
resourceGPU: "4",
}), 2)
if err != nil {
t.Fatal(err)
}
cpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound1").Res(
}).Obj(),
st.MakeNode().Name("node-2").Capacity(
map[v1.ResourceName]string{
v1.ResourceCPU: "8",
v1.ResourceMemory: "16G",
resourceGPU: "4",
}).Obj(),
},
strategy: configv1.LeastAllocated,
expectedNodeName: []string{"node-2"},
},
{
name: "with most allocated strategy, take existing sidecars into consideration",
pod: func(testCtx *testutils.TestContext) *v1.Pod {
return st.MakePod().Namespace(testCtx.NS.Name).Name("pod").
Res(map[v1.ResourceName]string{
v1.ResourceCPU: "2",
v1.ResourceMemory: "4G",
resourceGPU: "1",
}).Obj()
},
).Obj())
if err != nil {
t.Fatal(err)
}
gpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("gpubound1").Res(
map[v1.ResourceName]string{
v1.ResourceCPU: "1",
existingPods: func(testCtx *testutils.TestContext) []*v1.Pod {
return []*v1.Pod{
st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod-1").Node("node-1").
Res(map[v1.ResourceName]string{
v1.ResourceCPU: "2",
v1.ResourceMemory: "4G",
resourceGPU: "1",
}).
SidecarReq(map[v1.ResourceName]string{
v1.ResourceCPU: "2",
v1.ResourceMemory: "2G",
resourceGPU: "2",
},
).Obj())
if err != nil {
t.Fatal(err)
}
if cpuBoundPod1.Spec.NodeName == "" || gpuBoundPod1.Spec.NodeName == "" {
t.Fatalf("pods should have nodeName assigned, got %q and %q",
cpuBoundPod1.Spec.NodeName, gpuBoundPod1.Spec.NodeName)
}

// Since both pods used the default scheduler, then they should land on two different
// nodes because the default configuration uses LeastAllocated.
if cpuBoundPod1.Spec.NodeName == gpuBoundPod1.Spec.NodeName {
t.Fatalf("pods should have landed on different nodes, both scheduled on %q",
cpuBoundPod1.Spec.NodeName)
}

// The following pod is using the gpu-binpacking-scheduler profile, which gives a higher weight to
// GPU-based binpacking, and so it should land on the node with higher GPU utilization.
cpuBoundPod2, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound2").SchedulerName("gpu-binpacking-scheduler").Res(
map[v1.ResourceName]string{
}).
Obj(),
st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod-2").Node("node-2").
Res(map[v1.ResourceName]string{
v1.ResourceCPU: "2",
v1.ResourceMemory: "4G",
resourceGPU: "1",
},
).Obj())
if err != nil {
t.Fatal(err)
}).Obj(),
}
if cpuBoundPod2.Spec.NodeName != gpuBoundPod1.Spec.NodeName {
t.Errorf("pods should have landed on the same node")
},
nodes: []*v1.Node{
st.MakeNode().Name("node-1").Capacity(
map[v1.ResourceName]string{
v1.ResourceCPU: "8",
v1.ResourceMemory: "16G",
resourceGPU: "4",
}).Obj(),
st.MakeNode().Name("node-2").Capacity(
map[v1.ResourceName]string{
v1.ResourceCPU: "8",
v1.ResourceMemory: "16G",
resourceGPU: "4",
}).Obj(),
},
strategy: configv1.MostAllocated,
expectedNodeName: []string{"node-1"},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
testCtx := initTestSchedulerForNodeResourcesTest(t, tt.strategy)

for _, n := range tt.nodes {
if _, err := createNode(testCtx.ClientSet, n); err != nil {
t.Fatalf("failed to create node: %v", err)
}
}

if err := testutils.WaitForNodesInCache(testCtx.Ctx, testCtx.Scheduler, len(tt.nodes)); err != nil {
t.Fatalf("failed to wait for nodes in cache: %v", err)
}

if tt.existingPods != nil {
for _, p := range tt.existingPods(testCtx) {
if _, err := runPausePod(testCtx.ClientSet, p); err != nil {
t.Fatalf("failed to create existing pod: %v", err)
}
}
}

pod, err := runPausePod(testCtx.ClientSet, tt.pod(testCtx))
if err != nil {
t.Fatalf("Error running pause pod: %v", err)
}

err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false, podScheduledIn(testCtx.ClientSet, pod.Namespace, pod.Name, tt.expectedNodeName))
if err != nil {
t.Errorf("Error while trying to wait for a pod to be scheduled: %v", err)
}
})
}
}