Merge pull request #127878 from AxeZhan/sidecar

[scheduler] calculate pod requests resources with sidecar containers

Commit 95612e7b3b
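Background for the diff below: with the SidecarContainers feature, restartable init containers (sidecars) keep running for the whole lifetime of the pod, so their requests must be added to the regular containers' requests rather than only folded into the init-phase maximum. A rough sketch of that aggregation rule for CPU follows (helper name and simplifications are mine; the scheduler itself relies on resourcehelper.PodRequests as shown in the diff, and this sketch ignores pod overhead and in-place resize; it assumes Go 1.21+ for the built-in max):

    import v1 "k8s.io/api/core/v1"

    // effectiveCPUMilli mirrors, in simplified form, how a pod's CPU request is
    // aggregated once restartable init containers are treated as sidecars.
    func effectiveCPUMilli(pod *v1.Pod) int64 {
        var sidecars, initPeak, mainSum int64
        for i := range pod.Spec.InitContainers {
            c := &pod.Spec.InitContainers[i]
            req := c.Resources.Requests.Cpu().MilliValue()
            if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
                // A sidecar keeps its request for the rest of the pod's lifetime.
                sidecars += req
                initPeak = max(initPeak, sidecars)
            } else {
                // A regular init container runs next to the sidecars started before it.
                initPeak = max(initPeak, sidecars+req)
            }
        }
        for i := range pod.Spec.Containers {
            mainSum += pod.Spec.Containers[i].Resources.Requests.Cpu().MilliValue()
        }
        // The pod needs the larger of its init-phase peak and its steady-state sum.
        return max(initPeak, mainSum+sidecars)
    }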
@@ -675,19 +675,29 @@ func TestRestartableInitContainers(t *testing.T) {
 			},
 		}
 	}
-	newPodWithRestartableInitContainers := func() *v1.Pod {
+	newPodWithRestartableInitContainers := func(request, sidecarRequest *v1.ResourceList) *v1.Pod {
 		restartPolicyAlways := v1.ContainerRestartPolicyAlways
+
+		container := v1.Container{Name: "regular"}
+		if request != nil {
+			container.Resources = v1.ResourceRequirements{
+				Requests: *request,
+			}
+		}
+
+		sidecarContainer := v1.Container{
+			Name: "restartable-init",
+			RestartPolicy: &restartPolicyAlways,
+		}
+		if sidecarRequest != nil {
+			sidecarContainer.Resources = v1.ResourceRequirements{
+				Requests: *sidecarRequest,
+			}
+		}
 		return &v1.Pod{
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{
-					{Name: "regular"},
-				},
-				InitContainers: []v1.Container{
-					{
-						Name: "restartable-init",
-						RestartPolicy: &restartPolicyAlways,
-					},
-				},
+				Containers: []v1.Container{container},
+				InitContainers: []v1.Container{sidecarContainer},
 			},
 		}
 	}
@@ -697,6 +707,7 @@ func TestRestartableInitContainers(t *testing.T) {
 		pod                     *v1.Pod
 		enableSidecarContainers bool
 		wantPreFilterStatus     *framework.Status
+		wantFilterStatus        *framework.Status
 	}{
 		{
 			name: "allow pod without restartable init containers if sidecar containers is disabled",
@@ -704,7 +715,7 @@ func TestRestartableInitContainers(t *testing.T) {
 		},
 		{
 			name: "not allow pod with restartable init containers if sidecar containers is disabled",
-			pod: newPodWithRestartableInitContainers(),
+			pod: newPodWithRestartableInitContainers(nil, nil),
 			wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "Pod has a restartable init container and the SidecarContainers feature is disabled"),
 		},
 		{
@@ -715,7 +726,24 @@ func TestRestartableInitContainers(t *testing.T) {
 		{
 			name: "allow pod with restartable init containers if sidecar containers is enabled",
 			enableSidecarContainers: true,
-			pod: newPodWithRestartableInitContainers(),
+			pod: newPodWithRestartableInitContainers(nil, nil),
+		},
+		{
+			name: "allow pod if the total requested resources do not exceed the node's allocatable resources",
+			enableSidecarContainers: true,
+			pod: newPodWithRestartableInitContainers(
+				&v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI)},
+				&v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI)},
+			),
+		},
+		{
+			name: "not allow pod if the total requested resources do exceed the node's allocatable resources",
+			enableSidecarContainers: true,
+			pod: newPodWithRestartableInitContainers(
+				&v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI)},
+				&v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(2, resource.DecimalSI)},
+			),
+			wantFilterStatus: framework.NewStatus(framework.Unschedulable, "Insufficient cpu"),
 		},
 	}
 
@@ -724,7 +752,7 @@ func TestRestartableInitContainers(t *testing.T) {
 			_, ctx := ktesting.NewTestContext(t)
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
-			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(0, 0, 1, 0, 0, 0)}}
+			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(2, 0, 1, 0, 0, 0)}}
 			nodeInfo := framework.NewNodeInfo()
 			nodeInfo.SetNode(&node)
 
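A note on the two new Filter cases: assuming the first argument of makeAllocatableResources is allocatable milliCPU (which the "Insufficient cpu" expectation suggests), the test node now offers 2m CPU. The "allow" case requests 1m for the regular container plus 1m for the sidecar, which together exactly fit; the "not allow" case requests 1m + 2m = 3m, which exceeds the 2m allocatable and should be rejected by Filter even though PreFilter succeeds.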
@@ -735,15 +763,15 @@ func TestRestartableInitContainers(t *testing.T) {
 			cycleState := framework.NewCycleState()
 			_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod)
 			if diff := cmp.Diff(test.wantPreFilterStatus, preFilterStatus); diff != "" {
-				t.Error("status does not match (-expected +actual):\n", diff)
+				t.Error("prefilter status does not match (-expected +actual):\n", diff)
 			}
 			if !preFilterStatus.IsSuccess() {
 				return
 			}
 
 			filterStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, nodeInfo)
-			if !filterStatus.IsSuccess() {
-				t.Error("status does not match (-expected +actual):\n- Success\n +\n", filterStatus.Code())
+			if diff := cmp.Diff(test.wantFilterStatus, filterStatus); diff != "" {
+				t.Error("filter status does not match (-expected +actual):\n", diff)
 			}
 		})
 	}
@@ -932,6 +960,52 @@ func TestFitScore(t *testing.T) {
 			},
 			runPreScore: false,
 		},
+		{
+			name: "test case for ScoringStrategy MostAllocated with sidecar container",
+			requestedPod: st.MakePod().
+				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
+				Obj(),
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
+				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
+			},
+			existingPods: []*v1.Pod{
+				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
+					SidecarReq(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
+				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
+			},
+			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 45}},
+			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
+				ScoringStrategy: &config.ScoringStrategy{
+					Type: config.MostAllocated,
+					Resources: defaultResources,
+				},
+			},
+			runPreScore: true,
+		},
+		{
+			name: "test case for ScoringStrategy LeastAllocated with sidecar container",
+			requestedPod: st.MakePod().
+				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
+				Obj(),
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
+				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
+			},
+			existingPods: []*v1.Pod{
+				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
+					SidecarReq(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
+				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
+			},
+			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 55}},
+			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
+				ScoringStrategy: &config.ScoringStrategy{
+					Type: config.LeastAllocated,
+					Resources: defaultResources,
+				},
+			},
+			runPreScore: true,
+		},
 	}
 
 	for _, test := range tests {
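A quick check of the new expected scores, assuming the usual scoring formulas (MostAllocated: mean over resources of requested/allocatable x 100; LeastAllocated: mean of (allocatable - requested)/allocatable x 100, both rounded down). With the sidecar counted, node1 has 1000 + 1000 + 1000 = 3000 of 4000 CPU (75%) and 2000 + 2000 + 2000 = 6000 of 10000 memory (60%) requested, while node2 has 2000/4000 CPU (50%) and 4000/10000 memory (40%). That gives MostAllocated scores of (75+60)/2 = 67 and (50+40)/2 = 45, and LeastAllocated scores of (25+40)/2 = 32 and (50+60)/2 = 55, matching expectedPriorities in both cases.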
@@ -32,7 +32,7 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog/v2"
 
-	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	"k8s.io/apimachinery/pkg/api/resource"
 	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
 	"k8s.io/kubernetes/pkg/features"
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
@@ -930,40 +930,24 @@ func (n *NodeInfo) update(pod *v1.Pod, sign int64) {
 }
 
 func calculateResource(pod *v1.Pod) (Resource, int64, int64) {
-	var non0InitCPU, non0InitMem int64
-	var non0CPU, non0Mem int64
 	requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
 		InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
-		ContainerFn: func(requests v1.ResourceList, containerType podutil.ContainerType) {
-			non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&requests)
-			switch containerType {
-			case podutil.Containers:
-				non0CPU += non0CPUReq
-				non0Mem += non0MemReq
-			case podutil.InitContainers:
-				non0InitCPU = max(non0InitCPU, non0CPUReq)
-				non0InitMem = max(non0InitMem, non0MemReq)
-			}
+	})
+
+	non0Requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
+		InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
+		NonMissingContainerRequests: map[v1.ResourceName]resource.Quantity{
+			v1.ResourceCPU: *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),
+			v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI),
 		},
 	})
 
-	non0CPU = max(non0CPU, non0InitCPU)
-	non0Mem = max(non0Mem, non0InitMem)
-
-	// If Overhead is being utilized, add to the non-zero cpu/memory tracking for the pod. It has already been added
-	// into ScalarResources since it is part of requests
-	if pod.Spec.Overhead != nil {
-		if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
-			non0CPU += pod.Spec.Overhead.Cpu().MilliValue()
-		}
-
-		if _, found := pod.Spec.Overhead[v1.ResourceMemory]; found {
-			non0Mem += pod.Spec.Overhead.Memory().Value()
-		}
-	}
+	non0CPU := non0Requests[v1.ResourceCPU]
+	non0Mem := non0Requests[v1.ResourceMemory]
+
 	var res Resource
 	res.Add(requests)
-	return res, non0CPU, non0Mem
+	return res, non0CPU.MilliValue(), non0Mem.Value()
 }
 
 // updateUsedPorts updates the UsedPorts of NodeInfo.
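This rewrite is the core of the PR: both the visible requests and the "non-zero" requests used for spreading are now produced by resourcehelper.PodRequests, which already understands restartable init containers, instead of being accumulated per container through a ContainerFn callback plus GetNonzeroRequests. The NonMissingContainerRequests option substitutes the scheduler's default CPU and memory requests (schedutil.DefaultMilliCPURequest / DefaultMemoryRequest) for containers that declare none, which is what GetNonzeroRequests used to do; with that, the per-container callback, the manual init-container max logic, and the separate Overhead handling can be dropped, since PodRequests already folds pod overhead into its totals.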
@@ -1525,34 +1525,83 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 			Name: "testpod",
 			UID: types.UID("testpod"),
 		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Name: "c1",
-					Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}},
-				},
-			},
-		},
 		Status: v1.PodStatus{
 			Phase: v1.PodRunning,
-			Resize: "",
-			ContainerStatuses: []v1.ContainerStatus{
-				{
-					Name: "c1",
-					AllocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-				},
-			},
 		},
 	}
 
+	restartAlways := v1.ContainerRestartPolicyAlways
+
+	preparePod := func(pod v1.Pod,
+		requests, allocatedResources,
+		initRequests, initAllocatedResources,
+		sidecarRequests, sidecarAllocatedResources *v1.ResourceList,
+		resizeStatus v1.PodResizeStatus) v1.Pod {
+
+		if requests != nil {
+			pod.Spec.Containers = append(pod.Spec.Containers,
+				v1.Container{
+					Name: "c1",
+					Resources: v1.ResourceRequirements{Requests: *requests},
+				})
+		}
+		if allocatedResources != nil {
+			pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses,
+				v1.ContainerStatus{
+					Name: "c1",
+					AllocatedResources: *allocatedResources,
+				})
+		}
+
+		if initRequests != nil {
+			pod.Spec.InitContainers = append(pod.Spec.InitContainers,
+				v1.Container{
+					Name: "i1",
+					Resources: v1.ResourceRequirements{Requests: *initRequests},
+				},
+			)
+		}
+		if initAllocatedResources != nil {
+			pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses,
+				v1.ContainerStatus{
+					Name: "i1",
+					AllocatedResources: *initAllocatedResources,
+				})
+		}
+
+		if sidecarRequests != nil {
+			pod.Spec.InitContainers = append(pod.Spec.InitContainers,
+				v1.Container{
+					Name: "s1",
+					Resources: v1.ResourceRequirements{Requests: *sidecarRequests},
+					RestartPolicy: &restartAlways,
+				},
+			)
+		}
+		if sidecarAllocatedResources != nil {
+			pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses,
+				v1.ContainerStatus{
+					Name: "s1",
+					AllocatedResources: *sidecarAllocatedResources,
+				})
+		}
+
+		pod.Status.Resize = resizeStatus
+		return pod
+	}
+
 	tests := []struct {
 		name string
 		requests v1.ResourceList
 		allocatedResources v1.ResourceList
-		resizeStatus v1.PodResizeStatus
-		expectedResource Resource
-		expectedNon0CPU int64
-		expectedNon0Mem int64
+		initRequests *v1.ResourceList
+		initAllocatedResources *v1.ResourceList
+		sidecarRequests *v1.ResourceList
+		sidecarAllocatedResources *v1.ResourceList
+		resizeStatus v1.PodResizeStatus
+		expectedResource Resource
+		expectedNon0CPU int64
+		expectedNon0Mem int64
 	}{
 		{
 			name: "Pod with no pending resize",
@@ -1590,16 +1639,44 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 			expectedNon0CPU: cpu500m.MilliValue(),
 			expectedNon0Mem: mem500M.Value(),
 		},
+		{
+			name: "Pod with init container and no pending resize",
+			requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			resizeStatus: "",
+			expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
+			expectedNon0CPU: cpu700m.MilliValue(),
+			expectedNon0Mem: mem800M.Value(),
+		},
+		{
+			name: "Pod with sider container and no pending resize",
+			requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			sidecarRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			sidecarAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			resizeStatus: "",
+			expectedResource: Resource{
+				MilliCPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
+				Memory: mem500M.Value() + mem800M.Value(),
+			},
+			expectedNon0CPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
+			expectedNon0Mem: mem500M.Value() + mem800M.Value(),
+		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			pod := testpod.DeepCopy()
-			pod.Spec.Containers[0].Resources.Requests = tt.requests
-			pod.Status.ContainerStatuses[0].AllocatedResources = tt.allocatedResources
-			pod.Status.Resize = tt.resizeStatus
+			pod := preparePod(*testpod.DeepCopy(),
+				&tt.requests, &tt.allocatedResources,
+				tt.initRequests, tt.initAllocatedResources,
+				tt.sidecarRequests, tt.sidecarAllocatedResources,
+				tt.resizeStatus)
 
-			res, non0CPU, non0Mem := calculateResource(pod)
+			res, non0CPU, non0Mem := calculateResource(&pod)
 			if !reflect.DeepEqual(tt.expectedResource, res) {
 				t.Errorf("Test: %s expected resource: %+v, got: %+v", tt.name, tt.expectedResource, res)
 			}
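The two new cases exercise the aggregation rule directly: with only a regular init container (700m / 800M) next to the main container (500m / 500M), the pod-level request is the init-phase maximum, i.e. 700m CPU and 800M memory; once a restartable init container with the same requests is added, it is summed with the main container instead, so the expectation becomes 500m + 700m CPU and 500M + 800M memory.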
@@ -224,6 +224,12 @@ func (c *ContainerWrapper) ResourceLimits(limMap map[v1.ResourceName]string) *Co
 	return c
 }
 
+// RestartPolicy sets the container's restartPolicy to the given restartPolicy.
+func (c *ContainerWrapper) RestartPolicy(restartPolicy v1.ContainerRestartPolicy) *ContainerWrapper {
+	c.Container.RestartPolicy = &restartPolicy
+	return c
+}
+
 // PodWrapper wraps a Pod inside.
 type PodWrapper struct{ v1.Pod }
 
@@ -701,6 +707,17 @@ func (p *PodWrapper) InitReq(resMap map[v1.ResourceName]string) *PodWrapper {
 	return p
 }
 
+// SidecarReq adds a new sidecar container to the inner pod with given resource map.
+func (p *PodWrapper) SidecarReq(resMap map[v1.ResourceName]string) *PodWrapper {
+	if len(resMap) == 0 {
+		return p
+	}
+
+	name := fmt.Sprintf("sidecar-con%d", len(p.Spec.InitContainers))
+	p.Spec.InitContainers = append(p.Spec.InitContainers, MakeContainer().Name(name).Image(imageutils.GetPauseImageName()).RestartPolicy(v1.ContainerRestartPolicyAlways).Resources(resMap).Obj())
+	return p
+}
+
 // PreemptionPolicy sets the give preemption policy to the inner pod.
 func (p *PodWrapper) PreemptionPolicy(policy v1.PreemptionPolicy) *PodWrapper {
 	p.Spec.PreemptionPolicy = &policy
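These wrapper methods are what the updated tests use to declare sidecars; for example, the TestFitScore cases above build an existing pod as:

    st.MakePod().Node("node1").
        Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
        SidecarReq(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
        Obj()

which yields a pod with one regular container and one restartable init container named sidecar-con0.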
@@ -16,11 +16,6 @@ limitations under the License.
 
 package util
 
-import (
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-)
-
 // For each of these resources, a container that doesn't request the resource explicitly
 // will be treated as having requested the amount indicated below, for the purpose
 // of computing priority only. This ensures that when scheduling zero-request pods, such
@@ -35,41 +30,3 @@ const (
 	// DefaultMemoryRequest defines default memory request size.
 	DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB
 )
-
-// GetNonzeroRequests returns the default cpu in milli-cpu and memory in bytes resource requests if none is found or
-// what is provided on the request.
-func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) {
-	cpu := GetRequestForResource(v1.ResourceCPU, requests, true)
-	mem := GetRequestForResource(v1.ResourceMemory, requests, true)
-	return cpu.MilliValue(), mem.Value()
-
-}
-
-// GetRequestForResource returns the requested values unless nonZero is true and there is no defined request
-// for CPU and memory.
-// If nonZero is true and the resource has no defined request for CPU or memory, it returns a default value.
-func GetRequestForResource(resourceName v1.ResourceName, requests *v1.ResourceList, nonZero bool) resource.Quantity {
-	if requests == nil {
-		return resource.Quantity{}
-	}
-	switch resourceName {
-	case v1.ResourceCPU:
-		// Override if un-set, but not if explicitly set to zero
-		if _, found := (*requests)[v1.ResourceCPU]; !found && nonZero {
-			return *resource.NewMilliQuantity(DefaultMilliCPURequest, resource.DecimalSI)
-		}
-		return requests.Cpu().DeepCopy()
-	case v1.ResourceMemory:
-		// Override if un-set, but not if explicitly set to zero
-		if _, found := (*requests)[v1.ResourceMemory]; !found && nonZero {
-			return *resource.NewQuantity(DefaultMemoryRequest, resource.DecimalSI)
-		}
-		return requests.Memory().DeepCopy()
-	default:
-		quantity, found := (*requests)[resourceName]
-		if !found {
-			return resource.Quantity{}
-		}
-		return quantity.DeepCopy()
-	}
-}
@@ -1,177 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-)
-
-func TestGetNonZeroRequest(t *testing.T) {
-	tests := []struct {
-		name string
-		requests v1.ResourceList
-		expectedCPU int64
-		expectedMemory int64
-	}{
-		{
-			"cpu_and_memory_not_found",
-			v1.ResourceList{},
-			DefaultMilliCPURequest,
-			DefaultMemoryRequest,
-		},
-		{
-			"only_cpu_exist",
-			v1.ResourceList{
-				v1.ResourceCPU: resource.MustParse("200m"),
-			},
-			200,
-			DefaultMemoryRequest,
-		},
-		{
-			"only_memory_exist",
-			v1.ResourceList{
-				v1.ResourceMemory: resource.MustParse("400Mi"),
-			},
-			DefaultMilliCPURequest,
-			400 * 1024 * 1024,
-		},
-		{
-			"cpu_memory_exist",
-			v1.ResourceList{
-				v1.ResourceCPU: resource.MustParse("200m"),
-				v1.ResourceMemory: resource.MustParse("400Mi"),
-			},
-			200,
-			400 * 1024 * 1024,
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			realCPU, realMemory := GetNonzeroRequests(&test.requests)
-			assert.EqualValuesf(t, test.expectedCPU, realCPU, "Failed to test: %s", test.name)
-			assert.EqualValuesf(t, test.expectedMemory, realMemory, "Failed to test: %s", test.name)
-		})
-	}
-}
-
-func TestGetRequestForResource(t *testing.T) {
-	tests := []struct {
-		name string
-		requests v1.ResourceList
-		resource v1.ResourceName
-		expectedQuantity int64
-		nonZero bool
-	}{
-		{
-			"extended_resource_not_found",
-			v1.ResourceList{},
-			v1.ResourceName("intel.com/foo"),
-			0,
-			true,
-		},
-		{
-			"extended_resource_found",
-			v1.ResourceList{
-				v1.ResourceName("intel.com/foo"): resource.MustParse("4"),
-			},
-			v1.ResourceName("intel.com/foo"),
-			4,
-			true,
-		},
-		{
-			"cpu_not_found",
-			v1.ResourceList{},
-			v1.ResourceCPU,
-			DefaultMilliCPURequest,
-			true,
-		},
-		{
-			"memory_not_found",
-			v1.ResourceList{},
-			v1.ResourceMemory,
-			DefaultMemoryRequest,
-			true,
-		},
-		{
-			"cpu_exist",
-			v1.ResourceList{
-				v1.ResourceCPU: resource.MustParse("200m"),
-			},
-			v1.ResourceCPU,
-			200,
-			true,
-		},
-		{
-			"memory_exist",
-			v1.ResourceList{
-				v1.ResourceMemory: resource.MustParse("400Mi"),
-			},
-			v1.ResourceMemory,
-			400 * 1024 * 1024,
-			true,
-		},
-		{
-			"ephemeralStorage_exist",
-			v1.ResourceList{
-				v1.ResourceEphemeralStorage: resource.MustParse("400Mi"),
-			},
-			v1.ResourceEphemeralStorage,
-			400 * 1024 * 1024,
-			true,
-		},
-		{
-			"ephemeralStorage_not_found",
-			v1.ResourceList{},
-			v1.ResourceEphemeralStorage,
-			0,
-			true,
-		},
-		{
-			"cpu_not_found, useRequested is true",
-			v1.ResourceList{},
-			v1.ResourceCPU,
-			0,
-			false,
-		},
-		{
-			"memory_not_found, useRequested is true",
-			v1.ResourceList{},
-			v1.ResourceMemory,
-			0,
-			false,
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			realQuantity := GetRequestForResource(test.resource, &test.requests, test.nonZero)
-			var realQuantityI64 int64
-			if test.resource == v1.ResourceCPU {
-				realQuantityI64 = realQuantity.MilliValue()
-			} else {
-				realQuantityI64 = realQuantity.Value()
-			}
-			assert.EqualValuesf(t, test.expectedQuantity, realQuantityI64, "Failed to test: %s", test.name)
-		})
-	}
-}
@@ -537,9 +537,10 @@ func getNonZeroRequests(pod *v1.Pod) Resource {
 	result := Resource{}
 	for i := range pod.Spec.Containers {
 		container := &pod.Spec.Containers[i]
-		cpu, memory := schedutil.GetNonzeroRequests(&container.Resources.Requests)
-		result.MilliCPU += cpu
-		result.Memory += memory
+		cpu := getNonZeroRequestForResource(v1.ResourceCPU, &container.Resources.Requests)
+		memory := getNonZeroRequestForResource(v1.ResourceMemory, &container.Resources.Requests)
+		result.MilliCPU += cpu.MilliValue()
+		result.Memory += memory.Value()
 	}
 	return result
 }
@@ -556,3 +557,31 @@ func addTaintToNode(ctx context.Context, cs clientset.Interface, nodeName string
 	e2enode.AddOrUpdateTaintOnNode(ctx, cs, nodeName, testTaint)
 	e2enode.ExpectNodeHasTaint(ctx, cs, nodeName, &testTaint)
 }
+
+// getNonZeroRequestForResource returns the requested values,
+// if the resource has undefined request for CPU or memory, it returns a default value.
+func getNonZeroRequestForResource(resourceName v1.ResourceName, requests *v1.ResourceList) resource.Quantity {
+	if requests == nil {
+		return resource.Quantity{}
+	}
+	switch resourceName {
+	case v1.ResourceCPU:
+		// Override if un-set, but not if explicitly set to zero
+		if _, found := (*requests)[v1.ResourceCPU]; !found {
+			return *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI)
+		}
+		return requests.Cpu().DeepCopy()
+	case v1.ResourceMemory:
+		// Override if un-set, but not if explicitly set to zero
+		if _, found := (*requests)[v1.ResourceMemory]; !found {
+			return *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI)
+		}
+		return requests.Memory().DeepCopy()
+	default:
+		quantity, found := (*requests)[resourceName]
+		if !found {
+			return resource.Quantity{}
+		}
+		return quantity.DeepCopy()
+	}
+}
@@ -108,20 +108,17 @@ func initTestSchedulerForPriorityTest(t *testing.T, preScorePluginName, scorePlu
 	return testCtx
 }
 
-func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext {
+func initTestSchedulerForNodeResourcesTest(t *testing.T, strategy configv1.ScoringStrategyType) *testutils.TestContext {
 	cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{
 		Profiles: []configv1.KubeSchedulerProfile{
 			{
 				SchedulerName: pointer.String(v1.DefaultSchedulerName),
-			},
-			{
-				SchedulerName: pointer.String("gpu-binpacking-scheduler"),
 				PluginConfig: []configv1.PluginConfig{
 					{
 						Name: noderesources.Name,
 						Args: runtime.RawExtension{Object: &configv1.NodeResourcesFitArgs{
 							ScoringStrategy: &configv1.ScoringStrategy{
-								Type: configv1.MostAllocated,
+								Type: strategy,
 								Resources: []configv1.ResourceSpec{
 									{Name: string(v1.ResourceCPU), Weight: 1},
 									{Name: string(v1.ResourceMemory), Weight: 1},
@@ -147,63 +144,146 @@ func initTestSchedulerForNodeResourcesTest(t *testing.T, strategy configv1.Scori
 // TestNodeResourcesScoring verifies that scheduler's node resources priority function
 // works correctly.
 func TestNodeResourcesScoring(t *testing.T) {
-	testCtx := initTestSchedulerForNodeResourcesTest(t)
-	// Add a few nodes.
-	_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Capacity(
-		map[v1.ResourceName]string{
-			v1.ResourceCPU: "8",
-			v1.ResourceMemory: "16G",
-			resourceGPU: "4",
-		}), 2)
-	if err != nil {
-		t.Fatal(err)
-	}
-	cpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound1").Res(
-		map[v1.ResourceName]string{
-			v1.ResourceCPU: "2",
-			v1.ResourceMemory: "4G",
-			resourceGPU: "1",
-		},
-	).Obj())
-	if err != nil {
-		t.Fatal(err)
-	}
-	gpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("gpubound1").Res(
-		map[v1.ResourceName]string{
-			v1.ResourceCPU: "1",
-			v1.ResourceMemory: "2G",
-			resourceGPU: "2",
-		},
-	).Obj())
-	if err != nil {
-		t.Fatal(err)
-	}
-	if cpuBoundPod1.Spec.NodeName == "" || gpuBoundPod1.Spec.NodeName == "" {
-		t.Fatalf("pods should have nodeName assigned, got %q and %q",
-			cpuBoundPod1.Spec.NodeName, gpuBoundPod1.Spec.NodeName)
-	}
-
-	// Since both pods used the default scheduler, then they should land on two different
-	// nodes because the default configuration uses LeastAllocated.
-	if cpuBoundPod1.Spec.NodeName == gpuBoundPod1.Spec.NodeName {
-		t.Fatalf("pods should have landed on different nodes, both scheduled on %q",
-			cpuBoundPod1.Spec.NodeName)
-	}
-
-	// The following pod is using the gpu-binpacking-scheduler profile, which gives a higher weight to
-	// GPU-based binpacking, and so it should land on the node with higher GPU utilization.
-	cpuBoundPod2, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound2").SchedulerName("gpu-binpacking-scheduler").Res(
-		map[v1.ResourceName]string{
-			v1.ResourceCPU: "2",
-			v1.ResourceMemory: "4G",
-			resourceGPU: "1",
-		},
-	).Obj())
-	if err != nil {
-		t.Fatal(err)
-	}
-	if cpuBoundPod2.Spec.NodeName != gpuBoundPod1.Spec.NodeName {
-		t.Errorf("pods should have landed on the same node")
-	}
+	tests := []struct {
+		name string
+		pod func(testCtx *testutils.TestContext) *v1.Pod
+		existingPods func(testCtx *testutils.TestContext) []*v1.Pod
+		nodes []*v1.Node
+		strategy configv1.ScoringStrategyType
+		// expectedNodeName is the list of node names. The pod should be scheduled on either of them.
+		expectedNodeName []string
+	}{
+		{
+			name: "with least allocated strategy, take existing sidecars into consideration",
+			pod: func(testCtx *testutils.TestContext) *v1.Pod {
+				return st.MakePod().Namespace(testCtx.NS.Name).Name("pod").
+					Res(map[v1.ResourceName]string{
+						v1.ResourceCPU: "2",
+						v1.ResourceMemory: "4G",
+						resourceGPU: "1",
+					}).Obj()
+			},
+			existingPods: func(testCtx *testutils.TestContext) []*v1.Pod {
+				return []*v1.Pod{
+					st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod-1").Node("node-1").
+						Res(map[v1.ResourceName]string{
+							v1.ResourceCPU: "2",
+							v1.ResourceMemory: "4G",
+							resourceGPU: "1",
+						}).
+						SidecarReq(map[v1.ResourceName]string{
+							v1.ResourceCPU: "2",
+							v1.ResourceMemory: "2G",
+						}).
+						Obj(),
+					st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod-2").Node("node-2").
+						Res(map[v1.ResourceName]string{
+							v1.ResourceCPU: "2",
+							v1.ResourceMemory: "4G",
+							resourceGPU: "1",
+						}).Obj(),
+				}
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-1").Capacity(
+					map[v1.ResourceName]string{
+						v1.ResourceCPU: "8",
+						v1.ResourceMemory: "16G",
+						resourceGPU: "4",
+					}).Obj(),
+				st.MakeNode().Name("node-2").Capacity(
+					map[v1.ResourceName]string{
+						v1.ResourceCPU: "8",
+						v1.ResourceMemory: "16G",
+						resourceGPU: "4",
+					}).Obj(),
+			},
+			strategy: configv1.LeastAllocated,
+			expectedNodeName: []string{"node-2"},
+		},
+		{
+			name: "with most allocated strategy, take existing sidecars into consideration",
+			pod: func(testCtx *testutils.TestContext) *v1.Pod {
+				return st.MakePod().Namespace(testCtx.NS.Name).Name("pod").
+					Res(map[v1.ResourceName]string{
+						v1.ResourceCPU: "2",
+						v1.ResourceMemory: "4G",
+						resourceGPU: "1",
+					}).Obj()
+			},
+			existingPods: func(testCtx *testutils.TestContext) []*v1.Pod {
+				return []*v1.Pod{
+					st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod-1").Node("node-1").
+						Res(map[v1.ResourceName]string{
+							v1.ResourceCPU: "2",
+							v1.ResourceMemory: "4G",
+							resourceGPU: "1",
+						}).
+						SidecarReq(map[v1.ResourceName]string{
+							v1.ResourceCPU: "2",
+							v1.ResourceMemory: "2G",
+						}).
+						Obj(),
+					st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod-2").Node("node-2").
+						Res(map[v1.ResourceName]string{
+							v1.ResourceCPU: "2",
+							v1.ResourceMemory: "4G",
+							resourceGPU: "1",
+						}).Obj(),
+				}
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-1").Capacity(
+					map[v1.ResourceName]string{
+						v1.ResourceCPU: "8",
+						v1.ResourceMemory: "16G",
+						resourceGPU: "4",
+					}).Obj(),
+				st.MakeNode().Name("node-2").Capacity(
+					map[v1.ResourceName]string{
+						v1.ResourceCPU: "8",
+						v1.ResourceMemory: "16G",
+						resourceGPU: "4",
+					}).Obj(),
+			},
+			strategy: configv1.MostAllocated,
+			expectedNodeName: []string{"node-1"},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
+			testCtx := initTestSchedulerForNodeResourcesTest(t, tt.strategy)
+
+			for _, n := range tt.nodes {
+				if _, err := createNode(testCtx.ClientSet, n); err != nil {
+					t.Fatalf("failed to create node: %v", err)
+				}
+			}
+
+			if err := testutils.WaitForNodesInCache(testCtx.Ctx, testCtx.Scheduler, len(tt.nodes)); err != nil {
+				t.Fatalf("failed to wait for nodes in cache: %v", err)
+			}
+
+			if tt.existingPods != nil {
+				for _, p := range tt.existingPods(testCtx) {
+					if _, err := runPausePod(testCtx.ClientSet, p); err != nil {
+						t.Fatalf("failed to create existing pod: %v", err)
+					}
+				}
+			}
+
+			pod, err := runPausePod(testCtx.ClientSet, tt.pod(testCtx))
+			if err != nil {
+				t.Fatalf("Error running pause pod: %v", err)
+			}
+
+			err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false, podScheduledIn(testCtx.ClientSet, pod.Namespace, pod.Name, tt.expectedNodeName))
+			if err != nil {
+				t.Errorf("Error while trying to wait for a pod to be scheduled: %v", err)
+			}
+		})
+	}
 }
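The expectations in this integration test follow from counting the sidecar: both nodes are identical (8 CPU, 16G memory, 4 GPU), but existing-pod-1 on node-1 requests 2 CPU / 4G / 1 GPU plus a 2 CPU / 2G sidecar, while existing-pod-2 on node-2 requests only 2 CPU / 4G / 1 GPU. Once sidecar requests are included, node-1 is the more utilized node, so LeastAllocated should place the incoming pod on node-2 and MostAllocated on node-1, which is exactly what expectedNodeName encodes.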