Scheduler: remove pkg/features dependency from NodeResources plugins

Mike Dame 2021-01-29 16:35:38 -05:00
parent 7ccd90e7d7
commit 5a77ebe28b
18 changed files with 154 additions and 105 deletions
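At its core this is a dependency-injection refactor: instead of querying the process-global feature gate (utilfeature.DefaultFeatureGate) at filter/score time, each NodeResources plugin now receives the resolved gate values through a feature.Features struct at construction and stores them as plain booleans. A minimal, self-contained sketch of that pattern (simplified names, not the upstream code):

package main

import "fmt"

// Features mirrors the plugin-local feature struct this commit extends:
// gate values are resolved once, up front, and handed to constructors.
type Features struct {
	EnablePodOverhead bool
}

// fitPlugin stands in for the NodeResourcesFit plugin; it carries the
// resolved gate value instead of consulting a global feature gate.
type fitPlugin struct {
	enablePodOverhead bool
}

func newFit(fts Features) *fitPlugin {
	return &fitPlugin{enablePodOverhead: fts.EnablePodOverhead}
}

// request adds the pod's RuntimeClass overhead only when the gate is on.
func (f *fitPlugin) request(base, overhead int64) int64 {
	if f.enablePodOverhead {
		return base + overhead
	}
	return base
}

func main() {
	p := newFit(Features{EnablePodOverhead: true}) // resolved once at startup
	fmt.Println(p.request(1000, 250))              // 1250
}

This keeps the plugins free of the k8s.io/kubernetes/pkg/features import and lets tests construct them without mutating global gates.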

View File

@@ -20,9 +20,11 @@ import (
 	"fmt"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apiserver/pkg/util/feature"
 	v1affinityhelper "k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
 	"k8s.io/klog/v2"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+	"k8s.io/kubernetes/pkg/features"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
@@ -226,7 +228,7 @@ func GeneralPredicates(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) ([]Pr
 	}
 	var reasons []PredicateFailureReason
-	for _, r := range noderesources.Fits(pod, nodeInfo) {
+	for _, r := range noderesources.Fits(pod, nodeInfo, feature.DefaultFeatureGate.Enabled(features.PodOverhead)) {
 		reasons = append(reasons, &InsufficientResourceError{
 			ResourceName: r.ResourceName,
 			Requested:    r.Requested,

View File

@@ -31,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	apiruntime "k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -43,6 +44,7 @@ import (
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
@@ -1310,8 +1312,14 @@ func TestZeroRequest(t *testing.T) {
 	pluginRegistrations := []st.RegisterPluginFunc{
 		st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
-		st.RegisterScorePlugin(noderesources.LeastAllocatedName, noderesources.NewLeastAllocated, 1),
-		st.RegisterScorePlugin(noderesources.BalancedAllocationName, noderesources.NewBalancedAllocation, 1),
+		st.RegisterScorePlugin(noderesources.LeastAllocatedName,
+			func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
+				return noderesources.NewLeastAllocated(plArgs, fh, feature.Features{})
+			},
+			1),
+		st.RegisterScorePlugin(noderesources.BalancedAllocationName, func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
+			return noderesources.NewBalancedAllocation(plArgs, fh, feature.Features{})
+		}, 1),
 		st.RegisterScorePlugin(selectorspread.Name, selectorspread.New, 1),
 		st.RegisterPreScorePlugin(selectorspread.Name, selectorspread.New),
 		st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
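Because the constructors now take a third feature.Features argument, they no longer satisfy the framework's two-argument plugin-factory signature, hence the closures above. If the wrapping became repetitive it could be factored into a small adapter; a hypothetical helper (not part of this commit) might look like:

// withFeatures adapts a feature-aware constructor to the two-argument
// factory shape the registration helpers expect.
func withFeatures(
	newPlugin func(apiruntime.Object, framework.Handle, feature.Features) (framework.Plugin, error),
	fts feature.Features,
) func(apiruntime.Object, framework.Handle) (framework.Plugin, error) {
	return func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
		return newPlugin(plArgs, fh, fts)
	}
}

// Usage: st.RegisterScorePlugin(noderesources.LeastAllocatedName,
//     withFeatures(noderesources.NewLeastAllocated, feature.Features{}), 1)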

View File

@@ -25,10 +25,12 @@ import (
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/tools/cache"
 	v1helper "k8s.io/component-helpers/scheduling/corev1"
 	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
@@ -469,7 +471,7 @@ func preCheckForNode(nodeInfo *framework.NodeInfo) queue.PreEnqueueCheck {
 	// cases (e.g., node resizing), "pod" may still fail a check but preemption helps. We deliberately
 	// chose to ignore those cases as unschedulable pods will be re-queued eventually.
 	return func(pod *v1.Pod) bool {
-		if len(noderesources.Fits(pod, nodeInfo)) != 0 {
+		if len(noderesources.Fits(pod, nodeInfo, feature.DefaultFeatureGate.Enabled(features.PodOverhead))) != 0 {
 			return false
 		}

View File

@@ -30,6 +30,7 @@ import (
 	policy "k8s.io/api/policy/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	apiruntime "k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/informers"
 	clientsetfake "k8s.io/client-go/kubernetes/fake"
@@ -98,6 +99,10 @@ func getDefaultDefaultPreemptionArgs() *config.DefaultPreemptionArgs {
 	return dpa
 }
 
+var nodeResourcesFitFunc = func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
+	return noderesources.NewFit(plArgs, fh, feature.Features{})
+}
+
 func TestPostFilter(t *testing.T) {
 	onePodRes := map[v1.ResourceName]string{v1.ResourcePods: "1"}
 	nodeRes := map[v1.ResourceName]string{v1.ResourceCPU: "200m", v1.ResourceMemory: "400"}
@@ -270,7 +275,7 @@ func TestPostFilter(t *testing.T) {
 			// Register NodeResourceFit as the Filter & PreFilter plugin.
 			registeredPlugins := []st.RegisterPluginFunc{
 				st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 				st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
 			}
 			var extenders []framework.Extender
@@ -382,7 +387,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "a pod that fits on both nodes when lower priority pods are preempted",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
 			testPods: []*v1.Pod{
@@ -413,7 +418,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "a pod that would fit on the nodes, but other pods running are higher priority, no preemption would happen",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
 			testPods: []*v1.Pod{
@@ -429,7 +434,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "medium priority pod is preempted, but lower priority one stays as it is small",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
 			testPods: []*v1.Pod{
@@ -461,7 +466,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "mixed priority pods are preempted",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
 			testPods: []*v1.Pod{
@@ -492,7 +497,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "mixed priority pods are preempted, pick later StartTime one when priorities are equal",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
 			testPods: []*v1.Pod{
@@ -523,7 +528,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "pod with anti-affinity is preempted",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 				st.RegisterPluginAsExtensions(interpodaffinity.Name, func(plArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) {
 					return interpodaffinity.New(plArgs, fh, feature.Features{})
 				}, "Filter", "PreFilter"),
@@ -594,7 +599,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "get Unschedulable in the preemption phase when the filter plugins filtering the nodes",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
 			testPods: []*v1.Pod{
@@ -611,7 +616,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "preemption with violation of same pdb",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1"},
 			testPods: []*v1.Pod{
@@ -646,7 +651,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "preemption with violation of the pdb with pod whose eviction was processed, the victim doesn't belong to DisruptedPods",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1"},
 			testPods: []*v1.Pod{
@@ -681,7 +686,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "preemption with violation of the pdb with pod whose eviction was processed, the victim belongs to DisruptedPods",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1"},
 			testPods: []*v1.Pod{
@@ -716,7 +721,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "preemption with violation of the pdb with pod whose eviction was processed, the victim which belongs to DisruptedPods is treated as 'nonViolating'",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1"},
 			testPods: []*v1.Pod{
@@ -754,7 +759,7 @@ func TestDryRunPreemption(t *testing.T) {
 			name: "all nodes are possible candidates, but DefaultPreemptionArgs limits to 2",
 			args: &config.DefaultPreemptionArgs{MinCandidateNodesPercentage: 40, MinCandidateNodesAbsolute: 1},
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2", "node3", "node4", "node5"},
 			testPods: []*v1.Pod{
@@ -791,7 +796,7 @@ func TestDryRunPreemption(t *testing.T) {
 			name: "some nodes are not possible candidates, DefaultPreemptionArgs limits to 2",
 			args: &config.DefaultPreemptionArgs{MinCandidateNodesPercentage: 40, MinCandidateNodesAbsolute: 1},
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2", "node3", "node4", "node5"},
 			testPods: []*v1.Pod{
@@ -828,7 +833,7 @@ func TestDryRunPreemption(t *testing.T) {
 			name: "preemption offset across multiple scheduling cycles and wrap around",
 			args: &config.DefaultPreemptionArgs{MinCandidateNodesPercentage: 40, MinCandidateNodesAbsolute: 1},
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2", "node3", "node4", "node5"},
 			testPods: []*v1.Pod{
@@ -897,7 +902,7 @@ func TestDryRunPreemption(t *testing.T) {
 			name: "preemption looks past numCandidates until a non-PDB violating node is found",
 			args: &config.DefaultPreemptionArgs{MinCandidateNodesPercentage: 40, MinCandidateNodesAbsolute: 2},
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2", "node3", "node4", "node5"},
 			testPods: []*v1.Pod{
@@ -1067,7 +1072,7 @@ func TestSelectBestCandidate(t *testing.T) {
 	}{
 		{
 			name:           "a pod that fits on both nodes when lower priority pods are preempted",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames:      []string{"node1", "node2"},
 			pod:            st.MakePod().Name("p").UID("p").Priority(highPriority).Req(largeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1078,7 +1083,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name:           "node with min highest priority pod is picked",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames:      []string{"node1", "node2", "node3"},
 			pod:            st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1093,7 +1098,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name:           "when highest priorities are the same, minimum sum of priorities is picked",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames:      []string{"node1", "node2", "node3"},
 			pod:            st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1108,7 +1113,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name:           "when highest priority and sum are the same, minimum number of pods is picked",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames:      []string{"node1", "node2", "node3"},
 			pod:            st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1128,7 +1133,7 @@ func TestSelectBestCandidate(t *testing.T) {
 			// pickOneNodeForPreemption adjusts pod priorities when finding the sum of the victims. This
 			// test ensures that the logic works correctly.
 			name:           "sum of adjusted priorities is considered",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames:      []string{"node1", "node2", "node3"},
 			pod:            st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1145,7 +1150,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name:           "non-overlapping lowest high priority, sum priorities, and number of pods",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames:      []string{"node1", "node2", "node3", "node4"},
 			pod:            st.MakePod().Name("p").UID("p").Priority(veryHighPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1166,7 +1171,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name:           "same priority, same number of victims, different start time for each node's pod",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames:      []string{"node1", "node2", "node3"},
 			pod:            st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1181,7 +1186,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name:           "same priority, same number of victims, different start time for all pods",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames:      []string{"node1", "node2", "node3"},
 			pod:            st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1196,7 +1201,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name:           "different priority, same number of victims, different start time for all pods",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames:      []string{"node1", "node2", "node3"},
 			pod:            st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1459,7 +1464,7 @@ func TestPreempt(t *testing.T) {
 				st.MakePod().Name("p3.1").UID("p3.1").Node("node3").Priority(midPriority).Req(mediumRes).Obj(),
 			},
 			nodeNames:      []string{"node1", "node2", "node3"},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			expectedNode:   "node1",
 			expectedPods:   []string{"p1.1", "p1.2"},
 		},
@@ -1494,7 +1499,7 @@ func TestPreempt(t *testing.T) {
 				{Predicates: []st.FitPredicate{st.TruePredicateExtender}},
 				{Predicates: []st.FitPredicate{st.Node1PredicateExtender}},
 			},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			expectedNode:   "node1",
 			expectedPods:   []string{"p1.1", "p1.2"},
 		},
@@ -1510,7 +1515,7 @@ func TestPreempt(t *testing.T) {
 			extenders: []*st.FakeExtender{
 				{Predicates: []st.FitPredicate{st.FalsePredicateExtender}},
 			},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			expectedNode:   "",
 			expectedPods:   []string{},
 		},
@@ -1527,7 +1532,7 @@ func TestPreempt(t *testing.T) {
 				{Predicates: []st.FitPredicate{st.ErrorPredicateExtender}, Ignorable: true},
 				{Predicates: []st.FitPredicate{st.Node1PredicateExtender}},
 			},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			expectedNode:   "node1",
 			expectedPods:   []string{"p1.1", "p1.2"},
 		},
@@ -1544,7 +1549,7 @@ func TestPreempt(t *testing.T) {
 				{Predicates: []st.FitPredicate{st.Node1PredicateExtender}, UnInterested: true},
 				{Predicates: []st.FitPredicate{st.TruePredicateExtender}},
 			},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			//sum of priorities of all victims on node1 is larger than node2, node2 is chosen.
 			expectedNode: "node2",
 			expectedPods: []string{"p2.1"},
@@ -1559,7 +1564,7 @@ func TestPreempt(t *testing.T) {
 				st.MakePod().Name("p3.1").UID("p3.1").Namespace(v1.NamespaceDefault).Node("node3").Priority(midPriority).Req(mediumRes).Obj(),
 			},
 			nodeNames:      []string{"node1", "node2", "node3"},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			expectedNode:   "",
 			expectedPods:   nil,
 		},
@@ -1573,7 +1578,7 @@ func TestPreempt(t *testing.T) {
 				st.MakePod().Name("p3.1").UID("p3.1").Namespace(v1.NamespaceDefault).Node("node3").Priority(midPriority).Req(mediumRes).Obj(),
 			},
 			nodeNames:      []string{"node1", "node2", "node3"},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			expectedNode:   "node1",
 			expectedPods:   []string{"p1.1", "p1.2"},
 		},

View File

@@ -22,4 +22,6 @@ package feature
 type Features struct {
 	EnablePodAffinityNamespaceSelector bool
 	EnablePodDisruptionBudget          bool
+	EnablePodOverhead                  bool
+	EnableBalanceAttachedNodeVolumes   bool
 }
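These two new fields are populated exactly once, when the in-tree registry is assembled, and then threaded into the plugin constructors. Presumably the assignments mirror the existing fields (the first two lines below are shown verbatim in the truncated registry hunk at the end of this commit; the last two are inferred from it):

fts := plfeature.Features{
	EnablePodAffinityNamespaceSelector: feature.DefaultFeatureGate.Enabled(features.PodAffinityNamespaceSelector),
	EnablePodDisruptionBudget:          feature.DefaultFeatureGate.Enabled(features.PodDisruptionBudget),
	EnablePodOverhead:                  feature.DefaultFeatureGate.Enabled(features.PodOverhead),
	EnableBalanceAttachedNodeVolumes:   feature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes),
}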

View File

@@ -23,9 +23,8 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 )
 
 // BalancedAllocation is a score plugin that calculates the difference between the cpu and memory fraction
@@ -67,13 +66,15 @@ func (ba *BalancedAllocation) ScoreExtensions() framework.ScoreExtensions {
 }
 
 // NewBalancedAllocation initializes a new plugin and returns it.
-func NewBalancedAllocation(_ runtime.Object, h framework.Handle) (framework.Plugin, error) {
+func NewBalancedAllocation(_ runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
 	return &BalancedAllocation{
 		handle: h,
 		resourceAllocationScorer: resourceAllocationScorer{
-			BalancedAllocationName,
-			balancedResourceScorer,
-			defaultRequestedRatioResources,
+			Name:                             BalancedAllocationName,
+			scorer:                           balancedResourceScorer,
+			resourceToWeightMap:              defaultRequestedRatioResources,
+			enablePodOverhead:                fts.EnablePodOverhead,
+			enableBalanceAttachedNodeVolumes: fts.EnableBalanceAttachedNodeVolumes,
 		},
 	}, nil
 }
@@ -88,7 +89,8 @@ func balancedResourceScorer(requested, allocable resourceToValueMap, includeVolu
 		return 0
 	}
 
-	if includeVolumes && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && allocatableVolumes > 0 {
+	// includeVolumes is only true when BalanceAttachedNodeVolumes feature gate is enabled (see resource_allocation.go#score())
+	if includeVolumes && allocatableVolumes > 0 {
 		volumeFraction := float64(requestedVolumes) / float64(allocatableVolumes)
 		if volumeFraction >= 1 {
 			// if requested >= capacity, the corresponding host should never be preferred.

View File

@@ -28,6 +28,7 @@ import (
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )
@@ -390,7 +391,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
 			}
 		}
 		fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
-		p, _ := NewBalancedAllocation(nil, fh)
+		p, _ := NewBalancedAllocation(nil, fh, feature.Features{EnablePodOverhead: true, EnableBalanceAttachedNodeVolumes: true})
 		for i := range test.nodes {
 			hostResult, err := p.(framework.ScorePlugin).Score(context.Background(), nil, test.pod, test.nodes[i].Name)

View File

@@ -24,12 +24,11 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 )
 
 var _ framework.PreFilterPlugin = &Fit{}
@@ -49,6 +48,7 @@ const (
 type Fit struct {
 	ignoredResources      sets.String
 	ignoredResourceGroups sets.String
+	enablePodOverhead     bool
 }
 
 // preFilterState computed at PreFilter and used at Filter.
@@ -67,7 +67,7 @@ func (f *Fit) Name() string {
 }
 
 // NewFit initializes a new plugin and returns it.
-func NewFit(plArgs runtime.Object, _ framework.Handle) (framework.Plugin, error) {
+func NewFit(plArgs runtime.Object, _ framework.Handle, fts feature.Features) (framework.Plugin, error) {
 	args, ok := plArgs.(*config.NodeResourcesFitArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type NodeResourcesFitArgs, got %T", plArgs)
@@ -78,6 +78,7 @@ func NewFit(plArgs runtime.Object, _ framework.Handle) (framework.Plugin, error)
 	return &Fit{
 		ignoredResources:      sets.NewString(args.IgnoredResources...),
 		ignoredResourceGroups: sets.NewString(args.IgnoredResourceGroups...),
+		enablePodOverhead:     fts.EnablePodOverhead,
 	}, nil
 }
@@ -108,7 +109,7 @@ func NewFit(plArgs runtime.Object, _ framework.Handle) (framework.Plugin, error)
 // Memory: 1G
 //
 // Result: CPU: 3, Memory: 3G
-func computePodResourceRequest(pod *v1.Pod) *preFilterState {
+func computePodResourceRequest(pod *v1.Pod, enablePodOverhead bool) *preFilterState {
 	result := &preFilterState{}
 	for _, container := range pod.Spec.Containers {
 		result.Add(container.Resources.Requests)
@@ -120,7 +121,7 @@ func computePodResourceRequest(pod *v1.Pod) *preFilterState {
 	}
 
 	// If Overhead is being utilized, add to the total requests for the pod
-	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+	if pod.Spec.Overhead != nil && enablePodOverhead {
 		result.Add(pod.Spec.Overhead)
 	}
@@ -129,7 +130,7 @@ func computePodResourceRequest(pod *v1.Pod) *preFilterState {
 // PreFilter invoked at the prefilter extension point.
 func (f *Fit) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) *framework.Status {
-	cycleState.Write(preFilterStateKey, computePodResourceRequest(pod))
+	cycleState.Write(preFilterStateKey, computePodResourceRequest(pod, f.enablePodOverhead))
 	return nil
 }
@@ -198,8 +199,8 @@ type InsufficientResource struct {
 }
 
 // Fits checks if node have enough resources to host the pod.
-func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) []InsufficientResource {
-	return fitsRequest(computePodResourceRequest(pod), nodeInfo, nil, nil)
+func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo, enablePodOverhead bool) []InsufficientResource {
+	return fitsRequest(computePodResourceRequest(pod, enablePodOverhead), nodeInfo, nil, nil)
 }
 
 func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignoredExtendedResources, ignoredResourceGroups sets.String) []InsufficientResource {
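Callers outside the plugin package (the kubelet predicate and the scheduler pre-enqueue check earlier in this commit) now resolve the PodOverhead gate themselves and pass it into the exported Fits helper. A representative call site, matching the pattern in the first two files of this diff:

insufficient := noderesources.Fits(pod, nodeInfo,
	feature.DefaultFeatureGate.Enabled(features.PodOverhead))
for _, r := range insufficient {
	// each entry names the offending resource and the requested amount
	klog.Infof("pod %s does not fit: insufficient %v", pod.Name, r.ResourceName)
}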

View File

@@ -27,9 +27,9 @@ import (
 	"k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/component-base/featuregate"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 )
 
 var (
@@ -414,7 +414,7 @@ func TestEnoughRequests(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
 			test.nodeInfo.SetNode(&node)
 
-			p, err := NewFit(&test.args, nil)
+			p, err := NewFit(&test.args, nil, plfeature.Features{EnablePodOverhead: true})
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -429,7 +429,7 @@ func TestEnoughRequests(t *testing.T) {
 				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
 			}
 
-			gotInsufficientResources := fitsRequest(computePodResourceRequest(test.pod), test.nodeInfo, p.(*Fit).ignoredResources, p.(*Fit).ignoredResourceGroups)
+			gotInsufficientResources := fitsRequest(computePodResourceRequest(test.pod, true), test.nodeInfo, p.(*Fit).ignoredResources, p.(*Fit).ignoredResourceGroups)
 			if !reflect.DeepEqual(gotInsufficientResources, test.wantInsufficientResources) {
 				t.Errorf("insufficient resources do not match: %+v, want: %v", gotInsufficientResources, test.wantInsufficientResources)
 			}
@@ -442,7 +442,7 @@ func TestPreFilterDisabled(t *testing.T) {
 	nodeInfo := framework.NewNodeInfo()
 	node := v1.Node{}
 	nodeInfo.SetNode(&node)
-	p, err := NewFit(&config.NodeResourcesFitArgs{}, nil)
+	p, err := NewFit(&config.NodeResourcesFitArgs{}, nil, plfeature.Features{EnablePodOverhead: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -492,7 +492,7 @@ func TestNotEnoughRequests(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
 			test.nodeInfo.SetNode(&node)
 
-			p, err := NewFit(&config.NodeResourcesFitArgs{}, nil)
+			p, err := NewFit(&config.NodeResourcesFitArgs{}, nil, plfeature.Features{EnablePodOverhead: true})
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -545,7 +545,7 @@ func TestStorageRequests(t *testing.T) {
 				newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2})),
 			name: "ephemeral local storage request is ignored due to disabled feature gate",
 			features: map[featuregate.Feature]bool{
-				features.LocalStorageCapacityIsolation: false,
+				"LocalStorageCapacityIsolation": false,
 			},
 		},
 		{
@@ -564,7 +564,7 @@ func TestStorageRequests(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
 			test.nodeInfo.SetNode(&node)
 
-			p, err := NewFit(&config.NodeResourcesFitArgs{}, nil)
+			p, err := NewFit(&config.NodeResourcesFitArgs{}, nil, plfeature.Features{EnablePodOverhead: true})
 			if err != nil {
 				t.Fatal(err)
 			}

View File

@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 )
 
 // LeastAllocated is a score plugin that favors nodes with fewer allocation requested resources based on requested resources.
@@ -65,7 +66,7 @@ func (la *LeastAllocated) ScoreExtensions() framework.ScoreExtensions {
 }
 
 // NewLeastAllocated initializes a new plugin and returns it.
-func NewLeastAllocated(laArgs runtime.Object, h framework.Handle) (framework.Plugin, error) {
+func NewLeastAllocated(laArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
 	args, ok := laArgs.(*config.NodeResourcesLeastAllocatedArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type NodeResourcesLeastAllocatedArgs, got %T", laArgs)
@@ -85,6 +86,7 @@ func NewLeastAllocated(laArgs runtime.Object, h framework.Handle) (framework.Plu
 			Name:                LeastAllocatedName,
 			scorer:              leastResourceScorer(resToWeightMap),
 			resourceToWeightMap: resToWeightMap,
+			enablePodOverhead:   fts.EnablePodOverhead,
 		},
 	}, nil
 }

View File

@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )
@@ -313,7 +314,7 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			snapshot := cache.NewSnapshot(test.pods, test.nodes)
 			fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
-			p, err := NewLeastAllocated(&test.args, fh)
+			p, err := NewLeastAllocated(&test.args, fh, feature.Features{EnablePodOverhead: true})
 			if test.wantErr != nil {
 				if err != nil {

View File

@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 )
 
 // MostAllocated is a score plugin that favors nodes with high allocation based on requested resources.
@@ -63,7 +64,7 @@ func (ma *MostAllocated) ScoreExtensions() framework.ScoreExtensions {
 }
 
 // NewMostAllocated initializes a new plugin and returns it.
-func NewMostAllocated(maArgs runtime.Object, h framework.Handle) (framework.Plugin, error) {
+func NewMostAllocated(maArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
 	args, ok := maArgs.(*config.NodeResourcesMostAllocatedArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type NodeResourcesMostAllocatedArgs, got %T", args)
@@ -83,6 +84,7 @@ func NewMostAllocated(maArgs runtime.Object, h framework.Handle) (framework.Plug
 			Name:                MostAllocatedName,
 			scorer:              mostResourceScorer(resToWeightMap),
 			resourceToWeightMap: resToWeightMap,
+			enablePodOverhead:   fts.EnablePodOverhead,
 		},
 	}, nil
 }

View File

@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )
@@ -273,7 +274,7 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			snapshot := cache.NewSnapshot(test.pods, test.nodes)
 			fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
-			p, err := NewMostAllocated(&test.args, fh)
+			p, err := NewMostAllocated(&test.args, fh, feature.Features{EnablePodOverhead: true})
 			if test.wantErr != nil {
 				if err != nil {

View File

@@ -26,6 +26,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
 )
@@ -36,7 +37,7 @@ const (
 )
 
 // NewRequestedToCapacityRatio initializes a new plugin and returns it.
-func NewRequestedToCapacityRatio(plArgs runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func NewRequestedToCapacityRatio(plArgs runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) {
 	args, err := getRequestedToCapacityRatioArgs(plArgs)
 	if err != nil {
 		return nil, err
@@ -68,9 +69,10 @@ func NewRequestedToCapacityRatio(plArgs runtime.Object, handle framework.Handle)
 	return &RequestedToCapacityRatio{
 		handle: handle,
 		resourceAllocationScorer: resourceAllocationScorer{
-			RequestedToCapacityRatioName,
-			buildRequestedToCapacityRatioScorerFunction(shape, resourceToWeightMap),
-			resourceToWeightMap,
+			Name:                RequestedToCapacityRatioName,
+			scorer:              buildRequestedToCapacityRatioScorerFunction(shape, resourceToWeightMap),
+			resourceToWeightMap: resourceToWeightMap,
+			enablePodOverhead:   fts.EnablePodOverhead,
 		},
 	}, nil
 }

View File

@@ -27,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
 	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
@@ -80,7 +81,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
 			{Name: "cpu", Weight: 1},
 		},
 	}
-	p, err := NewRequestedToCapacityRatio(&args, fh)
+	p, err := NewRequestedToCapacityRatio(&args, fh, feature.Features{EnablePodOverhead: true})
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -333,7 +334,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
 			{Name: "intel.com/foo", Weight: 1},
 		},
 	}
-	p, err := NewRequestedToCapacityRatio(&args, fh)
+	p, err := NewRequestedToCapacityRatio(&args, fh, feature.Features{EnablePodOverhead: true})
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -577,7 +578,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 			{Name: "intel.com/bar", Weight: 5},
 		},
 	}
-	p, err := NewRequestedToCapacityRatio(&args, fh)
+	p, err := NewRequestedToCapacityRatio(&args, fh, feature.Features{EnablePodOverhead: true})
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}

View File

@ -18,9 +18,7 @@ package noderesources
import ( import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/kubernetes/pkg/scheduler/framework"
schedutil "k8s.io/kubernetes/pkg/scheduler/util" schedutil "k8s.io/kubernetes/pkg/scheduler/util"
) )
@ -36,6 +34,9 @@ type resourceAllocationScorer struct {
Name string Name string
scorer func(requested, allocable resourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 scorer func(requested, allocable resourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64
resourceToWeightMap resourceToWeightMap resourceToWeightMap resourceToWeightMap
enablePodOverhead bool
enableBalanceAttachedNodeVolumes bool
} }
// resourceToValueMap contains resource name and score. // resourceToValueMap contains resource name and score.
@ -55,18 +56,18 @@ func (r *resourceAllocationScorer) score(
requested := make(resourceToValueMap, len(r.resourceToWeightMap)) requested := make(resourceToValueMap, len(r.resourceToWeightMap))
allocatable := make(resourceToValueMap, len(r.resourceToWeightMap)) allocatable := make(resourceToValueMap, len(r.resourceToWeightMap))
for resource := range r.resourceToWeightMap { for resource := range r.resourceToWeightMap {
allocatable[resource], requested[resource] = calculateResourceAllocatableRequest(nodeInfo, pod, resource) allocatable[resource], requested[resource] = calculateResourceAllocatableRequest(nodeInfo, pod, resource, r.enablePodOverhead)
} }
var score int64 var score int64
// Check if the pod has volumes and this could be added to scorer function for balanced resource allocation. // Check if the pod has volumes and this could be added to scorer function for balanced resource allocation.
if len(pod.Spec.Volumes) > 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil { if len(pod.Spec.Volumes) > 0 && r.enableBalanceAttachedNodeVolumes && nodeInfo.TransientInfo != nil {
score = r.scorer(requested, allocatable, true, nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount) score = r.scorer(requested, allocatable, true, nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount)
} else { } else {
score = r.scorer(requested, allocatable, false, 0, 0) score = r.scorer(requested, allocatable, false, 0, 0)
} }
if klog.V(10).Enabled() { if klog.V(10).Enabled() {
if len(pod.Spec.Volumes) > 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil { if len(pod.Spec.Volumes) > 0 && r.enableBalanceAttachedNodeVolumes && nodeInfo.TransientInfo != nil {
klog.Infof( klog.Infof(
"%v -> %v: %v, map of allocatable resources %v, map of requested resources %v , allocatable volumes %d, requested volumes %d, score %d", "%v -> %v: %v, map of allocatable resources %v, map of requested resources %v , allocatable volumes %d, requested volumes %d, score %d",
pod.Name, node.Name, r.Name, pod.Name, node.Name, r.Name,
@@ -88,8 +89,8 @@ func (r *resourceAllocationScorer) score(
 }

 // calculateResourceAllocatableRequest returns resources Allocatable and Requested values
-func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) {
-	podRequest := calculatePodResourceRequest(pod, resource)
+func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName, enablePodOverhead bool) (int64, int64) {
+	podRequest := calculatePodResourceRequest(pod, resource, enablePodOverhead)
 	switch resource {
 	case v1.ResourceCPU:
 		return nodeInfo.Allocatable.MilliCPU, (nodeInfo.NonZeroRequested.MilliCPU + podRequest)
@@ -114,7 +115,7 @@ func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) {
 // calculatePodResourceRequest returns the total non-zero requests. If Overhead is defined for the pod and the
 // PodOverhead feature is enabled, the Overhead is added to the result.
 // podResourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead
-func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
+func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName, enablePodOverhead bool) int64 {
 	var podRequest int64
 	for i := range pod.Spec.Containers {
 		container := &pod.Spec.Containers[i]
@@ -131,7 +132,7 @@ func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
 	}

 	// If Overhead is being utilized, add to the total requests for the pod
-	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+	if pod.Spec.Overhead != nil && enablePodOverhead {
 		if quantity, found := pod.Spec.Overhead[resource]; found {
 			podRequest += quantity.Value()
 		}
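The podResourceRequest comment above compresses the whole rule; a worked example with made-up values and PodOverhead enabled: two containers requesting 200m and 300m CPU sum to 500m, a single 600m init container dominates that sum (init containers run one at a time, so they contribute a max, not a sum), and 100m of overhead is added on top, giving max(500m, 600m) + 100m = 700m. The same arithmetic as a self-contained sketch:

// Worked example of the request rule, in milliCPU (all values made up):
//   sum(containers)      = 200 + 300 = 500
//   max(init containers) = 600   (init containers run serially)
//   overhead             = 100   (counted only when enablePodOverhead)
func examplePodRequestMilliCPU(enablePodOverhead bool) int64 {
	request := int64(200 + 300) // sum of regular container requests
	if init := int64(600); init > request {
		request = init // a large init container dominates the sum
	}
	if enablePodOverhead {
		request += 100 // pod overhead, gated exactly like the code above
	}
	return request // 700 with overhead enabled, 600 without
}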

View File

@@ -18,7 +18,7 @@ package plugins
 import (
 	apiruntime "k8s.io/apimachinery/pkg/runtime"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
@@ -50,33 +50,45 @@ import (
 // through the WithFrameworkOutOfTreeRegistry option.
 func NewInTreeRegistry() runtime.Registry {
 	fts := plfeature.Features{
-		EnablePodAffinityNamespaceSelector: utilfeature.DefaultFeatureGate.Enabled(features.PodAffinityNamespaceSelector),
-		EnablePodDisruptionBudget:          utilfeature.DefaultFeatureGate.Enabled(features.PodDisruptionBudget),
+		EnablePodAffinityNamespaceSelector: feature.DefaultFeatureGate.Enabled(features.PodAffinityNamespaceSelector),
+		EnablePodDisruptionBudget:          feature.DefaultFeatureGate.Enabled(features.PodDisruptionBudget),
+		EnablePodOverhead:                  feature.DefaultFeatureGate.Enabled(features.PodOverhead),
+		EnableBalanceAttachedNodeVolumes:   feature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes),
 	}

 	return runtime.Registry{
 		selectorspread.Name:      selectorspread.New,
 		imagelocality.Name:       imagelocality.New,
 		tainttoleration.Name:     tainttoleration.New,
 		nodename.Name:            nodename.New,
 		nodeports.Name:           nodeports.New,
 		nodepreferavoidpods.Name: nodepreferavoidpods.New,
 		nodeaffinity.Name:        nodeaffinity.New,
 		podtopologyspread.Name:   podtopologyspread.New,
 		nodeunschedulable.Name:   nodeunschedulable.New,
-		noderesources.FitName:                      noderesources.NewFit,
-		noderesources.BalancedAllocationName:       noderesources.NewBalancedAllocation,
-		noderesources.MostAllocatedName:            noderesources.NewMostAllocated,
-		noderesources.LeastAllocatedName:           noderesources.NewLeastAllocated,
-		noderesources.RequestedToCapacityRatioName: noderesources.NewRequestedToCapacityRatio,
+		noderesources.FitName: func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
+			return noderesources.NewFit(plArgs, fh, fts)
+		},
+		noderesources.BalancedAllocationName: func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
+			return noderesources.NewBalancedAllocation(plArgs, fh, fts)
+		},
+		noderesources.MostAllocatedName: func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
+			return noderesources.NewMostAllocated(plArgs, fh, fts)
+		},
+		noderesources.LeastAllocatedName: func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
+			return noderesources.NewLeastAllocated(plArgs, fh, fts)
+		},
+		noderesources.RequestedToCapacityRatioName: func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
+			return noderesources.NewRequestedToCapacityRatio(plArgs, fh, fts)
+		},
 		volumebinding.Name:             volumebinding.New,
 		volumerestrictions.Name:        volumerestrictions.New,
 		volumezone.Name:                volumezone.New,
 		nodevolumelimits.CSIName:       nodevolumelimits.NewCSI,
 		nodevolumelimits.EBSName:       nodevolumelimits.NewEBS,
 		nodevolumelimits.GCEPDName:     nodevolumelimits.NewGCEPD,
 		nodevolumelimits.AzureDiskName: nodevolumelimits.NewAzureDisk,
 		nodevolumelimits.CinderName:    nodevolumelimits.NewCinder,
 		interpodaffinity.Name: func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
 			return interpodaffinity.New(plArgs, fh, fts)
 		},
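Wrapping the five NodeResources constructors in closures keeps the registry's runtime.PluginFactory signature, (apiruntime.Object, framework.Handle) (framework.Plugin, error), unchanged while letting each factory capture fts once at startup. A registry supplied through WithFrameworkOutOfTreeRegistry could inject flags the same way; in this sketch everything named "my*" is invented for illustration, only the factory signature is fixed by the framework:

// Hypothetical out-of-tree registry using the same injection pattern.
func myOutOfTreeRegistry(fts plfeature.Features) runtime.Registry {
	return runtime.Registry{
		"MyPlugin": func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
			// The closure hands the captured flag to the plugin constructor.
			return newMyPlugin(plArgs, fh, fts.EnablePodOverhead)
		},
	}
}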

View File

@@ -37,6 +37,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	apiruntime "k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -51,6 +52,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/core"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
@@ -770,7 +772,9 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 	fns := []st.RegisterPluginFunc{
 		st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
 		st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
-		st.RegisterPluginAsExtensions(noderesources.FitName, noderesources.NewFit, "Filter", "PreFilter"),
+		st.RegisterPluginAsExtensions(noderesources.FitName, func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
+			return noderesources.NewFit(plArgs, fh, feature.Features{})
+		}, "Filter", "PreFilter"),
 	}
 	scheduler, _, errChan := setupTestScheduler(queuedPodStore, scache, informerFactory, nil, fns...)
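Passing feature.Features{} pins every flag off for this test, so its outcome no longer depends on the process-wide DefaultFeatureGate. A test that wanted overhead accounting could opt in explicitly; an illustrative variant of the registration above, with only the EnablePodOverhead value changed:

		// Illustrative: opting a single test into PodOverhead without
		// touching the global feature gate.
		st.RegisterPluginAsExtensions(noderesources.FitName, func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
			return noderesources.NewFit(plArgs, fh, feature.Features{EnablePodOverhead: true})
		}, "Filter", "PreFilter"),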