Merge pull request #106747 from ahg-g/ahg-test
Added an integration test for NodeResourcesFit scoring
Commit d7f8234b6d
@@ -120,7 +120,7 @@ func (c *Configurator) create() (*Scheduler, error) {
 		prof := &c.profiles[i]
 		var found = false
 		for k := range prof.PluginConfig {
-			if prof.PluginConfig[k].Name == noderesources.FitName {
+			if prof.PluginConfig[k].Name == noderesources.Name {
 				// Update the existing args
 				pc := &prof.PluginConfig[k]
 				args, ok := pc.Args.(*schedulerapi.NodeResourcesFitArgs)
@@ -341,7 +341,7 @@ func TestPostFilter(t *testing.T) {
 	// Register NodeResourceFit as the Filter & PreFilter plugin.
 	registeredPlugins := []st.RegisterPluginFunc{
 		st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
-		st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+		st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 		st.RegisterPluginAsExtensions("test-plugin", newTestPlugin, "PreFilter"),
 		st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
 	}
@@ -464,7 +464,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "a pod that fits on both nodes when lower priority pods are preempted",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
 			testPods: []*v1.Pod{
@@ -495,7 +495,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "a pod that would fit on the nodes, but other pods running are higher priority, no preemption would happen",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
 			testPods: []*v1.Pod{
@@ -511,7 +511,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "medium priority pod is preempted, but lower priority one stays as it is small",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
 			testPods: []*v1.Pod{
@@ -543,7 +543,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "mixed priority pods are preempted",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
 			testPods: []*v1.Pod{
@@ -574,7 +574,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "mixed priority pods are preempted, pick later StartTime one when priorities are equal",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
 			testPods: []*v1.Pod{
@@ -605,7 +605,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "pod with anti-affinity is preempted",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 				st.RegisterPluginAsExtensions(interpodaffinity.Name, frameworkruntime.FactoryAdapter(feature.Features{}, interpodaffinity.New), "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
@@ -675,7 +675,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "get Unschedulable in the preemption phase when the filter plugins filtering the nodes",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2"},
 			testPods: []*v1.Pod{
@@ -692,7 +692,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "preemption with violation of same pdb",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1"},
 			testPods: []*v1.Pod{
@@ -727,7 +727,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "preemption with violation of the pdb with pod whose eviction was processed, the victim doesn't belong to DisruptedPods",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1"},
 			testPods: []*v1.Pod{
@@ -762,7 +762,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "preemption with violation of the pdb with pod whose eviction was processed, the victim belongs to DisruptedPods",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1"},
 			testPods: []*v1.Pod{
@@ -797,7 +797,7 @@ func TestDryRunPreemption(t *testing.T) {
 		{
 			name: "preemption with violation of the pdb with pod whose eviction was processed, the victim which belongs to DisruptedPods is treated as 'nonViolating'",
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1"},
 			testPods: []*v1.Pod{
@@ -835,7 +835,7 @@ func TestDryRunPreemption(t *testing.T) {
 			name: "all nodes are possible candidates, but DefaultPreemptionArgs limits to 2",
 			args: &config.DefaultPreemptionArgs{MinCandidateNodesPercentage: 40, MinCandidateNodesAbsolute: 1},
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2", "node3", "node4", "node5"},
 			testPods: []*v1.Pod{
@@ -872,7 +872,7 @@ func TestDryRunPreemption(t *testing.T) {
 			name: "some nodes are not possible candidates, DefaultPreemptionArgs limits to 2",
 			args: &config.DefaultPreemptionArgs{MinCandidateNodesPercentage: 40, MinCandidateNodesAbsolute: 1},
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2", "node3", "node4", "node5"},
 			testPods: []*v1.Pod{
@@ -909,7 +909,7 @@ func TestDryRunPreemption(t *testing.T) {
 			name: "preemption offset across multiple scheduling cycles and wrap around",
 			args: &config.DefaultPreemptionArgs{MinCandidateNodesPercentage: 40, MinCandidateNodesAbsolute: 1},
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2", "node3", "node4", "node5"},
 			testPods: []*v1.Pod{
@@ -978,7 +978,7 @@ func TestDryRunPreemption(t *testing.T) {
 			name: "preemption looks past numCandidates until a non-PDB violating node is found",
 			args: &config.DefaultPreemptionArgs{MinCandidateNodesPercentage: 40, MinCandidateNodesAbsolute: 2},
 			registerPlugins: []st.RegisterPluginFunc{
-				st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+				st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			},
 			nodeNames: []string{"node1", "node2", "node3", "node4", "node5"},
 			testPods: []*v1.Pod{
@@ -1174,7 +1174,7 @@ func TestSelectBestCandidate(t *testing.T) {
 	}{
 		{
 			name: "a pod that fits on both nodes when lower priority pods are preempted",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames: []string{"node1", "node2"},
 			pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(largeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1185,7 +1185,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name: "node with min highest priority pod is picked",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames: []string{"node1", "node2", "node3"},
 			pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1200,7 +1200,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name: "when highest priorities are the same, minimum sum of priorities is picked",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames: []string{"node1", "node2", "node3"},
 			pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1215,7 +1215,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name: "when highest priority and sum are the same, minimum number of pods is picked",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames: []string{"node1", "node2", "node3"},
 			pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1235,7 +1235,7 @@ func TestSelectBestCandidate(t *testing.T) {
 			// pickOneNodeForPreemption adjusts pod priorities when finding the sum of the victims. This
 			// test ensures that the logic works correctly.
 			name: "sum of adjusted priorities is considered",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames: []string{"node1", "node2", "node3"},
 			pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1252,7 +1252,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name: "non-overlapping lowest high priority, sum priorities, and number of pods",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames: []string{"node1", "node2", "node3", "node4"},
 			pod: st.MakePod().Name("p").UID("p").Priority(veryHighPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1273,7 +1273,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name: "same priority, same number of victims, different start time for each node's pod",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames: []string{"node1", "node2", "node3"},
 			pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1288,7 +1288,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name: "same priority, same number of victims, different start time for all pods",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames: []string{"node1", "node2", "node3"},
 			pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1303,7 +1303,7 @@ func TestSelectBestCandidate(t *testing.T) {
 		},
 		{
 			name: "different priority, same number of victims, different start time for all pods",
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			nodeNames: []string{"node1", "node2", "node3"},
 			pod: st.MakePod().Name("p").UID("p").Priority(highPriority).Req(veryLargeRes).Obj(),
 			pods: []*v1.Pod{
@@ -1478,7 +1478,7 @@ func TestPreempt(t *testing.T) {
 				st.MakePod().Name("p3.1").UID("p3.1").Node("node3").Priority(midPriority).Req(mediumRes).Obj(),
 			},
 			nodeNames: []string{"node1", "node2", "node3"},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			expectedNode: "node1",
 			expectedPods: []string{"p1.1", "p1.2"},
 		},
@@ -1513,7 +1513,7 @@ func TestPreempt(t *testing.T) {
 				{Predicates: []st.FitPredicate{st.TruePredicateExtender}},
 				{Predicates: []st.FitPredicate{st.Node1PredicateExtender}},
 			},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			expectedNode: "node1",
 			expectedPods: []string{"p1.1", "p1.2"},
 		},
@@ -1529,7 +1529,7 @@ func TestPreempt(t *testing.T) {
 			extenders: []*st.FakeExtender{
 				{Predicates: []st.FitPredicate{st.FalsePredicateExtender}},
 			},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			expectedNode: "",
 			expectedPods: []string{},
 		},
@@ -1546,7 +1546,7 @@ func TestPreempt(t *testing.T) {
 				{Predicates: []st.FitPredicate{st.ErrorPredicateExtender}, Ignorable: true},
 				{Predicates: []st.FitPredicate{st.Node1PredicateExtender}},
 			},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			expectedNode: "node1",
 			expectedPods: []string{"p1.1", "p1.2"},
 		},
@@ -1563,7 +1563,7 @@ func TestPreempt(t *testing.T) {
 				{Predicates: []st.FitPredicate{st.Node1PredicateExtender}, UnInterested: true},
 				{Predicates: []st.FitPredicate{st.TruePredicateExtender}},
 			},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			//sum of priorities of all victims on node1 is larger than node2, node2 is chosen.
 			expectedNode: "node2",
 			expectedPods: []string{"p2.1"},
@@ -1578,7 +1578,7 @@ func TestPreempt(t *testing.T) {
 				st.MakePod().Name("p3.1").UID("p3.1").Namespace(v1.NamespaceDefault).Node("node3").Priority(midPriority).Req(mediumRes).Obj(),
 			},
 			nodeNames: []string{"node1", "node2", "node3"},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			expectedNode: "",
 			expectedPods: nil,
 		},
@@ -1592,7 +1592,7 @@ func TestPreempt(t *testing.T) {
 				st.MakePod().Name("p3.1").UID("p3.1").Namespace(v1.NamespaceDefault).Node("node3").Priority(midPriority).Req(mediumRes).Obj(),
 			},
 			nodeNames: []string{"node1", "node2", "node3"},
-			registerPlugin: st.RegisterPluginAsExtensions(noderesources.FitName, nodeResourcesFitFunc, "Filter", "PreFilter"),
+			registerPlugin: st.RegisterPluginAsExtensions(noderesources.Name, nodeResourcesFitFunc, "Filter", "PreFilter"),
 			expectedNode: "node1",
 			expectedPods: []string{"p1.1", "p1.2"},
 		},
@@ -38,12 +38,12 @@ var _ framework.EnqueueExtensions = &Fit{}
 var _ framework.ScorePlugin = &Fit{}
 
 const (
-	// FitName is the name of the plugin used in the plugin registry and configurations.
-	FitName = names.NodeResourcesFit
+	// Name is the name of the plugin used in the plugin registry and configurations.
+	Name = names.NodeResourcesFit
 
 	// preFilterStateKey is the key in CycleState to NodeResourcesFit pre-computed data.
 	// Using the name of the plugin will likely help us avoid collisions with other plugins.
-	preFilterStateKey = "PreFilter" + FitName
+	preFilterStateKey = "PreFilter" + Name
 )
 
 // nodeResourceStrategyTypeMap maps strategy to scorer implementation
@@ -100,7 +100,7 @@ func (s *preFilterState) Clone() framework.StateData {
 
 // Name returns name of the plugin. It is used in logs, etc.
 func (f *Fit) Name() string {
-	return FitName
+	return Name
 }
 
 // NewFit initializes a new plugin and returns it.
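Not part of the upstream diff, but worth a note on the preFilterStateKey constant above: the plugin stashes its PreFilter results in the framework's per-cycle CycleState, and prefixing the key with the plugin name is what keeps one plugin's entry from clobbering another's. The following self-contained Go sketch illustrates the pattern with simplified stand-in types (CycleState, StateData, and preFilterState here are toy versions, not the scheduler framework's real implementations):

package main

import "fmt"

// StateData loosely mirrors framework.StateData: anything a plugin stores per cycle.
type StateData interface{ Clone() StateData }

// CycleState is a toy stand-in for the scheduler framework's CycleState, the
// per-scheduling-cycle key/value store that all plugins share.
type CycleState struct{ m map[string]StateData }

func NewCycleState() *CycleState { return &CycleState{m: map[string]StateData{}} }

func (c *CycleState) Write(key string, val StateData) { c.m[key] = val }

func (c *CycleState) Read(key string) (StateData, bool) {
	v, ok := c.m[key]
	return v, ok
}

// Name plays the role of the renamed constant; the real one is names.NodeResourcesFit.
const Name = "NodeResourcesFit"

// preFilterStateKey is namespaced by the plugin name, so another plugin that also
// stores PreFilter results under its own name cannot overwrite this entry.
const preFilterStateKey = "PreFilter" + Name

// preFilterState is a toy version of the plugin's pre-computed data.
type preFilterState struct{ milliCPU int64 }

func (s *preFilterState) Clone() StateData { return &preFilterState{milliCPU: s.milliCPU} }

func main() {
	cs := NewCycleState()
	cs.Write(preFilterStateKey, &preFilterState{milliCPU: 500})
	if v, ok := cs.Read(preFilterStateKey); ok {
		fmt.Printf("%s -> %+v\n", preFilterStateKey, v) // PreFilterNodeResourcesFit -> &{milliCPU:500}
	}
}

Since both the old FitName and the new Name resolve to names.NodeResourcesFit, the stored key is "PreFilterNodeResourcesFit" either way; the rename only changes how callers spell the constant.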
@@ -62,7 +62,7 @@ func NewInTreeRegistry() runtime.Registry {
 		nodeaffinity.Name: nodeaffinity.New,
 		podtopologyspread.Name: podtopologyspread.New,
 		nodeunschedulable.Name: nodeunschedulable.New,
-		noderesources.FitName: runtime.FactoryAdapter(fts, noderesources.NewFit),
+		noderesources.Name: runtime.FactoryAdapter(fts, noderesources.NewFit),
 		noderesources.BalancedAllocationName: runtime.FactoryAdapter(fts, noderesources.NewBalancedAllocation),
 		volumebinding.Name: runtime.FactoryAdapter(fts, volumebinding.New),
 		volumerestrictions.Name: runtime.FactoryAdapter(fts, volumerestrictions.New),
@@ -1312,7 +1312,7 @@ func TestZeroRequest(t *testing.T) {
 	fts := feature.Features{}
 	pluginRegistrations := []st.RegisterPluginFunc{
 		st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
-		st.RegisterScorePlugin(noderesources.FitName, frameworkruntime.FactoryAdapter(fts, noderesources.NewFit), 1),
+		st.RegisterScorePlugin(noderesources.Name, frameworkruntime.FactoryAdapter(fts, noderesources.NewFit), 1),
 		st.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(fts, noderesources.NewBalancedAllocation), 1),
 		st.RegisterScorePlugin(selectorspread.Name, selectorspread.New, 1),
 		st.RegisterPreScorePlugin(selectorspread.Name, selectorspread.New),
@@ -858,12 +858,12 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 			framework.Unschedulable,
 			fmt.Sprintf("Insufficient %v", v1.ResourceCPU),
 			fmt.Sprintf("Insufficient %v", v1.ResourceMemory),
-		).WithFailedPlugin(noderesources.FitName)
+		).WithFailedPlugin(noderesources.Name)
 	}
 	fns := []st.RegisterPluginFunc{
 		st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
 		st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
-		st.RegisterPluginAsExtensions(noderesources.FitName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewFit), "Filter", "PreFilter"),
+		st.RegisterPluginAsExtensions(noderesources.Name, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewFit), "Filter", "PreFilter"),
 	}
 	scheduler, _, errChan := setupTestScheduler(queuedPodStore, scache, informerFactory, nil, fns...)
 
@@ -879,7 +879,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 		NumAllNodes: len(nodes),
 		Diagnosis: framework.Diagnosis{
 			NodeToStatusMap: failedNodeStatues,
-			UnschedulablePlugins: sets.NewString(noderesources.FitName),
+			UnschedulablePlugins: sets.NewString(noderesources.Name),
 		},
 	}
 	if len(fmt.Sprint(expectErr)) > 150 {
@@ -431,6 +431,7 @@ func (p *PodWrapper) Req(resMap map[v1.ResourceName]string) *PodWrapper {
 		Image: imageutils.GetPauseImageName(),
 		Resources: v1.ResourceRequirements{
 			Requests: res,
+			Limits: res,
 		},
 	})
 	return p
@@ -1965,10 +1965,10 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
 					// would fail first and exit the Filter phase.
 					Enabled: []v1beta3.Plugin{
 						{Name: filterPluginName},
-						{Name: noderesources.FitName},
+						{Name: noderesources.Name},
 					},
 					Disabled: []v1beta3.Plugin{
-						{Name: noderesources.FitName},
+						{Name: noderesources.Name},
 					},
 				},
 			},
@@ -22,9 +22,10 @@ import (
 	"strings"
 	"testing"
 
-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -36,6 +37,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	testutils "k8s.io/kubernetes/test/integration/util"
@@ -43,6 +45,10 @@ import (
 	"k8s.io/utils/pointer"
 )
 
+const (
+	resourceGPU = "example.com/gpu"
+)
+
 // This file tests the scheduler priority functions.
 func initTestSchedulerForPriorityTest(t *testing.T, scorePluginName string) *testutils.TestContext {
 	cfg := configtesting.V1beta3ToInternalWithDefaults(t, v1beta3.KubeSchedulerConfiguration{
@@ -70,9 +76,108 @@ func initTestSchedulerForPriorityTest(t *testing.T, scorePluginName string) *testutils.TestContext {
 	return testCtx
 }
 
-// TestNodeAffinity verifies that scheduler's node affinity priority function
+func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext {
+	cfg := configtesting.V1beta3ToInternalWithDefaults(t, v1beta3.KubeSchedulerConfiguration{
+		Profiles: []v1beta3.KubeSchedulerProfile{
+			{
+				SchedulerName: pointer.StringPtr(v1.DefaultSchedulerName),
+			},
+			{
+				SchedulerName: pointer.StringPtr("gpu-binpacking-scheduler"),
+				PluginConfig: []v1beta3.PluginConfig{
+					{
+						Name: noderesources.Name,
+						Args: runtime.RawExtension{Object: &v1beta3.NodeResourcesFitArgs{
+							ScoringStrategy: &v1beta3.ScoringStrategy{
+								Type: v1beta3.MostAllocated,
+								Resources: []v1beta3.ResourceSpec{
+									{Name: string(v1.ResourceCPU), Weight: 1},
+									{Name: string(v1.ResourceMemory), Weight: 1},
+									{Name: resourceGPU, Weight: 2}},
+							},
+						}},
+					},
+				},
+			},
+		},
+	})
+	testCtx := testutils.InitTestSchedulerWithOptions(
+		t,
+		testutils.InitTestAPIServer(t, strings.ToLower(noderesources.Name), nil),
+		scheduler.WithProfiles(cfg.Profiles...),
+	)
+	testutils.SyncInformerFactory(testCtx)
+	go testCtx.Scheduler.Run(testCtx.Ctx)
+	return testCtx
+}
+
+// TestNodeResourcesScoring verifies that scheduler's node resources priority function
+// works correctly.
+func TestNodeResourcesScoring(t *testing.T) {
+	testCtx := initTestSchedulerForNodeResourcesTest(t)
+	defer testutils.CleanupTest(t, testCtx)
+	// Add a few nodes.
+	_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Capacity(
+		map[v1.ResourceName]string{
+			v1.ResourceCPU:    "8",
+			v1.ResourceMemory: "16G",
+			resourceGPU:       "4",
+		}), 2)
+	if err != nil {
+		t.Fatal(err)
+	}
+	cpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound1").Req(
+		map[v1.ResourceName]string{
+			v1.ResourceCPU:    "2",
+			v1.ResourceMemory: "4G",
+			resourceGPU:       "1",
+		},
+	).Obj())
+	if err != nil {
+		t.Fatal(err)
+	}
+	gpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("gpubound1").Req(
+		map[v1.ResourceName]string{
+			v1.ResourceCPU:    "1",
+			v1.ResourceMemory: "2G",
+			resourceGPU:       "2",
+		},
+	).Obj())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if cpuBoundPod1.Spec.NodeName == "" || gpuBoundPod1.Spec.NodeName == "" {
+		t.Fatalf("pods should have nodeName assigned, got %q and %q",
+			cpuBoundPod1.Spec.NodeName, gpuBoundPod1.Spec.NodeName)
+	}
+
+	// Since both pods used the default scheduler, then they should land on two different
+	// nodes because the default configuration uses LeastAllocated.
+	if cpuBoundPod1.Spec.NodeName == gpuBoundPod1.Spec.NodeName {
+		t.Fatalf("pods should have landed on different nodes, both scheduled on %q",
+			cpuBoundPod1.Spec.NodeName)
+	}
+
+	// The following pod is using the gpu-binpacking-scheduler profile, which gives a higher weight to
+	// GPU-based binpacking, and so it should land on the node with higher GPU utilization.
+	cpuBoundPod2, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound2").SchedulerName("gpu-binpacking-scheduler").Req(
+		map[v1.ResourceName]string{
+			v1.ResourceCPU:    "2",
+			v1.ResourceMemory: "4G",
+			resourceGPU:       "1",
+		},
+	).Obj())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if cpuBoundPod2.Spec.NodeName != gpuBoundPod1.Spec.NodeName {
+		t.Errorf("pods should have landed on the same node")
+	}
+}
+
+// TestNodeAffinityScoring verifies that scheduler's node affinity priority function
 // works correctly.
-func TestNodeAffinity(t *testing.T) {
+func TestNodeAffinityScoring(t *testing.T) {
 	testCtx := initTestSchedulerForPriorityTest(t, nodeaffinity.Name)
 	defer testutils.CleanupTest(t, testCtx)
 	// Add a few nodes.
@@ -122,9 +227,9 @@ func TestNodeAffinity(t *testing.T) {
 	}
 }
 
-// TestPodAffinity verifies that scheduler's pod affinity priority function
+// TestPodAffinityScoring verifies that scheduler's pod affinity priority function
 // works correctly.
-func TestPodAffinity(t *testing.T) {
+func TestPodAffinityScoring(t *testing.T) {
 	labelKey := "service"
 	labelValue := "S1"
 	topologyKey := "node-topologykey"
@@ -236,9 +341,9 @@ func TestPodAffinity(t *testing.T) {
 	}
 }
 
-// TestImageLocality verifies that the scheduler's image locality priority function
+// TestImageLocalityScoring verifies that the scheduler's image locality priority function
 // works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
-func TestImageLocality(t *testing.T) {
+func TestImageLocalityScoring(t *testing.T) {
 	testCtx := initTestSchedulerForPriorityTest(t, imagelocality.Name)
 	defer testutils.CleanupTest(t, testCtx)
 
@@ -295,8 +400,8 @@ func makeContainersWithImages(images []string) []v1.Container {
 	return containers
 }
 
-// TestPodTopologySpreadScore verifies that the PodTopologySpread Score plugin works.
-func TestPodTopologySpreadScore(t *testing.T) {
+// TestPodTopologySpreadScoring verifies that the PodTopologySpread Score plugin works.
+func TestPodTopologySpreadScoring(t *testing.T) {
 	testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
 	defer testutils.CleanupTest(t, testCtx)
 	cs := testCtx.ClientSet
@@ -403,10 +508,10 @@ func TestPodTopologySpreadScore(t *testing.T) {
 	}
 }
 
-// TestDefaultPodTopologySpreadScore verifies that the PodTopologySpread Score plugin
+// TestDefaultPodTopologySpreadScoring verifies that the PodTopologySpread Score plugin
 // with the system default spreading spreads Pods belonging to a Service.
 // The setup has 300 nodes over 3 zones.
-func TestDefaultPodTopologySpreadScore(t *testing.T) {
+func TestDefaultPodTopologySpreadScoring(t *testing.T) {
 	testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
 	t.Cleanup(func() {
 		testutils.CleanupTest(t, testCtx)
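Not part of the upstream diff: the expectations in TestNodeResourcesScoring above follow from how the MostAllocated scoring strategy weighs utilization. Assuming the usual form of that strategy, a weighted average of per-resource utilization (requested/allocatable) scaled to 0-100 (this is a sketch of the idea, not the plugin's exact code), the test's numbers work out as in the self-contained Go program below:

package main

import "fmt"

// mostAllocatedScore is a simplified sketch of the MostAllocated strategy: per-resource
// utilization (requested/allocatable) scaled to 0-100 and combined as a weighted average.
// The real plugin scores milli-quantities from NodeInfo, but the ranking idea is the same.
func mostAllocatedScore(requested, allocatable, weights map[string]float64) float64 {
	var sum, weightSum float64
	for name, w := range weights {
		sum += w * 100 * requested[name] / allocatable[name]
		weightSum += w
	}
	return sum / weightSum
}

func main() {
	// Node capacity and resource weights taken from the test profile above.
	allocatable := map[string]float64{"cpu": 8, "memory": 16, "gpu": 4}
	weights := map[string]float64{"cpu": 1, "memory": 1, "gpu": 2}

	// Requested totals if cpubound2 (2 CPU, 4G, 1 GPU) were added to each node.
	onCPUNode := map[string]float64{"cpu": 2 + 2, "memory": 4 + 4, "gpu": 1 + 1} // node running cpubound1
	onGPUNode := map[string]float64{"cpu": 1 + 2, "memory": 2 + 4, "gpu": 2 + 1} // node running gpubound1

	fmt.Printf("node with cpubound1: %.2f\n", mostAllocatedScore(onCPUNode, allocatable, weights)) // 50.00
	fmt.Printf("node with gpubound1: %.2f\n", mostAllocatedScore(onGPUNode, allocatable, weights)) // 56.25
}

Because the GPU weight is 2, the node already running gpubound1 comes out ahead (56.25 vs 50.00), so the gpu-binpacking-scheduler profile packs cpubound2 onto it, while the default LeastAllocated profile had spread the first two pods across different nodes, which is exactly what the test asserts.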