Sched framework: expose NodeInfo in all functions of PluginsRunner interface

Author: AxeZhan
Date: 2023-11-18 18:21:58 +08:00
Parent: ae185414f4
Commit: be48c93689

31 changed files with 185 additions and 148 deletions
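The pattern applied throughout the diff: every function on the extender and PluginsRunner paths that previously took []*v1.Node now takes []*framework.NodeInfo, and call sites reach the underlying node object through NodeInfo.Node(). As a rough sketch of that conversion (wrapNodes is a hypothetical helper for illustration; NewNodeInfo, SetNode, and Node are the existing scheduler framework APIs the commit relies on):

    import (
        v1 "k8s.io/api/core/v1"
        "k8s.io/kubernetes/pkg/scheduler/framework"
    )

    // wrapNodes shows the wrapping pattern this commit applies at call sites
    // that still hold plain *v1.Node values: a NodeInfo is created empty and
    // then bound to its node.
    func wrapNodes(nodes []*v1.Node) []*framework.NodeInfo {
        infos := make([]*framework.NodeInfo, 0, len(nodes))
        for _, n := range nodes {
            ni := framework.NewNodeInfo()
            ni.SetNode(n) // after this, ni.Node().Name == n.Name
            infos = append(infos, ni)
        }
        return infos
    }

The commit adds an equivalent test helper, BuildNodeInfos, near the end of this diff.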

@@ -246,18 +246,18 @@ func convertToMetaVictims(
 // unresolvable.
 func (h *HTTPExtender) Filter(
     pod *v1.Pod,
-    nodes []*v1.Node,
-) (filteredList []*v1.Node, failedNodes, failedAndUnresolvableNodes extenderv1.FailedNodesMap, err error) {
+    nodes []*framework.NodeInfo,
+) (filteredList []*framework.NodeInfo, failedNodes, failedAndUnresolvableNodes extenderv1.FailedNodesMap, err error) {
     var (
         result     extenderv1.ExtenderFilterResult
         nodeList   *v1.NodeList
         nodeNames  *[]string
-        nodeResult []*v1.Node
+        nodeResult []*framework.NodeInfo
         args       *extenderv1.ExtenderArgs
     )
-    fromNodeName := make(map[string]*v1.Node)
+    fromNodeName := make(map[string]*framework.NodeInfo)
     for _, n := range nodes {
-        fromNodeName[n.Name] = n
+        fromNodeName[n.Node().Name] = n
     }

     if h.filterVerb == "" {
@@ -267,13 +267,13 @@ func (h *HTTPExtender) Filter(
     if h.nodeCacheCapable {
         nodeNameSlice := make([]string, 0, len(nodes))
         for _, node := range nodes {
-            nodeNameSlice = append(nodeNameSlice, node.Name)
+            nodeNameSlice = append(nodeNameSlice, node.Node().Name)
         }
         nodeNames = &nodeNameSlice
     } else {
         nodeList = &v1.NodeList{}
         for _, node := range nodes {
-            nodeList.Items = append(nodeList.Items, *node)
+            nodeList.Items = append(nodeList.Items, *node.Node())
         }
     }
@@ -291,7 +291,7 @@ func (h *HTTPExtender) Filter(
     }
     if h.nodeCacheCapable && result.NodeNames != nil {
-        nodeResult = make([]*v1.Node, len(*result.NodeNames))
+        nodeResult = make([]*framework.NodeInfo, len(*result.NodeNames))
         for i, nodeName := range *result.NodeNames {
             if n, ok := fromNodeName[nodeName]; ok {
                 nodeResult[i] = n
@@ -302,9 +302,10 @@ func (h *HTTPExtender) Filter(
             }
         }
     } else if result.Nodes != nil {
-        nodeResult = make([]*v1.Node, len(result.Nodes.Items))
+        nodeResult = make([]*framework.NodeInfo, len(result.Nodes.Items))
         for i := range result.Nodes.Items {
-            nodeResult[i] = &result.Nodes.Items[i]
+            nodeResult[i] = framework.NewNodeInfo()
+            nodeResult[i].SetNode(&result.Nodes.Items[i])
         }
     }
@@ -314,7 +315,7 @@ func (h *HTTPExtender) Filter(
 // Prioritize based on extender implemented priority functions. Weight*priority is added
 // up for each such priority function. The returned score is added to the score computed
 // by Kubernetes scheduler. The total score is used to do the host selection.
-func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*extenderv1.HostPriorityList, int64, error) {
+func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*framework.NodeInfo) (*extenderv1.HostPriorityList, int64, error) {
     var (
         result   extenderv1.HostPriorityList
         nodeList *v1.NodeList
@@ -325,7 +326,7 @@ func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*extenderv1.Ho
     if h.prioritizeVerb == "" {
         result := extenderv1.HostPriorityList{}
         for _, node := range nodes {
-            result = append(result, extenderv1.HostPriority{Host: node.Name, Score: 0})
+            result = append(result, extenderv1.HostPriority{Host: node.Node().Name, Score: 0})
         }
         return &result, 0, nil
     }
@@ -333,13 +334,13 @@ func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*extenderv1.Ho
     if h.nodeCacheCapable {
         nodeNameSlice := make([]string, 0, len(nodes))
         for _, node := range nodes {
-            nodeNameSlice = append(nodeNameSlice, node.Name)
+            nodeNameSlice = append(nodeNameSlice, node.Node().Name)
         }
         nodeNames = &nodeNameSlice
     } else {
         nodeList = &v1.NodeList{}
         for _, node := range nodes {
-            nodeList.Items = append(nodeList.Items, *node)
+            nodeList.Items = append(nodeList.Items, *node.Node())
         }
     }

@@ -33,12 +33,12 @@ type Extender interface {
     // The failedNodes and failedAndUnresolvableNodes optionally contains the list
     // of failed nodes and failure reasons, except nodes in the latter are
    // unresolvable.
-    Filter(pod *v1.Pod, nodes []*v1.Node) (filteredNodes []*v1.Node, failedNodesMap extenderv1.FailedNodesMap, failedAndUnresolvable extenderv1.FailedNodesMap, err error)
+    Filter(pod *v1.Pod, nodes []*NodeInfo) (filteredNodes []*NodeInfo, failedNodesMap extenderv1.FailedNodesMap, failedAndUnresolvable extenderv1.FailedNodesMap, err error)
     // Prioritize based on extender-implemented priority functions. The returned scores & weight
     // are used to compute the weighted score for an extender. The weighted scores are added to
     // the scores computed by Kubernetes scheduler. The total scores are used to do the host selection.
-    Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error)
+    Prioritize(pod *v1.Pod, nodes []*NodeInfo) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error)
     // Bind delegates the action of binding a pod to a node to the extender.
     Bind(binding *v1.Binding) error
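For out-of-tree Extender implementations this is a compile-time break confined to the two signatures above. A minimal stub under the new interface might look like the following (noopExtender is invented for illustration and omits the interface's other methods such as Name, Bind, and IsBinder; imports are assumed to match the scheduler's own v1, framework, and extenderv1 aliases):

    type noopExtender struct{}

    // Filter passes every candidate through and reports no failed nodes.
    func (e *noopExtender) Filter(pod *v1.Pod, nodes []*framework.NodeInfo) ([]*framework.NodeInfo, extenderv1.FailedNodesMap, extenderv1.FailedNodesMap, error) {
        return nodes, nil, nil, nil
    }

    // Prioritize returns an empty priority list with zero weight.
    func (e *noopExtender) Prioritize(pod *v1.Pod, nodes []*framework.NodeInfo) (*extenderv1.HostPriorityList, int64, error) {
        return &extenderv1.HostPriorityList{}, 0, nil
    }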

@@ -462,7 +462,7 @@ type PreScorePlugin interface
     // the pod will be rejected
     // When it returns Skip status, other fields in status are just ignored,
     // and coupled Score plugin will be skipped in this scheduling cycle.
-    PreScore(ctx context.Context, state *CycleState, pod *v1.Pod, nodes []*v1.Node) *Status
+    PreScore(ctx context.Context, state *CycleState, pod *v1.Pod, nodes []*NodeInfo) *Status
 }

 // ScoreExtensions is an interface for Score extended functionality.
@@ -776,12 +776,12 @@ type PodNominator interface {
 type PluginsRunner interface {
     // RunPreScorePlugins runs the set of configured PreScore plugins. If any
     // of these plugins returns any status other than "Success", the given pod is rejected.
-    RunPreScorePlugins(context.Context, *CycleState, *v1.Pod, []*v1.Node) *Status
+    RunPreScorePlugins(context.Context, *CycleState, *v1.Pod, []*NodeInfo) *Status
     // RunScorePlugins runs the set of configured scoring plugins.
     // It returns a list that stores scores from each plugin and total score for each Node.
     // It also returns *Status, which is set to non-success if any of the plugins returns
     // a non-success status.
-    RunScorePlugins(context.Context, *CycleState, *v1.Pod, []*v1.Node) ([]NodePluginScores, *Status)
+    RunScorePlugins(context.Context, *CycleState, *v1.Pod, []*NodeInfo) ([]NodePluginScores, *Status)
     // RunFilterPlugins runs the set of configured Filter plugins for pod on
     // the given node. Note that for the node being evaluated, the passed nodeInfo
     // reference could be different from the one in NodeInfoSnapshot map (e.g., pods
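PreScore plugins adapt the same way: only the node-list parameter changes, and node metadata is reached through Node(). A hedged sketch of a plugin compiled against the new signature (the plugin and its logic are invented for illustration):

    type nodeCounter struct{}

    func (p *nodeCounter) Name() string { return "NodeCounter" }

    // PreScore now receives the feasible nodes as []*framework.NodeInfo.
    func (p *nodeCounter) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
        for _, ni := range nodes {
            _ = ni.Node().Name // the *v1.Node stays one call away when only metadata is needed
        }
        return nil // nil status means Success
    }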

@@ -815,7 +815,7 @@ func (pl *dynamicResources) PostFilter(ctx context.Context, cs *framework.CycleS
 // PreScore is passed a list of all nodes that would fit the pod. Not all
 // claims are necessarily allocated yet, so here we can set the SuitableNodes
 // field for those which are pending.
-func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
+func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
     if !pl.enabled {
         return nil
     }
@@ -841,6 +841,7 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
         logger.V(5).Info("no pending claims", "pod", klog.KObj(pod))
         return nil
     }
     if haveAllPotentialNodes(state.podSchedulingState.schedulingCtx, nodes) {
         logger.V(5).Info("all potential nodes already set", "pod", klog.KObj(pod), "potentialnodes", klog.KObjSlice(nodes))
         return nil
@@ -859,7 +860,7 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
     if numNodes == len(nodes) {
         // Copy all node names.
         for _, node := range nodes {
-            potentialNodes = append(potentialNodes, node.Name)
+            potentialNodes = append(potentialNodes, node.Node().Name)
         }
     } else {
         // Select a random subset of the nodes to comply with
@@ -868,7 +869,7 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
         // randomly.
         nodeNames := map[string]struct{}{}
         for _, node := range nodes {
-            nodeNames[node.Name] = struct{}{}
+            nodeNames[node.Node().Name] = struct{}{}
         }
         for nodeName := range nodeNames {
             if len(potentialNodes) >= resourcev1alpha2.PodSchedulingNodeListMaxSize {
@@ -882,12 +883,12 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
     return nil
 }

-func haveAllPotentialNodes(schedulingCtx *resourcev1alpha2.PodSchedulingContext, nodes []*v1.Node) bool {
+func haveAllPotentialNodes(schedulingCtx *resourcev1alpha2.PodSchedulingContext, nodes []*framework.NodeInfo) bool {
     if schedulingCtx == nil {
         return false
     }
     for _, node := range nodes {
-        if !haveNode(schedulingCtx.Spec.PotentialNodes, node.Name) {
+        if !haveNode(schedulingCtx.Spec.PotentialNodes, node.Node().Name) {
             return false
         }
     }

@@ -550,7 +550,7 @@ func TestPlugin(t *testing.T) {
     }
     unschedulable := status.Code() != framework.Success

-    var potentialNodes []*v1.Node
+    var potentialNodes []*framework.NodeInfo

     initialObjects = testCtx.listAll(t)
     testCtx.updateAPIServer(t, initialObjects, tc.prepare.filter)
@@ -565,7 +565,7 @@ func TestPlugin(t *testing.T) {
         if status.Code() != framework.Success {
             unschedulable = true
         } else {
-            potentialNodes = append(potentialNodes, nodeInfo.Node())
+            potentialNodes = append(potentialNodes, nodeInfo)
         }
     }
     }
@@ -582,13 +582,13 @@ func TestPlugin(t *testing.T) {
         }
     }

-    var selectedNode *v1.Node
+    var selectedNode *framework.NodeInfo
     if !unschedulable && len(potentialNodes) > 0 {
         selectedNode = potentialNodes[0]

         initialObjects = testCtx.listAll(t)
         initialObjects = testCtx.updateAPIServer(t, initialObjects, tc.prepare.reserve)
-        status := testCtx.p.Reserve(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Name)
+        status := testCtx.p.Reserve(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Node().Name)
         t.Run("reserve", func(t *testing.T) {
             testCtx.verify(t, tc.want.reserve, initialObjects, nil, status)
         })
@@ -601,14 +601,14 @@ func TestPlugin(t *testing.T) {
         if unschedulable {
             initialObjects = testCtx.listAll(t)
             initialObjects = testCtx.updateAPIServer(t, initialObjects, tc.prepare.unreserve)
-            testCtx.p.Unreserve(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Name)
+            testCtx.p.Unreserve(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Node().Name)
             t.Run("unreserve", func(t *testing.T) {
                 testCtx.verify(t, tc.want.unreserve, initialObjects, nil, status)
             })
         } else {
             initialObjects = testCtx.listAll(t)
             initialObjects = testCtx.updateAPIServer(t, initialObjects, tc.prepare.postbind)
-            testCtx.p.PostBind(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Name)
+            testCtx.p.PostBind(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Node().Name)
             t.Run("postbind", func(t *testing.T) {
                 testCtx.verify(t, tc.want.postbind, initialObjects, nil, status)
             })

@@ -128,7 +128,7 @@ func (pl *InterPodAffinity) PreScore(
     pCtx context.Context,
     cycleState *framework.CycleState,
     pod *v1.Pod,
-    nodes []*v1.Node,
+    nodes []*framework.NodeInfo,
 ) *framework.Status {
     if len(nodes) == 0 {
         // No nodes to score.

@@ -31,6 +31,7 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework"
     plugintesting "k8s.io/kubernetes/pkg/scheduler/framework/plugins/testing"
     "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+    tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
 )

 var nsLabelT1 = map[string]string{"team": "team1"}
@@ -783,7 +784,7 @@ func TestPreferredAffinity(t *testing.T) {
     defer cancel()
     state := framework.NewCycleState()
     p := plugintesting.SetupPluginWithInformers(ctx, t, New, &config.InterPodAffinityArgs{HardPodAffinityWeight: 1, IgnorePreferredTermsOfExistingPods: test.ignorePreferredTermsOfExistingPods}, cache.NewSnapshot(test.pods, test.nodes), namespaces)
-    status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, test.nodes)
+    status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))

     if !status.IsSuccess() {
         if status.Code() != test.wantStatus.Code() {
@@ -951,7 +952,7 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
     defer cancel()
     state := framework.NewCycleState()
     p := plugintesting.SetupPluginWithInformers(ctx, t, New, &config.InterPodAffinityArgs{HardPodAffinityWeight: test.hardPodAffinityWeight}, cache.NewSnapshot(test.pods, test.nodes), namespaces)
-    status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, test.nodes)
+    status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
     if !test.wantStatus.Equal(status) {
         t.Errorf("InterPodAffinity#PreScore() returned unexpected status.Code got: %v, want: %v", status.Code(), test.wantStatus.Code())
     }

@@ -182,7 +182,7 @@ func (s *preScoreState) Clone() framework.StateData {
 }

 // PreScore builds and writes cycle state used by Score and NormalizeScore.
-func (pl *NodeAffinity) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
+func (pl *NodeAffinity) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
     if len(nodes) == 0 {
         return nil
     }

@@ -30,6 +30,7 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
     "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
+    tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
 )

 // TODO: Add test case for RequiredDuringSchedulingRequiredDuringExecution after it's implemented.
@@ -1148,7 +1149,7 @@ func TestNodeAffinityPriority(t *testing.T) {
     }
     var status *framework.Status
     if test.runPreScore {
-        status = p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, test.nodes)
+        status = p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
         if !status.IsSuccess() {
             t.Errorf("unexpected error: %v", status)
         }

@@ -62,7 +62,7 @@ func (s *balancedAllocationPreScoreState) Clone() framework.StateData {
 }

 // PreScore calculates incoming pod's resource requests and writes them to the cycle state used.
-func (ba *BalancedAllocation) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
+func (ba *BalancedAllocation) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
     state := &balancedAllocationPreScoreState{
         podRequests: ba.calculatePodResourceRequestList(pod, ba.resources),
     }

@@ -31,6 +31,7 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
     "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
+    tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
 )

 func TestNodeResourcesBalancedAllocation(t *testing.T) {
@@ -393,7 +394,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
     state := framework.NewCycleState()
     for i := range test.nodes {
         if test.runPreScore {
-            status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, test.nodes)
+            status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
             if !status.IsSuccess() {
                 t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
             }

@@ -118,7 +118,7 @@ func (s *preScoreState) Clone() framework.StateData {
 }

 // PreScore calculates incoming pod's resource requests and writes them to the cycle state used.
-func (f *Fit) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
+func (f *Fit) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
     state := &preScoreState{
         podRequests: f.calculatePodResourceRequestList(pod, f.resources),
     }

@@ -33,6 +33,7 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
     "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
+    tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
 )

 var (
@@ -950,7 +951,7 @@ func TestFitScore(t *testing.T) {
     var gotPriorities framework.NodeScoreList
     for _, n := range test.nodes {
         if test.runPreScore {
-            status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, test.nodes)
+            status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
             if !status.IsSuccess() {
                 t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
             }

@@ -30,6 +30,7 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
     "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
+    tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
 )

 func TestLeastAllocatedScoringStrategy(t *testing.T) {
@@ -410,7 +411,7 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) {
         return
     }
-    status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, test.nodes)
+    status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
     if !status.IsSuccess() {
         t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
     }

@@ -30,6 +30,7 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
     "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
+    tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
 )

 func TestMostAllocatedScoringStrategy(t *testing.T) {
@@ -366,7 +367,7 @@ func TestMostAllocatedScoringStrategy(t *testing.T) {
         return
     }
-    status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, test.nodes)
+    status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
     if !status.IsSuccess() {
         t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
     }

@@ -33,6 +33,7 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
     "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
+    tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
 )

 func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) {
@@ -130,7 +131,7 @@ func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) {
     var gotScores framework.NodeScoreList
     for _, n := range test.nodes {
-        status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, test.nodes)
+        status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
         if !status.IsSuccess() {
             t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
         }
@@ -327,7 +328,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
     var gotList framework.NodeScoreList
     for _, n := range test.nodes {
-        status := p.(framework.PreScorePlugin).PreScore(context.Background(), state, test.pod, test.nodes)
+        status := p.(framework.PreScorePlugin).PreScore(context.Background(), state, test.pod, tf.BuildNodeInfos(test.nodes))
         if !status.IsSuccess() {
             t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
         }
@@ -553,7 +554,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
         t.Fatalf("unexpected error: %v", err)
     }
-    status := p.(framework.PreScorePlugin).PreScore(context.Background(), state, test.pod, test.nodes)
+    status := p.(framework.PreScorePlugin).PreScore(context.Background(), state, test.pod, tf.BuildNodeInfos(test.nodes))
     if !status.IsSuccess() {
         t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
     }

@@ -56,7 +56,7 @@ func (s *preScoreState) Clone() framework.StateData {
 // 1) s.TopologyPairToPodCounts: keyed with both eligible topology pair and node names.
 // 2) s.IgnoredNodes: the set of nodes that shouldn't be scored.
 // 3) s.TopologyNormalizingWeight: The weight to be given to each constraint based on the number of values in a topology.
-func (pl *PodTopologySpread) initPreScoreState(s *preScoreState, pod *v1.Pod, filteredNodes []*v1.Node, requireAllTopologies bool) error {
+func (pl *PodTopologySpread) initPreScoreState(s *preScoreState, pod *v1.Pod, filteredNodes []*framework.NodeInfo, requireAllTopologies bool) error {
     var err error
     if len(pod.Spec.TopologySpreadConstraints) > 0 {
         s.Constraints, err = pl.filterTopologySpreadConstraints(
@@ -78,10 +78,10 @@ func (pl *PodTopologySpread) initPreScoreState(s *preScoreState, pod *v1.Pod, fi
     }
     topoSize := make([]int, len(s.Constraints))
     for _, node := range filteredNodes {
-        if requireAllTopologies && !nodeLabelsMatchSpreadConstraints(node.Labels, s.Constraints) {
+        if requireAllTopologies && !nodeLabelsMatchSpreadConstraints(node.Node().Labels, s.Constraints) {
             // Nodes which don't have all required topologyKeys present are ignored
             // when scoring later.
-            s.IgnoredNodes.Insert(node.Name)
+            s.IgnoredNodes.Insert(node.Node().Name)
             continue
         }
         for i, constraint := range s.Constraints {
@@ -89,7 +89,7 @@ func (pl *PodTopologySpread) initPreScoreState(s *preScoreState, pod *v1.Pod, fi
             if constraint.TopologyKey == v1.LabelHostname {
                 continue
             }
-            pair := topologyPair{key: constraint.TopologyKey, value: node.Labels[constraint.TopologyKey]}
+            pair := topologyPair{key: constraint.TopologyKey, value: node.Node().Labels[constraint.TopologyKey]}
             if s.TopologyPairToPodCounts[pair] == nil {
                 s.TopologyPairToPodCounts[pair] = new(int64)
                 topoSize[i]++
@@ -113,7 +113,7 @@ func (pl *PodTopologySpread) PreScore(
     ctx context.Context,
     cycleState *framework.CycleState,
     pod *v1.Pod,
-    filteredNodes []*v1.Node,
+    filteredNodes []*framework.NodeInfo,
 ) *framework.Status {
     allNodes, err := pl.sharedLister.NodeInfos().List()
     if err != nil {

@@ -37,6 +37,7 @@ import (
     frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
     "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
+    tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
     "k8s.io/utils/ptr"
 )
@@ -103,7 +104,7 @@ func TestPreScoreSkip(t *testing.T) {
     informerFactory.WaitForCacheSync(ctx.Done())
     p := pl.(*PodTopologySpread)
     cs := framework.NewCycleState()
-    if s := p.PreScore(ctx, cs, tt.pod, tt.nodes); !s.IsSkip() {
+    if s := p.PreScore(ctx, cs, tt.pod, tf.BuildNodeInfos(tt.nodes)); !s.IsSkip() {
         t.Fatalf("Expected skip but got %v", s.AsError())
     }
     })
@@ -590,7 +591,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
     informerFactory.WaitForCacheSync(ctx.Done())
     p := pl.(*PodTopologySpread)
     cs := framework.NewCycleState()
-    if s := p.PreScore(ctx, cs, tt.pod, tt.nodes); !s.IsSuccess() {
+    if s := p.PreScore(ctx, cs, tt.pod, tf.BuildNodeInfos(tt.nodes)); !s.IsSuccess() {
         t.Fatal(s.AsError())
     }
@@ -1347,7 +1348,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
     p.enableNodeInclusionPolicyInPodTopologySpread = tt.enableNodeInclusionPolicy
     p.enableMatchLabelKeysInPodTopologySpread = tt.enableMatchLabelKeys

-    status := p.PreScore(ctx, state, tt.pod, tt.nodes)
+    status := p.PreScore(ctx, state, tt.pod, tf.BuildNodeInfos(tt.nodes))
     if !status.IsSuccess() {
         t.Errorf("unexpected error: %v", status)
     }
@@ -1418,7 +1419,7 @@ func BenchmarkTestPodTopologySpreadScore(b *testing.B) {
     pl := plugintesting.SetupPlugin(ctx, b, podTopologySpreadFunc, &config.PodTopologySpreadArgs{DefaultingType: config.ListDefaulting}, cache.NewSnapshot(existingPods, allNodes))
     p := pl.(*PodTopologySpread)
-    status := p.PreScore(ctx, state, tt.pod, filteredNodes)
+    status := p.PreScore(ctx, state, tt.pod, tf.BuildNodeInfos(filteredNodes))
     if !status.IsSuccess() {
         b.Fatalf("unexpected error: %v", status)
     }

@@ -96,7 +96,7 @@ func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationLi
 }

 // PreScore builds and writes cycle state used by Score and NormalizeScore.
-func (pl *TaintToleration) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
+func (pl *TaintToleration) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
     if len(nodes) == 0 {
         return nil
     }

@@ -27,6 +27,7 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework"
     "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
     "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+    tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
 )

 func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node {
@@ -241,7 +242,7 @@ func TestTaintTolerationScore(t *testing.T) {
     if err != nil {
         t.Fatalf("creating plugin: %v", err)
     }
-    status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, test.nodes)
+    status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
     if !status.IsSuccess() {
         t.Errorf("unexpected error: %v", status)
     }

@@ -268,7 +268,7 @@ func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, p
 }

 // PreScore invoked at the preScore extension point. It checks whether volumeBinding can skip Score
-func (pl *VolumeBinding) PreScore(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
+func (pl *VolumeBinding) PreScore(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
     if pl.scorer == nil {
         return framework.NewStatus(framework.Skip)
     }

@@ -35,6 +35,7 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/framework"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
     "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
+    tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
 )

 var (
@@ -868,7 +869,7 @@ func TestVolumeBinding(t *testing.T) {
     }

     t.Logf("Verify: call PreScore and check status")
-    gotPreScoreStatus := p.PreScore(ctx, state, item.pod, item.nodes)
+    gotPreScoreStatus := p.PreScore(ctx, state, item.pod, tf.BuildNodeInfos(item.nodes))
     if diff := cmp.Diff(item.wantPreScoreStatus, gotPreScoreStatus); diff != "" {
         t.Errorf("state got after prescore does not match (-want,+got):\n%s", diff)
     }

@@ -1002,7 +1002,7 @@ func (f *frameworkImpl) RunPreScorePlugins(
     ctx context.Context,
     state *framework.CycleState,
     pod *v1.Pod,
-    nodes []*v1.Node,
+    nodes []*framework.NodeInfo,
 ) (status *framework.Status) {
     startTime := time.Now()
     skipPlugins := sets.New[string]()
@@ -1033,7 +1033,7 @@ func (f *frameworkImpl) RunPreScorePlugins(
     return nil
 }

-func (f *frameworkImpl) runPreScorePlugin(ctx context.Context, pl framework.PreScorePlugin, state *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
+func (f *frameworkImpl) runPreScorePlugin(ctx context.Context, pl framework.PreScorePlugin, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
     if !state.ShouldRecordPluginMetrics() {
         return pl.PreScore(ctx, state, pod, nodes)
     }
@@ -1047,7 +1047,7 @@ func (f *frameworkImpl) runPreScorePlugin(ctx context.Context, pl framework.PreS
 // It returns a list that stores scores from each plugin and total score for each Node.
 // It also returns *Status, which is set to non-success if any of the plugins returns
 // a non-success status.
-func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) (ns []framework.NodePluginScores, status *framework.Status) {
+func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) (ns []framework.NodePluginScores, status *framework.Status) {
     startTime := time.Now()
     defer func() {
         metrics.FrameworkExtensionPointDuration.WithLabelValues(metrics.Score, status.Code().String(), f.profileName).Observe(metrics.SinceInSeconds(startTime))
@@ -1075,7 +1075,7 @@ func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state *framework.Cy
     }
     // Run Score method for each node in parallel.
     f.Parallelizer().Until(ctx, len(nodes), func(index int) {
-        nodeName := nodes[index].Name
+        nodeName := nodes[index].Node().Name
         logger := logger
         if verboseLogs {
             logger = klog.LoggerWithValues(logger, "node", klog.ObjectRef{Name: nodeName})
@@ -1125,7 +1125,7 @@ func (f *frameworkImpl) RunScorePlugins(ctx context.Context, state *framework.Cy
     // and then, build allNodePluginScores.
     f.Parallelizer().Until(ctx, len(nodes), func(index int) {
         nodePluginScores := framework.NodePluginScores{
-            Name:   nodes[index].Name,
+            Name:   nodes[index].Node().Name,
             Scores: make([]framework.PluginScore, len(plugins)),
         }

@@ -144,7 +144,7 @@ func (pl *TestScorePlugin) Name() string {
     return pl.name
 }

-func (pl *TestScorePlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
+func (pl *TestScorePlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
     return framework.NewStatus(framework.Code(pl.inj.PreScoreStatus), injectReason)
 }
@@ -212,7 +212,7 @@ func (pl *TestPlugin) PostFilter(_ context.Context, _ *framework.CycleState, _ *
     return nil, framework.NewStatus(framework.Code(pl.inj.PostFilterStatus), injectReason)
 }

-func (pl *TestPlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
+func (pl *TestPlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
     return framework.NewStatus(framework.Code(pl.inj.PreScoreStatus), injectReason)
 }
@@ -1468,7 +1468,7 @@ func TestRunScorePlugins(t *testing.T) {
     state := framework.NewCycleState()
     state.SkipScorePlugins = tt.skippedPlugins
-    res, status := f.RunScorePlugins(ctx, state, pod, nodes)
+    res, status := f.RunScorePlugins(ctx, state, pod, BuildNodeInfos(nodes))

     if tt.err {
         if status.IsSuccess() {
@@ -2782,7 +2782,9 @@ func TestRecordingMetrics(t *testing.T) {
     },
     {
         name:               "Score - Success",
-        action:             func(f framework.Framework) { f.RunScorePlugins(context.Background(), state, pod, nodes) },
+        action: func(f framework.Framework) {
+            f.RunScorePlugins(context.Background(), state, pod, BuildNodeInfos(nodes))
+        },
         wantExtensionPoint: "Score",
         wantStatus:         framework.Success,
     },
@@ -2839,7 +2841,9 @@ func TestRecordingMetrics(t *testing.T) {
     },
     {
         name:               "Score - Error",
-        action:             func(f framework.Framework) { f.RunScorePlugins(context.Background(), state, pod, nodes) },
+        action: func(f framework.Framework) {
+            f.RunScorePlugins(context.Background(), state, pod, BuildNodeInfos(nodes))
+        },
         inject:             injectedResult{ScoreStatus: int(framework.Error)},
         wantExtensionPoint: "Score",
         wantStatus:         framework.Error,
@@ -3318,3 +3322,13 @@ func mustNewPodInfo(t *testing.T, pod *v1.Pod) *framework.PodInfo {
     }
     return podInfo
 }
+
+// BuildNodeInfos build NodeInfo slice from a v1.Node slice
+func BuildNodeInfos(nodes []*v1.Node) []*framework.NodeInfo {
+    res := make([]*framework.NodeInfo, len(nodes))
+    for i := 0; i < len(nodes); i++ {
+        res[i] = framework.NewNodeInfo()
+        res[i].SetNode(nodes[i])
+    }
+    return res
+}
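BuildNodeInfos gives tests a one-line bridge from existing []*v1.Node fixtures to the new parameter type. Within this package the helper is called unqualified, as in the RunScorePlugins calls above; the plugin tests elsewhere in the diff import it under the tf alias, e.g. (testNodes is a placeholder for a test's node fixtures):

    status := p.PreScore(ctx, state, pod, tf.BuildNodeInfos(testNodes))

The tf alias presumably points at a parallel helper in k8s.io/kubernetes/pkg/scheduler/testing/framework, added in one of the changed files not shown in this excerpt.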

@@ -61,7 +61,7 @@ type instrumentedPreScorePlugin struct {
 var _ framework.PreScorePlugin = &instrumentedPreScorePlugin{}

-func (p *instrumentedPreScorePlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
+func (p *instrumentedPreScorePlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
     status := p.PreScorePlugin.PreScore(ctx, state, pod, nodes)
     if !status.IsSkip() {
         p.metric.Inc()

@@ -416,7 +416,7 @@ func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework
     // When only one node after predicate, just use it.
     if len(feasibleNodes) == 1 {
         return ScheduleResult{
-            SuggestedHost:  feasibleNodes[0].Name,
+            SuggestedHost:  feasibleNodes[0].Node().Name,
             EvaluatedNodes: 1 + len(diagnosis.NodeToStatusMap),
             FeasibleNodes:  1,
         }, nil
@@ -439,7 +439,7 @@ func (sched *Scheduler) schedulePod(ctx context.Context, fwk framework.Framework
 // Filters the nodes to find the ones that fit the pod based on the framework
 // filter plugins and filter extenders.
-func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, framework.Diagnosis, error) {
+func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.Framework, state *framework.CycleState, pod *v1.Pod) ([]*framework.NodeInfo, framework.Diagnosis, error) {
     logger := klog.FromContext(ctx)
     diagnosis := framework.Diagnosis{
         NodeToStatusMap: make(framework.NodeToStatusMap),
@@ -524,7 +524,7 @@ func (sched *Scheduler) findNodesThatFitPod(ctx context.Context, fwk framework.F
     return feasibleNodesAfterExtender, diagnosis, nil
 }

-func (sched *Scheduler) evaluateNominatedNode(ctx context.Context, pod *v1.Pod, fwk framework.Framework, state *framework.CycleState, diagnosis framework.Diagnosis) ([]*v1.Node, error) {
+func (sched *Scheduler) evaluateNominatedNode(ctx context.Context, pod *v1.Pod, fwk framework.Framework, state *framework.CycleState, diagnosis framework.Diagnosis) ([]*framework.NodeInfo, error) {
     nnn := pod.Status.NominatedNodeName
     nodeInfo, err := sched.nodeInfoSnapshot.Get(nnn)
     if err != nil {
@@ -551,17 +551,17 @@ func (sched *Scheduler) findNodesThatPassFilters(
     state *framework.CycleState,
     pod *v1.Pod,
     diagnosis *framework.Diagnosis,
-    nodes []*framework.NodeInfo) ([]*v1.Node, error) {
+    nodes []*framework.NodeInfo) ([]*framework.NodeInfo, error) {
     numAllNodes := len(nodes)
     numNodesToFind := sched.numFeasibleNodesToFind(fwk.PercentageOfNodesToScore(), int32(numAllNodes))

     // Create feasible list with enough space to avoid growing it
     // and allow assigning.
-    feasibleNodes := make([]*v1.Node, numNodesToFind)
+    feasibleNodes := make([]*framework.NodeInfo, numNodesToFind)

     if !fwk.HasFilterPlugins() {
         for i := range feasibleNodes {
-            feasibleNodes[i] = nodes[(sched.nextStartNodeIndex+i)%numAllNodes].Node()
+            feasibleNodes[i] = nodes[(sched.nextStartNodeIndex+i)%numAllNodes]
         }
         return feasibleNodes, nil
     }
@@ -586,7 +586,7 @@ func (sched *Scheduler) findNodesThatPassFilters(
         cancel()
         atomic.AddInt32(&feasibleNodesLen, -1)
     } else {
-        feasibleNodes[length-1] = nodeInfo.Node()
+        feasibleNodes[length-1] = nodeInfo
     }
     } else {
         statusesLock.Lock()
@@ -646,7 +646,7 @@ func (sched *Scheduler) numFeasibleNodesToFind(percentageOfNodesToScore *int32,
     return numNodes
 }

-func findNodesThatPassExtenders(ctx context.Context, extenders []framework.Extender, pod *v1.Pod, feasibleNodes []*v1.Node, statuses framework.NodeToStatusMap) ([]*v1.Node, error) {
+func findNodesThatPassExtenders(ctx context.Context, extenders []framework.Extender, pod *v1.Pod, feasibleNodes []*framework.NodeInfo, statuses framework.NodeToStatusMap) ([]*framework.NodeInfo, error) {
     logger := klog.FromContext(ctx)
     // Extenders are called sequentially.
     // Nodes in original feasibleNodes can be excluded in one extender, and pass on to the next
@@ -711,7 +711,7 @@ func prioritizeNodes(
     fwk framework.Framework,
     state *framework.CycleState,
     pod *v1.Pod,
-    nodes []*v1.Node,
+    nodes []*framework.NodeInfo,
 ) ([]framework.NodePluginScores, error) {
     logger := klog.FromContext(ctx)
     // If no priority configs are provided, then all nodes will have a score of one.
@@ -720,7 +720,7 @@ func prioritizeNodes(
     result := make([]framework.NodePluginScores, 0, len(nodes))
     for i := range nodes {
         result = append(result, framework.NodePluginScores{
-            Name:       nodes[i].Name,
+            Name:       nodes[i].Node().Name,
             TotalScore: 1,
         })
     }
@@ -802,7 +802,7 @@ func prioritizeNodes(
     // wait for all go routines to finish
     wg.Wait()
     for i := range nodesScores {
-        if score, ok := allNodeExtendersScores[nodes[i].Name]; ok {
+        if score, ok := allNodeExtendersScores[nodes[i].Node().Name]; ok {
             nodesScores[i].Scores = append(nodesScores[i].Scores, score.Scores...)
             nodesScores[i].TotalScore += score.TotalScore
         }

@ -113,13 +113,13 @@ func (f *fakeExtender) SupportsPreemption() bool {
return false return false
} }
func (f *fakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, extenderv1.FailedNodesMap, extenderv1.FailedNodesMap, error) { func (f *fakeExtender) Filter(pod *v1.Pod, nodes []*framework.NodeInfo) ([]*framework.NodeInfo, extenderv1.FailedNodesMap, extenderv1.FailedNodesMap, error) {
return nil, nil, nil, nil return nil, nil, nil, nil
} }
func (f *fakeExtender) Prioritize( func (f *fakeExtender) Prioritize(
_ *v1.Pod, _ *v1.Pod,
_ []*v1.Node, _ []*framework.NodeInfo,
) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error) { ) (hostPriorities *extenderv1.HostPriorityList, weight int64, err error) {
return nil, 0, nil return nil, 0, nil
} }
@ -1627,11 +1627,11 @@ func TestFindNodesThatPassExtenders(t *testing.T) {
extenders: []tf.FakeExtender{ extenders: []tf.FakeExtender{
{ {
ExtenderName: "FakeExtender1", ExtenderName: "FakeExtender1",
Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
if node.Name == "a" { if node.Node().Name == "a" {
return framework.NewStatus(framework.Success) return framework.NewStatus(framework.Success)
} }
return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Name)) return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
}}, }},
}, },
}, },
@ -1648,14 +1648,14 @@ func TestFindNodesThatPassExtenders(t *testing.T) {
extenders: []tf.FakeExtender{ extenders: []tf.FakeExtender{
{ {
ExtenderName: "FakeExtender1", ExtenderName: "FakeExtender1",
Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
if node.Name == "a" { if node.Node().Name == "a" {
return framework.NewStatus(framework.Success) return framework.NewStatus(framework.Success)
} }
if node.Name == "b" { if node.Node().Name == "b" {
return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Name)) return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
} }
return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node %q is not allowed", node.Name)) return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
}}, }},
}, },
}, },
@ -1673,14 +1673,14 @@ func TestFindNodesThatPassExtenders(t *testing.T) {
extenders: []tf.FakeExtender{ extenders: []tf.FakeExtender{
{ {
ExtenderName: "FakeExtender1", ExtenderName: "FakeExtender1",
Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
if node.Name == "a" { if node.Node().Name == "a" {
return framework.NewStatus(framework.Success) return framework.NewStatus(framework.Success)
} }
if node.Name == "b" { if node.Node().Name == "b" {
return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Name)) return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
} }
return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node %q is not allowed", node.Name)) return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
}}, }},
}, },
}, },
@ -1700,23 +1700,23 @@ func TestFindNodesThatPassExtenders(t *testing.T) {
extenders: []tf.FakeExtender{ extenders: []tf.FakeExtender{
{ {
ExtenderName: "FakeExtender1", ExtenderName: "FakeExtender1",
Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
if node.Name == "a" { if node.Node().Name == "a" {
return framework.NewStatus(framework.Success) return framework.NewStatus(framework.Success)
} }
if node.Name == "b" { if node.Node().Name == "b" {
return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Name)) return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
} }
return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node %q is not allowed", node.Name)) return framework.NewStatus(framework.UnschedulableAndUnresolvable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
}}, }},
}, },
{ {
ExtenderName: "FakeExtender1", ExtenderName: "FakeExtender1",
Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *v1.Node) *framework.Status { Predicates: []tf.FitPredicate{func(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
if node.Name == "a" { if node.Node().Name == "a" {
return framework.NewStatus(framework.Success) return framework.NewStatus(framework.Success)
} }
return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Name)) return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
}}, }},
}, },
}, },
@ -1746,7 +1746,11 @@ func TestFindNodesThatPassExtenders(t *testing.T) {
} }
pod := st.MakePod().Name("1").UID("1").Obj() pod := st.MakePod().Name("1").UID("1").Obj()
got, err := findNodesThatPassExtenders(ctx, extenders, pod, tt.nodes, tt.filteredNodesStatuses) got, err := findNodesThatPassExtenders(ctx, extenders, pod, tf.BuildNodeInfos(tt.nodes), tt.filteredNodesStatuses)
nodes := make([]*v1.Node, len(got))
for i := 0; i < len(got); i++ {
nodes[i] = got[i].Node()
}
if tt.expectsErr { if tt.expectsErr {
if err == nil { if err == nil {
t.Error("Unexpected non-error") t.Error("Unexpected non-error")
@ -1755,7 +1759,7 @@ func TestFindNodesThatPassExtenders(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("Unexpected error: %v", err) t.Errorf("Unexpected error: %v", err)
} }
if diff := cmp.Diff(tt.expectedNodes, got); diff != "" { if diff := cmp.Diff(tt.expectedNodes, nodes); diff != "" {
t.Errorf("filtered nodes (-want,+got):\n%s", diff) t.Errorf("filtered nodes (-want,+got):\n%s", diff)
} }
if diff := cmp.Diff(tt.expectedStatuses, tt.filteredNodesStatuses, cmpOpts...); diff != "" { if diff := cmp.Diff(tt.expectedStatuses, tt.filteredNodesStatuses, cmpOpts...); diff != "" {
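
Because findNodesThatPassExtenders now returns []*framework.NodeInfo while expectedNodes stays []*v1.Node, the test unwraps the inner nodes before diffing. A generic form of that inline loop (the helper name is hypothetical):

func nodesOf(infos []*framework.NodeInfo) []*v1.Node {
	// Extract the wrapped *v1.Node from each NodeInfo so results can be
	// compared against plain node fixtures.
	nodes := make([]*v1.Node, len(infos))
	for i, info := range infos {
		nodes[i] = info.Node()
	}
	return nodes
}
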
@ -2702,8 +2706,8 @@ func TestZeroRequest(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("error filtering nodes: %+v", err) t.Fatalf("error filtering nodes: %+v", err)
} }
fwk.RunPreScorePlugins(ctx, state, test.pod, test.nodes) fwk.RunPreScorePlugins(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
list, err := prioritizeNodes(ctx, nil, fwk, state, test.pod, test.nodes) list, err := prioritizeNodes(ctx, nil, fwk, state, test.pod, tf.BuildNodeInfos(test.nodes))
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
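
TestZeroRequest builds the NodeInfo slice twice in a row; converting once and reusing the slice is equivalent and avoids the duplicate allocation:

nodeInfos := tf.BuildNodeInfos(test.nodes)
fwk.RunPreScorePlugins(ctx, state, test.pod, nodeInfos)
list, err := prioritizeNodes(ctx, nil, fwk, state, test.pod, nodeInfos)
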
@ -3099,7 +3103,7 @@ func Test_prioritizeNodes(t *testing.T) {
for ii := range test.extenders { for ii := range test.extenders {
extenders = append(extenders, &test.extenders[ii]) extenders = append(extenders, &test.extenders[ii])
} }
nodesscores, err := prioritizeNodes(ctx, extenders, fwk, state, test.pod, test.nodes) nodesscores, err := prioritizeNodes(ctx, extenders, fwk, state, test.pod, tf.BuildNodeInfos(test.nodes))
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }

View File

@ -32,10 +32,10 @@ import (
) )
// FitPredicate is a function type which is used in the fake extender. // FitPredicate is a function type which is used in the fake extender.
type FitPredicate func(pod *v1.Pod, node *v1.Node) *framework.Status type FitPredicate func(pod *v1.Pod, node *framework.NodeInfo) *framework.Status
// PriorityFunc is a function type which is used in the fake extender. // PriorityFunc is a function type which is used in the fake extender.
type PriorityFunc func(pod *v1.Pod, nodes []*v1.Node) (*framework.NodeScoreList, error) type PriorityFunc func(pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.NodeScoreList, error)
// PriorityConfig is used in the fake extender to perform the Prioritize function. // PriorityConfig is used in the fake extender to perform the Prioritize function.
type PriorityConfig struct { type PriorityConfig struct {
@ -44,67 +44,67 @@ type PriorityConfig struct {
} }
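
Under the new signatures, predicates and prioritizers read node data through NodeInfo.Node(). A minimal custom predicate sketch in the same style as the fakes below; the zone label value is illustrative:

// ZonePredicateExtender succeeds only for nodes in the illustrative zone
// "zone-a"; all other nodes are reported as unschedulable.
func ZonePredicateExtender(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
	if node.Node().Labels["topology.kubernetes.io/zone"] == "zone-a" {
		return framework.NewStatus(framework.Success)
	}
	return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is outside zone-a", node.Node().Name))
}
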
// ErrorPredicateExtender implements a FitPredicate that always returns an error status. // ErrorPredicateExtender implements a FitPredicate that always returns an error status.
func ErrorPredicateExtender(pod *v1.Pod, node *v1.Node) *framework.Status { func ErrorPredicateExtender(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Error, "some error") return framework.NewStatus(framework.Error, "some error")
} }
// FalsePredicateExtender implements a FitPredicate that always returns an unschedulable status. // FalsePredicateExtender implements a FitPredicate that always returns an unschedulable status.
func FalsePredicateExtender(pod *v1.Pod, node *v1.Node) *framework.Status { func FalsePredicateExtender(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("pod is unschedulable on the node %q", node.Name)) return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("pod is unschedulable on the node %q", node.Node().Name))
} }
// TruePredicateExtender implements a FitPredicate that always returns a success status. // TruePredicateExtender implements a FitPredicate that always returns a success status.
func TruePredicateExtender(pod *v1.Pod, node *v1.Node) *framework.Status { func TruePredicateExtender(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Success) return framework.NewStatus(framework.Success)
} }
// Node1PredicateExtender implements a FitPredicate that returns a success status // Node1PredicateExtender implements a FitPredicate that returns a success status
// when the given node's name is "node1"; otherwise an unschedulable status. // when the given node's name is "node1"; otherwise an unschedulable status.
func Node1PredicateExtender(pod *v1.Pod, node *v1.Node) *framework.Status { func Node1PredicateExtender(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
if node.Name == "node1" { if node.Node().Name == "node1" {
return framework.NewStatus(framework.Success) return framework.NewStatus(framework.Success)
} }
return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Name)) return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
} }
// Node2PredicateExtender implements a FitPredicate that returns a success status // Node2PredicateExtender implements a FitPredicate that returns a success status
// when the given node's name is "node2"; otherwise an unschedulable status. // when the given node's name is "node2"; otherwise an unschedulable status.
func Node2PredicateExtender(pod *v1.Pod, node *v1.Node) *framework.Status { func Node2PredicateExtender(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
if node.Name == "node2" { if node.Node().Name == "node2" {
return framework.NewStatus(framework.Success) return framework.NewStatus(framework.Success)
} }
return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Name)) return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node %q is not allowed", node.Node().Name))
} }
// ErrorPrioritizerExtender implements a PriorityFunc that always returns an error. // ErrorPrioritizerExtender implements a PriorityFunc that always returns an error.
func ErrorPrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*framework.NodeScoreList, error) { func ErrorPrioritizerExtender(pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.NodeScoreList, error) {
return &framework.NodeScoreList{}, fmt.Errorf("some error") return &framework.NodeScoreList{}, fmt.Errorf("some error")
} }
// Node1PrioritizerExtender implements a PriorityFunc that gives score 10 // Node1PrioritizerExtender implements a PriorityFunc that gives score 10
// if the given node's name is "node1"; otherwise score 1. // if the given node's name is "node1"; otherwise score 1.
func Node1PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*framework.NodeScoreList, error) { func Node1PrioritizerExtender(pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.NodeScoreList, error) {
result := framework.NodeScoreList{} result := framework.NodeScoreList{}
for _, node := range nodes { for _, node := range nodes {
score := 1 score := 1
if node.Name == "node1" { if node.Node().Name == "node1" {
score = 10 score = 10
} }
result = append(result, framework.NodeScore{Name: node.Name, Score: int64(score)}) result = append(result, framework.NodeScore{Name: node.Node().Name, Score: int64(score)})
} }
return &result, nil return &result, nil
} }
// Node2PrioritizerExtender implements a PriorityFunc that gives score 10 // Node2PrioritizerExtender implements a PriorityFunc that gives score 10
// if the given node's name is "node2"; otherwise score 1. // if the given node's name is "node2"; otherwise score 1.
func Node2PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*framework.NodeScoreList, error) { func Node2PrioritizerExtender(pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.NodeScoreList, error) {
result := framework.NodeScoreList{} result := framework.NodeScoreList{}
for _, node := range nodes { for _, node := range nodes {
score := 1 score := 1
if node.Name == "node2" { if node.Node().Name == "node2" {
score = 10 score = 10
} }
result = append(result, framework.NodeScore{Name: node.Name, Score: int64(score)}) result = append(result, framework.NodeScore{Name: node.Node().Name, Score: int64(score)})
} }
return &result, nil return &result, nil
} }
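
One payoff of receiving NodeInfo instead of *v1.Node is access to scheduling state such as the pods already recorded on each node. A hedged prioritizer sketch exploiting that; the scoring rule is illustrative:

// PodCountPrioritizerExtender scores each node by the number of pods the
// framework has already tracked on it; a bare *v1.Node could not expose this.
func PodCountPrioritizerExtender(pod *v1.Pod, nodes []*framework.NodeInfo) (*framework.NodeScoreList, error) {
	result := framework.NodeScoreList{}
	for _, node := range nodes {
		result = append(result, framework.NodeScore{Name: node.Node().Name, Score: int64(len(node.Pods))})
	}
	return &result, nil
}
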
@ -146,7 +146,7 @@ type FakeExtender struct {
Prioritizers []PriorityConfig Prioritizers []PriorityConfig
Weight int64 Weight int64
NodeCacheCapable bool NodeCacheCapable bool
FilteredNodes []*v1.Node FilteredNodes []*framework.NodeInfo
UnInterested bool UnInterested bool
Ignorable bool Ignorable bool
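
A configuration sketch wiring the canned callbacks into this fixture; field values are illustrative, and PriorityConfig is assumed to pair a Function with a Weight:

extender := &tf.FakeExtender{
	ExtenderName: "FakeExtender1",
	Predicates:   []tf.FitPredicate{tf.Node1PredicateExtender},
	Prioritizers: []tf.PriorityConfig{{Function: tf.Node1PrioritizerExtender, Weight: 1}},
	Weight:       1,
}
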
@ -198,7 +198,7 @@ func (f *FakeExtender) ProcessPreemption(
for nodeName, victims := range nodeNameToVictimsCopy { for nodeName, victims := range nodeNameToVictimsCopy {
// Try to do preemption on the extender side. // Try to do preemption on the extender side.
nodeInfo, _ := nodeInfos.Get(nodeName) nodeInfo, _ := nodeInfos.Get(nodeName)
extenderVictimPods, extenderPDBViolations, fits, err := f.selectVictimsOnNodeByExtender(logger, pod, nodeInfo.Node()) extenderVictimPods, extenderPDBViolations, fits, err := f.selectVictimsOnNodeByExtender(logger, pod, nodeInfo)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -220,7 +220,7 @@ func (f *FakeExtender) ProcessPreemption(
// 1. More victim pods (if any) amended by the preemption phase of the extender. // 1. More victim pods (if any) amended by the preemption phase of the extender.
// 2. Number of violating victims (used to calculate PDB). // 2. Number of violating victims (used to calculate PDB).
// 3. Fits or not after the preemption phase on the extender's side. // 3. Fits or not after the preemption phase on the extender's side.
func (f *FakeExtender) selectVictimsOnNodeByExtender(logger klog.Logger, pod *v1.Pod, node *v1.Node) ([]*v1.Pod, int, bool, error) { func (f *FakeExtender) selectVictimsOnNodeByExtender(logger klog.Logger, pod *v1.Pod, node *framework.NodeInfo) ([]*v1.Pod, int, bool, error) {
// If an extender supports preemption but has no cached node info, let's run the filter to make sure // If an extender supports preemption but has no cached node info, let's run the filter to make sure
// the default scheduler's decision still stands with the given pod and node. // the default scheduler's decision still stands with the given pod and node.
if !f.NodeCacheCapable { if !f.NodeCacheCapable {
@ -236,7 +236,7 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(logger klog.Logger, pod *v1
// Otherwise, as an extender supports preemption and has cached node info, we will assume cachedNodeNameToInfo is available // Otherwise, as an extender supports preemption and has cached node info, we will assume cachedNodeNameToInfo is available
// and get the cached node info by the given node name. // and get the cached node info by the given node name.
nodeInfoCopy := f.CachedNodeNameToInfo[node.GetName()].Snapshot() nodeInfoCopy := f.CachedNodeNameToInfo[node.Node().Name].Snapshot()
var potentialVictims []*v1.Pod var potentialVictims []*v1.Pod
@ -261,7 +261,7 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(logger klog.Logger, pod *v1
// If the new pod does not fit after removing all the lower priority pods, // If the new pod does not fit after removing all the lower priority pods,
// we are almost done and this node is not suitable for preemption. // we are almost done and this node is not suitable for preemption.
status := f.runPredicate(pod, nodeInfoCopy.Node()) status := f.runPredicate(pod, nodeInfoCopy)
if status.IsSuccess() { if status.IsSuccess() {
// pass // pass
} else if status.IsRejected() { } else if status.IsRejected() {
@ -279,7 +279,7 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(logger klog.Logger, pod *v1
reprievePod := func(p *v1.Pod) bool { reprievePod := func(p *v1.Pod) bool {
addPod(p) addPod(p)
status := f.runPredicate(pod, nodeInfoCopy.Node()) status := f.runPredicate(pod, nodeInfoCopy)
if !status.IsSuccess() { if !status.IsSuccess() {
if err := removePod(p); err != nil { if err := removePod(p); err != nil {
return false return false
@ -300,7 +300,7 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(logger klog.Logger, pod *v1
// runPredicate runs the extender's predicates one by one for the given pod and node. // runPredicate runs the extender's predicates one by one for the given pod and node.
// Returns: fits or not. // Returns: fits or not.
func (f *FakeExtender) runPredicate(pod *v1.Pod, node *v1.Node) *framework.Status { func (f *FakeExtender) runPredicate(pod *v1.Pod, node *framework.NodeInfo) *framework.Status {
for _, predicate := range f.Predicates { for _, predicate := range f.Predicates {
status := predicate(pod, node) status := predicate(pod, node)
if !status.IsSuccess() { if !status.IsSuccess() {
@ -311,8 +311,8 @@ func (f *FakeExtender) runPredicate(pod *v1.Pod, node *v1.Node) *framework.Statu
} }
// Filter implements the extender Filter function. // Filter implements the extender Filter function.
func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, extenderv1.FailedNodesMap, extenderv1.FailedNodesMap, error) { func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*framework.NodeInfo) ([]*framework.NodeInfo, extenderv1.FailedNodesMap, extenderv1.FailedNodesMap, error) {
var filtered []*v1.Node var filtered []*framework.NodeInfo
failedNodesMap := extenderv1.FailedNodesMap{} failedNodesMap := extenderv1.FailedNodesMap{}
failedAndUnresolvableMap := extenderv1.FailedNodesMap{} failedAndUnresolvableMap := extenderv1.FailedNodesMap{}
for _, node := range nodes { for _, node := range nodes {
@ -320,9 +320,9 @@ func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, extend
if status.IsSuccess() { if status.IsSuccess() {
filtered = append(filtered, node) filtered = append(filtered, node)
} else if status.Code() == framework.Unschedulable { } else if status.Code() == framework.Unschedulable {
failedNodesMap[node.Name] = fmt.Sprintf("FakeExtender: node %q failed", node.Name) failedNodesMap[node.Node().Name] = fmt.Sprintf("FakeExtender: node %q failed", node.Node().Name)
} else if status.Code() == framework.UnschedulableAndUnresolvable { } else if status.Code() == framework.UnschedulableAndUnresolvable {
failedAndUnresolvableMap[node.Name] = fmt.Sprintf("FakeExtender: node %q failed and unresolvable", node.Name) failedAndUnresolvableMap[node.Node().Name] = fmt.Sprintf("FakeExtender: node %q failed and unresolvable", node.Node().Name)
} else { } else {
return nil, nil, nil, status.AsError() return nil, nil, nil, status.AsError()
} }
@ -336,7 +336,7 @@ func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, extend
} }
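
A call sketch for the fake Filter inside a test; the variable names are illustrative:

filtered, failed, unresolvable, err := extender.Filter(pod, tf.BuildNodeInfos(nodes))
if err != nil {
	t.Fatalf("unexpected extender error: %v", err)
}
t.Logf("passed=%d failed=%d unresolvable=%d", len(filtered), len(failed), len(unresolvable))
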
// Prioritize implements the extender Prioritize function. // Prioritize implements the extender Prioritize function.
func (f *FakeExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*extenderv1.HostPriorityList, int64, error) { func (f *FakeExtender) Prioritize(pod *v1.Pod, nodes []*framework.NodeInfo) (*extenderv1.HostPriorityList, int64, error) {
result := extenderv1.HostPriorityList{} result := extenderv1.HostPriorityList{}
combinedScores := map[string]int64{} combinedScores := map[string]int64{}
for _, prioritizer := range f.Prioritizers { for _, prioritizer := range f.Prioritizers {
@ -363,7 +363,7 @@ func (f *FakeExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*extenderv1.Ho
func (f *FakeExtender) Bind(binding *v1.Binding) error { func (f *FakeExtender) Bind(binding *v1.Binding) error {
if len(f.FilteredNodes) != 0 { if len(f.FilteredNodes) != 0 {
for _, node := range f.FilteredNodes { for _, node := range f.FilteredNodes {
if node.Name == binding.Target.Name { if node.Node().Name == binding.Target.Name {
f.FilteredNodes = nil f.FilteredNodes = nil
return nil return nil
} }

View File

@ -266,7 +266,7 @@ func (pl *FakePreScoreAndScorePlugin) ScoreExtensions() framework.ScoreExtension
return nil return nil
} }
func (pl *FakePreScoreAndScorePlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status { func (pl *FakePreScoreAndScorePlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
return pl.preScoreStatus return pl.preScoreStatus
} }
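
PreScore now receives the NodeInfo list directly, so a plugin can consult per-node state before scoring. A minimal sketch of a hypothetical plugin that skips scoring when every candidate node is empty, assuming the framework honors a Skip status from PreScore:

type idleSkipPlugin struct{}

func (pl *idleSkipPlugin) Name() string { return "IdleSkip" }

func (pl *idleSkipPlugin) PreScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status {
	for _, ni := range nodes {
		if len(ni.Pods) > 0 {
			return nil // at least one node has pods; score normally
		}
	}
	// every candidate is empty, so relative scoring adds nothing
	return framework.NewStatus(framework.Skip)
}
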

View File

@ -19,6 +19,7 @@ package framework
import ( import (
"context" "context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
kubeschedulerconfigv1 "k8s.io/kube-scheduler/config/v1" kubeschedulerconfigv1 "k8s.io/kube-scheduler/config/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
@ -145,3 +146,13 @@ func getPluginSetByExtension(plugins *schedulerapi.Plugins, extension string) *s
return nil return nil
} }
} }
// BuildNodeInfos builds a NodeInfo slice from a v1.Node slice
func BuildNodeInfos(nodes []*v1.Node) []*framework.NodeInfo {
res := make([]*framework.NodeInfo, len(nodes))
for i := 0; i < len(nodes); i++ {
res[i] = framework.NewNodeInfo()
res[i].SetNode(nodes[i])
}
return res
}
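
A usage sketch for the helper with the repo's node fixtures; node names are illustrative:

nodes := []*v1.Node{
	st.MakeNode().Name("node1").Obj(),
	st.MakeNode().Name("node2").Obj(),
}
nodeInfos := tf.BuildNodeInfos(nodes)
// nodeInfos can now be passed to RunPreScorePlugins, RunScorePlugins, or an
// extender's Filter.
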

View File

@ -428,7 +428,7 @@ func (*PreScorePlugin) Name() string {
} }
// PreScore is a test function. // PreScore is a test function.
func (pfp *PreScorePlugin) PreScore(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, _ []*v1.Node) *framework.Status { func (pfp *PreScorePlugin) PreScore(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, _ []*framework.NodeInfo) *framework.Status {
pfp.numPreScoreCalled++ pfp.numPreScoreCalled++
if pfp.failPreScore { if pfp.failPreScore {
return framework.NewStatus(framework.Error, fmt.Sprintf("injecting failure for pod %v", pod.Name)) return framework.NewStatus(framework.Error, fmt.Sprintf("injecting failure for pod %v", pod.Name))
@ -542,11 +542,7 @@ func (pp *PostFilterPlugin) PostFilter(ctx context.Context, state *framework.Cyc
for _, nodeInfo := range nodeInfos { for _, nodeInfo := range nodeInfos {
pp.fh.RunFilterPlugins(ctx, state, pod, nodeInfo) pp.fh.RunFilterPlugins(ctx, state, pod, nodeInfo)
} }
var nodes []*v1.Node pp.fh.RunScorePlugins(ctx, state, pod, nodeInfos)
for _, nodeInfo := range nodeInfos {
nodes = append(nodes, nodeInfo.Node())
}
pp.fh.RunScorePlugins(ctx, state, pod, nodes)
if pp.failPostFilter { if pp.failPostFilter {
return nil, framework.NewStatus(framework.Error, fmt.Sprintf("injecting failure for pod %v", pod.Name)) return nil, framework.NewStatus(framework.Error, fmt.Sprintf("injecting failure for pod %v", pod.Name))