Merge pull request #86498 from Huang-Wei/deprecate-failedPredicateMap

Cleanup failedPredicateMap from generic_scheduler.go: remove the predicate-era FailedPredicateMap type and the FitError.FailedPredicates field, and record every filter failure, including scheduler-extender failures, in framework.NodeToStatusMap instead. A new Status.AppendReason method lets multiple failure messages accumulate on a single node's status.
commit ff975e865d
Kubernetes Prow Robot, 2019-12-21 08:41:33 -08:00, committed by GitHub
4 changed files with 89 additions and 138 deletions

pkg/scheduler/core/generic_scheduler.go

@@ -66,15 +66,10 @@ const (
 	minFeasibleNodesPercentageToFind = 5
 )
 
-// FailedPredicateMap declares a map[string][]algorithm.PredicateFailureReason type.
-type FailedPredicateMap map[string][]predicates.PredicateFailureReason
-
 // FitError describes a fit error of a pod.
 type FitError struct {
-	Pod         *v1.Pod
-	NumAllNodes int
-	// TODO(Huang-Wei): remove 'FailedPredicates'
-	FailedPredicates      FailedPredicateMap
+	Pod                   *v1.Pod
+	NumAllNodes           int
 	FilteredNodesStatuses framework.NodeToStatusMap
 }
@@ -89,12 +84,6 @@ const (
 // Error returns detailed information of why the pod failed to fit on each node
 func (f *FitError) Error() string {
 	reasons := make(map[string]int)
-	for _, predicates := range f.FailedPredicates {
-		for _, pred := range predicates {
-			reasons[pred.GetReason()]++
-		}
-	}
-
 	for _, status := range f.FilteredNodesStatuses {
 		for _, reason := range status.Reasons() {
 			reasons[reason]++
@@ -102,7 +91,7 @@ func (f *FitError) Error() string {
 	}
 
 	sortReasonsHistogram := func() []string {
-		reasonStrings := []string{}
+		var reasonStrings []string
 		for k, v := range reasons {
 			reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
 		}
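
For reference, a minimal self-contained sketch of what FitError.Error() computes after this change, using stand-in types rather than the scheduler's own: reasons are counted across FilteredNodesStatuses and rendered as a sorted histogram in the familiar "0/N nodes are available" message (the same format TestHumanReadableFitError exercises below).

package main

import (
	"fmt"
	"sort"
	"strings"
)

// nodeToStatusMap stands in for framework.NodeToStatusMap: each filtered-out
// node maps to the failure reasons its filter plugins (or extenders) reported.
type nodeToStatusMap map[string][]string

// fitErrorMessage mirrors the aggregation in FitError.Error() after this PR:
// count identical reasons across nodes, then render a sorted histogram.
func fitErrorMessage(numAllNodes int, statuses nodeToStatusMap) string {
	reasons := make(map[string]int)
	for _, nodeReasons := range statuses {
		for _, reason := range nodeReasons {
			reasons[reason]++
		}
	}
	var reasonStrings []string
	for k, v := range reasons {
		reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
	}
	sort.Strings(reasonStrings)
	return fmt.Sprintf("0/%v nodes are available: %v.", numAllNodes, strings.Join(reasonStrings, ", "))
}

func main() {
	statuses := nodeToStatusMap{
		"1": {"node(s) had memory pressure"},
		"2": {"node(s) had disk pressure"},
		"3": {"node(s) had disk pressure"},
	}
	// Prints: 0/3 nodes are available: 1 node(s) had memory pressure, 2 node(s) had disk pressure.
	fmt.Println(fitErrorMessage(3, statuses))
}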
@@ -209,7 +198,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (result ScheduleResult, err error) {
 	trace.Step("Running prefilter plugins done")
 
 	startPredicateEvalTime := time.Now()
-	filteredNodes, failedPredicateMap, filteredNodesStatuses, err := g.findNodesThatFit(ctx, state, pod)
+	filteredNodes, filteredNodesStatuses, err := g.findNodesThatFit(ctx, state, pod)
 	if err != nil {
 		return result, err
 	}
@@ -225,7 +214,6 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (result ScheduleResult, err error) {
 		return result, &FitError{
 			Pod:                   pod,
 			NumAllNodes:           len(g.nodeInfoSnapshot.NodeInfoList),
-			FailedPredicates:      failedPredicateMap,
 			FilteredNodesStatuses: filteredNodesStatuses,
 		}
 	}
@@ -242,7 +230,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (result ScheduleResult, err error) {
 		metrics.DeprecatedSchedulingAlgorithmPriorityEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPriorityEvalTime))
 
 		return ScheduleResult{
 			SuggestedHost:  filteredNodes[0].Name,
-			EvaluatedNodes: 1 + len(failedPredicateMap) + len(filteredNodesStatuses),
+			EvaluatedNodes: 1 + len(filteredNodesStatuses),
 			FeasibleNodes:  1,
 		}, nil
 	}
@@ -263,7 +251,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleState, pod *v1.Pod) (result ScheduleResult, err error) {
 
 	return ScheduleResult{
 		SuggestedHost:  host,
-		EvaluatedNodes: len(filteredNodes) + len(failedPredicateMap) + len(filteredNodesStatuses),
+		EvaluatedNodes: len(filteredNodes) + len(filteredNodesStatuses),
 		FeasibleNodes:  len(filteredNodes),
 	}, err
 }
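
Dropping the len(failedPredicateMap) term is safe because, after this PR, every node rejected during filtering gets an entry in filteredNodesStatuses (extender failures included, per the hunk further down). A toy check of the accounting, with stand-in data rather than scheduler types:

package main

import "fmt"

func main() {
	// One feasible node; two nodes rejected by filters or extenders, each
	// recorded in the status map keyed by node name.
	filteredNodes := []string{"machine1"}
	filteredNodesStatuses := map[string]string{
		"machine2": "node(s) had disk pressure",
		"machine3": "node(s) didn't match node selector",
	}
	// Every examined node is either feasible or has a status entry, so the
	// two terms alone account for all three nodes.
	evaluatedNodes := len(filteredNodes) + len(filteredNodesStatuses)
	fmt.Println(evaluatedNodes) // 3
}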
@@ -470,10 +458,8 @@ func (g *genericScheduler) numFeasibleNodesToFind(numAllNodes int32) (numNodes int32) {
 // Filters the nodes to find the ones that fit based on the given predicate functions
 // Each node is passed through the predicate functions to determine if it is a fit
-// TODO(Huang-Wei): remove 'FailedPredicateMap' from the return parameters.
-func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, FailedPredicateMap, framework.NodeToStatusMap, error) {
+func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, framework.NodeToStatusMap, error) {
 	var filtered []*v1.Node
-	failedPredicateMap := FailedPredicateMap{}
 	filteredNodesStatuses := framework.NodeToStatusMap{}
 
 	if !g.framework.HasFilterPlugins() {
@@ -496,7 +482,7 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, FailedPredicateMap, framework.NodeToStatusMap, error) {
 			// We check the nodes starting from where we left off in the previous scheduling cycle,
 			// this is to make sure all nodes have the same chance of being examined across pods.
 			nodeInfo := g.nodeInfoSnapshot.NodeInfoList[(g.nextStartNodeIndex+i)%allNodes]
-			fits, _, status, err := g.podFitsOnNode(ctx, state, pod, nodeInfo)
+			fits, status, err := g.podFitsOnNode(ctx, state, pod, nodeInfo)
 			if err != nil {
 				errCh.SendErrorWithCancel(err, cancel)
 				return
@@ -521,12 +507,12 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, FailedPredicateMap, framework.NodeToStatusMap, error) {
 		// Stops searching for more nodes once the configured number of feasible nodes
 		// are found.
 		workqueue.ParallelizeUntil(ctx, 16, allNodes, checkNode)
-		processedNodes := int(filteredLen) + len(filteredNodesStatuses) + len(failedPredicateMap)
+		processedNodes := int(filteredLen) + len(filteredNodesStatuses)
 		g.nextStartNodeIndex = (g.nextStartNodeIndex + processedNodes) % allNodes
 
 		filtered = filtered[:filteredLen]
 		if err := errCh.ReceiveError(); err != nil {
-			return []*v1.Node{}, FailedPredicateMap{}, framework.NodeToStatusMap{}, err
+			return []*v1.Node{}, framework.NodeToStatusMap{}, err
 		}
 	}
@@ -543,15 +529,15 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, FailedPredicateMap, framework.NodeToStatusMap, error) {
 				continue
 			}
 
-			return []*v1.Node{}, FailedPredicateMap{}, framework.NodeToStatusMap{}, err
+			return []*v1.Node{}, framework.NodeToStatusMap{}, err
 		}
 
-		// TODO(Huang-Wei): refactor this to fill 'filteredNodesStatuses' instead of 'failedPredicateMap'.
 		for failedNodeName, failedMsg := range failedMap {
-			if _, found := failedPredicateMap[failedNodeName]; !found {
-				failedPredicateMap[failedNodeName] = []predicates.PredicateFailureReason{}
+			if _, found := filteredNodesStatuses[failedNodeName]; !found {
+				filteredNodesStatuses[failedNodeName] = framework.NewStatus(framework.Unschedulable, failedMsg)
+			} else {
+				filteredNodesStatuses[failedNodeName].AppendReason(failedMsg)
 			}
-			failedPredicateMap[failedNodeName] = append(failedPredicateMap[failedNodeName], predicates.NewPredicateFailureError(extender.Name(), failedMsg))
 		}
 		filtered = filteredList
 		if len(filtered) == 0 {
@@ -559,7 +545,7 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, FailedPredicateMap, framework.NodeToStatusMap, error) {
 			}
 		}
 	}
-	return filtered, failedPredicateMap, filteredNodesStatuses, nil
+	return filtered, filteredNodesStatuses, nil
 }
 
 // addNominatedPods adds pods with equal or greater priority which are nominated
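
The extender hunk above changes the bookkeeping for extender failures: instead of appending PredicateFailureReasons per node, the first failure for a node creates an Unschedulable status and later failures append to it via the new AppendReason. A hedged sketch with simplified stand-in types (the real NewStatus also takes a status code, and the extender names here are invented for illustration):

package main

import "fmt"

// status is a stand-in for framework.Status, carrying only what the sketch needs.
type status struct {
	reasons []string
}

func newStatus(reason string) *status { return &status{reasons: []string{reason}} }

// AppendReason mirrors the method added to framework.Status in this PR.
func (s *status) AppendReason(reason string) { s.reasons = append(s.reasons, reason) }

func main() {
	filteredNodesStatuses := map[string]*status{}
	// Two extenders report failures for the same node; the first creates the
	// entry, the second appends to it, exactly as in the loop above.
	for _, failedMsg := range []string{"extender-a: out of GPUs", "extender-b: quota exceeded"} {
		if _, found := filteredNodesStatuses["machine1"]; !found {
			filteredNodesStatuses["machine1"] = newStatus(failedMsg)
		} else {
			filteredNodesStatuses["machine1"].AppendReason(failedMsg)
		}
	}
	fmt.Println(filteredNodesStatuses["machine1"].reasons)
	// [extender-a: out of GPUs extender-b: quota exceeded]
}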
@@ -606,8 +592,7 @@ func (g *genericScheduler) podFitsOnNode(
 	state *framework.CycleState,
 	pod *v1.Pod,
 	info *schedulernodeinfo.NodeInfo,
-) (bool, []predicates.PredicateFailureReason, *framework.Status, error) {
-	var failedPredicates []predicates.PredicateFailureReason
+) (bool, *framework.Status, error) {
 	var status *framework.Status
 
 	podsAdded := false
@@ -636,19 +621,19 @@ func (g *genericScheduler) podFitsOnNode(
 			var err error
 			podsAdded, stateToUse, nodeInfoToUse, err = g.addNominatedPods(ctx, pod, state, info)
 			if err != nil {
-				return false, []predicates.PredicateFailureReason{}, nil, err
+				return false, nil, err
 			}
-		} else if !podsAdded || len(failedPredicates) != 0 || !status.IsSuccess() {
+		} else if !podsAdded || !status.IsSuccess() {
 			break
 		}
 
 		status = g.framework.RunFilterPlugins(ctx, stateToUse, pod, nodeInfoToUse)
 		if !status.IsSuccess() && !status.IsUnschedulable() {
-			return false, failedPredicates, status, status.AsError()
+			return false, status, status.AsError()
 		}
 	}
 
-	return len(failedPredicates) == 0 && status.IsSuccess(), failedPredicates, status, nil
+	return status.IsSuccess(), status, nil
 }
 
 // prioritizeNodes prioritizes the nodes by running the score plugins,
@@ -1011,7 +996,7 @@ func (g *genericScheduler) selectVictimsOnNode(
 	// inter-pod affinity to one or more victims, but we have decided not to
 	// support this case for performance reasons. Having affinity to lower
 	// priority pods is not a recommended configuration anyway.
-	if fits, _, _, err := g.podFitsOnNode(ctx, state, pod, nodeInfo); !fits {
+	if fits, _, err := g.podFitsOnNode(ctx, state, pod, nodeInfo); !fits {
 		if err != nil {
 			klog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err)
 		}
@@ -1029,7 +1014,7 @@ func (g *genericScheduler) selectVictimsOnNode(
 		if err := addPod(p); err != nil {
 			return false, err
 		}
-		fits, _, _, _ := g.podFitsOnNode(ctx, state, pod, nodeInfo)
+		fits, _, _ := g.podFitsOnNode(ctx, state, pod, nodeInfo)
 		if !fits {
 			if err := removePod(p); err != nil {
 				return false, err
@@ -1060,22 +1045,15 @@
 // nodesWherePreemptionMightHelp returns a list of nodes with failed predicates
 // that may be satisfied by removing pods from the node.
 func nodesWherePreemptionMightHelp(nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, fitErr *FitError) []*v1.Node {
-	potentialNodes := []*v1.Node{}
+	var potentialNodes []*v1.Node
 	for name, node := range nodeNameToInfo {
 		// We rely on the status by each plugin - 'Unschedulable' or 'UnschedulableAndUnresolvable'
 		// to determine whether preemption may help or not on the node.
 		if fitErr.FilteredNodesStatuses[name].Code() == framework.UnschedulableAndUnresolvable {
 			continue
 		}
-		failedPredicates := fitErr.FailedPredicates[name]
-		// If we assume that scheduler looks at all nodes and populates the failedPredicateMap
-		// (which is the case today), the !found case should never happen, but we'd prefer
-		// to rely less on such assumptions in the code when checking does not impose
-		// significant overhead.
-		// Also, we currently assume all failures returned by extender as resolvable.
-		if !predicates.UnresolvablePredicateExists(failedPredicates) {
-			klog.V(3).Infof("Node %v is a potential node for preemption.", name)
-			potentialNodes = append(potentialNodes, node.Node())
-		}
+		klog.V(3).Infof("Node %v is a potential node for preemption.", name)
+		potentialNodes = append(potentialNodes, node.Node())
 	}
 	return potentialNodes
 }
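
With FailedPredicates gone, preemption-candidate selection keys off the status code alone. A minimal sketch of the resulting rule, with stand-in types: a node is skipped only when its recorded status is UnschedulableAndUnresolvable, and a node with no recorded status at all also remains a candidate, which is why "machine4" keeps appearing in the expectations of the test table below.

package main

import "fmt"

type code int

const (
	unschedulable code = iota
	unschedulableAndUnresolvable
)

// candidates applies the selection rule above: skip a node only when its
// recorded status is UnschedulableAndUnresolvable; nodes that failed
// resolvably, or never failed a filter at all, stay in the candidate list.
func candidates(allNodes []string, statuses map[string]code) []string {
	var potential []string
	for _, name := range allNodes {
		if c, ok := statuses[name]; ok && c == unschedulableAndUnresolvable {
			continue
		}
		potential = append(potential, name)
	}
	return potential
}

func main() {
	statuses := map[string]code{
		"machine1": unschedulable,
		"machine2": unschedulableAndUnresolvable,
	}
	// machine1 failed resolvably and machine3 has no status at all, so both
	// remain candidates; machine2 is excluded.
	fmt.Println(candidates([]string{"machine1", "machine2", "machine3"}, statuses))
}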

pkg/scheduler/core/generic_scheduler_test.go

@@ -387,9 +387,8 @@ func TestGenericScheduler(t *testing.T) {
 			pod:  &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 			name: "test 1",
 			wErr: &FitError{
-				Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
-				NumAllNodes:      2,
-				FailedPredicates: FailedPredicateMap{},
+				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
+				NumAllNodes: 2,
 				FilteredNodesStatuses: framework.NodeToStatusMap{
 					"machine1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
 					"machine2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
@@ -461,9 +460,8 @@ func TestGenericScheduler(t *testing.T) {
 			pod:  &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 			name: "test 7",
 			wErr: &FitError{
-				Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
-				NumAllNodes:      3,
-				FailedPredicates: FailedPredicateMap{},
+				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
+				NumAllNodes: 3,
 				FilteredNodesStatuses: framework.NodeToStatusMap{
 					"3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
 					"2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
@@ -492,9 +490,8 @@ func TestGenericScheduler(t *testing.T) {
 			nodes: []string{"1", "2"},
 			name:  "test 8",
 			wErr: &FitError{
-				Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
-				NumAllNodes:      2,
-				FailedPredicates: FailedPredicateMap{},
+				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
+				NumAllNodes: 2,
 				FilteredNodesStatuses: framework.NodeToStatusMap{
 					"1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
 					"2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
@@ -707,9 +704,8 @@ func TestGenericScheduler(t *testing.T) {
 			pod:           &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
 			expectedHosts: nil,
 			wErr: &FitError{
-				Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
-				NumAllNodes:      1,
-				FailedPredicates: FailedPredicateMap{},
+				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
+				NumAllNodes: 1,
 				FilteredNodesStatuses: framework.NodeToStatusMap{
 					"3": framework.NewStatus(framework.Unschedulable, "injecting failure for pod test-filter"),
 				},
@@ -728,9 +724,8 @@ func TestGenericScheduler(t *testing.T) {
 			pod:           &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
 			expectedHosts: nil,
 			wErr: &FitError{
-				Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
-				NumAllNodes:      1,
-				FailedPredicates: FailedPredicateMap{},
+				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
+				NumAllNodes: 1,
 				FilteredNodesStatuses: framework.NodeToStatusMap{
 					"3": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injecting failure for pod test-filter"),
 				},
@@ -852,7 +847,7 @@ func TestFindFitAllError(t *testing.T) {
 		st.RegisterFilterPlugin("MatchFilter", NewMatchFilterPlugin),
 	)
 
-	_, _, nodeToStatusMap, err := scheduler.findNodesThatFit(context.Background(), framework.NewCycleState(), &v1.Pod{})
+	_, nodeToStatusMap, err := scheduler.findNodesThatFit(context.Background(), framework.NewCycleState(), &v1.Pod{})
 
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
@@ -886,7 +881,7 @@ func TestFindFitSomeError(t *testing.T) {
 	)
 
 	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1", UID: types.UID("1")}}
-	_, _, nodeToStatusMap, err := scheduler.findNodesThatFit(context.Background(), framework.NewCycleState(), pod)
+	_, nodeToStatusMap, err := scheduler.findNodesThatFit(context.Background(), framework.NewCycleState(), pod)
 
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
@@ -969,7 +964,7 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
 			cache.UpdateNodeInfoSnapshot(scheduler.nodeInfoSnapshot)
 			queue.UpdateNominatedPodForNode(&v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("nominated")}, Spec: v1.PodSpec{Priority: &midPriority}}, "1")
 
-			_, _, _, err := scheduler.findNodesThatFit(context.Background(), framework.NewCycleState(), test.pod)
+			_, _, err := scheduler.findNodesThatFit(context.Background(), framework.NewCycleState(), test.pod)
 
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
@@ -1003,10 +998,10 @@ func TestHumanReadableFitError(t *testing.T) {
 	err := &FitError{
 		Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 		NumAllNodes: 3,
-		FailedPredicates: FailedPredicateMap{
-			"1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
-			"2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
-			"3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
+		FilteredNodesStatuses: framework.NodeToStatusMap{
+			"1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrNodeUnderMemoryPressure.GetReason()),
+			"2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
+			"3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
 		},
 	}
 	if strings.Contains(err.Error(), "0/3 nodes are available") {
@@ -1884,94 +1879,91 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 	tests := []struct {
 		name          string
-		failedPredMap FailedPredicateMap
 		nodesStatuses framework.NodeToStatusMap
 		expected      map[string]bool // set of expected node names. Value is ignored.
 	}{
 		{
 			name: "No node should be attempted",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrTaintsTolerationsNotMatch},
-				"machine4": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeLabelPresenceViolated},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeSelectorNotMatch.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
+				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrTaintsTolerationsNotMatch.GetReason()),
+				"machine4": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeLabelPresenceViolated.GetReason()),
 			},
 			expected: map[string]bool{},
 		},
 		{
 			name: "ErrPodAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnschedulable},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrPodAffinityNotMatch.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
+				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnschedulable.GetReason()),
 			},
 			expected: map[string]bool{"machine1": true, "machine4": true},
 		},
 		{
 			name: "pod with both pod affinity and anti-affinity should be tried",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrPodAffinityNotMatch.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
 			},
 			expected: map[string]bool{"machine1": true, "machine3": true, "machine4": true},
 		},
 		{
 			name: "ErrPodAffinityRulesNotMatch should not be tried as it indicates that the pod is unschedulable due to inter-pod affinity, but ErrPodAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityRulesNotMatch},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodAffinityRulesNotMatch.GetReason()),
+				"machine2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrPodAffinityNotMatch.GetReason()),
 			},
 			expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
 		},
 		{
 			name: "Mix of failed predicates works fine",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch, algorithmpredicates.ErrNodeUnderDiskPressure, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName, algorithmpredicates.ErrDiskConflict},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
-				"machine4": []algorithmpredicates.PredicateFailureReason{},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrDiskConflict.GetReason()),
+				"machine3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400).GetReason()),
 			},
 			expected: map[string]bool{"machine3": true, "machine4": true},
 		},
 		{
 			name: "Node condition errors should be considered unresolvable",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderPIDPressure},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderPIDPressure.GetReason()),
+				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderMemoryPressure.GetReason()),
 			},
 			expected: map[string]bool{"machine4": true},
 		},
 		{
 			name: "Node condition errors and ErrNodeUnknownCondition should be considered unresolvable",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeNotReady},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeNetworkUnavailable},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnknownCondition},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeNotReady.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeNetworkUnavailable.GetReason()),
+				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnknownCondition.GetReason()),
 			},
 			expected: map[string]bool{"machine4": true},
 		},
 		{
 			name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeZoneConflict},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeNodeConflict},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeBindConflict},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrVolumeZoneConflict.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrVolumeNodeConflict.GetReason()),
+				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrVolumeBindConflict.GetReason()),
 			},
 			expected: map[string]bool{"machine4": true},
 		},
 		{
 			name: "ErrTopologySpreadConstraintsNotMatch should be tried as it indicates that the pod is unschedulable due to topology spread constraints",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrTopologySpreadConstraintsNotMatch},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrTopologySpreadConstraintsNotMatch},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrTopologySpreadConstraintsNotMatch.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
+				"machine3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrTopologySpreadConstraintsNotMatch.GetReason()),
 			},
 			expected: map[string]bool{"machine1": true, "machine3": true, "machine4": true},
 		},
 		{
-			name:          "UnschedulableAndUnresolvable status should be skipped but Unschedulable should be tried",
-			failedPredMap: FailedPredicateMap{},
+			name: "UnschedulableAndUnresolvable status should be skipped but Unschedulable should be tried",
 			nodesStatuses: framework.NodeToStatusMap{
 				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
 				"machine3": framework.NewStatus(framework.Unschedulable, ""),
@@ -1979,28 +1971,11 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 			},
 			expected: map[string]bool{"machine1": true, "machine3": true},
 		},
-		{
-			name: "Failed predicates and statuses should be evaluated",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
-				"machine4": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
-			},
-			nodesStatuses: framework.NodeToStatusMap{
-				"machine1": framework.NewStatus(framework.Unschedulable, ""),
-				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
-				"machine3": framework.NewStatus(framework.Unschedulable, ""),
-				"machine4": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
-			},
-			expected: map[string]bool{"machine1": true},
-		},
 	}
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			fitErr := FitError{
-				FailedPredicates:      test.failedPredMap,
 				FilteredNodesStatuses: test.nodesStatuses,
 			}
 			nodes := nodesWherePreemptionMightHelp(nodeinfosnapshot.CreateNodeInfoMap(nil, makeNodeList(nodeNames)), &fitErr)
@@ -2494,7 +2469,7 @@ func TestFairEvaluationForNodes(t *testing.T) {
 	// Iterating over all nodes more than twice
 	for i := 0; i < 2*(numAllNodes/nodesToFind+1); i++ {
-		nodesThatFit, _, _, err := g.findNodesThatFit(context.Background(), framework.NewCycleState(), &v1.Pod{})
+		nodesThatFit, _, err := g.findNodesThatFit(context.Background(), framework.NewCycleState(), &v1.Pod{})
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}

pkg/scheduler/framework/v1alpha1/interface.go

@@ -122,6 +122,11 @@ func (s *Status) Reasons() []string {
 	return s.reasons
 }
 
+// AppendReason appends given reason to the Status.
+func (s *Status) AppendReason(reason string) {
+	s.reasons = append(s.reasons, reason)
+}
+
 // IsSuccess returns true if and only if "Status" is nil or Code is "Success".
 func (s *Status) IsSuccess() bool {
 	return s.Code() == Success
pkg/scheduler/scheduler_test.go

@@ -143,10 +143,6 @@ func podWithResources(id, desiredHost string, limits v1.ResourceList, requests v1.ResourceList) *v1.Pod {
 	return pod
 }
 
-func PredicateOne(pod *v1.Pod, meta predicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
-	return true, nil, nil
-}
-
 func PriorityOne(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
 	return framework.NodeScore{}, nil
 }
@@ -186,7 +182,6 @@ func TestSchedulerCreation(t *testing.T) {
 	testSource := "testProvider"
 	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1beta1().Events("")})
 
-	RegisterFitPredicate("PredicateOne", PredicateOne)
 	RegisterPriorityMapReduceFunction("PriorityOne", PriorityOne, nil, 1)
 	RegisterAlgorithmProvider(testSource, sets.NewString("PredicateOne"), sets.NewString("PriorityOne"))
@@ -447,9 +442,8 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
 	select {
 	case err := <-errChan:
 		expectErr := &core.FitError{
-			Pod:              secondPod,
-			NumAllNodes:      1,
-			FailedPredicates: core.FailedPredicateMap{},
+			Pod:         secondPod,
+			NumAllNodes: 1,
 			FilteredNodesStatuses: framework.NodeToStatusMap{
 				node.Name: framework.NewStatus(
 					framework.Unschedulable,
@@ -659,7 +653,6 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 	expectErr := &core.FitError{
 		Pod:                   podWithTooBigResourceRequests,
 		NumAllNodes:           len(nodes),
-		FailedPredicates:      core.FailedPredicateMap{},
 		FilteredNodesStatuses: failedNodeStatues,
 	}
 	if len(fmt.Sprint(expectErr)) > 150 {