clean unused predicate errors

Alex Wang 2020-01-10 13:06:29 +08:00
parent da9fdc933b
commit 0bab010b45
3 changed files with 25 additions and 71 deletions

View File

@@ -37,21 +37,8 @@ var (
 	ErrPodNotMatchHostName = NewPredicateFailureError("HostName", "node(s) didn't match the requested hostname")
 	// ErrPodNotFitsHostPorts is used for PodFitsHostPorts predicate error.
 	ErrPodNotFitsHostPorts = NewPredicateFailureError("PodFitsHostPorts", "node(s) didn't have free ports for the requested pod ports")
-	// ErrNodeUnderMemoryPressure is used for NodeUnderMemoryPressure predicate error.
-	ErrNodeUnderMemoryPressure = NewPredicateFailureError("NodeUnderMemoryPressure", "node(s) had memory pressure")
-	// ErrNodeUnderDiskPressure is used for NodeUnderDiskPressure predicate error.
-	ErrNodeUnderDiskPressure = NewPredicateFailureError("NodeUnderDiskPressure", "node(s) had disk pressure")
-	// ErrNodeUnderPIDPressure is used for NodeUnderPIDPressure predicate error.
-	ErrNodeUnderPIDPressure = NewPredicateFailureError("NodeUnderPIDPressure", "node(s) had pid pressure")
-	// ErrNodeNotReady is used for NodeNotReady predicate error.
-	ErrNodeNotReady = NewPredicateFailureError("NodeNotReady", "node(s) were not ready")
-	// ErrNodeNetworkUnavailable is used for NodeNetworkUnavailable predicate error.
-	ErrNodeNetworkUnavailable = NewPredicateFailureError("NodeNetworkUnavailable", "node(s) had unavailable network")
 	// ErrNodeUnknownCondition is used for NodeUnknownCondition predicate error.
 	ErrNodeUnknownCondition = NewPredicateFailureError("NodeUnknownCondition", "node(s) had unknown conditions")
-	// ErrFakePredicate is used for test only. The fake predicates returning false also returns error
-	// as ErrFakePredicate.
-	ErrFakePredicate = NewPredicateFailureError("FakePredicateError", "Nodes failed the fake predicate")
 )
 
 var unresolvablePredicateFailureErrors = map[PredicateFailureReason]struct{}{
@@ -59,13 +46,8 @@ var unresolvablePredicateFailureErrors = map[PredicateFailureReason]struct{}{
 	ErrPodNotMatchHostName: {},
 	// Node conditions won't change when scheduler simulates removal of preemption victims.
 	// So, it is pointless to try nodes that have not been able to host the pod due to node
-	// conditions. These include ErrNodeNotReady, ErrNodeUnderPIDPressure, ErrNodeUnderMemoryPressure, ....
-	ErrNodeNotReady:            {},
-	ErrNodeNetworkUnavailable:  {},
-	ErrNodeUnderDiskPressure:   {},
-	ErrNodeUnderPIDPressure:    {},
-	ErrNodeUnderMemoryPressure: {},
-	ErrNodeUnknownCondition:    {},
+	// conditions.
+	ErrNodeUnknownCondition: {},
 }
 
 // UnresolvablePredicateExists checks if there is at least one unresolvable predicate failure reason.

View File

@@ -27,7 +27,7 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -66,6 +66,8 @@ var (
 	errPrioritize = fmt.Errorf("priority map encounters an error")
 )
 
+const ErrReasonFake = "Nodes failed the fake predicate"
+
 type trueFilterPlugin struct{}
 
 // Name returns name of the plugin.
@@ -92,7 +94,7 @@ func (pl *falseFilterPlugin) Name() string {
 
 // Filter invoked at the filter extension point.
 func (pl *falseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
-	return framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason())
+	return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
 }
 
 // NewFalseFilterPlugin initializes a falseFilterPlugin and returns it.
@@ -116,7 +118,7 @@ func (pl *matchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState,
 	if pod.Name == node.Name {
 		return nil
 	}
-	return framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason())
+	return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
 }
 
 // NewMatchFilterPlugin initializes a matchFilterPlugin and returns it.
@@ -136,7 +138,7 @@ func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState,
 	if len(nodeInfo.Pods()) == 0 {
 		return nil
 	}
-	return framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason())
+	return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
 }
 
 // NewNoPodsFilterPlugin initializes a noPodsFilterPlugin and returns it.
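
All three fake plugins above now report failure through the same local constant, which is the point of the change: these tests only ever compare reason strings, so they never needed the predicates package. A hypothetical round-trip check (name invented here) showing why asserting on the constant is safe, given that a reason passed to framework.NewStatus comes back verbatim from Reasons():

func TestFakeReasonRoundTrip(t *testing.T) {
	// Hypothetical test: the string handed to NewStatus is returned
	// unchanged by Reasons(), so comparisons against ErrReasonFake hold.
	status := framework.NewStatus(framework.Unschedulable, ErrReasonFake)
	if reasons := status.Reasons(); len(reasons) != 1 || reasons[0] != ErrReasonFake {
		t.Errorf("unexpected reasons: %v", reasons)
	}
}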
@@ -392,8 +394,8 @@ func TestGenericScheduler(t *testing.T) {
 				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 				NumAllNodes: 2,
 				FilteredNodesStatuses: framework.NodeToStatusMap{
-					"machine1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
-					"machine2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
+					"machine1": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
+					"machine2": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
 				},
 			},
 		},
@@ -465,9 +467,9 @@ func TestGenericScheduler(t *testing.T) {
 				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 				NumAllNodes: 3,
 				FilteredNodesStatuses: framework.NodeToStatusMap{
-					"3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
-					"2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
-					"1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
+					"3": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
+					"2": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
+					"1": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
 				},
 			},
 		},
@@ -495,8 +497,8 @@ func TestGenericScheduler(t *testing.T) {
 				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 				NumAllNodes: 2,
 				FilteredNodesStatuses: framework.NodeToStatusMap{
-					"1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
-					"2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
+					"1": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
+					"2": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
 				},
 			},
 		},
@@ -863,7 +865,7 @@ func TestFindFitAllError(t *testing.T) {
 				t.Errorf("failed to find node %v in %v", node.Name, nodeToStatusMap)
 			}
 			reasons := status.Reasons()
-			if len(reasons) != 1 || reasons[0] != algorithmpredicates.ErrFakePredicate.GetReason() {
+			if len(reasons) != 1 || reasons[0] != ErrReasonFake {
 				t.Errorf("unexpected failure reasons: %v", reasons)
 			}
 		})
@@ -899,7 +901,7 @@ func TestFindFitSomeError(t *testing.T) {
 				t.Errorf("failed to find node %v in %v", node.Name, nodeToStatusMap)
 			}
 			reasons := status.Reasons()
-			if len(reasons) != 1 || reasons[0] != algorithmpredicates.ErrFakePredicate.GetReason() {
+			if len(reasons) != 1 || reasons[0] != ErrReasonFake {
 				t.Errorf("unexpected failures: %v", reasons)
 			}
 		})
@@ -989,24 +991,6 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
 	}
 }
 
-func TestHumanReadableFitError(t *testing.T) {
-	err := &FitError{
-		Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
-		NumAllNodes: 3,
-		FilteredNodesStatuses: framework.NodeToStatusMap{
-			"1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrNodeUnderMemoryPressure.GetReason()),
-			"2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
-			"3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
-		},
-	}
-	if strings.Contains(err.Error(), "0/3 nodes are available") {
-		if strings.Contains(err.Error(), "2 node(s) had disk pressure") && strings.Contains(err.Error(), "1 node(s) had memory pressure") {
-			return
-		}
-	}
-	t.Errorf("Error message doesn't have all the information content: [" + err.Error() + "]")
-}
-
 // The point of this test is to show that you:
 // - get the same priority for a zero-request pod as for a pod with the defaults requests,
 //   both when the zero-request pod is already on the machine and when the zero-request pod
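
For readers wondering what the deleted TestHumanReadableFitError exercised: FitError.Error() tallies identical per-node reasons and renders them as counts, e.g. "0/3 nodes are available: 2 node(s) had disk pressure, 1 node(s) had memory pressure." A rough sketch of that aggregation, assuming fmt, sort, and strings are imported; illustrative only, not the exact upstream code:

func humanReadableFitError(numAllNodes int, statuses framework.NodeToStatusMap) string {
	// Count how many nodes failed for each distinct reason.
	counts := map[string]int{}
	for _, status := range statuses {
		for _, reason := range status.Reasons() {
			counts[reason]++
		}
	}
	parts := make([]string, 0, len(counts))
	for reason, n := range counts {
		parts = append(parts, fmt.Sprintf("%d %v", n, reason))
	}
	sort.Strings(parts) // deterministic ordering for assertions
	return fmt.Sprintf("0/%v nodes are available: %v.", numAllNodes, strings.Join(parts, ", "))
}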
@@ -1919,29 +1903,17 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 		{
 			name: "Mix of failed predicates works fine",
 			nodesStatuses: framework.NodeToStatusMap{
-				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
-				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumerestrictions.ErrReasonDiskConflict),
-				"machine3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400).GetReason()),
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumerestrictions.ErrReasonDiskConflict),
+				"machine2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400).GetReason()),
 			},
-			expected: map[string]bool{"machine3": true, "machine4": true},
+			expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
 		},
 		{
 			name: "Node condition errors should be considered unresolvable",
 			nodesStatuses: framework.NodeToStatusMap{
-				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
-				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderPIDPressure.GetReason()),
-				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderMemoryPressure.GetReason()),
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnknownCondition),
 			},
-			expected: map[string]bool{"machine4": true},
-		},
-		{
-			name: "Node condition errors should be considered unresolvable",
-			nodesStatuses: framework.NodeToStatusMap{
-				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeNotReady.GetReason()),
-				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeNetworkUnavailable.GetReason()),
-				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnknownCondition),
-			},
-			expected: map[string]bool{"machine4": true},
+			expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
 		},
 		{
 			name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",

View File

@@ -38,8 +38,8 @@ const (
 	// Using the name of the plugin will likely help us avoid collisions with other plugins.
 	preFilterStateKey = "PreFilter" + Name
 
-	// ErrServiceAffinityViolated is used for CheckServiceAffinity predicate error.
-	ErrServiceAffinityViolated = "node(s) didn't match service affinity"
+	// ErrReason is used for CheckServiceAffinity predicate error.
+	ErrReason = "node(s) didn't match service affinity"
 )
 
 // Args holds the args that are used to configure the plugin.
@@ -277,7 +277,7 @@ func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.Cyc
 		return nil
 	}
 
-	return framework.NewStatus(framework.Unschedulable, ErrServiceAffinityViolated)
+	return framework.NewStatus(framework.Unschedulable, ErrReason)
 }
 
 // Score invoked at the Score extension point.
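
The rename from ErrServiceAffinityViolated to ErrReason follows the usual Go advice against stuttering: callers outside the package qualify the constant with the package name, so the shorter identifier loses no information. A hypothetical external reference (import path assumed from the plugin's location in the scheduler tree):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity" // path assumed
)

func main() {
	// The package qualifier supplies the context the longer name carried:
	// serviceaffinity.ErrReason reads as "service affinity error reason".
	fmt.Println(serviceaffinity.ErrReason)
}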