Make predicate errors more human readable

This commit is contained in:
tanshanshan 2018-01-30 09:04:58 +08:00
parent 0726f8c726
commit c389e3cec7
4 changed files with 33 additions and 32 deletions

View File

@@ -30,31 +30,31 @@ var (
 	// be made to pass by removing pods, or you change an existing predicate so that
 	// it can never be made to pass by removing pods, you need to add the predicate
 	// failure error in nodesWherePreemptionMightHelp() in scheduler/core/generic_scheduler.go
-	ErrDiskConflict = newPredicateFailureError("NoDiskConflict")
+	ErrDiskConflict = newPredicateFailureError("NoDiskConflict", "node(s) had no available disk")
-	ErrVolumeZoneConflict = newPredicateFailureError("NoVolumeZoneConflict")
+	ErrVolumeZoneConflict = newPredicateFailureError("NoVolumeZoneConflict", "node(s) had no available volume zone")
-	ErrNodeSelectorNotMatch = newPredicateFailureError("MatchNodeSelector")
+	ErrNodeSelectorNotMatch = newPredicateFailureError("MatchNodeSelector", "node(s) didn't match node selector")
-	ErrPodAffinityNotMatch = newPredicateFailureError("MatchInterPodAffinity")
+	ErrPodAffinityNotMatch = newPredicateFailureError("MatchInterPodAffinity", "node(s) didn't match pod affinity/anti-affinity")
-	ErrPodAffinityRulesNotMatch = newPredicateFailureError("PodAffinityRulesNotMatch")
+	ErrPodAffinityRulesNotMatch = newPredicateFailureError("PodAffinityRulesNotMatch", "node(s) didn't match pod affinity rules")
-	ErrPodAntiAffinityRulesNotMatch = newPredicateFailureError("PodAntiAffinityRulesNotMatch")
+	ErrPodAntiAffinityRulesNotMatch = newPredicateFailureError("PodAntiAffinityRulesNotMatch", "node(s) didn't match pod anti-affinity rules")
-	ErrExistingPodsAntiAffinityRulesNotMatch = newPredicateFailureError("ExistingPodsAntiAffinityRulesNotMatch")
+	ErrExistingPodsAntiAffinityRulesNotMatch = newPredicateFailureError("ExistingPodsAntiAffinityRulesNotMatch", "node(s) didn't satisfy existing pods anti-affinity rules")
-	ErrTaintsTolerationsNotMatch = newPredicateFailureError("PodToleratesNodeTaints")
+	ErrTaintsTolerationsNotMatch = newPredicateFailureError("PodToleratesNodeTaints", "node(s) had taints that the pod didn't tolerate")
-	ErrPodNotMatchHostName = newPredicateFailureError("HostName")
+	ErrPodNotMatchHostName = newPredicateFailureError("HostName", "node(s) didn't match the requested hostname")
-	ErrPodNotFitsHostPorts = newPredicateFailureError("PodFitsHostPorts")
+	ErrPodNotFitsHostPorts = newPredicateFailureError("PodFitsHostPorts", "node(s) didn't have free ports for the requested pod ports")
-	ErrNodeLabelPresenceViolated = newPredicateFailureError("CheckNodeLabelPresence")
+	ErrNodeLabelPresenceViolated = newPredicateFailureError("CheckNodeLabelPresence", "node(s) didn't have the requested labels")
-	ErrServiceAffinityViolated = newPredicateFailureError("CheckServiceAffinity")
+	ErrServiceAffinityViolated = newPredicateFailureError("CheckServiceAffinity", "node(s) didn't match service affinity")
-	ErrMaxVolumeCountExceeded = newPredicateFailureError("MaxVolumeCount")
+	ErrMaxVolumeCountExceeded = newPredicateFailureError("MaxVolumeCount", "node(s) exceed max volume count")
-	ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure")
+	ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure", "node(s) had memory pressure")
-	ErrNodeUnderDiskPressure = newPredicateFailureError("NodeUnderDiskPressure")
+	ErrNodeUnderDiskPressure = newPredicateFailureError("NodeUnderDiskPressure", "node(s) had disk pressure")
-	ErrNodeOutOfDisk = newPredicateFailureError("NodeOutOfDisk")
+	ErrNodeOutOfDisk = newPredicateFailureError("NodeOutOfDisk", "node(s) were out of disk space")
-	ErrNodeNotReady = newPredicateFailureError("NodeNotReady")
+	ErrNodeNotReady = newPredicateFailureError("NodeNotReady", "node(s) were not ready")
-	ErrNodeNetworkUnavailable = newPredicateFailureError("NodeNetworkUnavailable")
+	ErrNodeNetworkUnavailable = newPredicateFailureError("NodeNetworkUnavailable", "node(s) had unavailable network")
-	ErrNodeUnschedulable = newPredicateFailureError("NodeUnschedulable")
+	ErrNodeUnschedulable = newPredicateFailureError("NodeUnschedulable", "node(s) were unschedulable")
-	ErrNodeUnknownCondition = newPredicateFailureError("NodeUnknownCondition")
+	ErrNodeUnknownCondition = newPredicateFailureError("NodeUnknownCondition", "node(s) had unknown conditions")
-	ErrVolumeNodeConflict = newPredicateFailureError("VolumeNodeAffinityConflict")
+	ErrVolumeNodeConflict = newPredicateFailureError("VolumeNodeAffinityConflict", "node(s) had volume node affinity conflict")
-	ErrVolumeBindConflict = newPredicateFailureError("VolumeBindingNoMatch")
+	ErrVolumeBindConflict = newPredicateFailureError("VolumeBindingNoMatch", "node(s) didn't find available persistent volumes to bind")
 	// ErrFakePredicate is used for test only. The fake predicates returning false also returns error
 	// as ErrFakePredicate.
-	ErrFakePredicate = newPredicateFailureError("FakePredicateError")
+	ErrFakePredicate = newPredicateFailureError("FakePredicateError", "Nodes failed the fake predicate")
 )
 // InsufficientResourceError is an error type that indicates what kind of resource limit is
@@ -91,10 +91,11 @@ func (e *InsufficientResourceError) GetInsufficientAmount() int64 {
 type PredicateFailureError struct {
 	PredicateName string
+	PredicateDesc string
 }
-func newPredicateFailureError(predicateName string) *PredicateFailureError {
+func newPredicateFailureError(predicateName, predicateDesc string) *PredicateFailureError {
-	return &PredicateFailureError{PredicateName: predicateName}
+	return &PredicateFailureError{PredicateName: predicateName, PredicateDesc: predicateDesc}
 }
 func (e *PredicateFailureError) Error() string {
@@ -102,7 +103,7 @@ func (e *PredicateFailureError) Error() string {
 }
 func (e *PredicateFailureError) GetReason() string {
-	return e.PredicateName
+	return e.PredicateDesc
 }
 type FailureReason struct {
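
For readers skimming the diff, here is a minimal, self-contained sketch of the behaviour this file change introduces. It only copies the type, constructor, and GetReason shown above; the package wrapper and main function are added purely for illustration and are not part of the scheduler code.

package main

import "fmt"

// Trimmed-down copy of the error type touched in this diff.
type PredicateFailureError struct {
	PredicateName string
	PredicateDesc string
}

func newPredicateFailureError(predicateName, predicateDesc string) *PredicateFailureError {
	return &PredicateFailureError{PredicateName: predicateName, PredicateDesc: predicateDesc}
}

// GetReason now surfaces the human-readable description rather than the
// internal predicate name.
func (e *PredicateFailureError) GetReason() string {
	return e.PredicateDesc
}

func main() {
	err := newPredicateFailureError("NodeUnderDiskPressure", "node(s) had disk pressure")
	// Before this change the reported reason was "NodeUnderDiskPressure";
	// after it, the reason reads "node(s) had disk pressure".
	fmt.Println(err.GetReason())
}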

View File

@@ -518,7 +518,7 @@ func TestHumanReadableFitError(t *testing.T) {
 		},
 	}
 	if strings.Contains(err.Error(), "0/3 nodes are available") {
-		if strings.Contains(err.Error(), "2 NodeUnderDiskPressure") && strings.Contains(err.Error(), "1 NodeUnderMemoryPressure") {
+		if strings.Contains(err.Error(), "2 node(s) had disk pressure") && strings.Contains(err.Error(), "1 node(s) had memory pressure") {
 			return
 		}
 	}
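
The test above asserts the aggregated message the scheduler builds from these per-node reasons. The helper below is hypothetical (buildFitMessage is not part of the scheduler); it only reproduces the format asserted in the test to show how per-reason counts combine with the "0/3 nodes are available" prefix.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// buildFitMessage counts how many nodes failed for each human-readable
// reason and joins the counts after the "0/N nodes are available" prefix,
// mimicking the strings checked in TestHumanReadableFitError.
func buildFitMessage(totalNodes int, reasons []string) string {
	counts := map[string]int{}
	for _, r := range reasons {
		counts[r]++
	}
	parts := make([]string, 0, len(counts))
	for r, n := range counts {
		parts = append(parts, fmt.Sprintf("%d %s", n, r))
	}
	sort.Strings(parts)
	return fmt.Sprintf("0/%d nodes are available: %s.", totalNodes, strings.Join(parts, ", "))
}

func main() {
	msg := buildFitMessage(3, []string{
		"node(s) had disk pressure",
		"node(s) had disk pressure",
		"node(s) had memory pressure",
	})
	// Prints: 0/3 nodes are available: 1 node(s) had memory pressure, 2 node(s) had disk pressure.
	fmt.Println(msg)
}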

View File

@@ -679,7 +679,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 			FindBoundSatsified: false,
 		},
 		eventReason: "FailedScheduling",
-		expectError: makePredicateError("1 VolumeNodeAffinityConflict"),
+		expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
 	},
 	"unbound,no-matches": {
 		volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
@@ -687,7 +687,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 			FindBoundSatsified: true,
 		},
 		eventReason: "FailedScheduling",
-		expectError: makePredicateError("1 VolumeBindingNoMatch"),
+		expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
 	},
 	"bound-and-unbound-unsatisfied": {
 		volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
@@ -695,7 +695,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
 			FindBoundSatsified: false,
 		},
 		eventReason: "FailedScheduling",
-		expectError: makePredicateError("1 VolumeBindingNoMatch, 1 VolumeNodeAffinityConflict"),
+		expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),
 	},
 	"unbound,found-matches": {
 		volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{

View File

@@ -95,8 +95,8 @@ func TestLocalPVNegativeAffinity(t *testing.T) {
 	if strings.Compare(p.Status.Conditions[0].Reason, "Unschedulable") != 0 {
 		t.Fatalf("Failed as Pod %s reason was: %s but expected: Unschedulable", podName, p.Status.Conditions[0].Reason)
 	}
-	if !strings.Contains(p.Status.Conditions[0].Message, "MatchNodeSelector") || !strings.Contains(p.Status.Conditions[0].Message, "VolumeNodeAffinityConflict") {
+	if !strings.Contains(p.Status.Conditions[0].Message, "node(s) didn't match node selector") || !strings.Contains(p.Status.Conditions[0].Message, "node(s) had volume node affinity conflict") {
-		t.Fatalf("Failed as Pod's %s failure message does not contain expected keywords: MatchNodeSelector, VolumeNodeAffinityConflict", podName)
+		t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match node selector, node(s) had volume node affinity conflict", podName)
 	}
 	if err := config.client.CoreV1().Pods(config.ns).Delete(podName, &metav1.DeleteOptions{}); err != nil {
 		t.Fatalf("Failed to delete Pod %s: %v", podName, err)