From 9cdc4ae0ad733b0c1c24e8156ada835b499eb7ae Mon Sep 17 00:00:00 2001 From: jayunit100 Date: Thu, 5 Jan 2017 16:23:23 -0500 Subject: [PATCH] Update FitError as a message component into the PodConditionUpdater. --- plugin/pkg/scheduler/generic_scheduler.go | 11 ++++------- plugin/pkg/scheduler/generic_scheduler_test.go | 18 ++++++++++++++++++ plugin/pkg/scheduler/scheduler.go | 7 ++++--- 3 files changed, 26 insertions(+), 10 deletions(-) diff --git a/plugin/pkg/scheduler/generic_scheduler.go b/plugin/pkg/scheduler/generic_scheduler.go index 25e2f2a4a99..71980cb2970 100644 --- a/plugin/pkg/scheduler/generic_scheduler.go +++ b/plugin/pkg/scheduler/generic_scheduler.go @@ -17,7 +17,6 @@ limitations under the License. package scheduler import ( - "bytes" "fmt" "sort" "strings" @@ -45,10 +44,10 @@ type FitError struct { var ErrNoNodesAvailable = fmt.Errorf("no nodes available to schedule pods") +const NoNodeAvailableMsg = "No nodes are available that match all of the following predicates" + // Error returns detailed information of why the pod failed to fit on each node func (f *FitError) Error() string { - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("pod (%s) failed to fit in any node\n", f.Pod.Name)) reasons := make(map[string]int) for _, predicates := range f.FailedPredicates { for _, pred := range predicates { @@ -64,10 +63,8 @@ func (f *FitError) Error() string { sort.Strings(reasonStrings) return reasonStrings } - - reasonMsg := fmt.Sprintf("fit failure summary on nodes : %v", strings.Join(sortReasonsHistogram(), ", ")) - buf.WriteString(reasonMsg) - return buf.String() + reasonMsg := fmt.Sprintf(NoNodeAvailableMsg+": %v.", strings.Join(sortReasonsHistogram(), ", ")) + return reasonMsg } type genericScheduler struct { diff --git a/plugin/pkg/scheduler/generic_scheduler_test.go b/plugin/pkg/scheduler/generic_scheduler_test.go index b225d6bbee8..892b3a4db3b 100644 --- a/plugin/pkg/scheduler/generic_scheduler_test.go +++ 
b/plugin/pkg/scheduler/generic_scheduler_test.go @@ -21,6 +21,7 @@ import ( "math" "reflect" "strconv" + "strings" "testing" "time" @@ -397,6 +398,23 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node { } } +func TestHumanReadableFitError(t *testing.T) { + error := &FitError{ + Pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}}, + FailedPredicates: FailedPredicateMap{ + "1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure}, + "2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure}, + "3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure}, + }, + } + if strings.Contains(error.Error(), "No nodes are available that match all of the following predicates") { + if strings.Contains(error.Error(), "NodeUnderDiskPressure (2)") && strings.Contains(error.Error(), "NodeUnderMemoryPressure (1)") { + return + } + } + t.Errorf("Error message doesn't have all the information content: [" + error.Error() + "]") +} + // The point of this test is to show that you: // - get the same priority for a zero-request pod as for a pod with the defaults requests, // both when the zero-request pod is already on the machine and when the zero-request pod diff --git a/plugin/pkg/scheduler/scheduler.go b/plugin/pkg/scheduler/scheduler.go index aea715004c9..943f478187f 100644 --- a/plugin/pkg/scheduler/scheduler.go +++ b/plugin/pkg/scheduler/scheduler.go @@ -98,9 +98,10 @@ func (s *Scheduler) scheduleOne() { s.config.Error(pod, err) s.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "%v", err) s.config.PodConditionUpdater.Update(pod, &v1.PodCondition{ - Type: v1.PodScheduled, - Status: v1.ConditionFalse, - Reason: v1.PodReasonUnschedulable, + Type: v1.PodScheduled, + Status: v1.ConditionFalse, + Reason: v1.PodReasonUnschedulable, + Message: err.Error(), }) return }