Merge pull request #52515 from guangxuli/update_predicates_name_under_memory_pressure

Automatic merge from submit-queue (batch tested with PRs 53350, 52688, 53531, 52515). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Clarify predicate names to reduce confusion

**What this PR does / why we need it**:
Just makes the scheduling message clearer.
After reading the message described in https://github.com/kubernetes/kubernetes/issues/52166 several times, I realized that from the user's point of view our indication really is a bit confusing. I think our scheduling message is written from the developer's view, not the user's: predicates filter nodes to find the fit nodes, and in this case the fit nodes are the nodes *not* under memory pressure, so reporting the failure as `NodeUnderMemoryPressure` reads backwards. `NodeUnderDiskPressure` has the same problem.
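For illustration, on a hypothetical three-node cluster (message wording taken from the diff below), the failure event changes roughly like this:

```text
Before: No nodes are available that match all of the predicates: NodeUnderDiskPressure (2), NodeUnderMemoryPressure (1).
After:  0/3 nodes are available: 1 NodeUnderMemoryPressure, 2 NodeUnderDiskPressure.
```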

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*:
fixes #52166 (https://github.com/kubernetes/kubernetes/issues/52166)

**Special notes for your reviewer**:


**Release note**:
none
Kubernetes Submit Queue 2017-10-06 21:32:15 -07:00 committed by GitHub
commit 2baaac658d
3 changed files with 19 additions and 11 deletions

```diff
@@ -42,13 +42,14 @@ type FailedPredicateMap map[string][]algorithm.PredicateFailureReason
 
 type FitError struct {
     Pod              *v1.Pod
+    NumAllNodes      int
     FailedPredicates FailedPredicateMap
 }
 
 var ErrNoNodesAvailable = fmt.Errorf("no nodes available to schedule pods")
 
 const (
-    NoNodeAvailableMsg = "No nodes are available that match all of the predicates"
+    NoNodeAvailableMsg = "0/%v nodes are available"
     // NominatedNodeAnnotationKey is used to annotate a pod that has preempted other pods.
     // The scheduler uses the annotation to find that the pod shouldn't preempt more pods
     // when it gets to the head of scheduling queue again.
@@ -68,12 +69,12 @@ func (f *FitError) Error() string {
     sortReasonsHistogram := func() []string {
         reasonStrings := []string{}
         for k, v := range reasons {
-            reasonStrings = append(reasonStrings, fmt.Sprintf("%v (%v)", k, v))
+            reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
         }
         sort.Strings(reasonStrings)
         return reasonStrings
     }
-    reasonMsg := fmt.Sprintf(NoNodeAvailableMsg+": %v.", strings.Join(sortReasonsHistogram(), ", "))
+    reasonMsg := fmt.Sprintf(NoNodeAvailableMsg+": %v.", f.NumAllNodes, strings.Join(sortReasonsHistogram(), ", "))
     return reasonMsg
 }
@@ -122,6 +123,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister
     if len(filteredNodes) == 0 {
         return "", &FitError{
             Pod:              pod,
+            NumAllNodes:      len(nodes),
             FailedPredicates: failedPredicateMap,
         }
     }
```
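To see the new format end to end, here is a minimal standalone sketch of the `FitError.Error()` logic above (simplified: plain strings stand in for `algorithm.PredicateFailureReason`, and the node names are invented for illustration):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// Simplified stand-in for FailedPredicateMap: node name -> names of failed predicates.
type failedPredicateMap map[string][]string

const noNodeAvailableMsg = "0/%v nodes are available"

// humanReadableFitError mirrors the new FitError.Error(): build a histogram of
// failure reasons across nodes, format each as "<count> <reason>", sort the
// entries, and prefix the list with "0/<total> nodes are available".
func humanReadableFitError(numAllNodes int, failed failedPredicateMap) string {
	reasons := map[string]int{}
	for _, preds := range failed {
		for _, p := range preds {
			reasons[p]++
		}
	}
	reasonStrings := []string{}
	for k, v := range reasons {
		reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
	}
	sort.Strings(reasonStrings)
	return fmt.Sprintf(noNodeAvailableMsg+": %v.", numAllNodes, strings.Join(reasonStrings, ", "))
}

func main() {
	failed := failedPredicateMap{
		"node1": {"NodeUnderMemoryPressure"},
		"node2": {"NodeUnderDiskPressure"},
		"node3": {"NodeUnderDiskPressure"},
	}
	// Prints: 0/3 nodes are available: 1 NodeUnderMemoryPressure, 2 NodeUnderDiskPressure.
	fmt.Println(humanReadableFitError(3, failed))
}
```

This is the same scenario the updated `TestHumanReadableFitError` below asserts on.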

```diff
@@ -200,7 +200,8 @@ func TestGenericScheduler(t *testing.T) {
         pod:  &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
         name: "test 1",
         wErr: &FitError{
             Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+            NumAllNodes: 2,
             FailedPredicates: FailedPredicateMap{
                 "machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
                 "machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
@@ -260,7 +261,8 @@ func TestGenericScheduler(t *testing.T) {
         expectsErr: true,
         name:       "test 7",
         wErr: &FitError{
             Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+            NumAllNodes: 3,
             FailedPredicates: FailedPredicateMap{
                 "3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
                 "2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
@@ -290,7 +292,8 @@ func TestGenericScheduler(t *testing.T) {
         expectsErr: true,
         name:       "test 8",
         wErr: &FitError{
             Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+            NumAllNodes: 2,
             FailedPredicates: FailedPredicateMap{
                 "1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
                 "2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
@@ -406,15 +409,16 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
 func TestHumanReadableFitError(t *testing.T) {
     err := &FitError{
         Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+        NumAllNodes: 3,
         FailedPredicates: FailedPredicateMap{
             "1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
             "2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
             "3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
         },
     }
-    if strings.Contains(err.Error(), NoNodeAvailableMsg) {
-        if strings.Contains(err.Error(), "NodeUnderDiskPressure (2)") && strings.Contains(err.Error(), "NodeUnderMemoryPressure (1)") {
+    if strings.Contains(err.Error(), "0/3 nodes are available") {
+        if strings.Contains(err.Error(), "2 NodeUnderDiskPressure") && strings.Contains(err.Error(), "1 NodeUnderMemoryPressure") {
             return
         }
     }
@@ -1180,7 +1184,7 @@ func TestPreempt(t *testing.T) {
     scheduler := NewGenericScheduler(
         cache, nil, map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyMetadataProducer, extenders)
     // Call Preempt and check the expected results.
-    node, victims, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{test.pod, failedPredMap}))
+    node, victims, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
     if err != nil {
         t.Errorf("test [%v]: unexpected error in preemption: %v", test.name, err)
     }
@@ -1208,7 +1212,7 @@ func TestPreempt(t *testing.T) {
         test.pod.Annotations[NominatedNodeAnnotationKey] = node.Name
     }
     // Call preempt again and make sure it doesn't preempt any more pods.
-    node, victims, err = scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{test.pod, failedPredMap}))
+    node, victims, err = scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
     if err != nil {
         t.Errorf("test [%v]: unexpected error in preemption: %v", test.name, err)
     }
```
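One detail in the `TestPreempt` hunks above: the `FitError` literals switched from positional to keyed fields. A Go positional struct literal must supply every field, so `&FitError{test.pod, failedPredMap}` stopped compiling once `NumAllNodes` was added; keyed literals keep compiling and leave the omitted field at its zero value. A minimal sketch (field types simplified, names hypothetical):

```go
package main

import "fmt"

// fitError is a simplified stand-in for the scheduler's FitError.
type fitError struct {
	pod              string
	numAllNodes      int
	failedPredicates map[string][]string
}

func main() {
	failedPredMap := map[string][]string{"node1": {"ErrFakePredicate"}}

	// fitError{"pod-2", failedPredMap} would not compile: a positional
	// literal must initialize all three fields. The keyed form below
	// compiles and leaves numAllNodes at its zero value.
	err := fitError{pod: "pod-2", failedPredicates: failedPredMap}
	fmt.Printf("%+v\n", err)
}
```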

```diff
@@ -300,6 +300,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
     case err := <-errChan:
         expectErr := &core.FitError{
             Pod:              secondPod,
+            NumAllNodes:      1,
             FailedPredicates: core.FailedPredicateMap{node.Name: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}},
         }
         if !reflect.DeepEqual(expectErr, err) {
@@ -484,6 +485,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
     case err := <-errChan:
         expectErr := &core.FitError{
             Pod:              podWithTooBigResourceRequests,
+            NumAllNodes:      len(nodes),
             FailedPredicates: failedPredicatesMap,
         }
         if len(fmt.Sprint(expectErr)) > 150 {
```