Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-22 18:16:52 +00:00
Merge pull request #52515 from guangxuli/update_predicates_name_under_memory_pressure
Automatic merge from submit-queue (batch tested with PRs 53350, 52688, 53531, 52515). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Clarify predicate names to clear up confusion

**What this PR does / why we need it**: Make the scheduling failure message clearer. After reading the message described in https://github.com/kubernetes/kubernetes/issues/52166 several times, I realized that, from a user's point of view, our wording really is confusing: the current scheduling message is written from the developer's perspective, not the user's. Predicates filter nodes to find the ones that fit, and in this case the fit nodes are the nodes that are *not* under memory pressure. `NodeUnderDiskPressure` has the same problem.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes https://github.com/kubernetes/kubernetes/issues/52166

**Special notes for your reviewer**:

**Release note**: none
This commit is contained in: commit `2baaac658d`
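To illustrate the change, here is a minimal, self-contained sketch of the new message format. It mirrors the `FitError` fields and the `Error()` logic shown in the diff below, but it stubs out `algorithm.PredicateFailureReason` with a plain string type so it runs on its own; it is not the actual scheduler code.

```go
// Minimal sketch of the reworked FitError message; not the real scheduler code.
package main

import (
	"fmt"
	"sort"
	"strings"
)

// simpleReason stands in for algorithm.PredicateFailureReason in this sketch.
type simpleReason string

func (r simpleReason) GetReason() string { return string(r) }

// FitError mirrors the fields used in the diff: the total node count plus
// the per-node predicate failures.
type FitError struct {
	NumAllNodes      int
	FailedPredicates map[string][]simpleReason // node name -> failure reasons
}

const NoNodeAvailableMsg = "0/%v nodes are available"

// Error builds the user-facing message: a node count followed by a histogram
// of failure reasons, e.g. "0/3 nodes are available: 2 NodeUnderDiskPressure, ...".
func (f *FitError) Error() string {
	// Count how many nodes failed for each reason.
	reasons := map[string]int{}
	for _, preds := range f.FailedPredicates {
		for _, p := range preds {
			reasons[p.GetReason()]++
		}
	}
	reasonStrings := []string{}
	for k, v := range reasons {
		// New format: "<count> <reason>" instead of the old "<reason> (<count>)".
		reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
	}
	sort.Strings(reasonStrings)
	return fmt.Sprintf(NoNodeAvailableMsg+": %v.", f.NumAllNodes, strings.Join(reasonStrings, ", "))
}

func main() {
	err := &FitError{
		NumAllNodes: 3,
		FailedPredicates: map[string][]simpleReason{
			"node1": {"NodeUnderMemoryPressure"},
			"node2": {"NodeUnderDiskPressure"},
			"node3": {"NodeUnderDiskPressure"},
		},
	}
	// Prints: 0/3 nodes are available: 1 NodeUnderMemoryPressure, 2 NodeUnderDiskPressure.
	fmt.Println(err.Error())
}
```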
@@ -42,13 +42,14 @@ type FailedPredicateMap map[string][]algorithm.PredicateFailureReason

 type FitError struct {
 	Pod              *v1.Pod
+	NumAllNodes      int
 	FailedPredicates FailedPredicateMap
 }

 var ErrNoNodesAvailable = fmt.Errorf("no nodes available to schedule pods")

 const (
-	NoNodeAvailableMsg = "No nodes are available that match all of the predicates"
+	NoNodeAvailableMsg = "0/%v nodes are available"
 	// NominatedNodeAnnotationKey is used to annotate a pod that has preempted other pods.
 	// The scheduler uses the annotation to find that the pod shouldn't preempt more pods
 	// when it gets to the head of scheduling queue again.
@@ -68,12 +69,12 @@ func (f *FitError) Error() string {
 	sortReasonsHistogram := func() []string {
 		reasonStrings := []string{}
 		for k, v := range reasons {
-			reasonStrings = append(reasonStrings, fmt.Sprintf("%v (%v)", k, v))
+			reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
 		}
 		sort.Strings(reasonStrings)
 		return reasonStrings
 	}
-	reasonMsg := fmt.Sprintf(NoNodeAvailableMsg+": %v.", strings.Join(sortReasonsHistogram(), ", "))
+	reasonMsg := fmt.Sprintf(NoNodeAvailableMsg+": %v.", f.NumAllNodes, strings.Join(sortReasonsHistogram(), ", "))
 	return reasonMsg
 }

@@ -122,6 +123,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister
 	if len(filteredNodes) == 0 {
 		return "", &FitError{
 			Pod:              pod,
+			NumAllNodes:      len(nodes),
 			FailedPredicates: failedPredicateMap,
 		}
 	}
@@ -200,7 +200,8 @@ func TestGenericScheduler(t *testing.T) {
 			pod:  &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
 			name: "test 1",
 			wErr: &FitError{
-				Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+				Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+				NumAllNodes:      2,
 				FailedPredicates: FailedPredicateMap{
 					"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
 					"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
@@ -260,7 +261,8 @@ func TestGenericScheduler(t *testing.T) {
 			expectsErr: true,
 			name:       "test 7",
 			wErr: &FitError{
-				Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+				Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+				NumAllNodes:      3,
 				FailedPredicates: FailedPredicateMap{
 					"3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
 					"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
@@ -290,7 +292,8 @@ func TestGenericScheduler(t *testing.T) {
 			expectsErr: true,
 			name:       "test 8",
 			wErr: &FitError{
-				Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+				Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+				NumAllNodes:      2,
 				FailedPredicates: FailedPredicateMap{
 					"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
 					"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
@@ -406,15 +409,16 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {

 func TestHumanReadableFitError(t *testing.T) {
 	err := &FitError{
-		Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+		Pod:              &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+		NumAllNodes:      3,
 		FailedPredicates: FailedPredicateMap{
 			"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
 			"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
 			"3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
 		},
 	}
-	if strings.Contains(err.Error(), NoNodeAvailableMsg) {
-		if strings.Contains(err.Error(), "NodeUnderDiskPressure (2)") && strings.Contains(err.Error(), "NodeUnderMemoryPressure (1)") {
+	if strings.Contains(err.Error(), "0/3 nodes are available") {
+		if strings.Contains(err.Error(), "2 NodeUnderDiskPressure") && strings.Contains(err.Error(), "1 NodeUnderMemoryPressure") {
 			return
 		}
 	}
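For the fixture in this test (three nodes total, one under memory pressure, two under disk pressure), the updated message should read roughly `0/3 nodes are available: 1 NodeUnderMemoryPressure, 2 NodeUnderDiskPressure.`, assuming the histogram entries sort as plain strings; the test only asserts that the three substrings above are present.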
@@ -1180,7 +1184,7 @@ func TestPreempt(t *testing.T) {
 		scheduler := NewGenericScheduler(
 			cache, nil, map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyMetadataProducer, extenders)
 		// Call Preempt and check the expected results.
-		node, victims, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{test.pod, failedPredMap}))
+		node, victims, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
 		if err != nil {
 			t.Errorf("test [%v]: unexpected error in preemption: %v", test.name, err)
 		}
@@ -1208,7 +1212,7 @@ func TestPreempt(t *testing.T) {
 			test.pod.Annotations[NominatedNodeAnnotationKey] = node.Name
 		}
 		// Call preempt again and make sure it doesn't preempt any more pods.
-		node, victims, err = scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{test.pod, failedPredMap}))
+		node, victims, err = scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
 		if err != nil {
 			t.Errorf("test [%v]: unexpected error in preemption: %v", test.name, err)
 		}
@@ -300,6 +300,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
 	case err := <-errChan:
 		expectErr := &core.FitError{
 			Pod:              secondPod,
+			NumAllNodes:      1,
 			FailedPredicates: core.FailedPredicateMap{node.Name: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}},
 		}
 		if !reflect.DeepEqual(expectErr, err) {
@@ -484,6 +485,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 	case err := <-errChan:
 		expectErr := &core.FitError{
 			Pod:              podWithTooBigResourceRequests,
+			NumAllNodes:      len(nodes),
 			FailedPredicates: failedPredicatesMap,
 		}
 		if len(fmt.Sprint(expectErr)) > 150 {