clarify predicates message when no nodes available

fix spacing; address comment
This commit is contained in:
parent 5cc95fbf27
commit 7f3c4ac1f4
@@ -42,13 +42,14 @@ type FailedPredicateMap map[string][]algorithm.PredicateFailureReason
 type FitError struct {
 	Pod              *v1.Pod
+	NumAllNodes      int
 	FailedPredicates FailedPredicateMap
 }
 
 var ErrNoNodesAvailable = fmt.Errorf("no nodes available to schedule pods")
 
 const (
-	NoNodeAvailableMsg = "No nodes are available that match all of the predicates"
+	NoNodeAvailableMsg = "0/%v nodes are available"
 	// NominatedNodeAnnotationKey is used to annotate a pod that has preempted other pods.
 	// The scheduler uses the annotation to find that the pod shouldn't preempt more pods
 	// when it gets to the head of scheduling queue again.
@@ -68,12 +69,12 @@ func (f *FitError) Error() string {
 	sortReasonsHistogram := func() []string {
 		reasonStrings := []string{}
 		for k, v := range reasons {
-			reasonStrings = append(reasonStrings, fmt.Sprintf("%v (%v)", k, v))
+			reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
 		}
 		sort.Strings(reasonStrings)
 		return reasonStrings
 	}
-	reasonMsg := fmt.Sprintf(NoNodeAvailableMsg+": %v.", strings.Join(sortReasonsHistogram(), ", "))
+	reasonMsg := fmt.Sprintf(NoNodeAvailableMsg+": %v.", f.NumAllNodes, strings.Join(sortReasonsHistogram(), ", "))
 	return reasonMsg
 }
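For illustration, a self-contained sketch of the message the reworked Error() produces. The constant and the "%v %v" histogram format come from the diff above; the reasons map, its counts, and the standalone main are hypothetical scaffolding:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// Mirrors the constant introduced above.
const NoNodeAvailableMsg = "0/%v nodes are available"

func main() {
	// Hypothetical histogram: predicate failure reason -> node count.
	reasons := map[string]int{
		"NodeUnderDiskPressure":   2,
		"NodeUnderMemoryPressure": 1,
	}
	reasonStrings := []string{}
	for k, v := range reasons {
		// Count first ("%v %v", v, k), so sort.Strings orders the
		// entries by count before reason name.
		reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
	}
	sort.Strings(reasonStrings)
	msg := fmt.Sprintf(NoNodeAvailableMsg+": %v.", 3, strings.Join(reasonStrings, ", "))
	// Prints: 0/3 nodes are available: 1 NodeUnderMemoryPressure, 2 NodeUnderDiskPressure.
	fmt.Println(msg)
}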
@@ -122,6 +123,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister
 	if len(filteredNodes) == 0 {
 		return "", &FitError{
 			Pod:              pod,
+			NumAllNodes:      len(nodes),
 			FailedPredicates: failedPredicateMap,
 		}
 	}
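Populating NumAllNodes from len(nodes) at the point where filtering fails is what lets Error() fill in the "0/%v" prefix: the message reports how many nodes were considered in total without callers having to thread the count through separately.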
@@ -200,7 +200,8 @@ func TestGenericScheduler(t *testing.T) {
 			pod:  &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
 			name: "test 1",
 			wErr: &FitError{
-				Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+				NumAllNodes: 2,
 				FailedPredicates: FailedPredicateMap{
 					"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
 					"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
@@ -260,7 +261,8 @@ func TestGenericScheduler(t *testing.T) {
 			expectsErr: true,
 			name:       "test 7",
 			wErr: &FitError{
-				Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+				NumAllNodes: 3,
 				FailedPredicates: FailedPredicateMap{
 					"3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
 					"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
@@ -290,7 +292,8 @@ func TestGenericScheduler(t *testing.T) {
 			expectsErr: true,
 			name:       "test 8",
 			wErr: &FitError{
-				Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+				NumAllNodes: 2,
 				FailedPredicates: FailedPredicateMap{
 					"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
 					"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
@@ -406,15 +409,16 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
 
 func TestHumanReadableFitError(t *testing.T) {
 	err := &FitError{
-		Pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+		Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}},
+		NumAllNodes: 3,
 		FailedPredicates: FailedPredicateMap{
 			"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
 			"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
 			"3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
 		},
 	}
-	if strings.Contains(err.Error(), NoNodeAvailableMsg) {
-		if strings.Contains(err.Error(), "NodeUnderDiskPressure (2)") && strings.Contains(err.Error(), "NodeUnderMemoryPressure (1)") {
+	if strings.Contains(err.Error(), "0/3 nodes are available") {
+		if strings.Contains(err.Error(), "2 NodeUnderDiskPressure") && strings.Contains(err.Error(), "1 NodeUnderMemoryPressure") {
 			return
 		}
 	}
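Put together, the fixture above should work out to the full message "0/3 nodes are available: 1 NodeUnderMemoryPressure, 2 NodeUnderDiskPressure.", which the two Contains checks assert piecewise.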
@@ -1180,7 +1184,7 @@ func TestPreempt(t *testing.T) {
 		scheduler := NewGenericScheduler(
 			cache, nil, map[string]algorithm.FitPredicate{"matches": algorithmpredicates.PodFitsResources}, algorithm.EmptyPredicateMetadataProducer, []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, algorithm.EmptyMetadataProducer, extenders)
 		// Call Preempt and check the expected results.
-		node, victims, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{test.pod, failedPredMap}))
+		node, victims, err := scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
 		if err != nil {
 			t.Errorf("test [%v]: unexpected error in preemption: %v", test.name, err)
 		}
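The switch from a positional FitError literal to named fields here is forced by the new field: a two-value positional literal no longer matches a three-field struct. A minimal illustration with a simplified stand-in type (not the scheduler's actual definitions):

package main

import "fmt"

// Simplified stand-in for FitError; the real fields are
// Pod, NumAllNodes, and FailedPredicates.
type fitError struct {
	pod              string
	numAllNodes      int
	failedPredicates map[string][]string
}

func main() {
	// fitError{"p", map[string][]string{}} would now fail to compile:
	// a positional literal must supply every field in order. Named
	// fields keep working as the struct grows, and omitted fields
	// take their zero value.
	e := fitError{pod: "p", failedPredicates: map[string][]string{}}
	fmt.Println(e.numAllNodes) // 0
}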
@@ -1208,7 +1212,7 @@ func TestPreempt(t *testing.T) {
 			test.pod.Annotations[NominatedNodeAnnotationKey] = node.Name
 		}
 		// Call preempt again and make sure it doesn't preempt any more pods.
-		node, victims, err = scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{test.pod, failedPredMap}))
+		node, victims, err = scheduler.Preempt(test.pod, schedulertesting.FakeNodeLister(makeNodeList(nodeNames)), error(&FitError{Pod: test.pod, FailedPredicates: failedPredMap}))
 		if err != nil {
 			t.Errorf("test [%v]: unexpected error in preemption: %v", test.name, err)
 		}
@@ -300,6 +300,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
 	case err := <-errChan:
 		expectErr := &core.FitError{
 			Pod:              secondPod,
+			NumAllNodes:      1,
 			FailedPredicates: core.FailedPredicateMap{node.Name: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}},
 		}
 		if !reflect.DeepEqual(expectErr, err) {
@@ -484,6 +485,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 	case err := <-errChan:
 		expectErr := &core.FitError{
 			Pod:              podWithTooBigResourceRequests,
+			NumAllNodes:      len(nodes),
 			FailedPredicates: failedPredicatesMap,
 		}
 		if len(fmt.Sprint(expectErr)) > 150 {