diff --git a/pkg/controller/daemon/BUILD b/pkg/controller/daemon/BUILD
index acf6eb0d66e..29d9999169b 100644
--- a/pkg/controller/daemon/BUILD
+++ b/pkg/controller/daemon/BUILD
@@ -72,6 +72,8 @@ go_test(
         "//pkg/securitycontext:go_default_library",
         "//pkg/util/labels:go_default_library",
         "//plugin/pkg/scheduler/algorithm:go_default_library",
+        "//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
+        "//plugin/pkg/scheduler/schedulercache:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go
index 7f7674be6d2..8c4eac1564d 100644
--- a/pkg/controller/daemon/daemon_controller.go
+++ b/pkg/controller/daemon/daemon_controller.go
@@ -1351,7 +1351,7 @@ func NodeConditionPredicates(nodeInfo *schedulercache.NodeInfo) (bool, []algorit
 		// TODO: There are other node status that the DaemonSet should ideally respect too,
 		//       e.g. MemoryPressure, and DiskPressure
 		if c.Type == v1.NodeOutOfDisk && c.Status == v1.ConditionTrue {
-			reasons = append(reasons, predicates.ErrNodeSelectorNotMatch)
+			reasons = append(reasons, predicates.ErrNodeOutOfDisk)
 			break
 		}
 	}
diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go
index f8e49d48854..a708a4b9979 100644
--- a/pkg/controller/daemon/daemon_controller_test.go
+++ b/pkg/controller/daemon/daemon_controller_test.go
@@ -47,6 +47,8 @@ import (
 	"k8s.io/kubernetes/pkg/securitycontext"
 	labelsutil "k8s.io/kubernetes/pkg/util/labels"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
+	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
+	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )
 
 var (
@@ -1394,6 +1396,7 @@ func setDaemonSetCritical(ds *extensions.DaemonSet) {
 func TestNodeShouldRunDaemonPod(t *testing.T) {
 	cases := []struct {
 		podsOnNode                                       []*v1.Pod
+		nodeCondition                                    []v1.NodeCondition
 		ds                                               *extensions.DaemonSet
 		wantToRun, shouldSchedule, shouldContinueRunning bool
 		err                                              error
@@ -1414,6 +1417,23 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 			shouldSchedule:        true,
 			shouldContinueRunning: true,
 		},
+		{
+			ds: &extensions.DaemonSet{
+				Spec: extensions.DaemonSetSpec{
+					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
+					Template: v1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: simpleDaemonSetLabel,
+						},
+						Spec: resourcePodSpec("", "50M", "0.5"),
+					},
+				},
+			},
+			nodeCondition:         []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
+			wantToRun:             true,
+			shouldSchedule:        false,
+			shouldContinueRunning: true,
+		},
 		{
 			ds: &extensions.DaemonSet{
 				Spec: extensions.DaemonSetSpec{
@@ -1484,6 +1504,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 	for i, c := range cases {
 		for _, strategy := range updateStrategies() {
 			node := newNode("test-node", nil)
+			node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...)
 			node.Status.Allocatable = allocatableResources("100M", "1")
 			manager, _, _ := newTestController()
 			manager.nodeStore.Add(node)
@@ -2153,3 +2174,57 @@ func getQueuedKeys(queue workqueue.RateLimitingInterface) []string {
 	sort.Strings(keys)
 	return keys
 }
+
+func TestPredicates(t *testing.T) {
+	type args struct {
+		pod  *v1.Pod
+		node *v1.Node
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    bool
+		wantRes []algorithm.PredicateFailureReason
+		wantErr bool
+	}{
+		{
+			name: "return ErrNodeOutOfDisk if the node is out of disk",
+			args: args{
+				pod: newPod("pod1-", "node-0", nil, nil),
+				node: &v1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "node-0",
+					},
+					Status: v1.NodeStatus{
+						Conditions: []v1.NodeCondition{
+							{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue},
+						},
+						Allocatable: v1.ResourceList{
+							v1.ResourcePods: resource.MustParse("100"),
+						},
+					},
+				},
+			},
+			want:    false,
+			wantRes: []algorithm.PredicateFailureReason{predicates.ErrNodeOutOfDisk},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			nodeInfo := schedulercache.NewNodeInfo(tt.args.pod)
+			nodeInfo.SetNode(tt.args.node)
+
+			got, res, err := Predicates(tt.args.pod, nodeInfo)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("%s (error): error = %v, wantErr %v", tt.name, err, tt.wantErr)
+				return
+			}
+			if got != tt.want {
+				t.Errorf("%s (fit): got = %v, want %v", tt.name, got, tt.want)
+			}
+			if !reflect.DeepEqual(res, tt.wantRes) {
+				t.Errorf("%s (reasons): got = %v, want %v", tt.name, res, tt.wantRes)
+			}
+		})
+	}
+}
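
For readers who want to see the behavioral change in isolation, below is a minimal, self-contained Go sketch of the condition loop patched in NodeConditionPredicates above. It is not the controller code: PredicateFailureReason, predicateError, ErrNodeOutOfDisk, ErrNodeSelectorNotMatch, and nodeCondition are local stand-ins for the scheduler's algorithm and predicates packages, and the reason strings are placeholders. It only illustrates that a node reporting OutOfDisk=True now yields an out-of-disk failure reason instead of the unrelated node-selector-mismatch reason.

package main

import "fmt"

// PredicateFailureReason is a local stand-in for algorithm.PredicateFailureReason.
type PredicateFailureReason interface {
	GetReason() string
}

// predicateError is a local stand-in for the scheduler's predicate failure error type.
type predicateError string

func (e predicateError) GetReason() string { return string(e) }

// Local stand-ins for predicates.ErrNodeOutOfDisk and predicates.ErrNodeSelectorNotMatch;
// the strings are placeholders, not the upstream reason messages.
var (
	ErrNodeOutOfDisk        = predicateError("NodeOutOfDisk")
	ErrNodeSelectorNotMatch = predicateError("MatchNodeSelector")
)

// nodeCondition is a minimal stand-in for v1.NodeCondition.
type nodeCondition struct {
	Type   string
	Status string
}

// nodeConditionPredicates mirrors the patched loop: an OutOfDisk=True condition
// appends ErrNodeOutOfDisk (before this change it appended ErrNodeSelectorNotMatch).
func nodeConditionPredicates(conditions []nodeCondition) (bool, []PredicateFailureReason) {
	var reasons []PredicateFailureReason
	for _, c := range conditions {
		if c.Type == "OutOfDisk" && c.Status == "True" {
			reasons = append(reasons, ErrNodeOutOfDisk)
			break
		}
	}
	return len(reasons) == 0, reasons
}

func main() {
	fit, reasons := nodeConditionPredicates([]nodeCondition{
		{Type: "OutOfDisk", Status: "True"},
	})
	fmt.Println("fit:", fit) // fit: false
	for _, r := range reasons {
		fmt.Println("reason:", r.GetReason()) // reason: NodeOutOfDisk
	}
}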