mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-09-02 09:47:06 +00:00)
feat: update taint nodes by condition to GA
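Editor's note: with TaintNodesByCondition promoted to GA, the scheduler stops inspecting node conditions directly. The node lifecycle controller translates conditions such as memory, disk, and PID pressure into node.kubernetes.io/* taints; the CheckNodeCondition and CheckNode*Pressure predicates are deleted; CheckNodeUnschedulable becomes the remaining mandatory predicate; and the TaintToleration filter plugin is registered unconditionally. The hunks below update the integration tests accordingly. For orientation, a minimal self-contained sketch of the condition-to-taint translation the GA behavior relies on; the types and mapping table are illustrative stand-ins, not the controller's actual code:

package main

import "fmt"

// NodeCondition is an illustrative stand-in for the v1 API type; the real
// controller works with k8s.io/api/core/v1.
type NodeCondition struct {
	Type   string
	Status string
}

// conditionToTaint is a hypothetical mapping table: condition type -> taint
// key applied while the condition is True. The keys mirror the well-known
// node.kubernetes.io taints.
var conditionToTaint = map[string]string{
	"MemoryPressure": "node.kubernetes.io/memory-pressure",
	"DiskPressure":   "node.kubernetes.io/disk-pressure",
	"PIDPressure":    "node.kubernetes.io/pid-pressure",
}

// taintsFor returns the NoSchedule taint keys implied by a node's conditions.
func taintsFor(conds []NodeCondition) []string {
	var taints []string
	for _, c := range conds {
		if key, ok := conditionToTaint[c.Type]; ok && c.Status == "True" {
			taints = append(taints, key)
		}
	}
	return taints
}

func main() {
	conds := []NodeCondition{{Type: "PIDPressure", Status: "True"}}
	fmt.Println(taintsFor(conds)) // [node.kubernetes.io/pid-pressure]
}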
@@ -21,7 +21,7 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -877,56 +877,6 @@ func TestInterPodAffinity(t *testing.T) {
 	}
 }
 
-// TestNodePIDPressure verifies that scheduler's CheckNodePIDPressurePredicate predicate
-// functions works correctly.
-func TestNodePIDPressure(t *testing.T) {
-	context := initTest(t, "node-pid-pressure")
-	defer cleanupTest(t, context)
-	// Add a node.
-	node, err := createNode(context.clientSet, "testnode", nil)
-	if err != nil {
-		t.Fatalf("Cannot create node: %v", err)
-	}
-
-	cs := context.clientSet
-
-	// Adds PID pressure condition to the node.
-	node.Status.Conditions = []v1.NodeCondition{
-		{
-			Type:   v1.NodePIDPressure,
-			Status: v1.ConditionTrue,
-		},
-	}
-
-	// Update node condition.
-	err = updateNodeStatus(context.clientSet, node)
-	if err != nil {
-		t.Fatalf("Cannot update node: %v", err)
-	}
-
-	// Create test pod.
-	testPod := &v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{Name: "pidpressure-fake-name"},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{Name: "container", Image: imageutils.GetPauseImageName()},
-			},
-		},
-	}
-
-	testPod, err = cs.CoreV1().Pods(context.ns.Name).Create(testPod)
-	if err != nil {
-		t.Fatalf("Test Failed: error: %v, while creating pod", err)
-	}
-
-	err = waitForPodUnschedulable(cs, testPod)
-	if err != nil {
-		t.Errorf("Test Failed: error, %v, while waiting for scheduled", err)
-	}
-
-	cleanupPods(cs, t, []*v1.Pod{testPod})
-}
-
 // TestEvenPodsSpreadPredicate verifies that EvenPodsSpread predicate functions well.
 func TestEvenPodsSpreadPredicate(t *testing.T) {
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)()
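TestNodePIDPressure goes away along with the CheckNodePIDPressure predicate it exercised. Under the GA model, a node reporting PIDPressure=True instead carries the node.kubernetes.io/pid-pressure:NoSchedule taint, and filtering happens in the TaintToleration plugin. A pod that should still land on such a node would declare a toleration; a minimal sketch using the real k8s.io/api/core/v1 types (assumes that module is on the import path):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Tolerate the taint the node lifecycle controller now applies for the
	// PIDPressure condition, replacing the removed scheduler predicate.
	tol := v1.Toleration{
		Key:      "node.kubernetes.io/pid-pressure",
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}
	fmt.Printf("%+v\n", tol)
}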
@@ -110,7 +110,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 				]
 			}`,
 			expectedPredicates: sets.NewString(
-				"CheckNodeCondition", // mandatory predicate
+				"CheckNodeUnschedulable", // mandatory predicate
 				"PredicateOne",
 				"PredicateTwo",
 			),
@@ -118,6 +118,11 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 				"PriorityOne",
 				"PriorityTwo",
 			),
+			expectedPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {
+					{Name: "TaintToleration"},
+				},
+			},
 		},
 		{
 			policy: `{
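This expectedPlugins literal recurs in every case below, since TaintToleration is configured as a filter plugin no matter what the Policy supplies. If the repetition grew further, a small helper could centralize it. A hypothetical sketch, not part of the upstream test; it assumes the file's existing kubeschedulerconfig import (k8s.io/kubernetes/pkg/scheduler/apis/config):

// mandatoryPlugins returns the plugin set every case asserts: the
// TaintToleration filter, configured whether or not a Policy names it.
// Hypothetical helper for illustration only.
func mandatoryPlugins() map[string][]kubeschedulerconfig.Plugin {
	return map[string][]kubeschedulerconfig.Plugin{
		"FilterPlugin": {
			{Name: "TaintToleration"},
		},
	}
}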
@@ -125,10 +130,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 				"apiVersion" : "v1"
 			}`,
 			expectedPredicates: sets.NewString(
-				"CheckNodeCondition", // mandatory predicate
-				"CheckNodeDiskPressure",
-				"CheckNodeMemoryPressure",
-				"CheckNodePIDPressure",
+				"CheckNodeUnschedulable", // mandatory predicate
 				"MaxAzureDiskVolumeCount",
 				"MaxEBSVolumeCount",
 				"MaxGCEPDVolumeCount",
@@ -168,9 +170,14 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 				"priorities" : []
 			}`,
 			expectedPredicates: sets.NewString(
-				"CheckNodeCondition", // mandatory predicate
+				"CheckNodeUnschedulable", // mandatory predicate
 			),
 			expectedPrioritizers: sets.NewString(),
+			expectedPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {
+					{Name: "TaintToleration"},
+				},
+			},
 		},
 		{
 			policy: `apiVersion: v1
@@ -185,7 +192,7 @@ priorities:
   weight: 5
 `,
 			expectedPredicates: sets.NewString(
-				"CheckNodeCondition", // mandatory predicate
+				"CheckNodeUnschedulable", // mandatory predicate
 				"PredicateOne",
 				"PredicateTwo",
 			),
@@ -193,16 +200,18 @@ priorities:
 				"PriorityOne",
 				"PriorityTwo",
 			),
+			expectedPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {
+					{Name: "TaintToleration"},
+				},
+			},
 		},
 		{
 			policy: `apiVersion: v1
 kind: Policy
 `,
 			expectedPredicates: sets.NewString(
-				"CheckNodeCondition", // mandatory predicate
-				"CheckNodeDiskPressure",
-				"CheckNodeMemoryPressure",
-				"CheckNodePIDPressure",
+				"CheckNodeUnschedulable", // mandatory predicate
 				"MaxAzureDiskVolumeCount",
 				"MaxEBSVolumeCount",
 				"MaxGCEPDVolumeCount",
@@ -241,9 +250,14 @@ predicates: []
 priorities: []
 `,
 			expectedPredicates: sets.NewString(
-				"CheckNodeCondition", // mandatory predicate
+				"CheckNodeUnschedulable", // mandatory predicate
 			),
 			expectedPrioritizers: sets.NewString(),
+			expectedPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {
+					{Name: "TaintToleration"},
+				},
+			},
 		},
 	} {
 		// Add a ConfigMap object.
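The pattern across these cases: even an empty predicates list yields CheckNodeUnschedulable, because the mandatory predicate set is unioned into whatever the Policy names. A self-contained sketch of that union, with plain Go standing in for apimachinery's sets.NewString:

package main

import "fmt"

// applyMandatory mirrors what these cases assert: the mandatory predicate
// set is unioned into whatever predicates a Policy supplies. Plain Go
// stands in for apimachinery's string set; illustrative only.
func applyMandatory(policy []string) []string {
	mandatory := []string{"CheckNodeUnschedulable"}
	seen := map[string]bool{}
	var out []string
	for _, p := range append(mandatory, policy...) {
		if !seen[p] {
			seen[p] = true
			out = append(out, p)
		}
	}
	return out
}

func main() {
	fmt.Println(applyMandatory(nil))                      // [CheckNodeUnschedulable]
	fmt.Println(applyMandatory([]string{"PredicateOne"})) // [CheckNodeUnschedulable PredicateOne]
}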
@@ -362,12 +376,6 @@ func TestUnschedulableNodes(t *testing.T) {
 		Reason:            fmt.Sprintf("schedulable condition"),
 		LastHeartbeatTime: metav1.Time{Time: time.Now()},
 	}
-	badCondition := v1.NodeCondition{
-		Type:              v1.NodeReady,
-		Status:            v1.ConditionUnknown,
-		Reason:            fmt.Sprintf("unschedulable condition"),
-		LastHeartbeatTime: metav1.Time{Time: time.Now()},
-	}
 	// Create a new schedulable node, since we're first going to apply
 	// the unschedulable condition and verify that pods aren't scheduled.
 	node := &v1.Node{
@@ -426,43 +434,6 @@ func TestUnschedulableNodes(t *testing.T) {
 				}
 			},
 		},
-		// Test node.Status.Conditions=ConditionTrue/Unknown
-		{
-			makeUnSchedulable: func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface) {
-				n.Status = v1.NodeStatus{
-					Capacity: v1.ResourceList{
-						v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
-					},
-					Conditions: []v1.NodeCondition{badCondition},
-				}
-				if _, err = c.CoreV1().Nodes().UpdateStatus(n); err != nil {
-					t.Fatalf("Failed to update node with bad status condition: %v", err)
-				}
-				err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
-					return node != nil && node.(*v1.Node).Status.Conditions[0].Status == v1.ConditionUnknown
-				})
-				if err != nil {
-					t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
-				}
-			},
-			makeSchedulable: func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface) {
-				n.Status = v1.NodeStatus{
-					Capacity: v1.ResourceList{
-						v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
-					},
-					Conditions: []v1.NodeCondition{goodCondition},
-				}
-				if _, err = c.CoreV1().Nodes().UpdateStatus(n); err != nil {
-					t.Fatalf("Failed to update node with healthy status condition: %v", err)
-				}
-				err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
-					return node != nil && node.(*v1.Node).Status.Conditions[0].Status == v1.ConditionTrue
-				})
-				if err != nil {
-					t.Fatalf("Failed to observe reflected update for status condition update: %v", err)
-				}
-			},
-		},
 	}
 
 	for i, mod := range nodeModifications {
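The ConditionTrue/Unknown modification pair is dropped because a NodeReady=Unknown condition on its own no longer hides a node from the scheduler; with the feature GA, that state surfaces as a node.kubernetes.io/not-ready or node.kubernetes.io/unreachable taint enforced by TaintToleration. A test of the new behavior would assert on taints rather than conditions, roughly as below (a sketch; the suite's own waitForNodeTaints helper plays this role, and v1 is k8s.io/api/core/v1):

// hasTaint reports whether node currently carries a taint with the given
// key, e.g. "node.kubernetes.io/not-ready". Illustrative sketch only.
func hasTaint(node *v1.Node, key string) bool {
	for _, taint := range node.Spec.Taints {
		if taint.Key == key {
			return true
		}
	}
	return false
}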
@@ -484,7 +455,7 @@ func TestUnschedulableNodes(t *testing.T) {
 		// There are no schedulable nodes - the pod shouldn't be scheduled.
 		err = waitForPodToScheduleWithTimeout(context.clientSet, myPod, 2*time.Second)
 		if err == nil {
-			t.Errorf("Pod scheduled successfully on unschedulable nodes")
+			t.Errorf("Test %d: Pod scheduled successfully on unschedulable nodes", i)
 		}
 		if err != wait.ErrWaitTimeout {
 			t.Errorf("Test %d: failed while trying to confirm the pod does not get scheduled on the node: %v", i, err)
@@ -67,9 +67,6 @@ func newPod(nsName, name string, req, limit v1.ResourceList) *v1.Pod {
 
 // TestTaintNodeByCondition tests related cases for TaintNodeByCondition feature.
 func TestTaintNodeByCondition(t *testing.T) {
-	// Enable TaintNodeByCondition
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.TaintNodesByCondition, true)()
-
 	// Build PodToleration Admission.
 	admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{})
 
@@ -110,7 +107,6 @@ func TestTaintNodeByCondition(t *testing.T) {
 		100,  // Unhealthy zone threshold
 		true, // Run taint manager
 		true, // Use taint based evictions
-		true, // Enabled TaintNodeByCondition feature
 	)
 	if err != nil {
 		t.Errorf("Failed to create node controller: %v", err)
@@ -539,7 +535,12 @@ func TestTaintNodeByCondition(t *testing.T) {
 			t.Errorf("Failed to create node, err: %v", err)
 		}
 		if err := waitForNodeTaints(cs, node, test.expectedTaints); err != nil {
-			t.Errorf("Failed to taint node <%s>, err: %v", node.Name, err)
+			node, err = cs.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
+			if err != nil {
+				t.Errorf("Failed to get node <%s>", node.Name)
+			}
+
+			t.Errorf("Failed to taint node <%s>, expected: %v, got: %v, err: %v", node.Name, test.expectedTaints, node.Spec.Taints, err)
 		}
 
 		var pods []*v1.Pod
@@ -689,7 +690,6 @@ func TestTaintBasedEvictions(t *testing.T) {
 		0.55, // Unhealthy zone threshold
 		true, // Run taint manager
 		true, // Use taint based evictions
-		true, // Enabled TaintNodeByCondition feature
 	)
 	if err != nil {
 		t.Errorf("Failed to create node controller: %v", err)
@@ -212,6 +212,7 @@ func initTestSchedulerWithOptions(
 	context.informerFactory.WaitForCacheSync(context.scheduler.StopEverything)
 
+	go context.scheduler.Run(context.ctx)
 
 	return context
 }
 
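The helper now launches the scheduler on a goroutine bound to the test's context (here context is the suite's test-context struct, not the standard library package), so cleanupTest can stop it by cancelling that context. A self-contained sketch of the lifecycle pattern, using stdlib names only:

package main

import (
	"context"
	"fmt"
	"time"
)

// run blocks until its context is canceled, standing in for the scheduler's
// Run loop that the test helper launches on a goroutine.
func run(ctx context.Context) {
	<-ctx.Done()
	fmt.Println("scheduler loop stopped:", ctx.Err())
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go run(ctx) // analogous to: go context.scheduler.Run(context.ctx)
	time.Sleep(10 * time.Millisecond)
	cancel() // cleanup cancels the context, stopping the goroutine
	time.Sleep(10 * time.Millisecond)
}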