Merge pull request #125461 from mimowo/pod-disruption-conditions-ga

Graduate PodDisruptionConditions to stable
Kubernetes Prow Robot authored on 2024-07-09 11:08:13 -07:00; committed by GitHub
commit 4a214f6ad9
19 changed files with 404 additions and 720 deletions

View File

@ -2709,7 +2709,6 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
testCases := map[string]struct {
enableJobPodFailurePolicy bool
enablePodDisruptionConditions bool
enableJobPodReplacementPolicy bool
job batch.Job
pods []v1.Pod
@ -3702,7 +3701,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
wantStatusFailed: 1,
wantStatusSucceeded: 0,
},
"terminating Pod considered failed when PodDisruptionConditions is disabled": {
"terminating Pod not considered failed when JobPodFailurePolicy is enabled and used": {
enableJobPodFailurePolicy: true,
job: batch.Job{
TypeMeta: metav1.TypeMeta{Kind: "Job"},
@ -3727,40 +3726,6 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
},
},
},
pods: []v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
DeletionTimestamp: &now,
},
},
},
},
"terminating Pod not considered failed when PodDisruptionConditions is enabled": {
enableJobPodFailurePolicy: true,
enablePodDisruptionConditions: true,
job: batch.Job{
TypeMeta: metav1.TypeMeta{Kind: "Job"},
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Parallelism: ptr.To[int32](1),
Selector: validSelector,
Template: validTemplate,
BackoffLimit: ptr.To[int32](0),
PodFailurePolicy: &batch.PodFailurePolicy{
Rules: []batch.PodFailurePolicyRule{
{
Action: batch.PodFailurePolicyActionCount,
OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
},
},
},
},
},
},
},
pods: []v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
@ -3776,7 +3741,6 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodFailurePolicy, tc.enableJobPodFailurePolicy)
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions)
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodReplacementPolicy, tc.enableJobPodReplacementPolicy)
if tc.job.Spec.PodReplacementPolicy == nil {
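For reference, a minimal sketch of the table-driven feature-gate toggle that these tests keep for gates which are still alpha/beta. Once a gate is GA and locked to its default, as PodDisruptionConditions becomes in this PR, it can no longer be switched off in a test, so the per-case enable flag and the corresponding SetFeatureGateDuringTest call are dropped. The test and case names below are hypothetical, not code from this PR.

package example

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

// TestWithGateToggle shows the per-case toggle that remains for gates still in
// alpha/beta. A GA gate locked to its default no longer belongs in the table:
// the "disabled" case cannot be exercised anymore.
func TestWithGateToggle(t *testing.T) {
	cases := map[string]struct {
		enableJobPodReplacementPolicy bool
	}{
		"replacement policy enabled":  {enableJobPodReplacementPolicy: true},
		"replacement policy disabled": {enableJobPodReplacementPolicy: false},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate,
				features.JobPodReplacementPolicy, tc.enableJobPodReplacementPolicy)
			// ... run the controller sync and assert on the resulting Job status ...
		})
	}
}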

View File

@ -31,20 +31,17 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
"k8s.io/client-go/util/workqueue"
featuregatetesting "k8s.io/component-base/featuregate/testing"
metricstestutil "k8s.io/component-base/metrics/testutil"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/podgc/metrics"
"k8s.io/kubernetes/pkg/controller/testutil"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/eviction"
testingclock "k8s.io/utils/clock/testing"
"k8s.io/utils/pointer"
@ -74,10 +71,9 @@ func TestGCTerminated(t *testing.T) {
threshold int
deletedPodNames sets.Set[string]
patchedPodNames sets.Set[string]
enablePodDisruptionConditions bool
}{
{
name: "delete pod a which is PodFailed and pod b which is PodSucceeded; PodDisruptionConditions enabled",
name: "delete pod a which is PodFailed and pod b which is PodSucceeded",
pods: []nameToPhase{
{name: "a", phase: v1.PodFailed},
{name: "b", phase: v1.PodSucceeded},
@ -85,7 +81,6 @@ func TestGCTerminated(t *testing.T) {
},
threshold: 1,
deletedPodNames: sets.New("a", "b"),
enablePodDisruptionConditions: true,
},
{
name: "threshold = 0, disables terminated pod deletion",
@ -156,7 +151,6 @@ func TestGCTerminated(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
resetMetrics()
_, ctx := ktesting.NewTestContext(t)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
creationTime := time.Unix(0, 0)
nodes := []*v1.Node{testutil.NewNode("node")}
@ -218,7 +212,6 @@ func TestGCOrphaned(t *testing.T) {
itemsInQueue int
deletedPodNames sets.Set[string]
patchedPodNames sets.Set[string]
enablePodDisruptionConditions bool
}{
{
name: "nodes present in lister",
@ -259,7 +252,7 @@ func TestGCOrphaned(t *testing.T) {
deletedPodNames: sets.New("a", "b"),
},
{
name: "no nodes with PodDisruptionConditions enabled",
name: "no nodes, one running pod",
delay: 2 * quarantineTime,
pods: []*v1.Pod{
makePod("a", "deleted", v1.PodFailed),
@ -269,7 +262,6 @@ func TestGCOrphaned(t *testing.T) {
itemsInQueue: 1,
deletedPodNames: sets.New("a", "b", "c"),
patchedPodNames: sets.New("c"),
enablePodDisruptionConditions: true,
},
{
name: "quarantine not finished",
@ -351,7 +343,6 @@ func TestGCOrphaned(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
resetMetrics()
_, ctx := ktesting.NewTestContext(t)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
client := setupNewSimpleClient(test.initialClientNodes, test.pods)
gcc, podInformer, nodeInformer := NewFromClient(ctx, client, -1)
@ -420,19 +411,7 @@ func TestGCUnscheduledTerminating(t *testing.T) {
pods []nameToPhase
deletedPodNames sets.Set[string]
patchedPodNames sets.Set[string]
enablePodDisruptionConditions bool
}{
{
name: "Unscheduled pod in any phase must be deleted, the phase of the running pod is changed to Failed; PodDisruptionConditions enabled",
pods: []nameToPhase{
{name: "a", phase: v1.PodFailed, deletionTimeStamp: &metav1.Time{}, nodeName: ""},
{name: "b", phase: v1.PodSucceeded, deletionTimeStamp: &metav1.Time{}, nodeName: ""},
{name: "c", phase: v1.PodRunning, deletionTimeStamp: &metav1.Time{}, nodeName: ""},
},
deletedPodNames: sets.New("a", "b", "c"),
patchedPodNames: sets.New("c"),
enablePodDisruptionConditions: true,
},
{
name: "Unscheduled pod in any phase must be deleted",
pods: []nameToPhase{
@ -457,7 +436,6 @@ func TestGCUnscheduledTerminating(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
resetMetrics()
_, ctx := ktesting.NewTestContext(t)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
creationTime := time.Unix(0, 0)
pods := make([]*v1.Pod, 0, len(test.pods))
@ -510,7 +488,6 @@ func TestGCTerminating(t *testing.T) {
nodes []node
deletedPodNames sets.Set[string]
patchedPodNames sets.Set[string]
enablePodDisruptionConditions bool
}{
{
name: "pods have deletion timestamp set and the corresponding nodes are not ready",
@ -592,7 +569,7 @@ func TestGCTerminating(t *testing.T) {
patchedPodNames: sets.New("b1", "b4", "b5", "b6"),
},
{
name: "pods deleted from node tained out-of-service; PodDisruptionConditions enabled",
name: "pods deleted from node tainted out-of-service",
nodes: []node{
{name: "worker", readyCondition: v1.ConditionFalse, taints: []v1.Taint{{Key: v1.TaintNodeOutOfService,
Effect: v1.TaintEffectNoExecute}}},
@ -604,14 +581,12 @@ func TestGCTerminating(t *testing.T) {
},
deletedPodNames: sets.New("a", "b", "c"),
patchedPodNames: sets.New("a"),
enablePodDisruptionConditions: true,
},
}
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
resetMetrics()
_, ctx := ktesting.NewTestContext(t)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
creationTime := time.Unix(0, 0)
nodes := make([]*v1.Node, 0, len(test.nodes))
@ -720,7 +695,6 @@ func TestGCInspectingPatchedPodBeforeDeletion(t *testing.T) {
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)
pods := []*v1.Pod{test.pod}
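The PodGC cases above now unconditionally expect pods to be patched with the DisruptionTarget condition before deletion. A minimal sketch of that status mutation follows; the helper name is hypothetical, the Reason string is an assumption, and the Message matches the integration-test expectation later in this PR.

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// markPodAsDisrupted mirrors the status change the tests expect before PodGC
// deletes an orphaned pod: a terminal phase plus the DisruptionTarget condition.
func markPodAsDisrupted(status *v1.PodStatus) {
	status.Phase = v1.PodFailed
	podutil.UpdatePodCondition(status, &v1.PodCondition{
		Type:               v1.DisruptionTarget,
		Status:             v1.ConditionTrue,
		Reason:             "DeletionByPodGC", // assumed reason string
		Message:            "PodGC: node no longer exists",
		LastTransitionTime: metav1.Now(),
	})
}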

View File

@ -32,14 +32,11 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/controller/testutil"
"k8s.io/kubernetes/pkg/features"
)
var timeForControllerToProgressForSanityCheck = 20 * time.Millisecond
@ -144,7 +141,6 @@ func TestCreatePod(t *testing.T) {
taintedNodes map[string][]corev1.Taint
expectPatch bool
expectDelete bool
enablePodDisruptionConditions bool
}{
{
description: "not scheduled - ignore",
@ -164,17 +160,8 @@ func TestCreatePod(t *testing.T) {
taintedNodes: map[string][]corev1.Taint{
"node1": {createNoExecuteTaint(1)},
},
expectDelete: true,
},
{
description: "schedule on tainted Node; PodDisruptionConditions enabled",
pod: testutil.NewPod("pod1", "node1"),
taintedNodes: map[string][]corev1.Taint{
"node1": {createNoExecuteTaint(1)},
},
expectPatch: true,
expectDelete: true,
enablePodDisruptionConditions: true,
},
{
description: "schedule on tainted Node with finite toleration",
@ -193,18 +180,18 @@ func TestCreatePod(t *testing.T) {
expectDelete: false,
},
{
description: "schedule on tainted Node with infinite ivalid toleration",
description: "schedule on tainted Node with infinite invalid toleration",
pod: addToleration(testutil.NewPod("pod1", "node1"), 2, -1),
taintedNodes: map[string][]corev1.Taint{
"node1": {createNoExecuteTaint(1)},
},
expectPatch: true,
expectDelete: true,
},
}
for _, item := range testCases {
t.Run(item.description, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, item.enablePodDisruptionConditions)
ctx, cancel := context.WithCancel(context.Background())
fakeClientset := fake.NewSimpleClientset(&corev1.PodList{Items: []corev1.Pod{*item.pod}})
controller, podIndexer, _ := setupNewController(ctx, fakeClientset)
@ -247,20 +234,8 @@ func TestUpdatePod(t *testing.T) {
taintedNodes map[string][]corev1.Taint
expectPatch bool
expectDelete bool
enablePodDisruptionConditions bool
skipOnWindows bool
}{
{
description: "scheduling onto tainted Node results in patch and delete when PodDisruptionConditions enabled",
prevPod: testutil.NewPod("pod1", ""),
newPod: testutil.NewPod("pod1", "node1"),
taintedNodes: map[string][]corev1.Taint{
"node1": {createNoExecuteTaint(1)},
},
expectPatch: true,
expectDelete: true,
enablePodDisruptionConditions: true,
},
{
description: "scheduling onto tainted Node",
prevPod: testutil.NewPod("pod1", ""),
@ -268,6 +243,7 @@ func TestUpdatePod(t *testing.T) {
taintedNodes: map[string][]corev1.Taint{
"node1": {createNoExecuteTaint(1)},
},
expectPatch: true,
expectDelete: true,
},
{
@ -287,6 +263,7 @@ func TestUpdatePod(t *testing.T) {
taintedNodes: map[string][]corev1.Taint{
"node1": {createNoExecuteTaint(1)},
},
expectPatch: true,
expectDelete: true,
},
{
@ -297,6 +274,7 @@ func TestUpdatePod(t *testing.T) {
taintedNodes: map[string][]corev1.Taint{
"node1": {createNoExecuteTaint(1)},
},
expectPatch: true,
expectDelete: true,
skipOnWindows: true,
},
@ -308,7 +286,6 @@ func TestUpdatePod(t *testing.T) {
// TODO: remove skip once the flaking test has been fixed.
t.Skip("Skip flaking test on Windows.")
}
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, item.enablePodDisruptionConditions)
ctx, cancel := context.WithCancel(context.Background())
fakeClientset := fake.NewSimpleClientset(&corev1.PodList{Items: []corev1.Pod{*item.prevPod}})
controller, podIndexer, _ := setupNewController(context.TODO(), fakeClientset)
@ -424,10 +401,9 @@ func TestUpdateNode(t *testing.T) {
expectPatch bool
expectDelete bool
additionalSleep time.Duration
enablePodDisruptionConditions bool
}{
{
description: "Added taint, expect node patched and deleted when PodDisruptionConditions is enabled",
description: "Added taint, expect node patched and deleted",
pods: []corev1.Pod{
*testutil.NewPod("pod1", "node1"),
},
@ -435,16 +411,6 @@ func TestUpdateNode(t *testing.T) {
newNode: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}),
expectPatch: true,
expectDelete: true,
enablePodDisruptionConditions: true,
},
{
description: "Added taint",
pods: []corev1.Pod{
*testutil.NewPod("pod1", "node1"),
},
oldNode: testutil.NewNode("node1"),
newNode: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1}),
expectDelete: true,
},
{
description: "Added tolerated taint",
@ -462,6 +428,7 @@ func TestUpdateNode(t *testing.T) {
},
oldNode: testutil.NewNode("node1"),
newNode: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1, 2}),
expectPatch: true,
expectDelete: true,
},
{
@ -501,13 +468,13 @@ func TestUpdateNode(t *testing.T) {
},
oldNode: testutil.NewNode("node1"),
newNode: addTaintsToNode(testutil.NewNode("node1"), "testTaint1", "taint1", []int{1, 2}),
expectPatch: true,
expectDelete: true,
},
}
for _, item := range testCases {
t.Run(item.description, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, item.enablePodDisruptionConditions)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
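These taint-manager cases now expect both a patch (the DisruptionTarget condition) and a delete for every eviction. A sketch of how expectPatch/expectDelete style expectations are typically verified against the fake clientset; the helper is hypothetical, while Actions(), GetVerb() and GetResource() are standard client-go testing accessors.

package example

import (
	"testing"

	"k8s.io/client-go/kubernetes/fake"
)

// verifyPodActions scans the actions recorded by the fake clientset and checks
// whether a pod was patched and/or deleted as the test case expects.
func verifyPodActions(t *testing.T, c *fake.Clientset, wantPatch, wantDelete bool) {
	t.Helper()
	var gotPatch, gotDelete bool
	for _, a := range c.Actions() {
		if a.GetResource().Resource != "pods" {
			continue
		}
		switch a.GetVerb() {
		case "patch":
			gotPatch = true // status patch carrying the DisruptionTarget condition
		case "delete":
			gotDelete = true // the eviction itself
		}
	}
	if gotPatch != wantPatch || gotDelete != wantDelete {
		t.Errorf("pod actions: patch=%v delete=%v, want patch=%v delete=%v",
			gotPatch, gotDelete, wantPatch, wantDelete)
	}
}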

View File

@ -567,6 +567,7 @@ const (
// kep: https://kep.k8s.io/3329
// alpha: v1.25
// beta: v1.26
// stable: v1.31
//
// Enables support for appending a dedicated pod condition indicating that
// the pod is being deleted due to a disruption.
@ -1117,7 +1118,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
PodDeletionCost: {Default: true, PreRelease: featuregate.Beta},
PodDisruptionConditions: {Default: true, PreRelease: featuregate.Beta},
PodDisruptionConditions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33
PodReadyToStartContainersCondition: {Default: true, PreRelease: featuregate.Beta},
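For reference, the graduation pattern the hunk above applies, shown as a standalone sketch rather than the real map in pkg/features/kube_features.go; the beta gate name is invented.

package example

import "k8s.io/component-base/featuregate"

var gateSpecs = map[featuregate.Feature]featuregate.FeatureSpec{
	// Beta: enabled by default but still switchable via --feature-gates.
	"SomeBetaFeature": {Default: true, PreRelease: featuregate.Beta},

	// GA: enabled and locked to its default, so it can no longer be disabled.
	// The entry is kept for a deprecation window (the hunk above schedules its
	// removal for 1.33) so that explicitly setting the gate remains a valid flag.
	"PodDisruptionConditions": {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
}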

View File

@ -268,29 +268,6 @@ type podToMake struct {
}
func TestMemoryPressure_VerifyPodStatus(t *testing.T) {
testCases := map[string]struct {
wantPodStatus v1.PodStatus
}{
"eviction due to memory pressure; no image fs": {
wantPodStatus: v1.PodStatus{
Phase: v1.PodFailed,
Reason: "Evicted",
Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ",
},
},
"eviction due to memory pressure; image fs": {
wantPodStatus: v1.PodStatus{
Phase: v1.PodFailed,
Reason: "Evicted",
Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ",
},
},
}
for name, tc := range testCases {
for _, enablePodDisruptionConditions := range []bool{false, true} {
t.Run(fmt.Sprintf("%s;PodDisruptionConditions=%v", name, enablePodDisruptionConditions), func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, enablePodDisruptionConditions)
podMaker := makePodWithMemoryStats
summaryStatsMaker := makeMemoryStats
podsToMake := []podToMake{
@ -356,24 +333,23 @@ func TestMemoryPressure_VerifyPodStatus(t *testing.T) {
t.Fatalf("Manager should have selected a pod for eviction")
}
wantPodStatus := tc.wantPodStatus.DeepCopy()
if enablePodDisruptionConditions {
wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{
wantPodStatus := v1.PodStatus{
Phase: v1.PodFailed,
Reason: "Evicted",
Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ",
Conditions: []v1.PodCondition{{
Type: "DisruptionTarget",
Status: "True",
Reason: "TerminationByKubelet",
Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ",
})
}},
}
// verify the pod status after applying the status update function
podKiller.statusFn(&podKiller.pod.Status)
if diff := cmp.Diff(*wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
if diff := cmp.Diff(wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff)
}
})
}
}
}
func TestPIDPressure_VerifyPodStatus(t *testing.T) {
@ -388,11 +364,7 @@ func TestPIDPressure_VerifyPodStatus(t *testing.T) {
},
},
}
for name, tc := range testCases {
for _, enablePodDisruptionConditions := range []bool{true, false} {
t.Run(fmt.Sprintf("%s;PodDisruptionConditions=%v", name, enablePodDisruptionConditions), func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, enablePodDisruptionConditions)
for _, tc := range testCases {
podMaker := makePodWithPIDStats
summaryStatsMaker := makePIDStats
podsToMake := []podToMake{
@ -460,22 +432,18 @@ func TestPIDPressure_VerifyPodStatus(t *testing.T) {
}
wantPodStatus := tc.wantPodStatus.DeepCopy()
if enablePodDisruptionConditions {
wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{
Type: "DisruptionTarget",
Status: "True",
Reason: "TerminationByKubelet",
Message: "The node was low on resource: pids. Threshold quantity: 1200, available: 500. ",
})
}
// verify the pod status after applying the status update function
podKiller.statusFn(&podKiller.pod.Status)
if diff := cmp.Diff(*wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff)
}
})
}
}
}
@ -570,11 +538,8 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
},
},
}
for name, tc := range testCases {
for _, enablePodDisruptionConditions := range []bool{false, true} {
t.Run(fmt.Sprintf("%s;PodDisruptionConditions=%v", name, enablePodDisruptionConditions), func(t *testing.T) {
for _, tc := range testCases {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.KubeletSeparateDiskGC, tc.kubeletSeparateDiskFeature)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, enablePodDisruptionConditions)
podMaker := makePodWithDiskStats
summaryStatsMaker := makeDiskStats
@ -645,14 +610,12 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
t.Fatalf("Manager should have selected a pod for eviction")
}
if enablePodDisruptionConditions {
wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{
Type: "DisruptionTarget",
Status: "True",
Reason: "TerminationByKubelet",
Message: tc.evictionMessage,
})
}
// verify the pod status after applying the status update function
podKiller.statusFn(&podKiller.pod.Status)
@ -660,8 +623,6 @@ func TestDiskPressureNodeFs_VerifyPodStatus(t *testing.T) {
t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff)
}
}
})
}
}
}
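The eviction tests keep comparing the full PodStatus with go-cmp while ignoring per-condition timestamps, which the eviction manager sets to "now" and which would otherwise make the comparison flaky. The same technique, extracted as a small helper for clarity (the helper name is mine, not the test's):

package example

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	v1 "k8s.io/api/core/v1"
)

// assertPodStatus compares the full status but ignores condition timestamps.
func assertPodStatus(t *testing.T, want, got v1.PodStatus) {
	t.Helper()
	opts := cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")
	if diff := cmp.Diff(want, got, opts); diff != "" {
		t.Errorf("unexpected pod status of the evicted pod (-want,+got):\n%s", diff)
	}
}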

View File

@ -3267,13 +3267,12 @@ func Test_generateAPIPodStatus(t *testing.T) {
unreadyContainer []string
previousStatus v1.PodStatus
isPodTerminal bool
enablePodDisruptionConditions bool
expected v1.PodStatus
expectedPodDisruptionCondition v1.PodCondition
expectedPodDisruptionCondition *v1.PodCondition
expectedPodReadyToStartContainersCondition v1.PodCondition
}{
{
name: "pod disruption condition is copied over and the phase is set to failed when deleted; PodDisruptionConditions enabled",
name: "pod disruption condition is copied over and the phase is set to failed when deleted",
pod: &v1.Pod{
Spec: desiredState,
Status: v1.PodStatus{
@ -3302,7 +3301,6 @@ func Test_generateAPIPodStatus(t *testing.T) {
}},
},
isPodTerminal: true,
enablePodDisruptionConditions: true,
expected: v1.PodStatus{
Phase: v1.PodFailed,
HostIP: "127.0.0.1",
@ -3319,7 +3317,7 @@ func Test_generateAPIPodStatus(t *testing.T) {
ready(waitingWithLastTerminationUnknown("containerB", 0)),
},
},
expectedPodDisruptionCondition: v1.PodCondition{
expectedPodDisruptionCondition: &v1.PodCondition{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
LastTransitionTime: normalized_now,
@ -3705,7 +3703,6 @@ func Test_generateAPIPodStatus(t *testing.T) {
for _, test := range tests {
for _, enablePodReadyToStartContainersCondition := range []bool{false, true} {
t.Run(test.name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadyToStartContainersCondition, enablePodReadyToStartContainersCondition)
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
@ -3719,8 +3716,8 @@ func Test_generateAPIPodStatus(t *testing.T) {
if enablePodReadyToStartContainersCondition {
expected.Conditions = append([]v1.PodCondition{test.expectedPodReadyToStartContainersCondition}, expected.Conditions...)
}
if test.enablePodDisruptionConditions {
expected.Conditions = append([]v1.PodCondition{test.expectedPodDisruptionCondition}, expected.Conditions...)
if test.expectedPodDisruptionCondition != nil {
expected.Conditions = append([]v1.PodCondition{*test.expectedPodDisruptionCondition}, expected.Conditions...)
}
if !apiequality.Semantic.DeepEqual(*expected, actual) {
t.Fatalf("Unexpected status: %s", cmp.Diff(*expected, actual))

View File

@ -125,14 +125,13 @@ func TestManager(t *testing.T) {
shutdownGracePeriodCriticalPods time.Duration
systemInhibitDelay time.Duration
overrideSystemInhibitDelay time.Duration
enablePodDisruptionConditions bool
expectedDidOverrideInhibitDelay bool
expectedPodToGracePeriodOverride map[string]int64
expectedError error
expectedPodStatuses map[string]v1.PodStatus
}{
{
desc: "verify pod status; PodDisruptionConditions enabled",
desc: "verify pod status",
activePods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{Name: "running-pod"},
@ -160,7 +159,6 @@ func TestManager(t *testing.T) {
shutdownGracePeriodCriticalPods: time.Duration(10 * time.Second),
systemInhibitDelay: time.Duration(40 * time.Second),
overrideSystemInhibitDelay: time.Duration(40 * time.Second),
enablePodDisruptionConditions: true,
expectedDidOverrideInhibitDelay: false,
expectedPodToGracePeriodOverride: map[string]int64{"running-pod": 20, "failed-pod": 20, "succeeded-pod": 20},
expectedPodStatuses: map[string]v1.PodStatus{
@ -212,7 +210,6 @@ func TestManager(t *testing.T) {
shutdownGracePeriodCriticalPods: time.Duration(10 * time.Second),
systemInhibitDelay: time.Duration(40 * time.Second),
overrideSystemInhibitDelay: time.Duration(40 * time.Second),
enablePodDisruptionConditions: false,
expectedDidOverrideInhibitDelay: false,
expectedPodToGracePeriodOverride: map[string]int64{"normal-pod-nil-grace-period": 20, "critical-pod-nil-grace-period": 10},
expectedPodStatuses: map[string]v1.PodStatus{
@ -220,11 +217,27 @@ func TestManager(t *testing.T) {
Phase: v1.PodFailed,
Message: "Pod was terminated in response to imminent node shutdown.",
Reason: "Terminated",
Conditions: []v1.PodCondition{
{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
Reason: "TerminationByKubelet",
Message: "Pod was terminated in response to imminent node shutdown.",
},
},
},
"critical-pod-nil-grace-period": {
Phase: v1.PodFailed,
Message: "Pod was terminated in response to imminent node shutdown.",
Reason: "Terminated",
Conditions: []v1.PodCondition{
{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
Reason: "TerminationByKubelet",
Message: "Pod was terminated in response to imminent node shutdown.",
},
},
},
},
},
@ -331,7 +344,6 @@ func TestManager(t *testing.T) {
systemDbus = func() (dbusInhibiter, error) {
return fakeDbus, nil
}
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.PodDisruptionConditions, tc.enablePodDisruptionConditions)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.GracefulNodeShutdown, true)
proberManager := probetest.FakeManager{}
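With the gate always on, both shutdown test cases now expect the same terminal status on every pod. A sketch of that status, mirroring the expectations added in the hunk above; the helper name is hypothetical.

package example

import v1 "k8s.io/api/core/v1"

// expectedShutdownStatus is the terminal status the graceful-shutdown cases expect.
func expectedShutdownStatus() v1.PodStatus {
	const msg = "Pod was terminated in response to imminent node shutdown."
	return v1.PodStatus{
		Phase:   v1.PodFailed,
		Reason:  "Terminated",
		Message: msg,
		Conditions: []v1.PodCondition{{
			Type:    v1.DisruptionTarget,
			Status:  v1.ConditionTrue,
			Reason:  "TerminationByKubelet",
			Message: msg,
		}},
	}
}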

View File

@ -35,14 +35,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
featuregatetesting "k8s.io/component-base/featuregate/testing"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
@ -1065,7 +1062,6 @@ func TestTerminatePod_DefaultUnknownStatus(t *testing.T) {
func TestTerminatePod_EnsurePodPhaseIsTerminal(t *testing.T) {
testCases := map[string]struct {
enablePodDisruptionConditions bool
status v1.PodStatus
wantStatus v1.PodStatus
}{
@ -1543,7 +1539,6 @@ func deleteAction() core.DeleteAction {
func TestMergePodStatus(t *testing.T) {
useCases := []struct {
desc string
enablePodDisruptionConditions bool
hasRunningContainers bool
oldPodStatus func(input v1.PodStatus) v1.PodStatus
newPodStatus func(input v1.PodStatus) v1.PodStatus
@ -1552,14 +1547,12 @@ func TestMergePodStatus(t *testing.T) {
{
"no change",
false,
false,
func(input v1.PodStatus) v1.PodStatus { return input },
func(input v1.PodStatus) v1.PodStatus { return input },
getPodStatus(),
},
{
"add DisruptionTarget condition when transitioning into failed phase; PodDisruptionConditions enabled",
true,
"add DisruptionTarget condition when transitioning into failed phase",
false,
func(input v1.PodStatus) v1.PodStatus { return input },
func(input v1.PodStatus) v1.PodStatus {
@ -1598,8 +1591,7 @@ func TestMergePodStatus(t *testing.T) {
},
},
{
"don't add DisruptionTarget condition when transitioning into failed phase, but there are might still be running containers; PodDisruptionConditions enabled",
true,
"don't add DisruptionTarget condition when transitioning into failed phase, but there might still be running containers",
true,
func(input v1.PodStatus) v1.PodStatus { return input },
func(input v1.PodStatus) v1.PodStatus {
@ -1627,8 +1619,7 @@ func TestMergePodStatus(t *testing.T) {
},
},
{
"preserve DisruptionTarget condition; PodDisruptionConditions enabled",
true,
"preserve DisruptionTarget condition",
false,
func(input v1.PodStatus) v1.PodStatus {
input.Conditions = append(input.Conditions, v1.PodCondition{
@ -1662,43 +1653,7 @@ func TestMergePodStatus(t *testing.T) {
},
},
{
"preserve DisruptionTarget condition; PodDisruptionConditions disabled",
false,
false,
func(input v1.PodStatus) v1.PodStatus {
input.Conditions = append(input.Conditions, v1.PodCondition{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
Reason: "TerminationByKubelet",
})
return input
},
func(input v1.PodStatus) v1.PodStatus {
return input
},
v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
{
Type: v1.PodScheduled,
Status: v1.ConditionTrue,
},
{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
Reason: "TerminationByKubelet",
},
},
Message: "Message",
},
},
{
"override DisruptionTarget condition; PodDisruptionConditions enabled",
true,
"override DisruptionTarget condition",
false,
func(input v1.PodStatus) v1.PodStatus {
input.Conditions = append(input.Conditions, v1.PodCondition{
@ -1744,8 +1699,7 @@ func TestMergePodStatus(t *testing.T) {
},
},
{
"don't override DisruptionTarget condition when remaining in running phase; PodDisruptionConditions enabled",
true,
"don't override DisruptionTarget condition when remaining in running phase",
false,
func(input v1.PodStatus) v1.PodStatus {
input.Conditions = append(input.Conditions, v1.PodCondition{
@ -1784,8 +1738,7 @@ func TestMergePodStatus(t *testing.T) {
},
},
{
"don't override DisruptionTarget condition when transitioning to failed phase but there might still be running containers; PodDisruptionConditions enabled",
true,
"don't override DisruptionTarget condition when transitioning to failed phase but there might still be running containers",
true,
func(input v1.PodStatus) v1.PodStatus {
input.Conditions = append(input.Conditions, v1.PodCondition{
@ -1827,7 +1780,6 @@ func TestMergePodStatus(t *testing.T) {
{
"readiness changes",
false,
false,
func(input v1.PodStatus) v1.PodStatus { return input },
func(input v1.PodStatus) v1.PodStatus {
input.Conditions[0].Status = v1.ConditionFalse
@ -1851,7 +1803,6 @@ func TestMergePodStatus(t *testing.T) {
{
"additional pod condition",
false,
false,
func(input v1.PodStatus) v1.PodStatus {
input.Conditions = append(input.Conditions, v1.PodCondition{
Type: v1.PodConditionType("example.com/feature"),
@ -1882,7 +1833,6 @@ func TestMergePodStatus(t *testing.T) {
{
"additional pod condition and readiness changes",
false,
false,
func(input v1.PodStatus) v1.PodStatus {
input.Conditions = append(input.Conditions, v1.PodCondition{
Type: v1.PodConditionType("example.com/feature"),
@ -1916,7 +1866,6 @@ func TestMergePodStatus(t *testing.T) {
{
"additional pod condition changes",
false,
false,
func(input v1.PodStatus) v1.PodStatus {
input.Conditions = append(input.Conditions, v1.PodCondition{
Type: v1.PodConditionType("example.com/feature"),
@ -1953,7 +1902,6 @@ func TestMergePodStatus(t *testing.T) {
{
"phase is transitioning to failed and no containers running",
false,
false,
func(input v1.PodStatus) v1.PodStatus {
input.Phase = v1.PodRunning
input.Reason = "Unknown"
@ -1990,7 +1938,6 @@ func TestMergePodStatus(t *testing.T) {
},
{
"phase is transitioning to failed and containers running",
false,
true,
func(input v1.PodStatus) v1.PodStatus {
input.Phase = v1.PodRunning
@ -2024,7 +1971,6 @@ func TestMergePodStatus(t *testing.T) {
for _, tc := range useCases {
t.Run(tc.desc, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions)
output := mergePodStatus(tc.oldPodStatus(getPodStatus()), tc.newPodStatus(getPodStatus()), tc.hasRunningContainers)
if !conditionsEqual(output.Conditions, tc.expectPodStatus.Conditions) || !statusEqual(output, tc.expectPodStatus) {
t.Fatalf("unexpected output: %s", cmp.Diff(tc.expectPodStatus, output))

View File

@ -27,7 +27,6 @@ import (
func TestPodConditionByKubelet(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadyToStartContainersCondition, true)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)
trueCases := []v1.PodConditionType{
v1.PodScheduled,
@ -56,8 +55,6 @@ func TestPodConditionByKubelet(t *testing.T) {
}
func TestPodConditionSharedByKubelet(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)
trueCases := []v1.PodConditionType{
v1.DisruptionTarget,
}

View File

@ -34,13 +34,10 @@ import (
"k8s.io/apimachinery/pkg/watch"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/kubernetes/fake"
featuregatetesting "k8s.io/component-base/featuregate/testing"
podapi "k8s.io/kubernetes/pkg/api/pod"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/features"
)
func TestEviction(t *testing.T) {
@ -802,10 +799,7 @@ func TestAddConditionAndDelete(t *testing.T) {
evictionRest := newEvictionStorage(storage.Store, client.PolicyV1())
for _, tc := range cases {
for _, conditionsEnabled := range []bool{true, false} {
name := fmt.Sprintf("%s_conditions=%v", tc.name, conditionsEnabled)
t.Run(name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, conditionsEnabled)
t.Run(tc.name, func(t *testing.T) {
var deleteOptions *metav1.DeleteOptions
if tc.initialPod {
newPod := validNewPod()
@ -843,7 +837,6 @@ func TestAddConditionAndDelete(t *testing.T) {
})
}
}
}
func resource(resource string) schema.GroupResource {
return schema.GroupResource{Group: "", Resource: resource}

View File

@ -1431,14 +1431,6 @@ func TestPodEligibleToPreemptOthers(t *testing.T) {
nominatedNodeStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, tainttoleration.ErrReasonNotMatch),
expected: true,
},
{
name: "Pod with nominated node, but without nominated node status",
pod: st.MakePod().Name("p_without_status").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
pods: []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().Obj()},
nodes: []string{"node1"},
nominatedNodeStatus: nil,
expected: false,
},
{
name: "Pod without nominated node",
pod: st.MakePod().Name("p_without_nominated_node").UID("p").Priority(highPriority).Obj(),
@ -1456,8 +1448,7 @@ func TestPodEligibleToPreemptOthers(t *testing.T) {
expected: false,
},
{
name: "victim Pods terminating, feature PodDisruptionConditions is enabled",
fts: feature.Features{EnablePodDisruptionConditions: true},
name: "preemption victim pod terminating, as indicated by the dedicated DisruptionTarget condition",
pod: st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
pods: []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().
Condition(v1.DisruptionTarget, v1.ConditionTrue, v1.PodReasonPreemptionByScheduler).Obj()},
@ -1465,34 +1456,17 @@ func TestPodEligibleToPreemptOthers(t *testing.T) {
expected: false,
},
{
name: "non-victim Pods terminating, feature PodDisruptionConditions is enabled",
fts: feature.Features{EnablePodDisruptionConditions: true},
name: "non-victim Pods terminating",
pod: st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
pods: []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().Obj()},
nodes: []string{"node1"},
expected: true,
},
{
name: "victim Pods terminating, feature PodDisruptionConditions is disabled",
fts: feature.Features{EnablePodDisruptionConditions: false},
pod: st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
pods: []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().
Condition(v1.DisruptionTarget, v1.ConditionTrue, v1.PodReasonPreemptionByScheduler).Obj()},
nodes: []string{"node1"},
expected: false,
},
{
name: "non-victim Pods terminating, feature PodDisruptionConditions is disabled",
fts: feature.Features{EnablePodDisruptionConditions: false},
pod: st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
pods: []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().Obj()},
nodes: []string{"node1"},
expected: false,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
test.fts.EnablePodDisruptionConditions = true
logger, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
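Roughly, the eligibility rule these cases exercise: a preemptor that already has a nominated node should not preempt again while a victim on that node is still terminating from a previous preemption, which is signalled by the DisruptionTarget condition. A simplified sketch under that reading, not the upstream implementation; the function name and parameters are invented.

package example

import (
	v1 "k8s.io/api/core/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// eligibleToPreemptOthers returns false while a previously preempted victim on the
// nominated node is still terminating.
func eligibleToPreemptOthers(preemptor *v1.Pod, podsOnNominatedNode []*v1.Pod) bool {
	if preemptor.Status.NominatedNodeName == "" {
		return true
	}
	for _, p := range podsOnNominatedNode {
		if p.DeletionTimestamp == nil {
			continue
		}
		if _, cond := podutil.GetPodCondition(&p.Status, v1.DisruptionTarget); cond != nil && cond.Status == v1.ConditionTrue {
			return false // a previous preemption is still in flight; don't preempt again
		}
	}
	return true
}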

View File

@ -82,9 +82,6 @@ var (
// TODO: document the feature (owning SIG, when to use this feature for a test)
OOMScoreAdj = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("OOMScoreAdj"))
// TODO: document the feature (owning SIG, when to use this feature for a test)
PodDisruptionConditions = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("PodDisruptionConditions"))
// TODO: document the feature (owning SIG, when to use this feature for a test)
PodResources = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("PodResources"))

View File

@ -91,7 +91,7 @@ var _ = SIGDescribe("CriticalPod", framework.WithSerial(), framework.WithDisrupt
}
})
f.It("should add DisruptionTarget condition to the preempted pod", nodefeature.PodDisruptionConditions, func(ctx context.Context) {
f.It("should add DisruptionTarget condition to the preempted pod", func(ctx context.Context) {
// because Priority admission is enabled, if the priority class is not found, the Pod is rejected.
node := getNodeName(ctx, f)
nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{

View File

@ -31,7 +31,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
"k8s.io/kubernetes/pkg/features"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/eviction"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
@ -45,6 +44,7 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
"k8s.io/utils/ptr"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
@ -513,23 +513,19 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framewo
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logPidMetrics, specs)
})
f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; PodDisruptionConditions enabled", nodefeature.PodDisruptionConditions, func() {
f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; baseline scenario to verify DisruptionTarget is added", func() {
tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
pidsConsumed := int64(10000)
summary := eventuallyGetSummary(ctx)
availablePids := *(summary.Node.Rlimit.MaxPID) - *(summary.Node.Rlimit.NumOfRunningProcesses)
initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)}
initialConfig.EvictionMinimumReclaim = map[string]string{}
initialConfig.FeatureGates = map[string]bool{
string(features.PodDisruptionConditions): true,
}
})
disruptionTarget := v1.DisruptionTarget
specs := []podEvictSpec{
{
evictionPriority: 1,
pod: pidConsumingPod("fork-bomb-container", 30000),
wantPodDisruptionCondition: &disruptionTarget,
wantPodDisruptionCondition: ptr.To(v1.DisruptionTarget),
},
}
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logPidMetrics, specs)

View File

@ -83,7 +83,7 @@ var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.
}
})
f.Context("graceful node shutdown when PodDisruptionConditions are enabled", nodefeature.PodDisruptionConditions, func() {
f.Context("graceful node shutdown; baseline scenario to verify DisruptionTarget is added", func() {
const (
pollInterval = 1 * time.Second
@ -95,7 +95,6 @@ var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.
tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
initialConfig.FeatureGates = map[string]bool{
string(features.GracefulNodeShutdown): true,
string(features.PodDisruptionConditions): true,
string(features.GracefulNodeShutdownBasedOnPodPriority): false,
}
initialConfig.ShutdownGracePeriod = metav1.Duration{Duration: nodeShutdownGracePeriod}

View File

@ -40,7 +40,6 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/util/feature"
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
@ -50,12 +49,10 @@ import (
"k8s.io/client-go/restmapper"
"k8s.io/client-go/scale"
"k8s.io/client-go/tools/cache"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/disruption"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/integration/framework"
"k8s.io/kubernetes/test/utils/ktesting"
)
@ -346,33 +343,19 @@ func TestEvictionVersions(t *testing.T) {
// TestEvictionWithFinalizers tests eviction with the use of finalizers
func TestEvictionWithFinalizers(t *testing.T) {
cases := map[string]struct {
enablePodDisruptionConditions bool
phase v1.PodPhase
dryRun bool
wantDisruptionTargetCond bool
}{
"terminal pod with PodDisruptionConditions enabled": {
enablePodDisruptionConditions: true,
"terminal pod": {
phase: v1.PodSucceeded,
wantDisruptionTargetCond: true,
},
"terminal pod with PodDisruptionConditions disabled": {
enablePodDisruptionConditions: false,
phase: v1.PodSucceeded,
wantDisruptionTargetCond: false,
},
"running pod with PodDisruptionConditions enabled": {
enablePodDisruptionConditions: true,
"running pod": {
phase: v1.PodRunning,
wantDisruptionTargetCond: true,
},
"running pod with PodDisruptionConditions disabled": {
enablePodDisruptionConditions: false,
phase: v1.PodRunning,
wantDisruptionTargetCond: false,
},
"running pod with PodDisruptionConditions enabled should not update conditions in dry-run mode": {
enablePodDisruptionConditions: true,
"running pod should not update conditions in dry-run mode": {
phase: v1.PodRunning,
dryRun: true,
wantDisruptionTargetCond: false,
@ -386,7 +369,6 @@ func TestEvictionWithFinalizers(t *testing.T) {
ns := framework.CreateNamespaceOrDie(clientSet, "eviction-with-finalizers", t)
defer framework.DeleteNamespaceOrDie(clientSet, ns, t)
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions)
defer tCtx.Cancel("test has completed")
informers.Start(tCtx.Done())

View File

@ -51,31 +51,21 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
nodeIndex := 1 // the exact node doesn't matter, pick one
tests := map[string]struct {
enablePodDisruptionConditions bool
enableSeparateTaintEvictionController bool
startStandaloneTaintEvictionController bool
wantPodEvicted bool
}{
"Test eviction for NoExecute taint added by user; pod condition added when PodDisruptionConditions enabled; separate taint eviction controller disabled": {
enablePodDisruptionConditions: true,
enableSeparateTaintEvictionController: false,
startStandaloneTaintEvictionController: false,
wantPodEvicted: true,
},
"Test eviction for NoExecute taint added by user; no pod condition added when PodDisruptionConditions disabled; separate taint eviction controller disabled": {
enablePodDisruptionConditions: false,
"Test eviction for NoExecute taint added by user; pod condition added; separate taint eviction controller disabled": {
enableSeparateTaintEvictionController: false,
startStandaloneTaintEvictionController: false,
wantPodEvicted: true,
},
"Test eviction for NoExecute taint added by user; separate taint eviction controller enabled but not started": {
enablePodDisruptionConditions: false,
enableSeparateTaintEvictionController: true,
startStandaloneTaintEvictionController: false,
wantPodEvicted: false,
},
"Test eviction for NoExecute taint added by user; separate taint eviction controller enabled and started": {
enablePodDisruptionConditions: false,
enableSeparateTaintEvictionController: true,
startStandaloneTaintEvictionController: true,
wantPodEvicted: true,
@ -124,7 +114,6 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
},
}
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.SeparateTaintEvictionController, test.enableSeparateTaintEvictionController)
testCtx := testutils.InitTestAPIServer(t, "taint-no-execute", nil)
cs := testCtx.ClientSet
@ -202,9 +191,9 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
t.Fatalf("Test Failed: error: %q, while getting updated pod", err)
}
_, cond := podutil.GetPodCondition(&testPod.Status, v1.DisruptionTarget)
if test.enablePodDisruptionConditions && cond == nil {
if test.wantPodEvicted && cond == nil {
t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(testPod), v1.DisruptionTarget)
} else if !test.enablePodDisruptionConditions && cond != nil {
} else if !test.wantPodEvicted && cond != nil {
t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(testPod), v1.DisruptionTarget)
}
})

View File

@ -40,14 +40,12 @@ import (
// TestPodGcOrphanedPodsWithFinalizer tests deletion of orphaned pods
func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
tests := map[string]struct {
enablePodDisruptionConditions bool
enableJobPodReplacementPolicy bool
phase v1.PodPhase
wantPhase v1.PodPhase
wantDisruptionTarget *v1.PodCondition
}{
"PodDisruptionConditions enabled": {
enablePodDisruptionConditions: true,
"pending pod": {
phase: v1.PodPending,
wantPhase: v1.PodFailed,
wantDisruptionTarget: &v1.PodCondition{
@ -57,8 +55,7 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
Message: "PodGC: node no longer exists",
},
},
"PodDisruptionConditions and PodReplacementPolicy enabled": {
enablePodDisruptionConditions: true,
"pending pod; PodReplacementPolicy enabled": {
enableJobPodReplacementPolicy: true,
phase: v1.PodPending,
wantPhase: v1.PodFailed,
@ -69,24 +66,11 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
Message: "PodGC: node no longer exists",
},
},
"Only PodReplacementPolicy enabled; no PodDisruptionCondition": {
enablePodDisruptionConditions: false,
enableJobPodReplacementPolicy: true,
phase: v1.PodPending,
wantPhase: v1.PodFailed,
},
"PodDisruptionConditions disabled": {
enablePodDisruptionConditions: false,
phase: v1.PodPending,
wantPhase: v1.PodPending,
},
"PodDisruptionConditions enabled; succeeded pod": {
enablePodDisruptionConditions: true,
"succeeded pod": {
phase: v1.PodSucceeded,
wantPhase: v1.PodSucceeded,
},
"PodDisruptionConditions enabled; failed pod": {
enablePodDisruptionConditions: true,
"failed pod": {
phase: v1.PodFailed,
wantPhase: v1.PodFailed,
},
@ -94,7 +78,6 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodReplacementPolicy, test.enableJobPodReplacementPolicy)
testCtx := setup(t, "podgc-orphaned")
cs := testCtx.ClientSet
@ -170,31 +153,18 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
// TestTerminatingOnOutOfServiceNode tests deletion pods terminating on out-of-service nodes
func TestTerminatingOnOutOfServiceNode(t *testing.T) {
tests := map[string]struct {
enablePodDisruptionConditions bool
enableJobPodReplacementPolicy bool
withFinalizer bool
wantPhase v1.PodPhase
}{
"pod has phase changed to Failed when PodDisruptionConditions enabled": {
enablePodDisruptionConditions: true,
"pod has phase changed to Failed": {
withFinalizer: true,
wantPhase: v1.PodFailed,
},
"pod has phase unchanged when PodDisruptionConditions disabled": {
enablePodDisruptionConditions: false,
withFinalizer: true,
wantPhase: v1.PodPending,
},
"pod is getting deleted when no finalizer and PodDisruptionConditions enabled": {
enablePodDisruptionConditions: true,
"pod is getting deleted when no finalizer": {
withFinalizer: false,
},
"pod is getting deleted when no finalizer and PodDisruptionConditions disabled": {
enablePodDisruptionConditions: false,
withFinalizer: false,
},
"pod has phase changed when PodDisruptionConditions disabled, but JobPodReplacementPolicy enabled": {
enablePodDisruptionConditions: false,
"pod has phase changed when JobPodReplacementPolicy enabled": {
enableJobPodReplacementPolicy: true,
withFinalizer: true,
wantPhase: v1.PodFailed,
@ -203,7 +173,6 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeOutOfServiceVolumeDetach, true)
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodReplacementPolicy, test.enableJobPodReplacementPolicy)
testCtx := setup(t, "podgc-out-of-service")
@ -385,7 +354,6 @@ func TestPodGcForPodsWithDuplicatedFieldKeys(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, true)
testCtx := setup(t, "podgc-orphaned")
cs := testCtx.ClientSet

View File

@ -33,17 +33,14 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2"
configv1 "k8s.io/kube-scheduler/config/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler"
configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing"
"k8s.io/kubernetes/pkg/scheduler/framework"
@ -207,34 +204,7 @@ func TestPreemption(t *testing.T) {
enablePreFilter bool
unresolvable bool
preemptedPodIndexes map[int]struct{}
enablePodDisruptionConditions bool
}{
{
name: "basic pod preemption with PodDisruptionConditions enabled",
initTokens: maxTokens,
existingPods: []*v1.Pod{
initPausePod(&testutils.PausePodConfig{
Name: "victim-pod",
Namespace: testCtx.NS.Name,
Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
},
}),
},
pod: initPausePod(&testutils.PausePodConfig{
Name: "preemptor-pod",
Namespace: testCtx.NS.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(200, resource.DecimalSI)},
},
}),
preemptedPodIndexes: map[int]struct{}{0: {}},
enablePodDisruptionConditions: true,
},
{
name: "basic pod preemption",
initTokens: maxTokens,
@ -484,7 +454,6 @@ func TestPreemption(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)
filter.Tokens = test.initTokens
filter.EnablePreFilter = test.enablePreFilter
filter.Unresolvable = test.unresolvable
@ -513,10 +482,8 @@ func TestPreemption(t *testing.T) {
t.Errorf("Error %v when getting the updated status for pod %v/%v ", err, p.Namespace, p.Name)
}
_, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget)
if test.enablePodDisruptionConditions && cond == nil {
if cond == nil {
t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
} else if test.enablePodDisruptionConditions == false && cond != nil {
t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget)
}
} else {
if p.DeletionTimestamp != nil {