Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-01 15:58:37 +00:00)
review remarks for graduating PodDisruptionConditions
commit 780191bea6 (parent bf0c9885a4)
@@ -3701,40 +3701,7 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
 			wantStatusFailed: 1,
 			wantStatusSucceeded: 0,
 		},
-		"terminating Pod considered failed when PodDisruptionConditions is disabled": {
-			enableJobPodFailurePolicy: true,
-			job: batch.Job{
-				TypeMeta: metav1.TypeMeta{Kind: "Job"},
-				ObjectMeta: validObjectMeta,
-				Spec: batch.JobSpec{
-					Parallelism: ptr.To[int32](1),
-					Selector: validSelector,
-					Template: validTemplate,
-					BackoffLimit: ptr.To[int32](0),
-					PodFailurePolicy: &batch.PodFailurePolicy{
-						Rules: []batch.PodFailurePolicyRule{
-							{
-								Action: batch.PodFailurePolicyActionCount,
-								OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
-									{
-										Type: v1.DisruptionTarget,
-										Status: v1.ConditionTrue,
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-			pods: []v1.Pod{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						DeletionTimestamp: &now,
-					},
-				},
-			},
-		},
-		"terminating Pod not considered failed when PodDisruptionConditions is enabled": {
+		"terminating Pod not considered failed when JobPodFailurePolicy is enabled and used": {
 			enableJobPodFailurePolicy: true,
 			job: batch.Job{
 				TypeMeta: metav1.TypeMeta{Kind: "Job"},

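For reference, the test case above configures a podFailurePolicy rule that matches Pods carrying the DisruptionTarget condition. A minimal sketch of the same rule written against the published k8s.io/api/batch/v1 types (the test itself uses the internal pkg/apis/batch package; the field and constant names are the same, and the function name here is only illustrative):

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// disruptionCountPolicy builds a podFailurePolicy with a single rule: a pod
// failure that carries the DisruptionTarget=True condition is simply counted
// against the Job's backoffLimit (Action: Count) instead of matching any
// other rule.
func disruptionCountPolicy() *batchv1.PodFailurePolicy {
	return &batchv1.PodFailurePolicy{
		Rules: []batchv1.PodFailurePolicyRule{{
			Action: batchv1.PodFailurePolicyActionCount,
			OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{{
				Type:   corev1.DisruptionTarget,
				Status: corev1.ConditionTrue,
			}},
		}},
	}
}
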
@@ -180,7 +180,7 @@ func TestCreatePod(t *testing.T) {
 			expectDelete: false,
 		},
 		{
-			description: "schedule on tainted Node with infinite ivalid toleration",
+			description: "schedule on tainted Node with infinite invalid toleration",
 			pod: addToleration(testutil.NewPod("pod1", "node1"), 2, -1),
 			taintedNodes: map[string][]corev1.Taint{
 				"node1": {createNoExecuteTaint(1)},

@@ -268,96 +268,87 @@ type podToMake struct {
 }
 
 func TestMemoryPressure_VerifyPodStatus(t *testing.T) {
-	testCases := map[string]struct {
-		wantPodStatus v1.PodStatus
-	}{
-		"eviction due to memory pressure": {
-			wantPodStatus: v1.PodStatus{
-				Phase: v1.PodFailed,
-				Reason: "Evicted",
-				Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ",
+	podMaker := makePodWithMemoryStats
+	summaryStatsMaker := makeMemoryStats
+	podsToMake := []podToMake{
+		{name: "below-requests", requests: newResourceList("", "1Gi", ""), limits: newResourceList("", "1Gi", ""), memoryWorkingSet: "900Mi"},
+		{name: "above-requests", requests: newResourceList("", "100Mi", ""), limits: newResourceList("", "1Gi", ""), memoryWorkingSet: "700Mi"},
+	}
+	pods := []*v1.Pod{}
+	podStats := map[*v1.Pod]statsapi.PodStats{}
+	for _, podToMake := range podsToMake {
+		pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet)
+		pods = append(pods, pod)
+		podStats[pod] = podStat
+	}
+	activePodsFunc := func() []*v1.Pod {
+		return pods
+	}
+
+	fakeClock := testingclock.NewFakeClock(time.Now())
+	podKiller := &mockPodKiller{}
+	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)}
+	diskGC := &mockDiskGC{err: nil}
+	nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+
+	config := Config{
+		PressureTransitionPeriod: time.Minute * 5,
+		Thresholds: []evictionapi.Threshold{
+			{
+				Signal: evictionapi.SignalMemoryAvailable,
+				Operator: evictionapi.OpLessThan,
+				Value: evictionapi.ThresholdValue{
+					Quantity: quantityMustParse("2Gi"),
+				},
 			},
 		},
 	}
-	for _, tc := range testCases {
-		podMaker := makePodWithMemoryStats
-		summaryStatsMaker := makeMemoryStats
-		podsToMake := []podToMake{
-			{name: "below-requests", requests: newResourceList("", "1Gi", ""), limits: newResourceList("", "1Gi", ""), memoryWorkingSet: "900Mi"},
-			{name: "above-requests", requests: newResourceList("", "100Mi", ""), limits: newResourceList("", "1Gi", ""), memoryWorkingSet: "700Mi"},
-		}
-		pods := []*v1.Pod{}
-		podStats := map[*v1.Pod]statsapi.PodStats{}
-		for _, podToMake := range podsToMake {
-			pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet)
-			pods = append(pods, pod)
-			podStats[pod] = podStat
-		}
-		activePodsFunc := func() []*v1.Pod {
-			return pods
-		}
-
-		fakeClock := testingclock.NewFakeClock(time.Now())
-		podKiller := &mockPodKiller{}
-		diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)}
-		diskGC := &mockDiskGC{err: nil}
-		nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
-
-		config := Config{
-			PressureTransitionPeriod: time.Minute * 5,
-			Thresholds: []evictionapi.Threshold{
-				{
-					Signal: evictionapi.SignalMemoryAvailable,
-					Operator: evictionapi.OpLessThan,
-					Value: evictionapi.ThresholdValue{
-						Quantity: quantityMustParse("2Gi"),
-					},
-				},
-			},
-		}
-		summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("1500Mi", podStats)}
-		manager := &managerImpl{
-			clock: fakeClock,
-			killPodFunc: podKiller.killPodNow,
-			imageGC: diskGC,
-			containerGC: diskGC,
-			config: config,
-			recorder: &record.FakeRecorder{},
-			summaryProvider: summaryProvider,
-			nodeRef: nodeRef,
-			nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
-			thresholdsFirstObservedAt: thresholdsObservedAt{},
-		}
-
-		// synchronize to detect the memory pressure
-		_, err := manager.synchronize(diskInfoProvider, activePodsFunc)
-
-		if err != nil {
-			t.Fatalf("Manager expects no error but got %v", err)
-		}
-		// verify memory pressure is detected
-		if !manager.IsUnderMemoryPressure() {
-			t.Fatalf("Manager should have detected memory pressure")
-		}
-
-		// verify a pod is selected for eviction
-		if podKiller.pod == nil {
-			t.Fatalf("Manager should have selected a pod for eviction")
-		}
-
-		wantPodStatus := tc.wantPodStatus.DeepCopy()
-		wantPodStatus.Conditions = append(wantPodStatus.Conditions, v1.PodCondition{
+	summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("1500Mi", podStats)}
+	manager := &managerImpl{
+		clock: fakeClock,
+		killPodFunc: podKiller.killPodNow,
+		imageGC: diskGC,
+		containerGC: diskGC,
+		config: config,
+		recorder: &record.FakeRecorder{},
+		summaryProvider: summaryProvider,
+		nodeRef: nodeRef,
+		nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
+		thresholdsFirstObservedAt: thresholdsObservedAt{},
+	}
+
+	// synchronize to detect the memory pressure
+	_, err := manager.synchronize(diskInfoProvider, activePodsFunc)
+
+	if err != nil {
+		t.Fatalf("Manager expects no error but got %v", err)
+	}
+	// verify memory pressure is detected
+	if !manager.IsUnderMemoryPressure() {
+		t.Fatalf("Manager should have detected memory pressure")
+	}
+
+	// verify a pod is selected for eviction
+	if podKiller.pod == nil {
+		t.Fatalf("Manager should have selected a pod for eviction")
+	}
+
+	wantPodStatus := v1.PodStatus{
+		Phase: v1.PodFailed,
+		Reason: "Evicted",
+		Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ",
+		Conditions: []v1.PodCondition{{
 			Type: "DisruptionTarget",
 			Status: "True",
 			Reason: "TerminationByKubelet",
 			Message: "The node was low on resource: memory. Threshold quantity: 2Gi, available: 1500Mi. ",
-		})
+		}},
+	}
 
 	// verify the pod status after applying the status update function
 	podKiller.statusFn(&podKiller.pod.Status)
-	if diff := cmp.Diff(*wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
+	if diff := cmp.Diff(wantPodStatus, podKiller.pod.Status, cmpopts.IgnoreFields(v1.PodCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
 		t.Errorf("Unexpected pod status of the evicted pod (-want,+got):\n%s", diff)
-		}
 	}
 }

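The eviction manager test above expects the evicted Pod to be Failed with reason Evicted and to carry a DisruptionTarget condition with reason TerminationByKubelet. A small sketch, using only the published k8s.io/api/core/v1 types, of how such a status can be checked (the function name is illustrative, not part of the change above):

package example

import (
	corev1 "k8s.io/api/core/v1"
)

// evictedWithDisruptionTarget reports whether a Pod status matches what the
// test above asserts: the Pod reached the Failed phase and carries a
// DisruptionTarget condition set to True.
func evictedWithDisruptionTarget(status corev1.PodStatus) bool {
	if status.Phase != corev1.PodFailed {
		return false
	}
	for _, cond := range status.Conditions {
		if cond.Type == corev1.DisruptionTarget && cond.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}
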
@@ -1544,6 +1544,13 @@ func TestMergePodStatus(t *testing.T) {
 		newPodStatus func(input v1.PodStatus) v1.PodStatus
 		expectPodStatus v1.PodStatus
 	}{
+		{
+			"no change",
+			false,
+			func(input v1.PodStatus) v1.PodStatus { return input },
+			func(input v1.PodStatus) v1.PodStatus { return input },
+			getPodStatus(),
+		},
 		{
 			"add DisruptionTarget condition when transitioning into failed phase",
 			false,

@@ -34,13 +34,10 @@ import (
 	"k8s.io/apimachinery/pkg/watch"
 	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/apiserver/pkg/registry/rest"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/kubernetes/fake"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	podapi "k8s.io/kubernetes/pkg/api/pod"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/apis/policy"
-	"k8s.io/kubernetes/pkg/features"
 )
 
 func TestEviction(t *testing.T) {

@@ -1431,14 +1431,6 @@ func TestPodEligibleToPreemptOthers(t *testing.T) {
 			nominatedNodeStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, tainttoleration.ErrReasonNotMatch),
 			expected: true,
 		},
-		{
-			name: "Pod with nominated node, but without nominated node status",
-			pod: st.MakePod().Name("p_without_status").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
-			pods: []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().Obj()},
-			nodes: []string{"node1"},
-			nominatedNodeStatus: nil,
-			expected: true,
-		},
 		{
 			name: "Pod without nominated node",
 			pod: st.MakePod().Name("p_without_nominated_node").UID("p").Priority(highPriority).Obj(),

@@ -1456,7 +1448,7 @@ func TestPodEligibleToPreemptOthers(t *testing.T) {
 			expected: false,
 		},
 		{
-			name: "victim Pods terminating",
+			name: "preemption victim pod terminating, as indicated by the dedicated DisruptionTarget condition",
 			pod: st.MakePod().Name("p_with_nominated_node").UID("p").Priority(highPriority).NominatedNodeName("node1").Obj(),
 			pods: []*v1.Pod{st.MakePod().Name("p1").UID("p1").Priority(lowPriority).Node("node1").Terminating().
 				Condition(v1.DisruptionTarget, v1.ConditionTrue, v1.PodReasonPreemptionByScheduler).Obj()},

@@ -82,9 +82,6 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	OOMScoreAdj = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("OOMScoreAdj"))
 
-	// TODO: document the feature (owning SIG, when to use this feature for a test)
-	PodDisruptionConditions = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("PodDisruptionConditions"))
-
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	PodResources = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("PodResources"))
 

@@ -91,7 +91,7 @@ var _ = SIGDescribe("CriticalPod", framework.WithSerial(), framework.WithDisrupt
 		}
 	})
 
-	f.It("should add DisruptionTarget condition to the preempted pod", nodefeature.PodDisruptionConditions, func(ctx context.Context) {
+	f.It("should add DisruptionTarget condition to the preempted pod", func(ctx context.Context) {
 		// because adminssion Priority enable, If the priority class is not found, the Pod is rejected.
 		node := getNodeName(ctx, f)
 		nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{

@@ -44,6 +44,7 @@ import (
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
+	"k8s.io/utils/ptr"
 
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"

@@ -512,7 +513,7 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framewo
 		runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logPidMetrics, specs)
 	})
 
-	f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; baseline scenario to verify DisruptionTarget is added", nodefeature.PodDisruptionConditions, func() {
+	f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; baseline scenario to verify DisruptionTarget is added", func() {
 		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
 			pidsConsumed := int64(10000)
 			summary := eventuallyGetSummary(ctx)

@@ -520,12 +521,11 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framewo
 			initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalPIDAvailable): fmt.Sprintf("%d", availablePids-pidsConsumed)}
 			initialConfig.EvictionMinimumReclaim = map[string]string{}
 		})
-		disruptionTarget := v1.DisruptionTarget
 		specs := []podEvictSpec{
 			{
 				evictionPriority: 1,
 				pod: pidConsumingPod("fork-bomb-container", 30000),
-				wantPodDisruptionCondition: &disruptionTarget,
+				wantPodDisruptionCondition: ptr.To(v1.DisruptionTarget),
 			},
 		}
 		runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logPidMetrics, specs)

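The last change in the hunk above swaps a throwaway local variable plus address-of for the k8s.io/utils/ptr helper. A one-line illustration of that helper (wantCondition is just an example name):

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"
)

// ptr.To returns a pointer to its argument, so a *corev1.PodConditionType can
// be built inline without first declaring a variable to take the address of.
var wantCondition = ptr.To(corev1.DisruptionTarget)
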
@@ -83,7 +83,7 @@ var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.
 		}
 	})
 
-	f.Context("graceful node shutdown; baseline scenario to verify DisruptionTarget is added", nodefeature.PodDisruptionConditions, func() {
+	f.Context("graceful node shutdown; baseline scenario to verify DisruptionTarget is added", func() {
 
 		const (
 			pollInterval = 1 * time.Second

@@ -40,7 +40,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apiserver/pkg/util/feature"
 	cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/informers"

@@ -50,12 +49,10 @@ import (
 	"k8s.io/client-go/restmapper"
 	"k8s.io/client-go/scale"
 	"k8s.io/client-go/tools/cache"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/klog/v2"
 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/controller/disruption"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/test/integration/framework"
 	"k8s.io/kubernetes/test/utils/ktesting"
 )