diff --git a/pkg/kubelet/eviction/eviction_manager_test.go b/pkg/kubelet/eviction/eviction_manager_test.go index fe30006e8af..3dd07b0f77f 100644 --- a/pkg/kubelet/eviction/eviction_manager_test.go +++ b/pkg/kubelet/eviction/eviction_manager_test.go @@ -886,6 +886,51 @@ func TestNodeReclaimFuncs(t *testing.T) { t.Errorf("Manager should not report disk pressure") } + // synchronize + manager.synchronize(diskInfoProvider, activePodsFunc) + + // we should not have disk pressure + if manager.IsUnderDiskPressure() { + t.Errorf("Manager should not report disk pressure") + } + + // induce hard threshold + fakeClock.Step(1 * time.Minute) + summaryProvider.result = summaryStatsMaker(".9Gi", "200Gi", podStats) + // make GC return disk usage below the threshold, but not satisfying minReclaim + diskGC.summaryAfterGC = summaryStatsMaker("1.1Gi", "200Gi", podStats) + manager.synchronize(diskInfoProvider, activePodsFunc) + + // we should have disk pressure + if !manager.IsUnderDiskPressure() { + t.Errorf("Manager should report disk pressure since hard threshold was met") + } + + // verify image gc was invoked + if !diskGC.imageGCInvoked || !diskGC.containerGCInvoked { + t.Errorf("Manager should have invoked image and container gc") + } + + // verify a pod was killed because image gc was not enough to satisfy minReclaim + if podKiller.pod == nil { + t.Errorf("Manager should have killed a pod, but didn't") + } + + // reset state + diskGC.imageGCInvoked = false + diskGC.containerGCInvoked = false + podKiller.pod = nil + + // remove disk pressure + fakeClock.Step(20 * time.Minute) + summaryProvider.result = summaryStatsMaker("16Gi", "200Gi", podStats) + manager.synchronize(diskInfoProvider, activePodsFunc) + + // we should not have disk pressure + if manager.IsUnderDiskPressure() { + t.Errorf("Manager should not report disk pressure") + } + // induce disk pressure! fakeClock.Step(1 * time.Minute) summaryProvider.result = summaryStatsMaker("400Mi", "200Gi", podStats)