use table test pattern and replace hardcoded values
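
TestPIDPressure previously ran a single hardcoded scenario; with this change it iterates over a testCases slice, so a new scenario becomes a data-only entry instead of a copy of the whole test body. For readers unfamiliar with the idiom, here is a minimal, self-contained sketch of Go's table-driven test pattern as applied in this commit. The helper exceedsPIDThreshold and its numbers are invented for illustration only and are not part of this commit; they mirror the diff's values (2000 total PIDs, a 1200-available threshold):

    package example

    import "testing"

    // exceedsPIDThreshold is a hypothetical helper, used only to
    // illustrate the table-driven pattern: it reports pressure when
    // fewer than 1200 PIDs remain available.
    func exceedsPIDThreshold(used, total int) bool {
        return total-used < 1200
    }

    func TestExceedsPIDThreshold(t *testing.T) {
        // Each case is one row in the table; t.Run gives each row
        // its own subtest name in `go test` output.
        testCases := []struct {
            name     string
            used     int
            total    int
            expected bool
        }{
            {name: "no pressure", used: 300, total: 2000, expected: false},
            {name: "pressure", used: 1200, total: 2000, expected: true},
        }
        for _, tc := range testCases {
            t.Run(tc.name, func(t *testing.T) {
                if got := exceedsPIDThreshold(tc.used, tc.total); got != tc.expected {
                    t.Errorf("exceedsPIDThreshold(%d, %d) = %v, want %v", tc.used, tc.total, got, tc.expected)
                }
            })
        }
    }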

reinka 2024-02-13 20:05:26 +01:00 committed by Andrei Poehlmann
parent 960d7fbf09
commit 48b1576980


@@ -169,11 +169,6 @@ func makePIDStats(nodeAvailablePIDs string, numberOfRunningProcesses string, pod
                MaxPID:                &availablePIDs,
                NumOfRunningProcesses: &NumberOfRunningProcesses,
            },
-           SystemContainers: []statsapi.ContainerStats{
-               {
-                   Name: statsapi.SystemContainerPods,
-               },
-           },
        },
        Pods: []statsapi.PodStats{},
    }
@@ -402,8 +397,8 @@ func TestPIDPressure_VerifyPodStatus(t *testing.T) {
    podMaker := makePodWithPIDStats
    summaryStatsMaker := makePIDStats
    podsToMake := []podToMake{
-       {name: "below-requests"},
-       {name: "above-requests"},
+       {},
+       {},
    }
    pods := []*v1.Pod{}
    podStats := map[*v1.Pod]statsapi.PodStats{}
@@ -924,229 +919,253 @@ func makeContainersByQOS(class v1.PodQOSClass) []v1.Container {
 }

 func TestPIDPressure(t *testing.T) {
-   podMaker := makePodWithPIDStats
-   summaryStatsMaker := makePIDStats
-   podsToMake := []podToMake{
-       {name: "high-priority-high-usage", priority: highPriority, pidUsage: 900},
-       {name: "default-priority-low-usage", priority: defaultPriority, pidUsage: 100},
-       {name: "default-priority-medium-usage", priority: defaultPriority, pidUsage: 400},
-       {name: "low-priority-high-usage", priority: lowPriority, pidUsage: 600},
-       {name: "low-priority-low-usage", priority: lowPriority, pidUsage: 50},
-   }
-   pods := []*v1.Pod{}
-   podStats := map[*v1.Pod]statsapi.PodStats{}
-   for _, podToMake := range podsToMake {
-       pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.pidUsage)
-       pods = append(pods, pod)
-       podStats[pod] = podStat
-   }
-   podToEvict := pods[3]
-   activePodsFunc := func() []*v1.Pod {
-       return pods
-   }
-
-   fakeClock := testingclock.NewFakeClock(time.Now())
-   podKiller := &mockPodKiller{}
-   diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)}
-   diskGC := &mockDiskGC{err: nil}
-   nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
-
-   config := Config{
-       MaxPodGracePeriodSeconds: 5,
-       PressureTransitionPeriod: time.Minute * 5,
-       Thresholds: []evictionapi.Threshold{
-           {
-               Signal:   evictionapi.SignalPIDAvailable,
-               Operator: evictionapi.OpLessThan,
-               Value: evictionapi.ThresholdValue{
-                   Quantity: quantityMustParse("1200"),
-               },
-           },
-           {
-               Signal:   evictionapi.SignalPIDAvailable,
-               Operator: evictionapi.OpLessThan,
-               Value: evictionapi.ThresholdValue{
-                   Quantity: quantityMustParse("1500"),
-               },
-               GracePeriod: time.Minute * 2,
-           },
-       },
-   }
-
-   summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("2000", "300", podStats)}
-   manager := &managerImpl{
-       clock:                        fakeClock,
-       killPodFunc:                  podKiller.killPodNow,
-       imageGC:                      diskGC,
-       containerGC:                  diskGC,
-       config:                       config,
-       recorder:                     &record.FakeRecorder{},
-       summaryProvider:              summaryProvider,
-       nodeRef:                      nodeRef,
-       nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
-       thresholdsFirstObservedAt:    thresholdsObservedAt{},
-   }
-
-   // create a best effort pod to test admission
-   podToAdmit, _ := podMaker("pod-to-admit", defaultPriority, 50)
-
-   // synchronize
-   _, err := manager.synchronize(diskInfoProvider, activePodsFunc)
-   if err != nil {
-       t.Fatalf("Manager expects no error but got %v", err)
-   }
-
-   // we should not have disk pressure
-   if manager.IsUnderDiskPressure() {
-       t.Fatalf("Manager should not report disk pressure")
-   }
-
-   // try to admit our pod (should succeed)
-   if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit {
-       t.Fatalf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit)
-   }
-
-   // induce soft threshold for PID pressure
-   fakeClock.Step(1 * time.Minute)
-   summaryProvider.result = summaryStatsMaker("2000", "700", podStats)
-   _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
-   if err != nil {
-       t.Fatalf("Manager expects no error but got %v", err)
-   }
-
-   // now, we should have PID pressure
-   if !manager.IsUnderPIDPressure() {
-       t.Errorf("Manager should report PID pressure since soft threshold was met")
-   }
-
-   // verify no pod was yet killed because there has not yet been enough time passed
-   if podKiller.pod != nil {
-       t.Errorf("Manager should not have killed a pod yet, but killed: %v", podKiller.pod.Name)
-   }
-
-   // step forward in time past the grace period
-   fakeClock.Step(3 * time.Minute)
-   // no change in PID stats to simulate continued pressure
-   _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
-   if err != nil {
-       t.Fatalf("Manager expects no error but got %v", err)
-   }
-
-   // verify PID pressure is still reported
-   if !manager.IsUnderPIDPressure() {
-       t.Errorf("Manager should still report PID pressure")
-   }
-
-   // verify the right pod was killed with the right grace period.
-   if podKiller.pod != podToEvict {
-       t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
-   }
-   if podKiller.gracePeriodOverride == nil {
-       t.Errorf("Manager chose to kill pod but should have had a grace period override.")
-   }
-   observedGracePeriod := *podKiller.gracePeriodOverride
-   if observedGracePeriod != manager.config.MaxPodGracePeriodSeconds {
-       t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", manager.config.MaxPodGracePeriodSeconds, observedGracePeriod)
-   }
-
-   // reset state
-   podKiller.pod = nil
-   podKiller.gracePeriodOverride = nil
-
-   // remove PID pressure by simulating increased PID availability
-   fakeClock.Step(20 * time.Minute)
-   summaryProvider.result = summaryStatsMaker("2000", "300", podStats) // Simulate increased PID availability
-   _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
-   if err != nil {
-       t.Fatalf("Manager expects no error but got %v", err)
-   }
-
-   // verify PID pressure is resolved
-   if manager.IsUnderPIDPressure() {
-       t.Errorf("Manager should not report PID pressure")
-   }
-
-   // re-induce PID pressure
-   fakeClock.Step(1 * time.Minute)
-   summaryProvider.result = summaryStatsMaker("2000", "1200", podStats)
-   _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
-   if err != nil {
-       t.Fatalf("Manager expects no error but got %v", err)
-   }
-
-   // verify PID pressure is reported again
-   if !manager.IsUnderPIDPressure() {
-       t.Errorf("Manager should report PID pressure")
-   }
-
-   // verify the right pod was killed with the right grace period.
-   if podKiller.pod != podToEvict {
-       t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
-   }
-   if podKiller.gracePeriodOverride == nil {
-       t.Errorf("Manager chose to kill pod but should have had a grace period override.")
-   }
-   observedGracePeriod = *podKiller.gracePeriodOverride
-   if observedGracePeriod != int64(0) {
-       t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
-   }
-
-   // try to admit our pod (should fail)
-   if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); result.Admit {
-       t.Fatalf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit)
-   }
-
-   // reduce PID pressure
-   fakeClock.Step(1 * time.Minute)
-   summaryProvider.result = summaryStatsMaker("2000", "300", podStats)
-   podKiller.pod = nil // reset state
-   _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
-   if err != nil {
-       t.Fatalf("Manager expects no error but got %v", err)
-   }
-
-   // we should have PID pressure (because transition period not yet met)
-   if !manager.IsUnderPIDPressure() {
-       t.Errorf("Manager should report PID pressure")
-   }
-
-   // no pod should have been killed
-   if podKiller.pod != nil {
-       t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod.Name)
-   }
-
-   // try to admit our pod (should fail)
-   if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); result.Admit {
-       t.Fatalf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit)
-   }
-
-   // move the clock past the transition period
-   fakeClock.Step(5 * time.Minute)
-   summaryProvider.result = summaryStatsMaker("2000", "300", podStats)
-   _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
-   if err != nil {
-       t.Fatalf("Manager expects no error but got %v", err)
-   }
-
-   // we should not have PID pressure (because transition period met)
-   if manager.IsUnderPIDPressure() {
-       t.Errorf("Manager should not report PID pressure")
-   }
-
-   // no pod should have been killed
-   if podKiller.pod != nil {
-       t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod.Name)
-   }
-
-   // try to admit our pod (should succeed)
-   if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit {
-       t.Fatalf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit)
-   }
+   // Define test cases
+   testCases := []struct {
+       name                               string
+       podsToMake                         []podToMake
+       evictPodIndex                      int
+       noPressurePIDUsage                 string
+       pressurePIDUsageWithGracePeriod    string
+       pressurePIDUsageWithoutGracePeriod string
+       totalPID                           string
+   }{
+       {
+           name: "eviction due to pid pressure",
+           podsToMake: []podToMake{
+               {name: "high-priority-high-usage", priority: highPriority, pidUsage: 900},
+               {name: "default-priority-low-usage", priority: defaultPriority, pidUsage: 100},
+               {name: "default-priority-medium-usage", priority: defaultPriority, pidUsage: 400},
+               {name: "low-priority-high-usage", priority: lowPriority, pidUsage: 600},
+               {name: "low-priority-low-usage", priority: lowPriority, pidUsage: 50},
+           },
+           evictPodIndex:                      3, // we expect the low-priority-high-usage pod to be evicted
+           noPressurePIDUsage:                 "300",
+           pressurePIDUsageWithGracePeriod:    "700",
+           pressurePIDUsageWithoutGracePeriod: "1200",
+           totalPID:                           "2000",
+       },
+   }
+
+   for _, tc := range testCases {
+       t.Run(tc.name, func(t *testing.T) {
+           podMaker := makePodWithPIDStats
+           summaryStatsMaker := makePIDStats
+           pods := []*v1.Pod{}
+           podStats := map[*v1.Pod]statsapi.PodStats{}
+           for _, podToMake := range tc.podsToMake {
+               pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.pidUsage)
+               pods = append(pods, pod)
+               podStats[pod] = podStat
+           }
+           podToEvict := pods[tc.evictPodIndex]
+           activePodsFunc := func() []*v1.Pod { return pods }
+
+           fakeClock := testingclock.NewFakeClock(time.Now())
+           podKiller := &mockPodKiller{}
+           diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: ptr.To(false)}
+           diskGC := &mockDiskGC{err: nil}
+           nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+
+           config := Config{
+               MaxPodGracePeriodSeconds: 5,
+               PressureTransitionPeriod: time.Minute * 5,
+               Thresholds: []evictionapi.Threshold{
+                   {
+                       Signal:   evictionapi.SignalPIDAvailable,
+                       Operator: evictionapi.OpLessThan,
+                       Value: evictionapi.ThresholdValue{
+                           Quantity: quantityMustParse("1200"),
+                       },
+                   },
+                   {
+                       Signal:   evictionapi.SignalPIDAvailable,
+                       Operator: evictionapi.OpLessThan,
+                       Value: evictionapi.ThresholdValue{
+                           Quantity: quantityMustParse("1500"),
+                       },
+                       GracePeriod: time.Minute * 2,
+                   },
+               },
+           }
+
+           summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker(tc.totalPID, tc.noPressurePIDUsage, podStats)}
+           manager := &managerImpl{
+               clock:                        fakeClock,
+               killPodFunc:                  podKiller.killPodNow,
+               imageGC:                      diskGC,
+               containerGC:                  diskGC,
+               config:                       config,
+               recorder:                     &record.FakeRecorder{},
+               summaryProvider:              summaryProvider,
+               nodeRef:                      nodeRef,
+               nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
+               thresholdsFirstObservedAt:    thresholdsObservedAt{},
+           }
+
+           // create a pod to test admission
+           podToAdmit, _ := podMaker("pod-to-admit", defaultPriority, 50)
+
+           // synchronize
+           _, err := manager.synchronize(diskInfoProvider, activePodsFunc)
+           if err != nil {
+               t.Fatalf("Manager expects no error but got %v", err)
+           }
+
+           // we should not have PID pressure
+           if manager.IsUnderPIDPressure() {
+               t.Fatalf("Manager should not report PID pressure")
+           }
+
+           // try to admit our pod (should succeed)
+           if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit {
+               t.Fatalf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit)
+           }
+
+           // induce soft threshold for PID pressure
+           fakeClock.Step(1 * time.Minute)
+           summaryProvider.result = summaryStatsMaker(tc.totalPID, tc.pressurePIDUsageWithGracePeriod, podStats)
+           _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
+           if err != nil {
+               t.Fatalf("Manager expects no error but got %v", err)
+           }
+
+           // now, we should have PID pressure
+           if !manager.IsUnderPIDPressure() {
+               t.Errorf("Manager should report PID pressure since soft threshold was met")
+           }
+
+           // verify no pod was yet killed because there has not yet been enough time passed
+           if podKiller.pod != nil {
+               t.Errorf("Manager should not have killed a pod yet, but killed: %v", podKiller.pod.Name)
+           }
+
+           // step forward in time past the grace period
+           fakeClock.Step(3 * time.Minute)
+           // no change in PID stats to simulate continued pressure
+           _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
+           if err != nil {
+               t.Fatalf("Manager expects no error but got %v", err)
+           }
+
+           // verify PID pressure is still reported
+           if !manager.IsUnderPIDPressure() {
+               t.Errorf("Manager should still report PID pressure")
+           }
+
+           // verify the right pod was killed with the right grace period.
+           if podKiller.pod != podToEvict {
+               t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
+           }
+           if podKiller.gracePeriodOverride == nil {
+               t.Errorf("Manager chose to kill pod but should have had a grace period override.")
+           }
+           observedGracePeriod := *podKiller.gracePeriodOverride
+           if observedGracePeriod != manager.config.MaxPodGracePeriodSeconds {
+               t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", manager.config.MaxPodGracePeriodSeconds, observedGracePeriod)
+           }
+
+           // reset state
+           podKiller.pod = nil
+           podKiller.gracePeriodOverride = nil
+
+           // remove PID pressure by simulating increased PID availability
+           fakeClock.Step(20 * time.Minute)
+           summaryProvider.result = summaryStatsMaker(tc.totalPID, tc.noPressurePIDUsage, podStats) // Simulate increased PID availability
+           _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
+           if err != nil {
+               t.Fatalf("Manager expects no error but got %v", err)
+           }
+
+           // verify PID pressure is resolved
+           if manager.IsUnderPIDPressure() {
+               t.Errorf("Manager should not report PID pressure")
+           }
+
+           // re-induce PID pressure
+           fakeClock.Step(1 * time.Minute)
+           summaryProvider.result = summaryStatsMaker(tc.totalPID, tc.pressurePIDUsageWithoutGracePeriod, podStats)
+           _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
+           if err != nil {
+               t.Fatalf("Manager expects no error but got %v", err)
+           }
+
+           // verify PID pressure is reported again
+           if !manager.IsUnderPIDPressure() {
+               t.Errorf("Manager should report PID pressure")
+           }
+
+           // verify the right pod was killed with the right grace period.
+           if podKiller.pod != podToEvict {
+               t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod.Name, podToEvict.Name)
+           }
+           if podKiller.gracePeriodOverride == nil {
+               t.Errorf("Manager chose to kill pod but should have had a grace period override.")
+           }
+           observedGracePeriod = *podKiller.gracePeriodOverride
+           if observedGracePeriod != int64(0) {
+               t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
+           }
+
+           // try to admit our pod (should fail)
+           if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); result.Admit {
+               t.Fatalf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit)
+           }
+
+           // reduce PID pressure
+           fakeClock.Step(1 * time.Minute)
+           summaryProvider.result = summaryStatsMaker(tc.totalPID, tc.noPressurePIDUsage, podStats)
+           podKiller.pod = nil // reset state
+           _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
+           if err != nil {
+               t.Fatalf("Manager expects no error but got %v", err)
+           }
+
+           // we should have PID pressure (because transition period not yet met)
+           if !manager.IsUnderPIDPressure() {
+               t.Errorf("Manager should report PID pressure")
+           }
+
+           // no pod should have been killed
+           if podKiller.pod != nil {
+               t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod.Name)
+           }
+
+           // try to admit our pod (should fail)
+           if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); result.Admit {
+               t.Fatalf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit)
+           }
+
+           // move the clock past the transition period
+           fakeClock.Step(5 * time.Minute)
+           summaryProvider.result = summaryStatsMaker(tc.totalPID, tc.noPressurePIDUsage, podStats)
+           _, err = manager.synchronize(diskInfoProvider, activePodsFunc)
+           if err != nil {
+               t.Fatalf("Manager expects no error but got %v", err)
+           }
+
+           // we should not have PID pressure (because transition period met)
+           if manager.IsUnderPIDPressure() {
+               t.Errorf("Manager should not report PID pressure")
+           }
+
+           // no pod should have been killed
+           if podKiller.pod != nil {
+               t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod.Name)
+           }
+
+           // try to admit our pod (should succeed)
+           if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit {
+               t.Fatalf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit)
+           }
+       })
+   }
 }
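
With the scenario inputs lifted into the testCases table, a further scenario is a data-only change rather than a new test function. As a hypothetical illustration (this entry is not part of the commit, and it assumes the PID evictor ranks equal-priority pods by their PID usage, so the heavier pod is evicted first), a second case could slot in alongside the existing entry:

    {
        name: "eviction due to pid pressure with two default-priority pods", // hypothetical
        podsToMake: []podToMake{
            {name: "high-usage", priority: defaultPriority, pidUsage: 1600},
            {name: "low-usage", priority: defaultPriority, pidUsage: 100},
        },
        evictPodIndex:                      0, // high-usage consumes more PIDs, so we expect it to be evicted
        noPressurePIDUsage:                 "300",
        pressurePIDUsageWithGracePeriod:    "700",
        pressurePIDUsageWithoutGracePeriod: "1200",
        totalPID:                           "2000",
    },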