Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-03 01:06:27 +00:00
Merge pull request #34818 from nebril/eviction-test-cleanup
Automatic merge from submit-queue

Cleanup kubelet eviction manager tests

Cleans up the kubelet eviction manager tests: parts of the tests that were similar to each other are extracted into shared helper functions.
This commit is contained in: commit 6515e3573e
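The shape of the cleanup, in miniature: tests that previously declared their own podMaker and summaryStatsMaker closures now point at shared helpers defined once in the test file. A minimal sketch of the new pattern, assuming it runs inside the eviction manager test file where newResourceList, api, and statsapi are already in scope (the "500Mi" node-available figure is an invented example, not a value from this diff):

    // Build a pod fixture plus its memory stats with the shared helper,
    // then wrap both into a stats summary for the eviction manager.
    pod, podStat := makePodWithMemoryStats("guaranteed-low",
        newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi"), "200Mi")
    summary := makeMemoryStats("500Mi", map[*api.Pod]statsapi.PodStats{pod: podStat}) // "500Mi" is an invented example value
    _ = summary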
@@ -67,39 +67,87 @@ func (m *mockImageGC) DeleteUnusedImages() (int64, error) {
     return m.freed, m.err
 }

-// TestMemoryPressure
-func TestMemoryPressure(t *testing.T) {
-    podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, memoryWorkingSet string) (*api.Pod, statsapi.PodStats) {
-        pod := newPod(name, []api.Container{
-            newContainer(name, requests, limits),
-        }, nil)
-        podStats := newPodMemoryStats(pod, resource.MustParse(memoryWorkingSet))
-        return pod, podStats
-    }
-    summaryStatsMaker := func(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
-        val := resource.MustParse(nodeAvailableBytes)
-        availableBytes := uint64(val.Value())
-        WorkingSetBytes := uint64(val.Value())
-        result := &statsapi.Summary{
-            Node: statsapi.NodeStats{
-                Memory: &statsapi.MemoryStats{
-                    AvailableBytes:  &availableBytes,
-                    WorkingSetBytes: &WorkingSetBytes,
-                },
-            },
-            Pods: []statsapi.PodStats{},
-        }
-        for _, podStat := range podStats {
-            result.Pods = append(result.Pods, podStat)
-        }
-        return result
-    }
-    podsToMake := []struct {
-        name             string
-        requests         api.ResourceList
-        limits           api.ResourceList
-        memoryWorkingSet string
-    }{
+func makePodWithMemoryStats(name string, requests api.ResourceList, limits api.ResourceList, memoryWorkingSet string) (*api.Pod, statsapi.PodStats) {
+    pod := newPod(name, []api.Container{
+        newContainer(name, requests, limits),
+    }, nil)
+    podStats := newPodMemoryStats(pod, resource.MustParse(memoryWorkingSet))
+    return pod, podStats
+}
+
+func makePodWithDiskStats(name string, requests api.ResourceList, limits api.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string) (*api.Pod, statsapi.PodStats) {
+    pod := newPod(name, []api.Container{
+        newContainer(name, requests, limits),
+    }, nil)
+    podStats := newPodDiskStats(pod, parseQuantity(rootFsUsed), parseQuantity(logsUsed), parseQuantity(perLocalVolumeUsed))
+    return pod, podStats
+}
+
+func makeMemoryStats(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
+    val := resource.MustParse(nodeAvailableBytes)
+    availableBytes := uint64(val.Value())
+    WorkingSetBytes := uint64(val.Value())
+    result := &statsapi.Summary{
+        Node: statsapi.NodeStats{
+            Memory: &statsapi.MemoryStats{
+                AvailableBytes:  &availableBytes,
+                WorkingSetBytes: &WorkingSetBytes,
+            },
+        },
+        Pods: []statsapi.PodStats{},
+    }
+    for _, podStat := range podStats {
+        result.Pods = append(result.Pods, podStat)
+    }
+    return result
+}
+
+func makeDiskStats(rootFsAvailableBytes, imageFsAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
+    rootFsVal := resource.MustParse(rootFsAvailableBytes)
+    rootFsBytes := uint64(rootFsVal.Value())
+    rootFsCapacityBytes := uint64(rootFsVal.Value() * 2)
+    imageFsVal := resource.MustParse(imageFsAvailableBytes)
+    imageFsBytes := uint64(imageFsVal.Value())
+    imageFsCapacityBytes := uint64(imageFsVal.Value() * 2)
+    result := &statsapi.Summary{
+        Node: statsapi.NodeStats{
+            Fs: &statsapi.FsStats{
+                AvailableBytes: &rootFsBytes,
+                CapacityBytes:  &rootFsCapacityBytes,
+            },
+            Runtime: &statsapi.RuntimeStats{
+                ImageFs: &statsapi.FsStats{
+                    AvailableBytes: &imageFsBytes,
+                    CapacityBytes:  &imageFsCapacityBytes,
+                },
+            },
+        },
+        Pods: []statsapi.PodStats{},
+    }
+    for _, podStat := range podStats {
+        result.Pods = append(result.Pods, podStat)
+    }
+    return result
+}
+
+type podToMake struct {
+    name                     string
+    requests                 api.ResourceList
+    limits                   api.ResourceList
+    memoryWorkingSet         string
+    rootFsUsed               string
+    logsFsUsed               string
+    logsFsInodesUsed         string
+    rootFsInodesUsed         string
+    perLocalVolumeUsed       string
+    perLocalVolumeInodesUsed string
+}
+
+// TestMemoryPressure
+func TestMemoryPressure(t *testing.T) {
+    podMaker := makePodWithMemoryStats
+    summaryStatsMaker := makeMemoryStats
+    podsToMake := []podToMake{
         {name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "200Mi"},
         {name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "800Mi"},
         {name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "300Mi"},
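For orientation, a hedged sketch of how a podsToMake table is presumably consumed by the tests in the hunks below; this loop is an assumption inferred from the helper signatures above, not code shown in this diff:

    // Assumed consumption pattern: expand each podToMake fixture into a pod
    // and its stats, keyed by pod so the summary builders above can fold
    // them into the node summary.
    pods := []*api.Pod{}
    podStats := map[*api.Pod]statsapi.PodStats{}
    for _, p := range podsToMake {
        pod, podStat := podMaker(p.name, p.requests, p.limits, p.memoryWorkingSet)
        pods = append(pods, pod)
        podStats[pod] = podStat
    }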
@@ -314,48 +362,9 @@ func parseQuantity(value string) resource.Quantity {
 }

 func TestDiskPressureNodeFs(t *testing.T) {
-    podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string) (*api.Pod, statsapi.PodStats) {
-        pod := newPod(name, []api.Container{
-            newContainer(name, requests, limits),
-        }, nil)
-        podStats := newPodDiskStats(pod, parseQuantity(rootFsUsed), parseQuantity(logsUsed), parseQuantity(perLocalVolumeUsed))
-        return pod, podStats
-    }
-    summaryStatsMaker := func(rootFsAvailableBytes, imageFsAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
-        rootFsVal := resource.MustParse(rootFsAvailableBytes)
-        rootFsBytes := uint64(rootFsVal.Value())
-        rootFsCapacityBytes := uint64(rootFsVal.Value() * 2)
-        imageFsVal := resource.MustParse(imageFsAvailableBytes)
-        imageFsBytes := uint64(imageFsVal.Value())
-        imageFsCapacityBytes := uint64(imageFsVal.Value() * 2)
-        result := &statsapi.Summary{
-            Node: statsapi.NodeStats{
-                Fs: &statsapi.FsStats{
-                    AvailableBytes: &rootFsBytes,
-                    CapacityBytes:  &rootFsCapacityBytes,
-                },
-                Runtime: &statsapi.RuntimeStats{
-                    ImageFs: &statsapi.FsStats{
-                        AvailableBytes: &imageFsBytes,
-                        CapacityBytes:  &imageFsCapacityBytes,
-                    },
-                },
-            },
-            Pods: []statsapi.PodStats{},
-        }
-        for _, podStat := range podStats {
-            result.Pods = append(result.Pods, podStat)
-        }
-        return result
-    }
-    podsToMake := []struct {
-        name               string
-        requests           api.ResourceList
-        limits             api.ResourceList
-        rootFsUsed         string
-        logsFsUsed         string
-        perLocalVolumeUsed string
-    }{
+    podMaker := makePodWithDiskStats
+    summaryStatsMaker := makeDiskStats
+    podsToMake := []podToMake{
         {name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsUsed: "200Mi"},
         {name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsUsed: "800Mi"},
         {name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), logsFsUsed: "300Mi"},
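A detail worth noting in makeDiskStats (introduced in the first hunk): capacity is synthesized as exactly twice the parsed available bytes, for both the node root filesystem and the image filesystem, so summaries built with it always report the node at 50% disk utilization. A hedged illustration, with invented "10Gi"/"20Gi" figures:

    // Per the helper above, this summary reports:
    //   Node.Fs:              Available 10Gi, Capacity 20Gi
    //   Node.Runtime.ImageFs: Available 20Gi, Capacity 40Gi
    summary := makeDiskStats("10Gi", "20Gi", map[*api.Pod]statsapi.PodStats{})
    _ = summary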
@@ -550,37 +559,9 @@ func TestDiskPressureNodeFs(t *testing.T) {

 // TestMinReclaim verifies that min-reclaim works as desired.
 func TestMinReclaim(t *testing.T) {
-    podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, memoryWorkingSet string) (*api.Pod, statsapi.PodStats) {
-        pod := newPod(name, []api.Container{
-            newContainer(name, requests, limits),
-        }, nil)
-        podStats := newPodMemoryStats(pod, resource.MustParse(memoryWorkingSet))
-        return pod, podStats
-    }
-    summaryStatsMaker := func(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
-        val := resource.MustParse(nodeAvailableBytes)
-        availableBytes := uint64(val.Value())
-        WorkingSetBytes := uint64(val.Value())
-        result := &statsapi.Summary{
-            Node: statsapi.NodeStats{
-                Memory: &statsapi.MemoryStats{
-                    AvailableBytes:  &availableBytes,
-                    WorkingSetBytes: &WorkingSetBytes,
-                },
-            },
-            Pods: []statsapi.PodStats{},
-        }
-        for _, podStat := range podStats {
-            result.Pods = append(result.Pods, podStat)
-        }
-        return result
-    }
-    podsToMake := []struct {
-        name             string
-        requests         api.ResourceList
-        limits           api.ResourceList
-        memoryWorkingSet string
-    }{
+    podMaker := makePodWithMemoryStats
+    summaryStatsMaker := makeMemoryStats
+    podsToMake := []podToMake{
         {name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "200Mi"},
         {name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "800Mi"},
         {name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), memoryWorkingSet: "300Mi"},
@@ -716,48 +697,9 @@ func TestMinReclaim(t *testing.T) {
 }

 func TestNodeReclaimFuncs(t *testing.T) {
-    podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string) (*api.Pod, statsapi.PodStats) {
-        pod := newPod(name, []api.Container{
-            newContainer(name, requests, limits),
-        }, nil)
-        podStats := newPodDiskStats(pod, parseQuantity(rootFsUsed), parseQuantity(logsUsed), parseQuantity(perLocalVolumeUsed))
-        return pod, podStats
-    }
-    summaryStatsMaker := func(rootFsAvailableBytes, imageFsAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
-        rootFsVal := resource.MustParse(rootFsAvailableBytes)
-        rootFsBytes := uint64(rootFsVal.Value())
-        rootFsCapacityBytes := uint64(rootFsVal.Value() * 2)
-        imageFsVal := resource.MustParse(imageFsAvailableBytes)
-        imageFsBytes := uint64(imageFsVal.Value())
-        imageFsCapacityBytes := uint64(imageFsVal.Value() * 2)
-        result := &statsapi.Summary{
-            Node: statsapi.NodeStats{
-                Fs: &statsapi.FsStats{
-                    AvailableBytes: &rootFsBytes,
-                    CapacityBytes:  &rootFsCapacityBytes,
-                },
-                Runtime: &statsapi.RuntimeStats{
-                    ImageFs: &statsapi.FsStats{
-                        AvailableBytes: &imageFsBytes,
-                        CapacityBytes:  &imageFsCapacityBytes,
-                    },
-                },
-            },
-            Pods: []statsapi.PodStats{},
-        }
-        for _, podStat := range podStats {
-            result.Pods = append(result.Pods, podStat)
-        }
-        return result
-    }
-    podsToMake := []struct {
-        name               string
-        requests           api.ResourceList
-        limits             api.ResourceList
-        rootFsUsed         string
-        logsFsUsed         string
-        perLocalVolumeUsed string
-    }{
+    podMaker := makePodWithDiskStats
+    summaryStatsMaker := makeDiskStats
+    podsToMake := []podToMake{
         {name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsUsed: "200Mi"},
         {name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsUsed: "800Mi"},
         {name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), rootFsUsed: "300Mi"},
@@ -950,14 +892,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
         }
         return result
     }
-    podsToMake := []struct {
-        name                     string
-        requests                 api.ResourceList
-        limits                   api.ResourceList
-        rootFsInodesUsed         string
-        logsFsInodesUsed         string
-        perLocalVolumeInodesUsed string
-    }{
+    podsToMake := []podToMake{
         {name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsInodesUsed: "200Mi"},
         {name: "guaranteed-high", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsInodesUsed: "800Mi"},
         {name: "burstable-low", requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), rootFsInodesUsed: "300Mi"},
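The podToMake struct introduced in the first hunk is deliberately a superset of every test's fields: each test sets only the fields its podMaker reads, and the rest stay at their Go zero values. For example, fixtures copied from the hunks above (the variable names are illustrative only):

    // Memory-pressure fixture: only memoryWorkingSet is set.
    mem := podToMake{name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), memoryWorkingSet: "200Mi"}
    // Inode-pressure fixture: only rootFsInodesUsed is set.
    ino := podToMake{name: "guaranteed-low", requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsInodesUsed: "200Mi"}
    _, _ = mem, ino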