fix InodeEvictionTest

Anish Shah 2024-10-04 22:49:14 +00:00
parent a37101a202
commit 91b70a9f19


@@ -92,11 +92,9 @@ var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logInodeMetrics, []podEvictSpec{
{
evictionPriority: 1,
pod: inodeConsumingPod("container-inode-hog", lotsOfFiles, nil),
},
{
evictionPriority: 1,
pod: inodeConsumingPod("volume-inode-hog", lotsOfFiles, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}),
// TODO(#127864): Container runtime may not immediately free up the resources after the pod eviction,
// causing the test to fail. We provision an emptyDir volume to avoid relying on the runtime behavior.
pod: inodeConsumingPod("volume-inode-hog", lotsOfFiles, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}),
},
{
evictionPriority: 0,
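
The inodeConsumingPod helper is defined elsewhere in the test package and is not part of this diff. As a rough sketch of the emptyDir wiring the TODO comment describes (the helper name, package, image, and paths below are illustrative assumptions, not the actual implementation):

```go
// Sketch only: approximates what an inode-consuming pod can look like.
package eviction

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newInodeHogPod creates a pod that exhausts inodes by touching many files.
// If volumeSource is non-nil, the files land in a mounted pod volume (e.g.
// emptyDir) that the kubelet reclaims on pod teardown, instead of the
// container's writable layer, whose cleanup depends on runtime GC timing.
func newInodeHogPod(name string, files int, volumeSource *v1.VolumeSource) *v1.Pod {
	dir := "/var/log/hog" // writable-layer path when no volume is given
	var volumes []v1.Volume
	var mounts []v1.VolumeMount
	if volumeSource != nil {
		dir = "/test-volume"
		volumes = []v1.Volume{{Name: "test-volume", VolumeSource: *volumeSource}}
		mounts = []v1.VolumeMount{{Name: "test-volume", MountPath: dir}}
	}
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name + "-pod"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Volumes:       volumes,
			Containers: []v1.Container{{
				Name:  name,
				Image: "busybox", // illustrative image
				// Each empty file consumes one inode on the backing filesystem.
				Command: []string{"sh", "-c", fmt.Sprintf(
					"mkdir -p %s && i=0; while [ $i -lt %d ]; do touch %s/f$i; i=$((i+1)); done; sleep 86400",
					dir, files, dir)},
				VolumeMounts: mounts,
			}},
		},
	}
}
```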
@@ -199,8 +197,8 @@ var _ = SIGDescribe("LocalStorageEviction", framework.WithSlow(), framework.With
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, []podEvictSpec{
{
evictionPriority: 1,
// TODO(#127864): Due to a race condition between eviction manager and containerd GC,
// we provision an emptyDir volume instead of writing to the writable container layers.
// TODO(#127864): Container runtime may not immediately free up the resources after the pod eviction,
// causing the test to fail. We provision an emptyDir volume to avoid relying on the runtime behavior.
pod: diskConsumingPod("container-disk-hog", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}),
},
{
@@ -240,8 +238,8 @@ var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework.
runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logDiskMetrics, []podEvictSpec{
{
evictionPriority: 1,
// TODO(#127864): Due to a race condition between eviction manager and containerd GC,
// we provision an emptyDir volume instead of writing to the writable container layers.
// TODO(#127864): Container runtime may not immediately free up the resources after the pod eviction,
// causing the test to fail. We provision an emptyDir volume to avoid relying on the runtime behavior.
pod: diskConsumingPod("container-disk-hog", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}),
},
{
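
The LocalStorageEviction and LocalStorageSoftEviction hunks above make the same comment change; in both, the disk hog already writes into an emptyDir volume. A minimal sketch of the disk-consuming analog (names and image are illustrative; the actual diskConsumingPod helper is outside this diff):

```go
// Sketch only: fills a mounted emptyDir with dd so the consumed blocks live
// in the pod's volume rather than the container's writable layer, making
// reclamation independent of container-runtime GC timing.
package eviction

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func diskHogContainer(megabytes int) v1.Container {
	return v1.Container{
		Name:  "container-disk-hog",
		Image: "busybox", // illustrative image
		Command: []string{"sh", "-c", fmt.Sprintf(
			"dd if=/dev/zero of=/test-volume/file bs=1M count=%d; sleep 86400", megabytes)},
		VolumeMounts: []v1.VolumeMount{{Name: "test-volume", MountPath: "/test-volume"}},
	}
}
```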
@@ -483,14 +481,14 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering", framework.WithSlow()
specs := []podEvictSpec{
{
evictionPriority: 2,
// TODO(#127864): Due to a race condition between eviction manager and containerd GC,
// we provision an emptyDir volume instead of writing to the writable container layers.
// TODO(#127864): Container runtime may not immediately free up the resources after the pod eviction,
// causing the test to fail. We provision an emptyDir volume to avoid relying on the runtime behavior.
pod: diskConsumingPod("best-effort-disk", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}),
},
{
evictionPriority: 1,
// TODO(#127864): Due to a race condition between eviction manager and containerd GC,
// we provision an emptyDir volume instead of writing to the writable container layers.
// TODO(#127864): Container runtime may not immediately free up the resources after the pod eviction,
// causing the test to fail. We provision an emptyDir volume to avoid relying on the runtime behavior.
pod: diskConsumingPod("high-priority-disk", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}),
},
{
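
In the PriorityLocalStorageEvictionOrdering specs above, evictionPriority appears to encode the expected order under disk pressure: the priority-2 best-effort pod goes first, then the priority-1 high-priority pod, while priority-0 pods must survive. A hedged sketch of that invariant as a standalone check (assumed semantics; not the framework's actual verification logic):

```go
package eviction

import "fmt"

// podEvictSpec mirrors the shape used above: higher evictionPriority pods are
// expected to be evicted earlier; priority 0 pods must not be evicted at all.
// (Assumed semantics, for illustration.)
type podEvictSpec struct {
	evictionPriority int
	podName          string
}

// checkEvictionOrder validates an observed eviction sequence against the
// specs. Hypothetical helper, not the framework's actual assertion code.
func checkEvictionOrder(specs []podEvictSpec, evicted []string) error {
	priority := make(map[string]int, len(specs))
	for _, s := range specs {
		priority[s.podName] = s.evictionPriority
	}
	prev := int(^uint(0) >> 1) // max int; priorities must be non-increasing
	for _, name := range evicted {
		p, ok := priority[name]
		if !ok {
			return fmt.Errorf("unexpected pod evicted: %s", name)
		}
		if p == 0 {
			return fmt.Errorf("pod %s (evictionPriority 0) must not be evicted", name)
		}
		if p > prev {
			return fmt.Errorf("pod %s (priority %d) evicted after a lower-priority pod", name, p)
		}
		prev = p
	}
	return nil
}
```

A harness could feed the observed eviction sequence into checkEvictionOrder after the pressure run to confirm the ordering.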