Remove sleepAfterExecuting param from diskConsumingPod

Kevin Torres
2025-05-01 19:25:02 +00:00
parent 388046c3ea
commit 7cf39066b3
2 changed files with 15 additions and 15 deletions
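
Every call site in this diff passed true for sleepAfterExecuting, so the parameter is dropped from diskConsumingPod and the value is inlined at its single podWithCommand call. The signature change, in short:

// Before:
func diskConsumingPod(name string, diskConsumedMB int, volumeSource *v1.VolumeSource, resources v1.ResourceRequirements, sleepAfterExecuting bool) *v1.Pod
// After:
func diskConsumingPod(name string, diskConsumedMB int, volumeSource *v1.VolumeSource, resources v1.ResourceRequirements) *v1.Pod

(inodeConsumingPod keeps its sleepAfterExecuting parameter, as the last hunk of the first file shows.)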

View File

@@ -288,7 +288,7 @@ var _ = SIGDescribe("LocalStorageEviction", framework.WithSlow(), framework.With
evictionPriority: 1,
// TODO(#127864): Container runtime may not immediately free up the resources after the pod eviction,
// causing the test to fail. We provision an emptyDir volume to avoid relying on the runtime behavior.
-pod: diskConsumingPod("container-disk-hog", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}, true),
+pod: diskConsumingPod("container-disk-hog", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}),
},
{
evictionPriority: 0,
@@ -329,7 +329,7 @@ var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework.
evictionPriority: 1,
// TODO(#127864): Container runtime may not immediately free up the resources after the pod eviction,
// causing the test to fail. We provision an emptyDir volume to avoid relying on the runtime behavior.
-pod: diskConsumingPod("container-disk-hog", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}, true),
+pod: diskConsumingPod("container-disk-hog", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}),
},
{
evictionPriority: 0,
@@ -371,7 +371,7 @@ var _ = SIGDescribe("LocalStorageSoftEvictionNotOverwriteTerminationGracePeriodS
evictionMaxPodGracePeriod: evictionSoftGracePeriod,
evictionSoftGracePeriod: evictionMaxPodGracePeriod,
evictionPriority: 1,
pod: diskConsumingPod("container-disk-hog", lotsOfDisk, nil, v1.ResourceRequirements{}, true),
pod: diskConsumingPod("container-disk-hog", lotsOfDisk, nil, v1.ResourceRequirements{}),
},
})
})
@@ -397,32 +397,32 @@ var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow(
evictionPriority: 1, // This pod should be evicted because of an emptyDir (default storage type) usage violation
pod: diskConsumingPod("emptydir-disk-sizelimit", useOverLimit, &v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{SizeLimit: &sizeLimit},
-}, v1.ResourceRequirements{}, true),
+}, v1.ResourceRequirements{}),
},
{
evictionPriority: 1, // This pod should cross the container limit by writing to its writable layer.
pod: diskConsumingPod("container-disk-limit", useOverLimit, nil, v1.ResourceRequirements{Limits: containerLimit}, true),
pod: diskConsumingPod("container-disk-limit", useOverLimit, nil, v1.ResourceRequirements{Limits: containerLimit}),
},
{
evictionPriority: 1, // This pod should hit the container limit by writing to an emptydir
pod: diskConsumingPod("container-emptydir-disk-limit", useOverLimit, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
-v1.ResourceRequirements{Limits: containerLimit}, true),
+v1.ResourceRequirements{Limits: containerLimit}),
},
{
evictionPriority: 0, // This pod should not be evicted because MemoryBackedVolumes cannot use more space than is allocated to them since SizeMemoryBackedVolumes was enabled
pod: diskConsumingPod("emptydir-memory-sizelimit", useOverLimit, &v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{Medium: "Memory", SizeLimit: &sizeLimit},
-}, v1.ResourceRequirements{}, true),
+}, v1.ResourceRequirements{}),
},
{
evictionPriority: 0, // This pod should not be evicted because it uses less than its limit
pod: diskConsumingPod("emptydir-disk-below-sizelimit", useUnderLimit, &v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{SizeLimit: &sizeLimit},
-}, v1.ResourceRequirements{}, true),
+}, v1.ResourceRequirements{}),
},
{
evictionPriority: 0, // This pod should not be evicted because it uses less than its limit
pod: diskConsumingPod("container-disk-below-sizelimit", useUnderLimit, nil, v1.ResourceRequirements{Limits: containerLimit}, true),
pod: diskConsumingPod("container-disk-below-sizelimit", useUnderLimit, nil, v1.ResourceRequirements{Limits: containerLimit}),
},
})
})
@@ -612,13 +612,13 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering", framework.WithSlow()
evictionPriority: 2,
// TODO(#127864): Container runtime may not immediately free up the resources after the pod eviction,
// causing the test to fail. We provision an emptyDir volume to avoid relying on the runtime behavior.
-pod: diskConsumingPod("best-effort-disk", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}, true),
+pod: diskConsumingPod("best-effort-disk", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}),
},
{
evictionPriority: 1,
// TODO(#127864): Container runtime may not immediately free up the resources after the pod eviction,
// causing the test to fail. We provision an emptyDir volume to avoid relying on the runtime behavior.
-pod: diskConsumingPod("high-priority-disk", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}, true),
+pod: diskConsumingPod("high-priority-disk", lotsOfDisk, &v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, v1.ResourceRequirements{}),
},
{
evictionPriority: 0,
@@ -630,7 +630,7 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering", framework.WithSlow()
Limits: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("300Mi"),
},
-}, true),
+}),
},
}
specs[1].pod.Spec.PriorityClassName = highPriorityClassName
@@ -1260,13 +1260,13 @@ func inodeConsumingPod(name string, numFiles int, volumeSource *v1.VolumeSource,
return podWithCommand(volumeSource, v1.ResourceRequirements{}, numFiles, name, fmt.Sprintf("touch %s${i}.txt; sleep 0.001;", filepath.Join(path, "file")), sleepAfterExecuting)
}
-func diskConsumingPod(name string, diskConsumedMB int, volumeSource *v1.VolumeSource, resources v1.ResourceRequirements, sleepAfterExecuting bool) *v1.Pod {
+func diskConsumingPod(name string, diskConsumedMB int, volumeSource *v1.VolumeSource, resources v1.ResourceRequirements) *v1.Pod {
path := ""
if volumeSource != nil {
path = volumeMountPath
}
// Each iteration writes 1 MiB, so do diskConsumedMB iterations.
-return podWithCommand(volumeSource, resources, diskConsumedMB, name, fmt.Sprintf("dd if=/dev/urandom of=%s${i} bs=1048576 count=1 2>/dev/null; sleep .1;", filepath.Join(path, "file")), sleepAfterExecuting)
+return podWithCommand(volumeSource, resources, diskConsumedMB, name, fmt.Sprintf("dd if=/dev/urandom of=%s${i} bs=1048576 count=1 2>/dev/null; sleep .1;", filepath.Join(path, "file")), true)
}
func pidConsumingPod(name string, numProcesses int) *v1.Pod {
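
For orientation, here is a rough sketch of what the podWithCommand helper that diskConsumingPod and inodeConsumingPod delegate to might look like. This is a reconstruction from the call sites above, not the actual implementation: the shell-loop wrapper, the busyboxImage constant, the volume name, and the metav1 import are all assumptions.

func podWithCommand(volumeSource *v1.VolumeSource, resources v1.ResourceRequirements, iterations int, name, command string, sleepAfterExecuting bool) *v1.Pod {
	// Run the per-iteration command `iterations` times; ${i} in the command
	// string expands to the loop counter, so diskConsumingPod writes file0,
	// file1, ... under the mount path.
	script := fmt.Sprintf("i=0; while [ $i -lt %d ]; do %s i=$(($i+1)); done", iterations, command)
	if sleepAfterExecuting {
		// Keep the container alive after the writes finish so the kubelet
		// evicts a running pod rather than observing a completed one.
		script += "; while true; do sleep 5; done"
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name + "-pod"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:      name + "-container",
				Image:     busyboxImage, // assumed image constant
				Command:   []string{"sh", "-c", script},
				Resources: resources,
			}},
		},
	}
	if volumeSource != nil {
		// Mount the caller-provided volume (e.g. an emptyDir) at volumeMountPath,
		// matching the path logic in diskConsumingPod above.
		pod.Spec.Volumes = []v1.Volume{{Name: "test-volume", VolumeSource: *volumeSource}}
		pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{Name: "test-volume", MountPath: volumeMountPath}}
	}
	return pod
}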

View File

@@ -104,7 +104,7 @@ var _ = SIGDescribe("KubeletSeparateDiskGC", feature.KubeletSeparateDiskGC, func
// This pod should exceed disk capacity on nodeFs since it writes a lot to the writable layer.
evictionPriority: 1,
pod: diskConsumingPod("container-emptydir-disk-limit", diskTestInMb, nil,
-v1.ResourceRequirements{}, true),
+v1.ResourceRequirements{}),
},
})
})
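
One concrete consequence of the dd command above: bs=1048576 count=1 writes exactly 1 MiB per iteration, so the iteration count doubles as the megabyte budget. A hypothetical caller under the new signature (the pod name is illustrative; the sizes echo the 300Mi spec in the PriorityLocalStorageEvictionOrdering hunk):

// 300 iterations × 1 MiB ≈ 300 MiB of disk, sized against a 300Mi
// ephemeral-storage limit. Note there is no trailing boolean anymore.
pod := diskConsumingPod("example-disk-hog", 300, nil, v1.ResourceRequirements{
	Limits: v1.ResourceList{
		v1.ResourceEphemeralStorage: resource.MustParse("300Mi"),
	},
})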