mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-19 09:52:49 +00:00
test: Bump timeout for runPausePod
The `runPausePod` timeout was previously 1 minute, which appears to be too short and was causing some tests to time out. Switch to `f.Timeouts.PodStartShort`, the common timeout used to wait for pods to start, which defaults to 5 minutes. Also refactor to remove `runPausePodWithoutTimeout` and instead rely on `runPausePod`, since we do not make the timeout customizable directly (it can be changed via the test framework if desired). Signed-off-by: David Porter <david@porter.me>
This commit is contained in:
parent
0cb1eabbf0
commit
71719a6036
@ -952,12 +952,8 @@ func createPausePod(ctx context.Context, f *framework.Framework, conf pausePodCo
|
||||
}
|
||||
|
||||
func runPausePod(ctx context.Context, f *framework.Framework, conf pausePodConfig) *v1.Pod {
|
||||
return runPausePodWithTimeout(ctx, f, conf, framework.PollShortTimeout)
|
||||
}
|
||||
|
||||
func runPausePodWithTimeout(ctx context.Context, f *framework.Framework, conf pausePodConfig, timeout time.Duration) *v1.Pod {
|
||||
pod := createPausePod(ctx, f, conf)
|
||||
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, timeout))
|
||||
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStartShort))
|
||||
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, conf.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
return pod
|
||||
|
@ -192,14 +192,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
|
||||
|
||||
ginkgo.By("Run a high priority pod that has same requirements as that of lower priority pod")
|
||||
// Create a high priority pod and make sure it is scheduled on the same node as the low priority pod.
|
||||
runPausePodWithTimeout(ctx, f, pausePodConfig{
|
||||
runPausePod(ctx, f, pausePodConfig{
|
||||
Name: "preemptor-pod",
|
||||
PriorityClassName: highPriorityClassName,
|
||||
Resources: &v1.ResourceRequirements{
|
||||
Requests: podRes,
|
||||
Limits: podRes,
|
||||
},
|
||||
}, framework.PodStartShortTimeout)
|
||||
})
|
||||
|
||||
preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(ctx, pods[0].Name, metav1.GetOptions{})
|
||||
podPreempted := (err != nil && apierrors.IsNotFound(err)) ||
|
||||
@ -290,7 +290,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
|
||||
framework.Failf("Error cleanup pod `%s/%s`: %v", metav1.NamespaceSystem, "critical-pod", err)
|
||||
}
|
||||
}()
|
||||
runPausePodWithTimeout(ctx, f, pausePodConfig{
|
||||
runPausePod(ctx, f, pausePodConfig{
|
||||
Name: "critical-pod",
|
||||
Namespace: metav1.NamespaceSystem,
|
||||
PriorityClassName: scheduling.SystemClusterCritical,
|
||||
@ -298,7 +298,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
|
||||
Requests: podRes,
|
||||
Limits: podRes,
|
||||
},
|
||||
}, framework.PodStartShortTimeout)
|
||||
})
|
||||
|
||||
defer func() {
|
||||
// Clean-up the critical pod
|
||||
|
Loading…
Reference in New Issue
Block a user