Merge pull request #63946 from msau42/fix-reconstruction-flake
Automatic merge from submit-queue (batch tested with PRs 63920, 63716, 63928, 60553, 63946). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Wait for pod deletion instead of termination in reconstruction test

**What this PR does / why we need it**: Change volume test to wait for pod deletion instead of pod termination

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*: Addresses https://github.com/kubernetes/kubernetes/issues/63923

**Special notes for your reviewer**:

**Release note**:
```release-note
NONE
```
This commit is contained in: commit e6688fc65a
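For context: a pod that has *terminated* (reached a terminal phase such as Succeeded or Failed) can still exist as an API object, for example while it sits in Terminating during volume teardown, whereas a pod is only *deleted* once a GET for it returns NotFound. Below is a minimal sketch of that distinction, assuming a recent client-go; the package and helper names are illustrative, not the e2e framework's actual code.

```go
// Illustrative sketch only: "terminated" vs. "deleted" for a pod.
package podcheck

import (
	"context"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// podTerminated reports whether the pod has reached a terminal phase.
// The API object may still exist (e.g. stuck Terminating while volumes
// are torn down), so this alone does not prove cleanup finished.
func podTerminated(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
}

// podDeleted reports whether the pod object is gone from the API server,
// which is the stronger condition the updated test waits for.
func podDeleted(c clientset.Interface, ns, name string) (bool, error) {
	_, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		return true, nil
	}
	return false, err
}
```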
```diff
@@ -116,6 +116,9 @@ const (
 	// minutes by slow docker pulls or something else.
 	PodStartShortTimeout = 1 * time.Minute
 
+	// How long to wait for a pod to be deleted
+	PodDeleteTimeout = 5 * time.Minute
+
 	// If there are any orphaned namespaces to clean up, this test is running
 	// on a long lived cluster. A long wait here is preferably to spurious test
 	// failures caused by leaked resources from a previous test run.
```
```diff
@@ -17,7 +17,6 @@ go_library(
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
     ],
```
```diff
@@ -24,7 +24,6 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 	"k8s.io/api/core/v1"
-	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
```
```diff
@@ -191,14 +190,11 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 	}
 	Expect(err).NotTo(HaveOccurred())
 
-	// Wait for pod to enter "Terminating state"
-	time.Sleep(30 * time.Second)
-
 	By("Starting the kubelet and waiting for pod to delete.")
 	KubeletCommand(KStart, c, clientPod)
-	err = f.WaitForPodTerminated(clientPod.Name, "")
-	if !apierrs.IsNotFound(err) && err != nil {
-		Expect(err).NotTo(HaveOccurred(), "Expected pod to terminate.")
+	err = f.WaitForPodNotFound(clientPod.Name, framework.PodDeleteTimeout)
+	if err != nil {
+		Expect(err).NotTo(HaveOccurred(), "Expected pod to be not found.")
 	}
 
 	if forceDelete {
```
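For readers unfamiliar with the framework helper, a WaitForPodNotFound-style wait can be sketched as a bounded poll against the API server. This is an assumption about its general shape (interval, names, and error handling are illustrative), not the framework's actual implementation.

```go
// Illustrative sketch of a bounded "wait until the pod is gone" helper.
package podwait

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPodNotFound polls until a GET on the pod returns NotFound
// (deletion is complete) or the timeout expires. Compared to the old
// fixed 30s sleep plus a "terminated" check, this cannot succeed while
// the pod is merely Terminating, which is what removes the flake.
func waitForPodNotFound(c clientset.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // pod object is gone
		}
		if err != nil {
			return false, err // unexpected API error: stop waiting
		}
		return false, nil // pod still exists (possibly Terminating): keep polling
	})
}
```

Bounded by a cap like the new PodDeleteTimeout, such a wait fails loudly if deletion never completes instead of passing early.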