Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-05 10:19:50 +00:00
Merge pull request #129717 from esotsal/fix-128837
testing: Fix pod delete timeout failures after InPlacePodVerticalScaling Graduate to Beta commit
This commit is contained in: a271299643
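Across the hunks below the change is the same: e2e cleanup paths stop passing hard-coded pod deletion timeouts (e2epod.DefaultPodDeletionTimeout, time.Minute, 2*time.Minute) to DeleteSync and use the framework's configurable f.Timeouts.PodDelete instead, so suites whose pods take longer to delete after InPlacePodVerticalScaling graduated to Beta no longer trip fixed limits. A minimal sketch of the pattern, assuming the standard e2e framework packages; the helper name is ours for illustration, not code from this PR:

package e2e

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// deletePodWithFrameworkTimeout is a hypothetical helper showing the shape of
// the fix: the deletion timeout comes from the framework's TimeoutContext,
// which scales with the test configuration, rather than a package-level constant.
func deletePodWithFrameworkTimeout(ctx context.Context, f *framework.Framework, podName string) {
	// Before: ...DeleteSync(ctx, podName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
	e2epod.NewPodClient(f).DeleteSync(ctx, podName, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
}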
@@ -115,7 +115,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
 			}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
 		}
 		ginkgo.By("delete the pod with lifecycle hook")
-		podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(15), f.Timeouts.PodDelete)
 		if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
 			ginkgo.By("check prestop hook")
 			if podWithHook.Spec.Containers[0].Lifecycle.PreStop.HTTPGet != nil {
@@ -333,7 +333,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, feature.SidecarContainers, "R
 			}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
 		}
 		ginkgo.By("delete the pod with lifecycle hook")
-		podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(15), f.Timeouts.PodDelete)
 		if podWithHook.Spec.InitContainers[0].Lifecycle.PreStop != nil {
 			ginkgo.By("check prestop hook")
 			if podWithHook.Spec.InitContainers[0].Lifecycle.PreStop.HTTPGet != nil {
@@ -571,7 +571,7 @@ var _ = SIGDescribe(feature.PodLifecycleSleepAction, func() {
 		podClient.CreateSync(ctx, podWithHook)
 		ginkgo.By("delete the pod with lifecycle hook using sleep action")
 		start := time.Now()
-		podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		cost := time.Since(start)
 		// cost should be
 		// longer than 5 seconds (pod should sleep for 5 seconds)
@@ -592,7 +592,7 @@ var _ = SIGDescribe(feature.PodLifecycleSleepAction, func() {
 		podClient.CreateSync(ctx, podWithHook)
 		ginkgo.By("delete the pod with lifecycle hook using sleep action")
 		start := time.Now()
-		podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(2), e2epod.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(2), f.Timeouts.PodDelete)
 		cost := time.Since(start)
 		// cost should be
 		// longer than 2 seconds (we change gracePeriodSeconds to 2 seconds here, and it's less than sleep action)
@@ -618,7 +618,7 @@ var _ = SIGDescribe(feature.PodLifecycleSleepAction, func() {
 		framework.ExpectNoError(e2epod.WaitForContainerTerminated(ctx, f.ClientSet, f.Namespace.Name, p.Name, name, 3*time.Minute))
 		ginkgo.By("delete the pod with lifecycle hook using sleep action")
 		start := time.Now()
-		podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		cost := time.Since(start)
 		// cost should be
 		// shorter than sleep action (container is terminated and sleep action should be ignored)
@@ -650,7 +650,7 @@ var _ = SIGDescribe(feature.PodLifecycleSleepActionAllowZero, func() {
 		podClient.CreateSync(ctx, podWithHook)
 		ginkgo.By("delete the pod with lifecycle hook using sleep action with zero duration")
 		start := time.Now()
-		podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(ctx, podWithHook.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		cost := time.Since(start)
 		// cost should be
 		// longer than 0 seconds (pod shouldn't sleep and the handler should return immediately)
@@ -1158,7 +1158,6 @@ func doPodResizeErrorTests() {
 	for idx := range tests {
 		tc := tests[idx]
 		f := framework.NewDefaultFramework("pod-resize-error-tests")
-		timeouts := f.Timeouts
 
 		ginkgo.It(tc.name, func(ctx context.Context) {
 			podClient := e2epod.NewPodClient(f)
@@ -1197,7 +1196,7 @@ func doPodResizeErrorTests() {
 			framework.ExpectNoError(e2epod.VerifyPodStatusResources(patchedPod, tc.expected))
 
 			ginkgo.By("deleting pod")
-			podClient.DeleteSync(ctx, newPod.Name, metav1.DeleteOptions{}, timeouts.PodDelete)
+			podClient.DeleteSync(ctx, newPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		})
 	}
 }
@@ -307,7 +307,7 @@ var _ = SIGDescribe("Pods", func() {
 		ginkgo.By("verifying pod deletion was observed")
 		deleted := false
 		var lastPod *v1.Pod
-		timer := time.After(e2epod.DefaultPodDeletionTimeout)
+		timer := time.After(f.Timeouts.PodDelete)
 		for !deleted {
 			select {
 			case event := <-w.ResultChan():
@@ -83,8 +83,8 @@ var _ = SIGDescribe("Security Context", func() {
 		createdPod2 := podClient.Create(ctx, makePod(false))
 		ginkgo.DeferCleanup(func(ctx context.Context) {
 			ginkgo.By("delete the pods")
-			podClient.DeleteSync(ctx, createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
-			podClient.DeleteSync(ctx, createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+			podClient.DeleteSync(ctx, createdPod1.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
+			podClient.DeleteSync(ctx, createdPod2.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		})
 		getLogs := func(pod *v1.Pod) (string, error) {
 			err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, createdPod1.Name, f.Namespace.Name, f.Timeouts.PodStart)
@@ -176,7 +176,7 @@ func MatchMultipleContainerOutputs(
 	createdPod := podClient.Create(ctx, pod)
 	defer func() {
 		ginkgo.By("delete the pod")
-		podClient.DeleteSync(ctx, createdPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		podClient.DeleteSync(ctx, createdPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 	}()
 
 	// Wait for client pod to complete.
@@ -606,7 +606,7 @@ var _ = SIGDescribe("Kubectl Port forwarding", func() {
 			ginkgo.By("Wait for client being interrupted")
 			select {
 			case err = <-errorChan:
-			case <-time.After(e2epod.DefaultPodDeletionTimeout):
+			case <-time.After(f.Timeouts.PodDelete):
 			}
 
 			ginkgo.By("Check the client error")
@@ -197,7 +197,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 
 		// and delete the first pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
-		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, fr.Timeouts.PodDelete)
 
 		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
 
@@ -273,7 +273,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 
 		// and delete the first pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
-		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, fr.Timeouts.PodDelete)
 
 		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
 
@@ -358,7 +358,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 
 		// Now recreate the first backend pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
-		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, fr.Timeouts.PodDelete)
 
 		ginkgo.By("Waiting for DaemonSet pods to become ready")
 		err = wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PodStartTimeout, false, func(ctx context.Context) (bool, error) {
@@ -445,7 +445,7 @@ var _ = common.SIGDescribe("Conntrack", func() {
 
 		// and delete the first pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
-		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, fr.Timeouts.PodDelete)
 
 		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
 
@@ -820,7 +820,7 @@ var _ = common.SIGDescribe("LoadBalancers", feature.LoadBalancer, func() {
 
 		// and delete the first pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
-		e2epod.NewPodClient(f).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
 
@@ -952,7 +952,7 @@ var _ = common.SIGDescribe("LoadBalancers", feature.LoadBalancer, func() {
 
 		// and delete the first pod
 		framework.Logf("Cleaning up %s pod", podBackend1)
-		e2epod.NewPodClient(f).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})
 
@@ -76,7 +76,7 @@ var _ = SIGDescribe("ContainerLogRotation", framework.WithSlow(), framework.With
 			},
 		}
 		logRotationPod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
-		ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, logRotationPod.Name, metav1.DeleteOptions{}, time.Minute)
+		ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, logRotationPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 	})

 	ginkgo.It("should be rotated and limited to a fixed amount of files", func(ctx context.Context) {
@@ -146,7 +146,7 @@ var _ = SIGDescribe("ContainerLogRotationWithMultipleWorkers", framework.WithSlo
 			}
 			logRotationPod := e2epod.NewPodClient(f).CreateSync(ctx, pod)
 			logRotationPods = append(logRotationPods, logRotationPod)
-			ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, logRotationPod.Name, metav1.DeleteOptions{}, time.Minute)
+			ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, logRotationPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		}
 	})
 
@@ -148,7 +148,7 @@ func deletePodSyncByName(ctx context.Context, f *framework.Framework, podName st
 	delOpts := metav1.DeleteOptions{
 		GracePeriodSeconds: &gp,
 	}
-	e2epod.NewPodClient(f).DeleteSync(ctx, podName, delOpts, e2epod.DefaultPodDeletionTimeout)
+	e2epod.NewPodClient(f).DeleteSync(ctx, podName, delOpts, f.Timeouts.PodDelete)
 }
 
 func deletePods(ctx context.Context, f *framework.Framework, podNames []string) {
@@ -136,10 +136,10 @@ var _ = SIGDescribe("CriticalPod", framework.WithSerial(), framework.WithDisrupt
 	})
 	ginkgo.AfterEach(func(ctx context.Context) {
 		// Delete Pods
-		e2epod.NewPodClient(f).DeleteSync(ctx, guaranteedPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
-		e2epod.NewPodClient(f).DeleteSync(ctx, burstablePodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
-		e2epod.NewPodClient(f).DeleteSync(ctx, bestEffortPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
-		e2epod.PodClientNS(f, kubeapi.NamespaceSystem).DeleteSync(ctx, criticalPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, guaranteedPodName, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
+		e2epod.NewPodClient(f).DeleteSync(ctx, burstablePodName, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
+		e2epod.NewPodClient(f).DeleteSync(ctx, bestEffortPodName, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
+		e2epod.PodClientNS(f, kubeapi.NamespaceSystem).DeleteSync(ctx, criticalPodName, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		// Log Events
 		logPodEvents(ctx, f)
 		logNodeEvents(ctx, f)
@@ -256,12 +256,12 @@ var _ = SIGDescribe("Device Manager", framework.WithSerial(), nodefeature.Device
 			"the pod succeeded to start, when it should fail with the admission error")
 
 		ginkgo.By("removing application pods")
-		e2epod.NewPodClient(f).DeleteSync(ctx, testPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+		e2epod.NewPodClient(f).DeleteSync(ctx, testPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 	})
 
 	ginkgo.AfterEach(func(ctx context.Context) {
 		ginkgo.By("Deleting the device plugin pod")
-		e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute)
+		e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		ginkgo.By("Deleting the directory and file setup for controlling registration")
 		err := os.RemoveAll(triggerPathDir)
@@ -276,7 +276,7 @@ var _ = SIGDescribe("Device Manager", framework.WithSerial(), nodefeature.Device
 			}
 
 			framework.Logf("Deleting pod: %s", p.Name)
-			e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, 2*time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		}
 
 		ginkgo.By("Waiting for devices to become unavailable on the local node")
@@ -197,7 +197,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 
 		ginkgo.AfterEach(func(ctx context.Context) {
 			ginkgo.By("Deleting the device plugin pod")
-			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 			ginkgo.By("Deleting any Pods created by the test")
 			l, err := e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{})
@@ -208,7 +208,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 				}
 
 				framework.Logf("Deleting pod: %s", p.Name)
-				e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, 2*time.Minute)
+				e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 			}
 
 			restartKubelet(ctx, true)
@@ -492,7 +492,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 			gomega.Expect(e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace)).To(gomega.Succeed())
 
 			ginkgo.By("Deleting the device plugin")
-			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 			waitForContainerRemoval(ctx, devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
 
 			gomega.Eventually(getNodeResourceValues, devicePluginGracefulTimeout, f.Timeouts.Poll).WithContext(ctx).WithArguments(SampleDeviceResourceName).Should(gomega.Equal(ResourceValue{Allocatable: 0, Capacity: int(expectedSampleDevsAmount)}))
@@ -539,7 +539,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 			deleteOptions := metav1.DeleteOptions{
 				GracePeriodSeconds: &gp,
 			}
-			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, deleteOptions, time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, deleteOptions, f.Timeouts.PodDelete)
 			waitForContainerRemoval(ctx, devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
 
 			ginkgo.By("Recreating the plugin pod")
@@ -616,7 +616,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 			deleteOptions := metav1.DeleteOptions{
 				GracePeriodSeconds: &gp,
 			}
-			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, deleteOptions, time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, deleteOptions, f.Timeouts.PodDelete)
 			waitForContainerRemoval(ctx, devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
 
 			ginkgo.By("Recreating the plugin pod")
@@ -901,7 +901,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
 
 		ginkgo.AfterEach(func(ctx context.Context) {
 			ginkgo.By("Deleting the device plugin pod")
-			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, devicePluginPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 			ginkgo.By("Deleting any Pods created by the test")
 			l, err := e2epod.NewPodClient(f).List(ctx, metav1.ListOptions{})
@@ -915,7 +915,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
 				e2epod.NewPodClient(f).RemoveFinalizer(context.TODO(), p.Name, testFinalizer)
 
 				framework.Logf("Deleting pod: %s", p.Name)
-				e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, 2*time.Minute)
+				e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 			}
 
 			err = os.Remove(triggerPathDir)
@@ -251,7 +251,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
 		ginkgo.AfterEach(func(ctx context.Context) {
 			for _, pod := range test.testPods {
 				ginkgo.By(fmt.Sprintf("Deleting Pod %v", pod.podName))
-				e2epod.NewPodClient(f).DeleteSync(ctx, pod.podName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+				e2epod.NewPodClient(f).DeleteSync(ctx, pod.podName, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 			}
 
 			ginkgo.By("Making sure all containers get cleaned up")
@@ -365,7 +365,7 @@ var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, func
 	// we should use JustAfterEach because framework will teardown the client under the AfterEach method
 	ginkgo.JustAfterEach(func(ctx context.Context) {
 		ginkgo.By(fmt.Sprintf("deleting test pod %s", testpod.Name))
-		e2epod.NewPodClient(f).DeleteSync(ctx, testpod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+		e2epod.NewPodClient(f).DeleteSync(ctx, testpod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		releaseHugepages(ctx)
 
@@ -74,7 +74,7 @@ var _ = SIGDescribe("ImageGarbageCollect", framework.WithSerial(), nodefeature.G
 		allImages, err := is.ListImages(context.Background(), &runtimeapi.ImageFilter{})
 		framework.ExpectNoError(err)
 
-		e2epod.NewPodClient(f).DeleteSync(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		// Even though the image gc max timing is less, we are bound by the kubelet's
 		// ImageGCPeriod, which is hardcoded to 5 minutes.
@@ -94,7 +94,7 @@ var _ = SIGDescribe("ImageGarbageCollect", framework.WithSerial(), nodefeature.G
 		allImages, err := is.ListImages(context.Background(), &runtimeapi.ImageFilter{})
 		framework.ExpectNoError(err)
 
-		e2epod.NewPodClient(f).DeleteSync(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		restartKubelet(ctx, true)
 
@@ -350,7 +350,7 @@ var _ = SIGDescribe("Memory Manager", framework.WithDisruptive(), framework.With
 	ginkgo.JustAfterEach(func(ctx context.Context) {
 		// delete the test pod
 		if testPod != nil && testPod.Name != "" {
-			e2epod.NewPodClient(f).DeleteSync(ctx, testPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, testPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		}
 
 		// release hugepages
@@ -555,7 +555,7 @@ var _ = SIGDescribe("Memory Manager", framework.WithDisruptive(), framework.With
 		ginkgo.JustAfterEach(func(ctx context.Context) {
 			// delete the test pod 2
 			if testPod2.Name != "" {
-				e2epod.NewPodClient(f).DeleteSync(ctx, testPod2.Name, metav1.DeleteOptions{}, 2*time.Minute)
+				e2epod.NewPodClient(f).DeleteSync(ctx, testPod2.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 			}
 		})
 	})
@@ -634,7 +634,7 @@ var _ = SIGDescribe("Memory Manager", framework.WithDisruptive(), framework.With
 		ginkgo.JustAfterEach(func(ctx context.Context) {
 			for _, workloadPod := range workloadPods {
 				if workloadPod.Name != "" {
-					e2epod.NewPodClient(f).DeleteSync(ctx, workloadPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
+					e2epod.NewPodClient(f).DeleteSync(ctx, workloadPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 				}
 			}
 		})
@@ -94,7 +94,7 @@ var _ = SIGDescribe("Node Performance Testing", framework.WithSerial(), framewor
 		delOpts := metav1.DeleteOptions{
 			GracePeriodSeconds: &gp,
 		}
-		e2epod.NewPodClient(f).DeleteSync(ctx, pod.Name, delOpts, e2epod.DefaultPodDeletionTimeout)
+		e2epod.NewPodClient(f).DeleteSync(ctx, pod.Name, delOpts, f.Timeouts.PodDelete)
 
 		// We are going to give some more time for the CPU manager to do any clean
 		// up it needs to do now that the pod has been deleted. Otherwise we may
@@ -175,7 +175,7 @@ var _ = SIGDescribe("Hostname of Pod", framework.WithNodeConformance(), func() {
 		// Create Pod
 		launchedPod := e2epod.NewPodClient(f).Create(ctx, pod)
 		// Ensure we delete pod
-		ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, launchedPod.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
+		ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, launchedPod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 
 		// Pod should remain in the pending state generating events with reason FailedCreatePodSandBox
 		// Expected Message Error Event
@@ -1034,7 +1034,7 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource
 			pod := makePodResourcesTestPod(pd)
 			pod.Spec.Containers[0].Command = []string{"sh", "-c", "/bin/true"}
 			pod = e2epod.NewPodClient(f).Create(ctx, pod)
-			defer e2epod.NewPodClient(f).DeleteSync(ctx, pod.Name, metav1.DeleteOptions{}, time.Minute)
+			defer e2epod.NewPodClient(f).DeleteSync(ctx, pod.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 			err := e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "Pod Succeeded", time.Minute*2, testutils.PodSucceeded)
 			framework.ExpectNoError(err)
 			endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
@@ -34,7 +34,6 @@ import (
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
 	v1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -373,12 +372,7 @@ func deletePodsSync(ctx context.Context, f *framework.Framework, pods []*v1.Pod)
 			defer ginkgo.GinkgoRecover()
 			defer wg.Done()
 
-			err := e2epod.NewPodClient(f).Delete(ctx, pod.ObjectMeta.Name, *metav1.NewDeleteOptions(30))
-			if apierrors.IsNotFound(err) {
-				framework.Failf("Unexpected error trying to delete pod %s: %v", pod.Name, err)
-			}
-
-			framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name, 10*time.Minute))
+			e2epod.NewPodClient(f).DeleteSync(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		}()
 	}
 	wg.Wait()
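The deletePodsSync hunk above is the one place the PR simplifies control flow rather than only swapping a timeout constant: the manual Delete call, its error check, and the explicit WaitForPodNotFoundInNamespace wait collapse into a single DeleteSync, which also lets the apierrors import go (see the import hunk above). A hedged sketch of the resulting shape, assuming DeleteSync issues the delete and waits for the pod to disappear, as it is used throughout this PR; the function name here is ours, not the repo's helper:

package e2e

import (
	"context"
	"sync"

	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// deletePodsSyncSketch deletes pods concurrently, one goroutine per pod, and
// returns once every pod is gone or a per-pod framework timeout has failed the test.
func deletePodsSyncSketch(ctx context.Context, f *framework.Framework, pods []*v1.Pod) {
	var wg sync.WaitGroup
	for _, pod := range pods {
		wg.Add(1)
		go func(pod *v1.Pod) {
			defer ginkgo.GinkgoRecover() // surface failures from this goroutine to ginkgo
			defer wg.Done()
			// DeleteSync both issues the delete and waits for the pod to vanish.
			e2epod.NewPodClient(f).DeleteSync(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
		}(pod)
	}
	wg.Wait()
}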
@@ -181,7 +181,7 @@ func addAfterEachForCleaningUpPods(f *framework.Framework) {
 				continue
 			}
 			framework.Logf("Deleting pod: %s", p.Name)
-			e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, 2*time.Minute)
+			e2epod.NewPodClient(f).DeleteSync(ctx, p.Name, metav1.DeleteOptions{}, f.Timeouts.PodDelete)
 		}
 	})
 }