diff --git a/test/e2e_node/device_manager_test.go b/test/e2e_node/device_manager_test.go
index c290a02d7bd..4a1f9398ef8 100644
--- a/test/e2e_node/device_manager_test.go
+++ b/test/e2e_node/device_manager_test.go
@@ -401,7 +401,7 @@ var _ = SIGDescribe("Device Manager", framework.WithSerial(), feature.DeviceMana
 			Should(HaveAllocatableDevices())
 	})
 
-	ginkgo.It("should deploy pod consuming devices first but fail with admission error after kubelet restart in case device plugin hasn't re-registered", func(ctx context.Context) {
+	framework.It("should deploy pod consuming devices first but fail with admission error after kubelet restart in case device plugin hasn't re-registered", framework.WithFlaky(), func(ctx context.Context) {
 		var err error
 		podCMD := "while true; do sleep 1000; done;"
 
diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go
index 41f348c6768..749dabc9e62 100644
--- a/test/e2e_node/device_plugin_test.go
+++ b/test/e2e_node/device_plugin_test.go
@@ -844,7 +844,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
 	// simulate node reboot scenario by removing pods using CRI before kubelet is started. In addition to that,
 	// intentionally a scenario is created where after node reboot, application pods requesting devices appear before the device plugin pod
 	// exposing those devices as resource has restarted. The expected behavior is that the application pod fails at admission time.
-	ginkgo.It("Keeps device plugin assignments across node reboots (no pod restart, no device plugin re-registration)", func(ctx context.Context) {
+	framework.It("Keeps device plugin assignments across node reboots (no pod restart, no device plugin re-registration)", framework.WithFlaky(), func(ctx context.Context) {
 		podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalForever)
 		pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(SampleDeviceResourceName, podRECMD))
 		deviceIDRE := "stub devices: (Dev-[0-9]+)"
diff --git a/test/e2e_node/node_shutdown_linux_test.go b/test/e2e_node/node_shutdown_linux_test.go
index 3af1b2a9a7a..88e7f4d3fca 100644
--- a/test/e2e_node/node_shutdown_linux_test.go
+++ b/test/e2e_node/node_shutdown_linux_test.go
@@ -376,7 +376,7 @@ var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.
 		})
 	})
 
-	ginkgo.Context("when gracefully shutting down with Pod priority", func() {
+	framework.Context("when gracefully shutting down with Pod priority", framework.WithFlaky(), func() {
 
 		const (
 			pollInterval = 1 * time.Second
diff --git a/test/e2e_node/podresources_test.go b/test/e2e_node/podresources_test.go
index ed3db638ac3..edeb4ca61fa 100644
--- a/test/e2e_node/podresources_test.go
+++ b/test/e2e_node/podresources_test.go
@@ -953,7 +953,7 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource
 		})
 	})
 
-	ginkgo.Context("without SRIOV devices in the system", func() {
+	framework.Context("without SRIOV devices in the system", framework.WithFlaky(), func() {
 		ginkgo.BeforeEach(func() {
 			requireLackOfSRIOVDevices()
 		})
@@ -1224,7 +1224,7 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource
 		})
 	})
 
-	ginkgo.Context("with the builtin rate limit values", func() {
+	framework.Context("with the builtin rate limit values", framework.WithFlaky(), func() {
		ginkgo.It("should hit throttling when calling podresources List in a tight loop", func(ctx context.Context) {
 			// ensure APIs have been called at least once
 			endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
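
Note on the pattern applied above: swapping ginkgo.It/ginkgo.Context for the e2e framework wrappers framework.It/framework.Context and passing framework.WithFlaky() tags the affected specs as flaky, so CI jobs that filter out flaky specs by default typically stop running them until they are deflaked. The sketch below is a minimal, standalone illustration of that labeling pattern only, not part of the patch; the package, suite, and spec names are hypothetical.

// Minimal sketch of the framework.WithFlaky() labeling pattern used in the
// patch above. Illustrative only: package, suite, and spec names are made up.
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.Describe("Example suite", framework.WithSerial(), func() {
	// Labeling the container makes every spec inside it inherit the flaky label.
	framework.Context("when the scenario is known to be flaky", framework.WithFlaky(), func() {
		ginkgo.It("eventually reaches the expected state", func(ctx context.Context) {
			ginkgo.By("exercising the flaky scenario")
			// ... assertions would go here ...
		})
	})

	// A single spec can also be labeled directly.
	framework.It("handles an individually flaky case", framework.WithFlaky(), func(ctx context.Context) {
		// ... assertions would go here ...
	})
})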