mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-08-09 20:17:41 +00:00
node: e2e: Remove flaky label as device plugin reboot test is deflaked
With the device plugin node reboot test fixed, we can see in testgrid [node-kubelet-containerd-flaky](https://testgrid.k8s.io/sig-node-containerd#node-kubelet-containerd-flaky) that the test is passing consistently and we can remove the flaky label. With the test no longer flaky, we can validate new PRs against it and ensure we don't cause regressions. Signed-off-by: Swati Sehgal <swsehgal@redhat.com>
This commit is contained in:
parent
8294abc599
commit
82f0303f89
@ -932,7 +932,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
|
||||
// simulate node reboot scenario by removing pods using CRI before kubelet is started. In addition to that,
|
||||
// intentionally a scenario is created where after node reboot, application pods requesting devices appear before the device plugin pod
|
||||
// exposing those devices as resource has restarted. The expected behavior is that the application pod fails at admission time.
|
||||
framework.It("Does not keep device plugin assignments across node reboots if fails admission (no pod restart, no device plugin re-registration)", framework.WithFlaky(), func(ctx context.Context) {
|
||||
framework.It("Does not keep device plugin assignments across node reboots if fails admission (no pod restart, no device plugin re-registration)", func(ctx context.Context) {
|
||||
podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalForever)
|
||||
pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(SampleDeviceResourceName, podRECMD))
|
||||
deviceIDRE := "stub devices: (Dev-[0-9]+)"
|
||||
|
Loading…
Reference in New Issue
Block a user