mark flaky tests as flaky and move them to a different job

Kevin Hannon 2024-02-19 11:26:33 -05:00
parent 9791f0d1f3
commit 43e0bd4304
4 changed files with 5 additions and 5 deletions
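
Every hunk below makes the same mechanical change: a spec declared with ginkgo.It or ginkgo.Context is switched to the framework.It / framework.Context wrapper and given the framework.WithFlaky() decorator, which labels the spec as Flaky so a separate CI job can run it while the regular jobs skip it. A minimal sketch of the pattern, assuming the k8s.io/kubernetes/test/e2e/framework wrappers used in this diff (the package, suite name, and spec bodies are hypothetical; only the decorator usage mirrors the real change):

// Sketch of the decorator pattern applied in this commit. The suite name and
// spec bodies are hypothetical; only framework.It / framework.Context /
// framework.WithFlaky() mirror the real diff.
package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("example suite", func() {
	// Before: a plain Ginkgo spec, picked up by the regular job.
	ginkgo.It("runs in the default job", func(ctx context.Context) {
		// ... test body ...
	})

	// After: the framework wrapper accepts decorators. WithFlaky() marks the
	// spec as Flaky so CI jobs can include or exclude it when selecting specs.
	framework.It("runs in the flaky job", framework.WithFlaky(), func(ctx context.Context) {
		// ... test body ...
	})

	// Containers work the same way: specs nested under a flaky Context are
	// treated as flaky as well.
	framework.Context("a group of flaky specs", framework.WithFlaky(), func() {
		ginkgo.It("is selected together with its container", func(ctx context.Context) {
			// ... test body ...
		})
	})
})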


@@ -401,7 +401,7 @@ var _ = SIGDescribe("Device Manager", framework.WithSerial(), feature.DeviceMana
 Should(HaveAllocatableDevices())
 })
-ginkgo.It("should deploy pod consuming devices first but fail with admission error after kubelet restart in case device plugin hasn't re-registered", func(ctx context.Context) {
+framework.It("should deploy pod consuming devices first but fail with admission error after kubelet restart in case device plugin hasn't re-registered", framework.WithFlaky(), func(ctx context.Context) {
 var err error
 podCMD := "while true; do sleep 1000; done;"


@@ -844,7 +844,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
 // simulate node reboot scenario by removing pods using CRI before kubelet is started. In addition to that,
 // intentionally a scenario is created where after node reboot, application pods requesting devices appear before the device plugin pod
 // exposing those devices as resource has restarted. The expected behavior is that the application pod fails at admission time.
-ginkgo.It("Keeps device plugin assignments across node reboots (no pod restart, no device plugin re-registration)", func(ctx context.Context) {
+framework.It("Keeps device plugin assignments across node reboots (no pod restart, no device plugin re-registration)", framework.WithFlaky(), func(ctx context.Context) {
 podRECMD := fmt.Sprintf("devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s", sleepIntervalForever)
 pod1 := e2epod.NewPodClient(f).CreateSync(ctx, makeBusyboxPod(SampleDeviceResourceName, podRECMD))
 deviceIDRE := "stub devices: (Dev-[0-9]+)"


@@ -376,7 +376,7 @@ var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.
 })
 })
-ginkgo.Context("when gracefully shutting down with Pod priority", func() {
+framework.Context("when gracefully shutting down with Pod priority", framework.WithFlaky(), func() {
 const (
 pollInterval = 1 * time.Second


@@ -953,7 +953,7 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource
 })
 })
-ginkgo.Context("without SRIOV devices in the system", func() {
+framework.Context("without SRIOV devices in the system", framework.WithFlaky(), func() {
 ginkgo.BeforeEach(func() {
 requireLackOfSRIOVDevices()
 })
@@ -1224,7 +1224,7 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource
 })
 })
-ginkgo.Context("with the builtin rate limit values", func() {
+framework.Context("with the builtin rate limit values", framework.WithFlaky(), func() {
 ginkgo.It("should hit throttling when calling podresources List in a tight loop", func(ctx context.Context) {
 // ensure APIs have been called at least once
 endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
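
With the decorator in place the affected specs carry a Flaky label (and, in the Kubernetes e2e framework, a [Flaky] tag in the spec text), so "moving them to a different job" is a matter of how each job filters specs: a dedicated flaky job selects that label while the regular serial jobs exclude it, for example through Ginkgo's label filtering (--label-filter='Flaky' versus --label-filter='!Flaky'). The exact job configuration is not part of this diff, and the flag spelling here is illustrative.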