Merge pull request #113309 from swatisehgal/devicemgr-e2e-remove-flakiness

node: e2e: device plugins: Deflake e2e tests
Kubernetes Prow Robot 2022-12-14 10:47:34 -08:00 committed by GitHub
commit 7403090e40
2 changed files with 28 additions and 8 deletions


@@ -50,7 +50,8 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func()
 initialConfig.ContainerLogMaxSize = testContainerLogMaxSize
 })
-ginkgo.It("should be rotated and limited to a fixed amount of files", func(ctx context.Context) {
+var logRotationPod *v1.Pod
+ginkgo.BeforeEach(func() {
 ginkgo.By("create log container")
 pod := &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
@@ -72,12 +73,17 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func()
 },
 },
 }
-pod = e2epod.NewPodClient(f).CreateSync(pod)
+logRotationPod = e2epod.NewPodClient(f).CreateSync(pod)
+ginkgo.DeferCleanup(e2epod.NewPodClient(f).DeleteSync, logRotationPod.Name, metav1.DeleteOptions{}, time.Minute)
+})
+ginkgo.It("should be rotated and limited to a fixed amount of files", func(ctx context.Context) {
 ginkgo.By("get container log path")
-framework.ExpectEqual(len(pod.Status.ContainerStatuses), 1)
-id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
+framework.ExpectEqual(len(logRotationPod.Status.ContainerStatuses), 1, "log rotation pod should have one container")
+id := kubecontainer.ParseContainerID(logRotationPod.Status.ContainerStatuses[0].ContainerID).ID
 r, _, err := getCRIClient()
-framework.ExpectNoError(err)
+framework.ExpectNoError(err, "should connect to CRI and obtain runtime service clients and image service client")
 resp, err := r.ContainerStatus(context.Background(), id, false)
 framework.ExpectNoError(err)
 logPath := resp.GetStatus().GetLogPath()
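The restructuring above is the core of the deflake for this file: pod creation moves out of the It block into a ginkgo.BeforeEach paired with ginkgo.DeferCleanup, so every spec gets a fresh logRotationPod and tears it down even when the spec fails. A minimal, self-contained sketch of that pattern using only Ginkgo v2 and Gomega (logTarget, TestPattern, and the spec names are illustrative, not code from this PR):

package deflake_test

import (
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestPattern(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "per-spec setup pattern")
}

var _ = ginkgo.Describe("log rotation (illustrative)", func() {
	// Shared state lives at Describe scope, like `var logRotationPod *v1.Pod` above.
	var logTarget string

	ginkgo.BeforeEach(func() {
		// Create the resource fresh for every spec
		// (stands in for e2epod.NewPodClient(f).CreateSync(pod)).
		logTarget = "created-for-this-spec"

		// Register cleanup at creation time so it runs even if the spec
		// fails, mirroring the DeferCleanup(DeleteSync, ...) call above.
		ginkgo.DeferCleanup(func() { logTarget = "" })
	})

	ginkgo.It("should see a freshly created resource", func() {
		gomega.Expect(logTarget).ToNot(gomega.BeEmpty())
	})
})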


@@ -94,7 +94,9 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 // and then use the same here
 devsLen := int64(2)
 var devicePluginPod, dptemplate *v1.Pod
+var v1alphaPodResources *kubeletpodresourcesv1alpha1.ListPodResourcesResponse
+var v1PodResources *kubeletpodresourcesv1.ListPodResourcesResponse
+var err error
 ginkgo.BeforeEach(func() {
 ginkgo.By("Wait for node to be ready")
 gomega.Eventually(func() bool {
@@ -103,6 +105,18 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 return nodes == 1
 }, time.Minute, time.Second).Should(gomega.BeTrue())
+v1alphaPodResources, err = getV1alpha1NodeDevices()
+framework.ExpectNoError(err, "should get node local podresources by accessing the (v1alpha) podresources API endpoint")
+v1PodResources, err = getV1NodeDevices()
+framework.ExpectNoError(err, "should get node local podresources by accessing the (v1) podresources API endpoint")
+// Before we run the device plugin test, we need to ensure
+// that the cluster is in a clean state and there are no
+// pods running on this node.
+gomega.Expect(v1alphaPodResources.PodResources).To(gomega.BeEmpty(), "should have no pod resources")
+gomega.Expect(v1PodResources.PodResources).To(gomega.BeEmpty(), "should have no pod resources")
 ginkgo.By("Scheduling a sample device plugin pod")
 data, err := e2etestfiles.Read(SampleDevicePluginDSYAML)
 if err != nil {
@@ -175,10 +189,10 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 devID1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
 gomega.Expect(devID1).To(gomega.Not(gomega.Equal("")))
-v1alphaPodResources, err := getV1alpha1NodeDevices()
+v1alphaPodResources, err = getV1alpha1NodeDevices()
 framework.ExpectNoError(err)
-v1PodResources, err := getV1NodeDevices()
+v1PodResources, err = getV1NodeDevices()
 framework.ExpectNoError(err)
 framework.Logf("v1alphaPodResources.PodResources:%+v\n", v1alphaPodResources.PodResources)
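Two details in this second file carry the deflake for the device plugin tests: the podresources responses are now fetched in BeforeEach and asserted empty before each spec runs, and the later fetches switch from := to = so they update the v1alphaPodResources/v1PodResources variables declared at the top of testDevicePlugin instead of shadowing them. A small stand-alone sketch of the shadowing pitfall the := change avoids (fetch, refresh, and resources are hypothetical stand-ins for getV1NodeDevices and the podresources response):

package main

import "fmt"

// fetch is a hypothetical stand-in for getV1NodeDevices().
func fetch() ([]string, error) {
	return []string{"dev-0", "dev-1"}, nil
}

func main() {
	// Outer declarations, analogous to the `var v1PodResources ...` and
	// `var err error` added at the top of testDevicePlugin.
	var resources []string
	var err error

	refresh := func() {
		// Using `:=` here would declare new locals that shadow the outer
		// variables, leaving `resources`/`err` unchanged outside the closure.
		resources, err = fetch()
	}
	refresh()

	fmt.Println(resources, err) // [dev-0 dev-1] <nil>
}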