From 9d78c96e567b2a62114c76df4c0b342ccbf17ec2 Mon Sep 17 00:00:00 2001
From: Hemant Kumar
Date: Thu, 7 Mar 2019 12:35:52 -0500
Subject: [PATCH] Fix volume attach limit flake

It looks like the node does become unschedulable for the pod, but the
condition does not get added to the pod in time. Also, Ginkgo could retry
the test, so it helps to use a unique node label for scheduling.
---
 test/e2e/storage/csi_mock_volume.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go
index 16993fa1510..3331db86043 100644
--- a/test/e2e/storage/csi_mock_volume.go
+++ b/test/e2e/storage/csi_mock_volume.go
@@ -49,7 +49,7 @@ type cleanupFuncs func()
 
 const (
 	csiNodeLimitUpdateTimeout  = 5 * time.Minute
-	csiPodUnschedulableTimeout = 2 * time.Minute
+	csiPodUnschedulableTimeout = 5 * time.Minute
 )
 
 var _ = utils.SIGDescribe("CSI mock volume", func() {
@@ -310,7 +310,8 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 
 			// define volume limit to be 2 for this test
 			var err error
-			init(testParameters{nodeSelectorKey: "node-attach-limit-csi", attachLimit: 2})
+			nodeSelectorKey := fmt.Sprintf("attach-limit-csi-%s", f.Namespace.Name)
+			init(testParameters{nodeSelectorKey: nodeSelectorKey, attachLimit: 2})
 			defer cleanup()
 			nodeName := m.config.ClientNodeName
 			attachKey := v1.ResourceName(volumeutil.GetCSIAttachLimitKey(m.provisioner))
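
The sketch below is not part of the patch; it restates the two ideas the fix relies on in plain Go, assuming only the k8s.io/api/core/v1 types. It derives the node selector key from the test namespace, so a Ginkgo retry (which runs in a fresh namespace) never reuses a label left on the node by a previous attempt, and it checks for the PodScheduled=False/Unschedulable condition that the longer csiPodUnschedulableTimeout now waits for. The names uniqueNodeSelectorKey and podIsUnschedulable are illustrative, not helpers from the e2e framework.

    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    )

    // uniqueNodeSelectorKey mirrors the patch: building the selector key from the
    // test namespace name gives every test attempt its own node label.
    func uniqueNodeSelectorKey(namespace string) string {
    	return fmt.Sprintf("attach-limit-csi-%s", namespace)
    }

    // podIsUnschedulable reports whether the scheduler has already recorded the
    // PodScheduled=False condition with reason Unschedulable on the pod. The patch
    // raises the wait for this condition from 2 to 5 minutes because the condition
    // can lag behind the pod actually becoming unschedulable.
    func podIsUnschedulable(pod *v1.Pod) bool {
    	for _, c := range pod.Status.Conditions {
    		if c.Type == v1.PodScheduled &&
    			c.Status == v1.ConditionFalse &&
    			c.Reason == v1.PodReasonUnschedulable {
    			return true
    		}
    	}
    	return false
    }

    func main() {
    	// Hypothetical namespace name, shown only to illustrate the derived key.
    	fmt.Println(uniqueNodeSelectorKey("e2e-tests-csi-mock-volumes-1234"))
    }

In the test itself, a poll loop would call a check like podIsUnschedulable against the latest pod object until it returns true or csiPodUnschedulableTimeout expires.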