Fix volume attach limit flake

It looks like the node does become unschedulable for the pod,
but the Unschedulable condition does not get added to the pod in time.

Also, Ginkgo may retry the test, so it helps to use a unique
node label for scheduling.
commit 9d78c96e56
parent 18cc11566f
Author: Hemant Kumar
Date:   2019-03-07 12:35:52 -05:00

@@ -49,7 +49,7 @@ type cleanupFuncs func()
 
 const (
 	csiNodeLimitUpdateTimeout  = 5 * time.Minute
-	csiPodUnschedulableTimeout = 2 * time.Minute
+	csiPodUnschedulableTimeout = 5 * time.Minute
 )
 
 var _ = utils.SIGDescribe("CSI mock volume", func() {
@@ -310,7 +310,8 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		// define volume limit to be 2 for this test
 		var err error
-		init(testParameters{nodeSelectorKey: "node-attach-limit-csi", attachLimit: 2})
+		nodeSelectorKey := fmt.Sprintf("attach-limit-csi-%s", f.Namespace.Name)
+		init(testParameters{nodeSelectorKey: nodeSelectorKey, attachLimit: 2})
 		defer cleanup()
 		nodeName := m.config.ClientNodeName
 		attachKey := v1.ResourceName(volumeutil.GetCSIAttachLimitKey(m.provisioner))
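
The first hunk raises csiPodUnschedulableTimeout from 2 to 5 minutes because
the scheduler can take a while to stamp the Unschedulable condition onto the
pod. Below is a minimal sketch of the kind of wait this timeout bounds; it is
not the test's actual helper. It assumes a 2019-era client-go (pod Get without
a context argument), and the function name waitForPodUnschedulable and the
10-second poll interval are made up for illustration.

package sketch

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodUnschedulable (hypothetical helper) polls the pod until the
// scheduler has marked it PodScheduled=False with reason Unschedulable,
// or the timeout elapses. In the test, the timeout would be
// csiPodUnschedulableTimeout.
func waitForPodUnschedulable(cs kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, cond := range pod.Status.Conditions {
			// The condition the commit message refers to: it exists on the
			// pod only after the scheduler has tried and failed to place it.
			if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == v1.PodReasonUnschedulable {
				return true, nil
			}
		}
		return false, nil
	})
}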
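
The second hunk derives the node-selector label key from the test namespace
instead of the fixed "node-attach-limit-csi" string. A standalone sketch of
why that helps across Ginkgo retries follows; the nodeSelectorKey helper and
the namespace values are made up for illustration.

package main

import "fmt"

// nodeSelectorKey mirrors the change above: the label key embeds the test
// namespace, so it is unique per test invocation.
func nodeSelectorKey(namespace string) string {
	return fmt.Sprintf("attach-limit-csi-%s", namespace)
}

func main() {
	// Each Ginkgo retry runs in a fresh namespace, so every attempt derives
	// a distinct key; a node labeled by an earlier attempt can no longer
	// satisfy the retried pod's node selector.
	fmt.Println(nodeSelectorKey("e2e-csi-mock-volumes-1234")) // attach-limit-csi-e2e-csi-mock-volumes-1234
	fmt.Println(nodeSelectorKey("e2e-csi-mock-volumes-5678")) // attach-limit-csi-e2e-csi-mock-volumes-5678
}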