Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-10 12:32:03 +00:00
verifyVolumeNoStatusUpdateNeeded may cause flakes, so only keep the last ones
This commit is contained in:
parent c14068c202
commit 8e36e948ce
@@ -742,39 +742,24 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
	// Right before detach operation is performed, the volume will be first removed from being reported
	// as attached on node status (RemoveVolumeFromReportAsAttached). After detach operation which is expected to fail,
	// controller then added the volume back as attached.
	// Here verifyVolumeReportedAsAttachedToNode will wait until the detach is triggered.
	// Here it sleeps 100ms so that detach should be triggered already at this point.
	// verifyVolumeReportedAsAttachedToNode will check volume is in the list of volume attached that needs to be updated
	// in node status. By calling this function (GetVolumesToReportAttached), node status should be updated, and the volume
	// will not need to be updated until new changes are applied (detach is triggered again)
	time.Sleep(100 * time.Millisecond)
	verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
	verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw, volumeAttachedCheckTimeout)

	// The below verifies check intermediate state and is flaky but useful
	//
	// // After the first detach fails, reconciler will wait for a period of time before retrying to detach.
	// // The wait time is increasing exponentially from initial value of 0.5s (0.5, 1, 2, 4, ...).
	// // The test here waits for 100 Millisecond to make sure it is in exponential backoff period after
	// // the first detach operation. At this point, volumes status should not be updated
	// time.Sleep(100 * time.Millisecond)
	// verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
	// verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw)

	// // Wait for 600ms to make sure second detach operation triggered. Again, The volume will be
	// // removed from being reported as attached on node status and then added back as attached.
	// // The volume will be in the list of attached volumes that need to be updated to node status.
	// time.Sleep(600 * time.Millisecond)
	// verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
	// verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw, volumeAttachedCheckTimeout)

	// Add a second pod which tries to attach the volume to the same node.
	// After adding pod to the same node, detach will not be triggered any more.
	generatedVolumeName, podAddErr = dsw.AddPod(types.UniquePodName(podName2), controllervolumetesting.NewPod(podName2, podName2), volumeSpec, nodeName1)
	if podAddErr != nil {
		t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
	}
	// verify no detach are triggered after second pod is added in the future.
	// Sleep 1s to verify no detach are triggered after second pod is added in the future.
	time.Sleep(1000 * time.Millisecond)
	verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
	verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw)
	// verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw)

	// Add a third pod which tries to attach the volume to a different node.
	// At this point, volume is still attached to first node. There are no status update for both nodes.
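
For context, a minimal standalone sketch of the pattern the surviving checks rely on: polling a condition up to a timeout (as the timeout-taking verify helpers in the hunk above appear to do) rather than asserting intermediate state after a fixed sleep. The helper and condition names below are hypothetical and not the test's actual code; the sketch only illustrates why the sleep-then-assert checks were the flaky ones.

// Hypothetical sketch: fixed-sleep assertion vs. poll-until-timeout.
package main

import (
	"fmt"
	"time"
)

// waitForCondition polls cond every interval until it returns true or the
// timeout elapses, and reports whether the condition was met in time.
func waitForCondition(cond func() bool, interval, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if cond() {
			return true
		}
		time.Sleep(interval)
	}
	return cond()
}

func main() {
	// Simulate an asynchronous reconciler that reaches the expected state
	// after an unpredictable delay.
	done := make(chan struct{})
	go func() {
		time.Sleep(250 * time.Millisecond)
		close(done)
	}()
	reported := func() bool {
		select {
		case <-done:
			return true
		default:
			return false
		}
	}

	// Flaky pattern: a fixed 100ms sleep may or may not be long enough.
	time.Sleep(100 * time.Millisecond)
	fmt.Println("after fixed sleep:", reported())

	// Robust pattern: poll up to a timeout instead of guessing a delay.
	fmt.Println("after polling:", waitForCondition(reported, 10*time.Millisecond, time.Second))
}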
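
The removed comments describe the reconciler's detach retry backoff as doubling from an initial 0.5s (0.5, 1, 2, 4, ...). A small sketch of that schedule, assuming only what those comments state, shows why a fixed 100ms or 600ms sleep only probabilistically lands inside a particular backoff window:

// Sketch of the doubling retry schedule described in the removed comments.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond // initial backoff per the comments above
	elapsed := time.Duration(0)
	for i := 1; i <= 5; i++ {
		elapsed += delay
		fmt.Printf("retry %d fires %v after the previous attempt (total %v since first failure)\n", i, delay, elapsed)
		delay *= 2 // exponential growth: 0.5s, 1s, 2s, 4s, ...
	}
}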