mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #101280 from huffmanca/address-snapshotting-flakes

Force NodeUnstageVolume to finish for all distros

Commit: 82d8d08c78
@@ -164,22 +164,23 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
 // - a pod was created with a PV that's supposed to have data
 //
 // However there's a caching issue that @jinxu97 explained and it's related with the pod & volume
-// lifecycle in windows, to understand it we first analyze what the volumemanager does:
+// lifecycle, to understand it we first analyze what the volumemanager does:
 // - when a pod is delete the volumemanager will try to cleanup the volume mounts
 //   - NodeUnpublishVolume: unbinds the bind mount from the container
-//     - Linux: the data is flushed to disk
+//     - Linux: the bind mount is removed, which does not flush any cache
 //     - Windows: we delete a symlink, data's not flushed yet to disk
 //   - NodeUnstageVolume: unmount the global mount
-//     - Linux: disk is detached
+//     - Linux: disk is unmounted and all caches flushed.
 //     - Windows: data is flushed to disk and the disk is detached
 //
 // Pod deletion might not guarantee a data flush to disk, however NodeUnstageVolume adds the logic
-// to flush the data to disk (see #81690 for details).
+// to flush the data to disk (see #81690 for details). We need to wait for NodeUnstageVolume, as
+// NodeUnpublishVolume only removes the bind mount, which doesn't force the caches to flush.
+// It's possible to create empty snapshots if we don't wait (see #101279 for details).
 //
 // In the following code by checking if the PV is not in the node.Status.VolumesInUse field we
 // ensure that the volume is not used by the node anymore (an indicator that NodeUnstageVolume has
 // already finished)
-if framework.NodeOSDistroIs("windows") {
 nodeName := pod.Spec.NodeName
 gomega.Expect(nodeName).NotTo(gomega.BeEmpty(), "pod.Spec.NodeName must not be empty")
 
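An aside on the Linux half of the comment in the hunk above: detaching a bind mount (the NodeUnpublishVolume step) only drops a mount reference and leaves dirty pages in the page cache, while unmounting the filesystem's last mount point (the NodeUnstageVolume step) makes the kernel write everything back. This is not code from the PR, just a minimal Linux sketch of that distinction using golang.org/x/sys/unix; the device and mount paths are hypothetical.

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	staging := "/mnt/staging" // hypothetical global (staged) mount point
	publish := "/mnt/publish" // hypothetical per-pod publish path

	// NodeStageVolume analogue: mount the real filesystem once per node.
	if err := unix.Mount("/dev/sdx", staging, "ext4", 0, ""); err != nil {
		log.Fatalf("stage: %v", err)
	}
	// NodePublishVolume analogue: expose it to the pod via a bind mount.
	if err := unix.Mount(staging, publish, "", unix.MS_BIND, ""); err != nil {
		log.Fatalf("publish: %v", err)
	}

	// NodeUnpublishVolume analogue: removing the bind mount drops a mount
	// reference only; dirty pages are NOT written back to disk here.
	if err := unix.Unmount(publish, 0); err != nil {
		log.Fatalf("unpublish: %v", err)
	}

	// NodeUnstageVolume analogue: unmounting the last mount of the
	// filesystem forces the kernel to flush all cached data to disk.
	if err := unix.Unmount(staging, 0); err != nil {
		log.Fatalf("unstage: %v", err)
	}
}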
@@ -198,7 +199,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
 })
 framework.ExpectEqual(success, true)
 }
-}
 
 cleanup := func() {
 // Don't register an AfterEach then a cleanup step because the order
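The success check in the second hunk is the tail of a polling loop: the test waits until the PV's volume name disappears from node.Status.VolumesInUse, which the kubelet only clears after NodeUnstageVolume has completed. The sketch below shows the same technique with plain client-go; it is not the PR's helper, and waitForVolumeUnstaged, the clientset wiring, and the poll intervals are assumptions for the example.

package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForVolumeUnstaged is a hypothetical helper (not the PR's code). It
// polls the node object until volumeName no longer appears in
// node.Status.VolumesInUse, i.e. NodeUnstageVolume has finished and any
// cached data has been flushed to the backing disk.
func waitForVolumeUnstaged(cs kubernetes.Interface, nodeName string, volumeName v1.UniqueVolumeName) error {
	return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, inUse := range node.Status.VolumesInUse {
			if inUse == volumeName {
				return false, nil // still staged on the node; keep polling
			}
		}
		return true, nil // gone from VolumesInUse: safe to snapshot
	})
}

Only after this wait returns is it safe to cut a snapshot, which matches the shape of the success polling closure visible in the hunks above.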