Mirror of https://github.com/k3s-io/kubernetes.git
e2e/storage: disable caching when writing/reading to block PVCs
By passing "oflag=nocache" and "iflag=direct", caching is disabled while writing/reading with "dd" to a block device. The TestConcurrentAccessToSingleVolume() test is known to fail with certain storage backends (such as Ceph RBD) when caching is enabled. The default BusyBox image used for testing does not support the required "dd" options, so the test now runs with a Debian image instead of BusyBox.
parent 00b6d7ccee
commit f2bf2ab76e
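
For illustration only, a minimal Go sketch of how the "dd" pipelines change when direct I/O is enabled. buildReadCmd and buildWriteCmd are hypothetical helpers that mirror the flag handling introduced in this commit; the device paths in main are placeholders.

package main

import "fmt"

// buildReadCmd mirrors how the read check assembles its dd pipeline: with
// direct I/O the read bypasses the page cache via iflag=direct, which requires
// bs to cover whole sectors (512 bytes) when reading a Block-mode PVC.
func buildReadCmd(path string, directIO bool, byteLen int) string {
	iflag := ""
	if directIO {
		iflag = "iflag=direct"
	}
	return fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", path, iflag, byteLen)
}

// buildWriteCmd mirrors the write check: oflag=nocache asks dd to drop the
// written data from the cache once the write completes.
func buildWriteCmd(encoded, path string, nocache bool, byteLen int) string {
	oflag := ""
	if nocache {
		oflag = "oflag=nocache"
	}
	return fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, path, oflag, byteLen)
}

func main() {
	// Block-mode PVC: read one whole 512-byte sector with direct I/O.
	fmt.Println(buildReadCmd("/dev/example-block-dev", true, 512))
	// Filesystem-mode PVC: plain cached 64-byte read, as before.
	fmt.Println(buildReadCmd("/mnt/volume1/file1.txt", false, 64))
	// Block-mode write that bypasses/drops the cache.
	fmt.Println(buildWriteCmd("<base64-data>", "/dev/example-block-dev", true, 512))
}
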
@@ -35,6 +35,7 @@ import (
 	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
 type multiVolumeTestSuite struct {
@@ -423,14 +424,14 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
 
 		if readSeedBase > 0 {
 			ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
-			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, readSeedBase+int64(i))
+			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, readSeedBase+int64(i))
 		}
 
 		ginkgo.By(fmt.Sprintf("Checking if write to the volume%d works properly", index))
-		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
+		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i))
 
 		ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
-		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, writeSeedBase+int64(i))
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, false, path, byteLen, writeSeedBase+int64(i))
 	}
 
 	pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -481,6 +482,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node))
 		podConfig := e2epod.Config{
 			NS:            ns,
+			ImageID:       imageutils.DebianIptables,
 			PVCs:          []*v1.PersistentVolumeClaim{pvc},
 			SeLinuxLabel:  e2epv.SELinuxLabel,
 			NodeSelection: node,
@@ -506,6 +508,14 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 
 	var seed int64
 	byteLen := 64
+	directIO := false
+	// direct IO is needed for Block-mode PVs
+	if *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
+		// byteLen should be the size of a sector to enable direct I/O
+		byteLen = 512
+		directIO = true
+	}
+
 	path := "/mnt/volume1"
 	// Check if volume can be accessed from each pod
 	for i, pod := range pods {
@@ -521,17 +531,17 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		if i != 0 {
 			ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
 			// For 1st pod, no one has written data yet, so pass the read check
-			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+			utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
 		}
 
 		// Update the seed and check if write/read works properly
 		seed = time.Now().UTC().UnixNano()
 
 		ginkgo.By(fmt.Sprintf("Checking if write to the volume in pod%d works properly", index))
-		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
 
 		ginkgo.By(fmt.Sprintf("Checking if read from the volume in pod%d works properly", index))
-		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
 	}
 
 	// Delete the last pod and remove from slice of pods
@@ -560,16 +570,16 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		} else {
 			ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that pod%d write works properly", index, index-1))
 		}
-		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
 
 		// Update the seed and check if write/read works properly
 		seed = time.Now().UTC().UnixNano()
 
 		ginkgo.By(fmt.Sprintf("Rechecking if write to the volume in pod%d works properly", index))
-		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckWriteToPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
 
 		ginkgo.By(fmt.Sprintf("Rechecking if read from the volume in pod%d works properly", index))
-		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
+		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, directIO, path, byteLen, seed)
 	}
 }
 
@@ -241,13 +241,13 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
 	seed := time.Now().UTC().UnixNano()
 
 	ginkgo.By("Writing to the volume.")
-	CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)
+	CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
 
 	ginkgo.By("Restarting kubelet")
 	KubeletCommand(KRestart, c, clientPod)
 
 	ginkgo.By("Testing that written file is accessible.")
-	CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, path, byteLen, seed)
+	CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
 
 	framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, path)
 }
@@ -259,13 +259,13 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
 	seed := time.Now().UTC().UnixNano()
 
 	ginkgo.By("Writing to the volume.")
-	CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)
+	CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
 
 	ginkgo.By("Restarting kubelet")
 	KubeletCommand(KRestart, c, clientPod)
 
 	ginkgo.By("Testing that written pv is accessible.")
-	CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, path, byteLen, seed)
+	CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, path, byteLen, seed)
 
 	framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, path)
 }
@@ -656,33 +656,54 @@ func genBinDataFromSeed(len int, seed int64) []byte {
 }
 
 // CheckReadFromPath validate that file can be properly read.
-func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
+//
+// Note: directIO does not work with (default) BusyBox Pods. A requirement for
+// directIO to function correctly, is to read whole sector(s) for Block-mode
+// PVCs (normally a sector is 512 bytes), or memory pages for files (commonly
+// 4096 bytes).
+func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, directIO bool, path string, len int, seed int64) {
 	var pathForVolMode string
+	var iflag string
+
 	if volMode == v1.PersistentVolumeBlock {
 		pathForVolMode = path
 	} else {
 		pathForVolMode = filepath.Join(path, "file1.txt")
 	}
 
+	if directIO {
+		iflag = "iflag=direct"
+	}
+
 	sum := sha256.Sum256(genBinDataFromSeed(len, seed))
 
-	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum", pathForVolMode, len))
-	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, len, sum))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
 }
 
 // CheckWriteToPath that file can be properly written.
-func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string, len int, seed int64) {
+//
+// Note: nocache does not work with (default) BusyBox Pods. To read without
+// caching, enable directIO with CheckReadFromPath and check the hints about
+// the len requirements.
+func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, nocache bool, path string, len int, seed int64) {
 	var pathForVolMode string
+	var oflag string
+
 	if volMode == v1.PersistentVolumeBlock {
 		pathForVolMode = path
 	} else {
 		pathForVolMode = filepath.Join(path, "file1.txt")
 	}
 
+	if nocache {
+		oflag = "oflag=nocache"
+	}
+
 	encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))
 
 	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
-	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", encoded, pathForVolMode, len))
+	VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
 }
 
 // findMountPoints returns all mount points on given node under specified directory.
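
As a usage sketch of the new signatures (the pod, framework handle, and path below are placeholder values, not taken from the commit), a Block-mode caller now passes the extra boolean and a sector-sized length:

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// writeThenReadBlockPVC exercises the updated helpers against a Block-mode PVC:
// the fourth argument now toggles oflag=nocache on the write and iflag=direct on
// the read, and byteLen must be a whole sector (512 bytes) for direct I/O.
func writeThenReadBlockPVC(f *framework.Framework, pod *v1.Pod) {
	const path = "/mnt/volume1" // placeholder device path inside the pod
	byteLen := 512
	seed := time.Now().UTC().UnixNano()

	utils.CheckWriteToPath(f, pod, v1.PersistentVolumeBlock, true, path, byteLen, seed)
	utils.CheckReadFromPath(f, pod, v1.PersistentVolumeBlock, true, path, byteLen, seed)
}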