Added local storage e2e test for VolumeMode: block

VolumeMode: block requires the alpha feature gate BlockVolume=true.
Added VolumeMode to the e2e test framework for PVs and PVCs.
Added volumeDevices to the e2e test framework for block volumes.
commit 4812f9e5d8 (parent 62c5f21d5d)
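Note: raw-block volume support was alpha at the time of this commit, so these tests only run against clusters whose components enable the gate. The flag below is the standard feature-gate syntax of that era, shown for illustration only:

    --feature-gates=BlockVolume=true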
@@ -61,7 +61,7 @@ func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
 	cmd := exec.Command(losetupPath, args...)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
-		glog.V(2).Infof("Failed device discover command for path %s: %v", path, err)
+		glog.V(2).Infof("Failed device discover command for path %s: %v %s", path, err, out)
 		return "", err
 	}
 	return parseLosetupOutputForDevice(out)
@@ -72,7 +72,7 @@ func makeLoopDevice(path string) (string, error) {
 	cmd := exec.Command(losetupPath, args...)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
-		glog.V(2).Infof("Failed device create command for path %s: %v", path, err)
+		glog.V(2).Infof("Failed device create command for path: %s %v %s ", path, err, out)
 		return "", err
 	}
 	return parseLosetupOutputForDevice(out)
@@ -81,6 +81,7 @@ type PersistentVolumeConfig struct {
 	Labels           labels.Set
 	StorageClassName string
 	NodeAffinity     *v1.VolumeNodeAffinity
+	VolumeMode       *v1.PersistentVolumeMode
 }

 // PersistentVolumeClaimConfig is consumed by MakePersistentVolumeClaim() to generate a PVC object.
@@ -92,6 +93,7 @@ type PersistentVolumeClaimConfig struct {
 	Annotations      map[string]string
 	Selector         *metav1.LabelSelector
 	StorageClassName *string
+	VolumeMode       *v1.PersistentVolumeMode
 }

 // Clean up a pv and pvc in a single pv/pvc test case.
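With the two VolumeMode fields above in place, a test can ask the existing framework helpers for a raw-block claim. A minimal sketch (not part of this diff; scName and ns are assumed inputs, and imports match the framework package):

    // Sketch: build a raw-block PVC using the new VolumeMode field.
    func makeBlockPVC(scName, ns string) *v1.PersistentVolumeClaim {
    	blockMode := v1.PersistentVolumeBlock
    	cfg := framework.PersistentVolumeClaimConfig{
    		AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
    		StorageClassName: &scName,
    		VolumeMode:       &blockMode, // leaving this nil keeps the default Filesystem mode
    	}
    	return framework.MakePersistentVolumeClaim(cfg, ns)
    }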
@@ -581,7 +583,7 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume {
 			Namespace: pvConfig.Prebind.Namespace,
 		}
 	}
-	pv := &v1.PersistentVolume{
+	return &v1.PersistentVolume{
 		ObjectMeta: metav1.ObjectMeta{
 			GenerateName: pvConfig.NamePrefix,
 			Labels:       pvConfig.Labels,
@@ -603,9 +605,9 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume {
 			ClaimRef:         claimRef,
 			StorageClassName: pvConfig.StorageClassName,
 			NodeAffinity:     pvConfig.NodeAffinity,
+			VolumeMode:       pvConfig.VolumeMode,
 		},
 	}
-	return pv
 }

 // Returns a PVC definition based on the namespace.
@@ -635,6 +637,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.PersistentVolumeClaim {
 			},
 		},
 		StorageClassName: cfg.StorageClassName,
+		VolumeMode:       cfg.VolumeMode,
 		},
 	}
 }
@@ -868,14 +871,21 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) *v1.Pod {
 		RestartPolicy: v1.RestartPolicyOnFailure,
 		},
 	}
-	var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
+	var volumeMounts = make([]v1.VolumeMount, 0)
+	var volumeDevices = make([]v1.VolumeDevice, 0)
 	var volumes = make([]v1.Volume, len(pvclaims))
 	for index, pvclaim := range pvclaims {
 		volumename := fmt.Sprintf("volume%v", index+1)
-		volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
+		if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
+			volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
+		} else {
+			volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
+		}
+
 		volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
 	}
 	podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
+	podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
 	podSpec.Spec.Volumes = volumes
 	podSpec.Spec.SecurityContext.SELinuxOptions = seLinuxLabel
 	return podSpec
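The branch above is the crux of block support in MakeSecPod: a claim whose VolumeMode is Block surfaces in the container as a volumeDevices entry (a raw device path) rather than a volumeMounts entry (a mounted directory). For a single block claim, the generated container spec ends up shaped roughly like this (a sketch; the container name is a hypothetical stand-in, not taken from this diff; assumes k8s.io/api/core/v1 imported as v1):

    // Sketch of the container spec MakeSecPod now builds for one block-mode claim.
    func exampleBlockContainer() v1.Container {
    	return v1.Container{
    		Name: "write-pod", // hypothetical name
    		VolumeDevices: []v1.VolumeDevice{
    			{Name: "volume1", DevicePath: "/mnt/volume1"}, // raw device node, no filesystem mount
    		},
    		// VolumeMounts stays empty here; filesystem-mode claims land there instead.
    	}
    }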
@@ -67,18 +67,22 @@ const (
 	TmpfsLocalVolumeType localVolumeType = "tmpfs"
 	// tests based on local ssd at /mnt/disks/by-uuid/
 	GCELocalSSDVolumeType localVolumeType = "gce-localssd-scsi-fs"
+	// Creates a local file, formats it, and maps it as a block device.
+	BlockLocalVolumeType localVolumeType = "block"
 )

 var setupLocalVolumeMap = map[localVolumeType]func(*localTestConfig, *v1.Node) *localTestVolume{
 	GCELocalSSDVolumeType:    setupLocalVolumeGCELocalSSD,
 	TmpfsLocalVolumeType:     setupLocalVolumeTmpfs,
 	DirectoryLocalVolumeType: setupLocalVolumeDirectory,
+	BlockLocalVolumeType:     setupLocalVolumeBlock,
 }

 var cleanupLocalVolumeMap = map[localVolumeType]func(*localTestConfig, *localTestVolume){
 	GCELocalSSDVolumeType:    cleanupLocalVolumeGCELocalSSD,
 	TmpfsLocalVolumeType:     cleanupLocalVolumeTmpfs,
 	DirectoryLocalVolumeType: cleanupLocalVolumeDirectory,
+	BlockLocalVolumeType:     cleanupLocalVolumeBlock,
 }

 type localTestVolume struct {
@@ -92,6 +96,9 @@ type localTestVolume struct {
 	pv *v1.PersistentVolume
 	// Type of local volume
 	localVolumeType localVolumeType
+	// Path to the loop block device on the host node.
+	// Used during cleanup after block tests.
+	loopDevDir string
 }

 const (
@@ -168,7 +175,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 		}
 	})

-	localVolumeTypes := []localVolumeType{DirectoryLocalVolumeType, TmpfsLocalVolumeType, GCELocalSSDVolumeType}
+	localVolumeTypes := []localVolumeType{DirectoryLocalVolumeType, TmpfsLocalVolumeType, GCELocalSSDVolumeType, BlockLocalVolumeType}
 	for _, tempTestVolType := range localVolumeTypes {

 		// New variable required for gingko test closures
@@ -177,7 +184,11 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 		if testVolType == GCELocalSSDVolumeType {
 			serialStr = " [Serial]"
 		}
-		ctxString := fmt.Sprintf("[Volume type: %s]%v", testVolType, serialStr)
+		alphaStr := ""
+		if testVolType == BlockLocalVolumeType {
+			alphaStr = " [Feature:BlockVolume]"
+		}
+		ctxString := fmt.Sprintf("[Volume type: %s]%v%v", testVolType, serialStr, alphaStr)
 		testMode := immediateMode

 		Context(ctxString, func() {
@@ -218,15 +229,15 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 			It("should be able to mount volume and read from pod1", func() {
 				By("Reading in pod1")
 				// testFileContent was written during setupLocalVolume
-				testReadFileContent(volumeDir, testFile, testFileContent, pod1)
+				testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType)
 			})

 			It("should be able to mount volume and write from pod1", func() {
 				// testFileContent was written during setupLocalVolume
-				testReadFileContent(volumeDir, testFile, testFileContent, pod1)
+				testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVolType)

 				By("Writing in pod1")
-				writeCmd, _ := createWriteAndReadCmds(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/)
+				writeCmd, _ := createWriteAndReadCmds(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVolType)
 				podRWCmdExec(pod1, writeCmd)
 			})
 		})
@@ -365,7 +376,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {

 			// Delete the persistent volume claim: file will be cleaned up and volume be re-created.
 			By("Deleting the persistent volume claim to clean up persistent volume and re-create one")
-			writeCmd, _ := createWriteAndReadCmds(volumePath, testFile, testFileContent)
+			writeCmd, _ := createWriteAndReadCmds(volumePath, testFile, testFileContent, DirectoryLocalVolumeType)
 			err = framework.IssueSSHCommand(writeCmd, framework.TestContext.Provider, config.node0)
 			Expect(err).NotTo(HaveOccurred())
 			err = config.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, &metav1.DeleteOptions{})
@@ -495,7 +506,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 		for i := 0; i < numConcurrentPods; i++ {
 			pvcs := []*v1.PersistentVolumeClaim{}
 			for j := 0; j < volsPerPod; j++ {
-				pvc := framework.MakePersistentVolumeClaim(makeLocalPVCConfig(config), config.ns)
+				pvc := framework.MakePersistentVolumeClaim(makeLocalPVCConfig(config, DirectoryLocalVolumeType), config.ns)
 				pvc, err := framework.CreatePVC(config.client, config.ns, pvc)
 				framework.ExpectNoError(err)
 				pvcs = append(pvcs, pvc)
@@ -621,7 +632,7 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
 	verifyLocalPod(config, testVol, pod1, config.node0.Name)

 	// testFileContent was written during setupLocalVolume
-	testReadFileContent(volumeDir, testFile, testFileContent, pod1)
+	testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)

 	By("Creating pod2 to read from the PV")
 	pod2, pod2Err := createLocalPod(config, testVol)
@@ -629,15 +640,15 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
 	verifyLocalPod(config, testVol, pod2, config.node0.Name)

 	// testFileContent was written during setupLocalVolume
-	testReadFileContent(volumeDir, testFile, testFileContent, pod2)
+	testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)

-	writeCmd := createWriteCmd(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/)
+	writeCmd := createWriteCmd(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVol.localVolumeType)

 	By("Writing in pod1")
 	podRWCmdExec(pod1, writeCmd)

 	By("Reading in pod2")
-	testReadFileContent(volumeDir, testFile, testVol.hostDir, pod2)
+	testReadFileContent(volumeDir, testFile, testVol.hostDir, pod2, testVol.localVolumeType)

 	By("Deleting pod1")
 	framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
@@ -653,9 +664,9 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolume) {
 	verifyLocalPod(config, testVol, pod1, config.node0.Name)

 	// testFileContent was written during setupLocalVolume
-	testReadFileContent(volumeDir, testFile, testFileContent, pod1)
+	testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)

-	writeCmd := createWriteCmd(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/)
+	writeCmd := createWriteCmd(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/, testVol.localVolumeType)

 	By("Writing in pod1")
 	podRWCmdExec(pod1, writeCmd)
@@ -669,7 +680,7 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolume) {
 	verifyLocalPod(config, testVol, pod2, config.node0.Name)

 	By("Reading in pod2")
-	testReadFileContent(volumeDir, testFile, testVol.hostDir, pod2)
+	testReadFileContent(volumeDir, testFile, testVol.hostDir, pod2, testVol.localVolumeType)

 	By("Deleting pod2")
 	framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
@@ -731,8 +742,8 @@ func cleanupLocalVolumes(config *localTestConfig, volumes []*localTestVolume) {
 }

 func setupWriteTestFile(hostDir string, config *localTestConfig, localVolumeType localVolumeType, node *v1.Node) *localTestVolume {
-	writeCmd, _ := createWriteAndReadCmds(hostDir, testFile, testFileContent)
-	By(fmt.Sprintf("Creating local volume on node %q at path %q", node.Name, hostDir))
+	writeCmd, _ := createWriteAndReadCmds(hostDir, testFile, testFileContent, localVolumeType)
+	By(fmt.Sprintf("Creating test file on node %q in path %q", node.Name, hostDir))
 	err := framework.IssueSSHCommand(writeCmd, framework.TestContext.Provider, node)
 	Expect(err).NotTo(HaveOccurred())
 	return &localTestVolume{
@@ -755,17 +766,37 @@ func setupLocalVolumeGCELocalSSD(config *localTestConfig, node *v1.Node) *localTestVolume {
 	Expect(err).NotTo(HaveOccurred())
 	dirName := strings.Fields(res.Stdout)[0]
 	hostDir := "/mnt/disks/by-uuid/google-local-ssds-scsi-fs/" + dirName
-	// populate volume with testFile containing testFileContent
+	// Populate volume with testFile containing testFileContent.
 	return setupWriteTestFile(hostDir, config, GCELocalSSDVolumeType, node)
 }

 func setupLocalVolumeDirectory(config *localTestConfig, node *v1.Node) *localTestVolume {
 	testDirName := "local-volume-test-" + string(uuid.NewUUID())
 	hostDir := filepath.Join(hostBase, testDirName)
-	// populate volume with testFile containing testFileContent
+	// Populate volume with testFile containing testFileContent.
 	return setupWriteTestFile(hostDir, config, DirectoryLocalVolumeType, node)
 }

+func setupLocalVolumeBlock(config *localTestConfig, node *v1.Node) *localTestVolume {
+	testDirName := "local-volume-test-" + string(uuid.NewUUID())
+	hostDir := filepath.Join(hostBase, testDirName)
+	createAndMapBlockLocalVolume(config, hostDir, node)
+	loopDev := getBlockLoopDev(hostDir, node)
+	// Populate block volume with testFile containing testFileContent.
+	volume := setupWriteTestFile(loopDev, config, BlockLocalVolumeType, node)
+	volume.hostDir = loopDev
+	volume.loopDevDir = hostDir
+	return volume
+}
+
+// Determine the /dev/loopXXX device associated with this test, via its hostDir.
+func getBlockLoopDev(hostDir string, node *v1.Node) string {
+	loopDevCmd := fmt.Sprintf("E2E_LOOP_DEV=$(sudo losetup | grep %s/file | awk '{ print $1 }') 2>&1 > /dev/null && echo ${E2E_LOOP_DEV}", hostDir)
+	loopDevResult, err := framework.IssueSSHCommandWithResult(loopDevCmd, framework.TestContext.Provider, node)
+	Expect(err).NotTo(HaveOccurred())
+	return strings.TrimSpace(loopDevResult.Stdout)
+}
+
 func verifyLocalVolume(config *localTestConfig, volume *localTestVolume) {
 	framework.ExpectNoError(framework.WaitOnPVandPVC(config.client, config.ns, volume.pv, volume.pvc))
 }
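Read together with createAndMapBlockLocalVolume and unmapBlockLocalVolume further down, the host-side lifecycle of the block fixture is: create a directory and a 10MB backing file, attach the file to a free loop device, rediscover that device by grepping losetup output (getBlockLoopDev above), then detach and delete during cleanup. The setup command assembled over SSH expands to roughly the following, quoting the Sprintf pieces from this diff with $DIR standing in for the generated hostDir:

    mkdir -p $DIR && dd if=/dev/zero of=$DIR/file bs=512 count=20480 && E2E_LOOP_DEV=$(sudo losetup -f) && sudo losetup ${E2E_LOOP_DEV} $DIR/file

bs=512 with count=20480 yields 512 * 20480 = 10,485,760 bytes, i.e. the 10MB called out in the comment.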
@@ -777,7 +808,7 @@ func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Pod, expectedNodeName string) {
 	Expect(podNodeName).To(Equal(expectedNodeName))
 }

-// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory
+// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory.
 func cleanupLocalVolumeGCELocalSSD(config *localTestConfig, volume *localTestVolume) {
 	By("Removing the test directory")
 	removeCmd := fmt.Sprintf("rm %s", volume.hostDir+"/"+testFile)
@@ -785,7 +816,7 @@ func cleanupLocalVolumeGCELocalSSD(config *localTestConfig, volume *localTestVolume) {
 	Expect(err).NotTo(HaveOccurred())
 }

-// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory
+// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory.
 func cleanupLocalVolumeTmpfs(config *localTestConfig, volume *localTestVolume) {
 	unmountTmpfsLocalVolume(config, volume.hostDir, volume.node)

@@ -795,7 +826,7 @@ func cleanupLocalVolumeTmpfs(config *localTestConfig, volume *localTestVolume) {
 	Expect(err).NotTo(HaveOccurred())
 }

-// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory
+// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory.
 func cleanupLocalVolumeDirectory(config *localTestConfig, volume *localTestVolume) {
 	By("Removing the test directory")
 	removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
@@ -803,11 +834,26 @@ func cleanupLocalVolumeDirectory(config *localTestConfig, volume *localTestVolume) {
 	Expect(err).NotTo(HaveOccurred())
 }

-func makeLocalPVCConfig(config *localTestConfig) framework.PersistentVolumeClaimConfig {
-	return framework.PersistentVolumeClaimConfig{
+// Deletes the PVC/PV and removes the test directory holding the block file.
+func cleanupLocalVolumeBlock(config *localTestConfig, volume *localTestVolume) {
+	volume.hostDir = volume.loopDevDir
+	unmapBlockLocalVolume(config, volume.hostDir, volume.node)
+	By("Removing the test directory")
+	removeCmd := fmt.Sprintf("rm -r %s", volume.hostDir)
+	err := framework.IssueSSHCommand(removeCmd, framework.TestContext.Provider, volume.node)
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func makeLocalPVCConfig(config *localTestConfig, volumeType localVolumeType) framework.PersistentVolumeClaimConfig {
+	pvcConfig := framework.PersistentVolumeClaimConfig{
 		AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
 		StorageClassName: &config.scName,
 	}
+	if volumeType == BlockLocalVolumeType {
+		pvcVolumeMode := v1.PersistentVolumeBlock
+		pvcConfig.VolumeMode = &pvcVolumeMode
+	}
+	return pvcConfig
 }

 func makeLocalPVConfig(config *localTestConfig, volume *localTestVolume) framework.PersistentVolumeConfig {
@@ -821,7 +867,7 @@ func makeLocalPVConfig(config *localTestConfig, volume *localTestVolume) framework.PersistentVolumeConfig {
 		framework.Failf("Node does not have required label %q", nodeKey)
 	}

-	return framework.PersistentVolumeConfig{
+	pvConfig := framework.PersistentVolumeConfig{
 		PVSource: v1.PersistentVolumeSource{
 			Local: &v1.LocalVolumeSource{
 				Path: volume.hostDir,
@@ -845,6 +891,12 @@ func makeLocalPVConfig(config *localTestConfig, volume *localTestVolume) framework.PersistentVolumeConfig {
 			},
 		},
 	}
+
+	if volume.localVolumeType == BlockLocalVolumeType {
+		pvVolumeMode := v1.PersistentVolumeBlock
+		pvConfig.VolumeMode = &pvVolumeMode
+	}
+	return pvConfig
 }

 // Creates a PVC and PV with prebinding
@@ -852,8 +904,9 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mode storagev1.VolumeBindingMode) {
 	var err error

 	for _, volume := range volumes {
-		pvcConfig := makeLocalPVCConfig(config)
+		pvcConfig := makeLocalPVCConfig(config, volume.localVolumeType)
 		pvConfig := makeLocalPVConfig(config, volume)
+
 		volume.pv, volume.pvc, err = framework.CreatePVPVC(config.client, pvConfig, pvcConfig, config.ns, false)
 		framework.ExpectNoError(err)
 	}
@@ -878,7 +931,15 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mode storagev1.VolumeBindingMode) {
 }

 func makeLocalPod(config *localTestConfig, volume *localTestVolume, cmd string) *v1.Pod {
-	return framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, cmd, false, false, selinuxLabel)
+	pod := framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, cmd, false, false, selinuxLabel)
+	if pod == nil {
+		return pod
+	}
+	if volume.localVolumeType == BlockLocalVolumeType {
+		// Block e2e tests require utilities for writing to block devices (e.g. dd), and nginx has these utilities.
+		pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxSlim)
+	}
+	return pod
 }

 func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
@@ -955,26 +1016,67 @@ func unmountTmpfsLocalVolume(config *localTestConfig, dir string, node *v1.Node) {
 	Expect(err).NotTo(HaveOccurred())
 }

+func createAndMapBlockLocalVolume(config *localTestConfig, dir string, node *v1.Node) {
+	By(fmt.Sprintf("Creating block device on node %q using path %q", node.Name, dir))
+	mkdirCmd := fmt.Sprintf("mkdir -p %s", dir)
+	// Create 10MB file that will serve as the backing file for the block device.
+	ddCmd := fmt.Sprintf("dd if=/dev/zero of=%s/file bs=512 count=20480", dir)
+	losetupLoopDevCmd := fmt.Sprintf("E2E_LOOP_DEV=$(sudo losetup -f) && echo ${E2E_LOOP_DEV}")
+	losetupCmd := fmt.Sprintf("sudo losetup ${E2E_LOOP_DEV} %s/file", dir)
+	err := framework.IssueSSHCommand(fmt.Sprintf("%s && %s && %s && %s", mkdirCmd, ddCmd, losetupLoopDevCmd, losetupCmd), framework.TestContext.Provider, node)
+	Expect(err).NotTo(HaveOccurred())
+}
+
+func unmapBlockLocalVolume(config *localTestConfig, dir string, node *v1.Node) {
+	loopDev := getBlockLoopDev(dir, node)
+	By(fmt.Sprintf("Unmap block device %q on node %q at path %s/file", loopDev, node.Name, dir))
+	losetupDeleteCmd := fmt.Sprintf("sudo losetup -d %s", loopDev)
+	err := framework.IssueSSHCommand(losetupDeleteCmd, framework.TestContext.Provider, node)
+	Expect(err).NotTo(HaveOccurred())
+}
+
 // Create corresponding write and read commands
 // to be executed via SSH on the node with the local PV
-func createWriteAndReadCmds(testFileDir string, testFile string, writeTestFileContent string) (writeCmd string, readCmd string) {
-	writeCmd = createWriteCmd(testFileDir, testFile, writeTestFileContent)
-	readCmd = createReadCmd(testFileDir, testFile)
+func createWriteAndReadCmds(testFileDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) (writeCmd string, readCmd string) {
+	writeCmd = createWriteCmd(testFileDir, testFile, writeTestFileContent, volumeType)
+	readCmd = createReadCmd(testFileDir, testFile, volumeType)
 	return writeCmd, readCmd
 }

-func createWriteCmd(testFileDir string, testFile string, writeTestFileContent string) string {
-	testFilePath := filepath.Join(testFileDir, testFile)
-	return fmt.Sprintf("mkdir -p %s; echo %s > %s", testFileDir, writeTestFileContent, testFilePath)
-}
-func createReadCmd(testFileDir string, testFile string) string {
-	testFilePath := filepath.Join(testFileDir, testFile)
-	return fmt.Sprintf("cat %s", testFilePath)
+func createWriteCmd(testDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) string {
+	if volumeType == BlockLocalVolumeType {
+		// testDir is the block device.
+		testFileDir := filepath.Join("/tmp", testDir)
+		testFilePath := filepath.Join(testFileDir, testFile)
+		// Create a file containing the testFileContent.
+		writeTestFileCmd := fmt.Sprintf("mkdir -p %s; echo %s > %s", testFileDir, writeTestFileContent, testFilePath)
+		// sudo is needed when using ssh exec to node.
+		// sudo is not needed and does not exist in some containers (e.g. busybox), when using pod exec.
+		sudoCmd := fmt.Sprintf("SUDO_CMD=$(which sudo); echo ${SUDO_CMD}")
+		// Write the testFileContent into the block device.
+		writeBlockCmd := fmt.Sprintf("${SUDO_CMD} dd if=%s of=%s bs=512 count=100", testFilePath, testDir)
+		// Clean up the file containing testFileContent.
+		deleteTestFileCmd := fmt.Sprintf("rm %s", testFilePath)
+		return fmt.Sprintf("%s && %s && %s && %s", writeTestFileCmd, sudoCmd, writeBlockCmd, deleteTestFileCmd)
+	} else {
+		testFilePath := filepath.Join(testDir, testFile)
+		return fmt.Sprintf("mkdir -p %s; echo %s > %s", testDir, writeTestFileContent, testFilePath)
+	}
+}
+func createReadCmd(testFileDir string, testFile string, volumeType localVolumeType) string {
+	if volumeType == BlockLocalVolumeType {
+		// Create the command to read the beginning of the block device and print it in ascii.
+		return fmt.Sprintf("hexdump -n 100 -e '100 \"%%_p\"' %s | head -1", testFileDir)
+	} else {
+		// Create the command to read (aka cat) a file.
+		testFilePath := filepath.Join(testFileDir, testFile)
+		return fmt.Sprintf("cat %s", testFilePath)
+	}
 }

 // Read testFile and evaluate whether it contains the testFileContent
-func testReadFileContent(testFileDir string, testFile string, testFileContent string, pod *v1.Pod) {
-	readCmd := createReadCmd(volumeDir, testFile)
+func testReadFileContent(testFileDir string, testFile string, testFileContent string, pod *v1.Pod, volumeType localVolumeType) {
+	readCmd := createReadCmd(volumeDir, testFile, volumeType)
 	readOut := podRWCmdExec(pod, readCmd)
 	Expect(readOut).To(ContainSubstring(testFileContent))
 }
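The write and read halves above round-trip through the device. The write command stages the content in a regular file under /tmp, then copies it onto the device with dd prefixed by ${SUDO_CMD}; per the comments in the diff, that indirection lets one command string work both over SSH (where sudo is required) and via pod exec (where sudo may not exist). The read command prints the first 100 bytes of the device as printable ASCII via hexdump, which is what ContainSubstring then matches. A pure-Go equivalent of the read side, shown only to clarify hexdump's %_p conversion (hypothetical helper, not part of this diff):

    // Prints the first 100 bytes of the file/device given as os.Args[1],
    // mapping non-printable bytes to '.', like hexdump's %_p conversion.
    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	f, err := os.Open(os.Args[1]) // e.g. /dev/loop0; needs read permission
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()
    	buf := make([]byte, 100)
    	n, _ := f.Read(buf)
    	for i := 0; i < n; i++ {
    		if buf[i] < 0x20 || buf[i] > 0x7e {
    			buf[i] = '.'
    		}
    	}
    	fmt.Println(string(buf[:n]))
    }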
@@ -990,6 +1092,7 @@ func createFileDoesntExistCmd(testFileDir string, testFile string) string {
 // Fail on error
 func podRWCmdExec(pod *v1.Pod, cmd string) string {
 	out, err := utils.PodExec(pod, cmd)
+	framework.Logf("podRWCmdExec out: %q err: %q", out, err)
 	Expect(err).NotTo(HaveOccurred())
 	return out
 }