mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 03:41:45 +00:00

Rename and restructure local PV tests

parent 1e1b32bf01
commit 64c139ddbb
@@ -141,7 +141,7 @@ var (
 		Level: "s0:c0,c1"}
 )
 
-var _ = utils.SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [Serial]", func() {
+var _ = utils.SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes]", func() {
 	f := framework.NewDefaultFramework("persistent-local-volumes-test")
 
 	var (
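
The hunk above drops the [Serial] tag from the top-level Describe; the per-type contexts that still need serial execution get it back through the serialStr logic in the next hunk, and the provisioner context is tagged [Serial] directly. In Ginkgo-based e2e suites, bracketed tags like [Serial] are plain substrings of the spec name that runners select or exclude with regular expressions via flags such as --ginkgo.focus and --ginkgo.skip. A minimal, illustrative sketch of that matching (the spec strings below are hypothetical, shortened versions of the real names):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Mirrors a filter like --ginkgo.skip=\[Serial\]
	serial := regexp.MustCompile(`\[Serial\]`)
	specs := []string{
		"PersistentVolumes-local [Feature:LocalPersistentVolumes] [Volume type: dir] ...",
		"PersistentVolumes-local [Feature:LocalPersistentVolumes] [Volume type: gce-localssd-scsi-fs] [Serial] ...",
	}
	for _, s := range specs {
		fmt.Printf("parallel-safe: %-5v %s\n", !serial.MatchString(s), s)
	}
}
```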
@@ -168,104 +168,89 @@ var _ = utils.SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [Serial]", func() {
 		}
 	})
 
-	// TODO: move this under volumeType loop
-	Context("when one pod requests one prebound PVC", func() {
-
-		var testVol *localTestVolume
-
-		BeforeEach(func() {
-			setupStorageClass(config, &waitMode)
-			testVols := setupLocalVolumesPVCsPVs(config, DirectoryLocalVolumeType, config.node0, 1, waitMode)
-			testVol = testVols[0]
-		})
-
-		AfterEach(func() {
-			cleanupLocalVolumes(config, []*localTestVolume{testVol})
-			cleanupStorageClass(config)
-		})
-
-		It("should be able to mount volume and read from pod1", func() {
-			By("Creating pod1")
-			pod1, pod1Err := createLocalPod(config, testVol)
-			Expect(pod1Err).NotTo(HaveOccurred())
-			verifyLocalPod(config, testVol, pod1, config.node0.Name)
-
-			By("Reading in pod1")
-			// testFileContent was written during setupLocalVolume
-			testReadFileContent(volumeDir, testFile, testFileContent, pod1)
-
-			By("Deleting pod1")
-			framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
-		})
-
-		It("should be able to mount volume and write from pod1", func() {
-			By("Creating pod1")
-			pod1, pod1Err := createLocalPod(config, testVol)
-			Expect(pod1Err).NotTo(HaveOccurred())
-			verifyLocalPod(config, testVol, pod1, config.node0.Name)
-
-			// testFileContent was written during setupLocalVolume
-			testReadFileContent(volumeDir, testFile, testFileContent, pod1)
-
-			By("Writing in pod1")
-			writeCmd, _ := createWriteAndReadCmds(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/)
-			podRWCmdExec(pod1, writeCmd)
-
-			By("Deleting pod1")
-			framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
-		})
-	})
-
 	localVolumeTypes := []localVolumeType{DirectoryLocalVolumeType, TmpfsLocalVolumeType, GCELocalSSDVolumeType}
 	for _, tempTestVolType := range localVolumeTypes {
 
 		// New variable required for ginkgo test closures
 		testVolType := tempTestVolType
-		ctxString := fmt.Sprintf("[Volume type: %s]", testVolType)
+		serialStr := ""
+		if testVolType == GCELocalSSDVolumeType {
+			serialStr = " [Serial]"
+		}
+		ctxString := fmt.Sprintf("[Volume type: %s]%v", testVolType, serialStr)
 		testMode := immediateMode
 
 		Context(ctxString, func() {
+			var testVol *localTestVolume
 
 			BeforeEach(func() {
 				if testVolType == GCELocalSSDVolumeType {
 					SkipUnlessLocalSSDExists("scsi", "fs", config.node0)
 				}
 				setupStorageClass(config, &testMode)
+
+				testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.node0, 1, testMode)
+				testVol = testVols[0]
 			})
 
 			AfterEach(func() {
+				cleanupLocalVolumes(config, []*localTestVolume{testVol})
 				cleanupStorageClass(config)
 			})
 
-			Context("when two pods mount a local volume at the same time", func() {
-				It("should be able to write from pod1 and read from pod2", func() {
-					var testVol *localTestVolume
-					testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.node0, 1, testMode)
-					testVol = testVols[0]
-					twoPodsReadWriteTest(config, testVol)
-					cleanupLocalVolumes(config, testVols)
+			Context("One pod requesting one prebound PVC", func() {
+				var (
+					pod1    *v1.Pod
+					pod1Err error
+				)
+
+				BeforeEach(func() {
+					By("Creating pod1")
+					pod1, pod1Err = createLocalPod(config, testVol)
+					Expect(pod1Err).NotTo(HaveOccurred())
+					verifyLocalPod(config, testVol, pod1, config.node0.Name)
+				})
+
+				AfterEach(func() {
+					By("Deleting pod1")
+					framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
+				})
+
+				It("should be able to mount volume and read from pod1", func() {
+					By("Reading in pod1")
+					// testFileContent was written during setupLocalVolume
+					testReadFileContent(volumeDir, testFile, testFileContent, pod1)
+				})
+
+				It("should be able to mount volume and write from pod1", func() {
+					// testFileContent was written during setupLocalVolume
+					testReadFileContent(volumeDir, testFile, testFileContent, pod1)
+
+					By("Writing in pod1")
+					writeCmd, _ := createWriteAndReadCmds(volumeDir, testFile, testVol.hostDir /*writeTestFileContent*/)
+					podRWCmdExec(pod1, writeCmd)
 				})
 			})
 
-			Context("when two pods mount a local volume one after the other", func() {
+			Context("Two pods mounting a local volume at the same time", func() {
 				It("should be able to write from pod1 and read from pod2", func() {
-					var testVol *localTestVolume
-					testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.node0, 1, testMode)
-					testVol = testVols[0]
-					twoPodsReadWriteSerialTest(config, testVol)
-					cleanupLocalVolumes(config, testVols)
+					twoPodsReadWriteTest(config, testVol)
 				})
 			})
+
+			Context("Two pods mounting a local volume one after the other", func() {
+				It("should be able to write from pod1 and read from pod2", func() {
+					twoPodsReadWriteSerialTest(config, testVol)
+				})
+			})
+
 		})
 	}
 
-	Context("when local volume cannot be mounted [Slow]", func() {
+	Context("Local volume that cannot be mounted [Slow]", func() {
 		// TODO:
 		// - make the pod create timeout shorter
 		// - check for these errors in unit tests instead
-		It("should fail mount due to non-existant path", func() {
+		It("should fail due to non-existant path", func() {
 			ep := &eventPatterns{
 				reason:  "FailedMount",
 				pattern: make([]string, 2)}
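
The `// New variable required for ginkgo test closures` context line in the hunk above is the standard Go loop-variable workaround: before Go 1.22, a range variable is a single variable reused on every iteration, so closures that are registered now but executed later, as Ginkgo It and Context bodies are, would all observe its final value. A standalone sketch of the pitfall and the fix, with hypothetical names mirroring the diff's `testVolType := tempTestVolType`:

```go
package main

import "fmt"

func main() {
	volTypes := []string{"dir", "tmpfs", "gce-localssd-scsi-fs"}

	var deferredBodies []func()
	for _, tempT := range volTypes {
		t := tempT // per-iteration copy, as the diff does with testVolType
		deferredBodies = append(deferredBodies, func() { fmt.Println(t) })
	}

	// Runs later, the way Ginkgo invokes spec bodies after the tree is built:
	for _, body := range deferredBodies {
		body() // prints dir, tmpfs, gce-localssd-scsi-fs
	}
	// Without the copy, pre-1.22 Go would print the last element three times.
}
```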
@@ -282,11 +267,10 @@ var _ = utils.SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [Serial]", func() {
 			pod, err := createLocalPod(config, testVol)
 			Expect(err).To(HaveOccurred())
 			checkPodEvents(config, pod.Name, ep)
-			verifyLocalVolume(config, testVol)
 			cleanupLocalPVCsPVs(config, []*localTestVolume{testVol})
 		})
 
-		It("should fail mount due to wrong node", func() {
+		It("should fail due to wrong node", func() {
 			if len(config.nodes) < 2 {
 				framework.Skipf("Runs only when number of nodes >= 2")
 			}
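
checkPodEvents, used above with an eventPatterns value whose reason is "FailedMount", asserts that the kubelet surfaced the expected mount failure on the pod. A hedged sketch of how such a check can be written against the events API with client-go; podHasEvent and its wiring are hypothetical, not the e2e framework's actual helper:

```go
package e2esketch

import (
	"context"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// podHasEvent reports whether any event for the named pod carries the given
// reason (e.g. "FailedMount") and a message containing pattern.
func podHasEvent(c kubernetes.Interface, ns, podName, reason, pattern string) (bool, error) {
	events, err := c.CoreV1().Events(ns).List(context.TODO(), metav1.ListOptions{
		FieldSelector: "involvedObject.kind=Pod,involvedObject.name=" + podName,
	})
	if err != nil {
		return false, err
	}
	for _, e := range events.Items {
		if e.Reason == reason && strings.Contains(e.Message, pattern) {
			return true, nil
		}
	}
	return false, nil
}
```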
@@ -312,7 +296,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [Serial]", func() {
 		})
 	})
 
-	Context("when pod's node is different from PV's NodeAffinity", func() {
+	Context("Pod with node different from PV's NodeAffinity", func() {
 		var (
 			testVol    *localTestVolume
 			volumeType localVolumeType
@@ -334,16 +318,16 @@ var _ = utils.SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [Serial]", func() {
 			cleanupStorageClass(config)
 		})
 
-		It("should not be able to mount due to different NodeAffinity", func() {
+		It("should fail scheduling due to different NodeAffinity", func() {
 			testPodWithNodeConflict(config, volumeType, config.nodes[1].Name, makeLocalPodWithNodeAffinity, immediateMode)
 		})
 
-		It("should not be able to mount due to different NodeSelector", func() {
+		It("should fail scheduling due to different NodeSelector", func() {
 			testPodWithNodeConflict(config, volumeType, config.nodes[1].Name, makeLocalPodWithNodeSelector, immediateMode)
 		})
 	})
 
-	Context("when using local volume provisioner", func() {
+	Context("Local volume provisioner [Serial]", func() {
 		var volumePath string
 
 		BeforeEach(func() {
@@ -400,7 +384,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [Serial]", func() {
 		})
 	})
 
-	Context("when StatefulSet has pod anti-affinity", func() {
+	Context("StatefulSet with pod anti-affinity", func() {
 		var testVols map[string][]*localTestVolume
 		const (
 			ssReplicas = 3
@@ -806,6 +790,7 @@ func createSecPod(config *localTestConfig, volume *localTestVolume, hostIPC bool
 }
 
 func createLocalPod(config *localTestConfig, volume *localTestVolume) (*v1.Pod, error) {
+	By("Creating a pod")
 	return framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel)
 }
 
@@ -1288,7 +1273,17 @@ func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet) {
 
 	Expect(nodes.Len()).To(Equal(len(pods.Items)))
 
-	// TODO: validate all PVCs are bound
+	// Validate all PVCs are bound
+	for _, pod := range pods.Items {
+		for _, volume := range pod.Spec.Volumes {
+			pvcSource := volume.VolumeSource.PersistentVolumeClaim
+			if pvcSource != nil {
+				err := framework.WaitForPersistentVolumeClaimPhase(
+					v1.ClaimBound, config.client, config.ns, pvcSource.ClaimName, framework.Poll, time.Second)
+				Expect(err).NotTo(HaveOccurred())
+			}
+		}
+	}
 }
 
 // SkipUnlessLocalSSDExists takes in an ssdInterface (scsi/nvme) and a filesystemType (fs/block)
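
The new loop above delegates to framework.WaitForPersistentVolumeClaimPhase with framework.Poll as the interval. Conceptually, such a helper polls the claim until Status.Phase reaches the wanted value or the timeout lapses. A minimal sketch of that polling pattern, assuming a recent client-go; waitForClaimPhase is illustrative, not the framework's implementation:

```go
package e2esketch

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForClaimPhase polls the named PVC until it reaches the wanted phase
// (e.g. v1.ClaimBound) or the timeout lapses.
func waitForClaimPhase(c kubernetes.Interface, ns, name string,
	phase v1.PersistentVolumeClaimPhase, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err // fail fast on API errors
		}
		return pvc.Status.Phase == phase, nil
	})
}
```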