Remove redundant tests

We check that a block / filesystem volume is writable in volumes.go.
Jan Safranek 2019-07-10 15:17:09 +02:00
parent 5fc9e23b50
commit b5a800e765
4 changed files with 58 additions and 130 deletions
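
The substance of the change is easiest to see in fixtures.go below: the read-back verification that TestVolumeClient used to inline is factored into a shared testVolumeContent helper, InjectContent gains an fsType parameter and now runs the same verification in the injector pod itself, and volumes.go gates the second-pod check on the driver's persistence capability. A minimal, self-contained Go sketch of that pattern, using hypothetical stand-in names rather than the framework's API:

package main

import "fmt"

// test is a stand-in for the framework's volume.Test entry.
type test struct{ file, expectedContent string }

// verifyContent plays the role of testVolumeContent in fixtures.go:
// re-read every file/device in the given pod and compare to the payload.
func verifyContent(podName string, tests []test) {
	for _, t := range tests {
		// The real helper execs into the pod; here we only log the intent.
		fmt.Printf("pod %s: expect %q in %s\n", podName, t.expectedContent, t.file)
	}
}

func main() {
	tests := []test{{file: "/opt/0/index.html", expectedContent: "Hello"}}
	persistent := true // driver capability, cf. CapPersistence

	// The writer pod checks its own write -- meaningful even for
	// non-persistent, emptyDir-like volumes (InjectContent after this commit).
	verifyContent("injector", tests)

	// A second pod re-reads the data only when the driver claims
	// persistence (TestVolumeClient, gated in volumes.go).
	if persistent {
		verifyContent("client", tests)
	}
}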

View File

@@ -10,11 +10,12 @@ go_library(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
+        "//test/e2e/storage/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",

View File

@@ -500,6 +500,48 @@ func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix
 	return clientPod, nil
 }
 
+func testVolumeContent(client clientset.Interface, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
+	ginkgo.By("Checking that text file contents are perfect.")
+	for i, test := range tests {
+		if test.Mode == v1.PersistentVolumeBlock {
+			// Block: check content
+			deviceName := fmt.Sprintf("/opt/%d", i)
+			commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
+			_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
+			framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
+
+			// Check that it's a real block device
+			utils.CheckVolumeModeOfPath(pod, test.Mode, deviceName)
+		} else {
+			// Filesystem: check content
+			fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
+			commands := GenerateReadFileCmd(fileName)
+			_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
+			framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
+
+			// Check that a directory has been mounted
+			dirName := filepath.Dir(fileName)
+			utils.CheckVolumeModeOfPath(pod, test.Mode, dirName)
+
+			if !framework.NodeOSDistroIs("windows") {
+				// Filesystem: check fsgroup
+				if fsGroup != nil {
+					ginkgo.By("Checking fsGroup is correct.")
+					_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
+					framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
+				}
+
+				// Filesystem: check fsType
+				if fsType != "" {
+					ginkgo.By("Checking fsType is correct.")
+					_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
+					framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
+				}
+			}
+		}
+	}
+}
+
 // TestVolumeClient start a client pod using given VolumeSource (exported by startVolumeServer())
 // and check that the pod sees expected data, e.g. from the server pod.
 // Multiple Tests can be specified to mount multiple volumes to a single
@@ -511,52 +553,13 @@ func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *in
 	}
 	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, clientPod))
 
-	ginkgo.By("Checking that text file contents are perfect.")
-	for i, test := range tests {
-		if test.Mode == v1.PersistentVolumeBlock {
-			// Block: check content
-			deviceName := fmt.Sprintf("/opt/%d", i)
-			commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
-			_, err = framework.LookForStringInPodExec(config.Namespace, clientPod.Name, commands, test.ExpectedContent, time.Minute)
-			framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
-
-			// Check that it's a real block device
-			utils.CheckVolumeModeOfPath(clientPod, test.Mode, deviceName)
-		} else {
-			// Filesystem: check content
-			fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
-			commands := GenerateReadFileCmd(fileName)
-			_, err = framework.LookForStringInPodExec(config.Namespace, clientPod.Name, commands, test.ExpectedContent, time.Minute)
-			framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
-
-			// Check that a directory has been mounted
-			dirName := filepath.Dir(fileName)
-			utils.CheckVolumeModeOfPath(clientPod, test.Mode, dirName)
-
-			if !framework.NodeOSDistroIs("windows") {
-				// Filesystem: check fsgroup
-				if fsGroup != nil {
-					ginkgo.By("Checking fsGroup is correct.")
-					_, err = framework.LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
-					framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
-				}
-
-				// Filesystem: check fsType
-				if fsType != "" {
-					ginkgo.By("Checking fsType is correct.")
-					_, err = framework.LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
-					framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
-				}
-			}
-		}
-	}
+	testVolumeContent(client, clientPod, fsGroup, fsType, tests)
 }
 
 // InjectContent inserts index.html with given content into given volume. It does so by
 // starting and auxiliary pod which writes the file there.
 // The volume must be writable.
-func InjectContent(client clientset.Interface, config TestConfig, fsGroup *int64, tests []Test) {
+func InjectContent(client clientset.Interface, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
 	injectorPod, err := runVolumeTesterPod(client, config, "injector", fsGroup, tests)
 	if err != nil {
 		e2elog.Failf("Failed to create injector pod: %v", err)
@@ -583,6 +586,10 @@ func InjectContent(client clientset.Interface, config TestConfig, fsGroup *int64
 		out, err := framework.RunKubectl(commands...)
 		framework.ExpectNoError(err, "failed: writing the contents: %s", out)
 	}
+
+	// Check that the data have been really written in this pod.
+	// This tests non-persistent volume types.
+	testVolumeContent(client, injectorPod, fsGroup, fsType, tests)
 }
 
 // CreateGCEVolume creates PersistentVolumeSource for GCEVolume.
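
The fsType assertion in testVolumeContent shells out to grep " <dir> " /proc/mounts inside the pod. Each /proc/mounts line reads "device mountpoint fstype options 0 0", so matching the second field and reading the third is equivalent; here is the same check as a local, stdlib-only Go sketch (Linux-only, illustrative rather than the framework's code):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// fsTypeOf scans /proc/mounts for the given mount point and returns the
// filesystem-type column, mirroring the grep-based check in the diff above.
func fsTypeOf(mountPoint string) (string, error) {
	f, err := os.Open("/proc/mounts")
	if err != nil {
		return "", err
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		if len(fields) >= 3 && fields[1] == mountPoint {
			return fields[2], nil // third column is the fstype
		}
	}
	return "", fmt.Errorf("%s not found in /proc/mounts", mountPoint)
}

func main() {
	fsType, err := fsTypeOf("/")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("fsType of / is", fsType)
}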

View File

@@ -28,7 +28,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
-	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
 
 const (
@@ -196,45 +195,9 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 				}()
 				framework.ExpectError(err)
 			})
-		} else {
-			ginkgo.It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
-				init()
-				defer cleanup()
-
-				var err error
-
-				ginkgo.By("Creating sc")
-				l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
-				framework.ExpectNoError(err)
-
-				ginkgo.By("Creating pv and pvc")
-				l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
-				framework.ExpectNoError(err)
-
-				// Prebind pv
-				l.pvc.Spec.VolumeName = l.pv.Name
-				l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
-				framework.ExpectNoError(err)
-
-				framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
-
-				ginkgo.By("Creating pod")
-				pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
-					false, "", false, false, framework.SELinuxLabel,
-					nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
-				defer func() {
-					framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
-				}()
-				framework.ExpectNoError(err)
-
-				ginkgo.By("Checking if persistent volume exists as expected volume mode")
-				utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
-
-				ginkgo.By("Checking if read/write to persistent volume works properly")
-				utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1")
-			})
 			// TODO(mkimuram): Add more tests
 		}
 
 	case testpatterns.DynamicPV:
 		if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
 			ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow]", func() {
@@ -254,45 +217,6 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 				err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
 				framework.ExpectError(err)
 			})
-		} else {
-			ginkgo.It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
-				init()
-				defer cleanup()
-
-				var err error
-
-				ginkgo.By("Creating sc")
-				l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
-				framework.ExpectNoError(err)
-
-				ginkgo.By("Creating pv and pvc")
-				l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
-				framework.ExpectNoError(err)
-
-				err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
-				framework.ExpectNoError(err)
-
-				l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Get(l.pvc.Name, metav1.GetOptions{})
-				framework.ExpectNoError(err)
-
-				l.pv, err = l.cs.CoreV1().PersistentVolumes().Get(l.pvc.Spec.VolumeName, metav1.GetOptions{})
-				framework.ExpectNoError(err)
-
-				ginkgo.By("Creating pod")
-				pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
-					false, "", false, false, framework.SELinuxLabel,
-					nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
-				defer func() {
-					framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
-				}()
-				framework.ExpectNoError(err)
-
-				ginkgo.By("Checking if persistent volume exists as expected volume mode")
-				utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
-
-				ginkgo.By("Checking if read/write to persistent volume works properly")
-				utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1")
-			})
 			// TODO(mkimuram): Add more tests
 		}
 
 	default:
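
The two deleted test bodies above relied on utils.CheckVolumeModeOfPath and utils.CheckReadWriteToPath, which exec shell probes inside the test pod. The sketch below shows plausible equivalents of those probes; the exact commands used by the utils package may differ:

package main

import "fmt"

// volumeModeProbes returns shell commands roughly equivalent to the removed
// checks: a block device answers `test -b`, a mounted filesystem answers
// `test -d`, and read/write is probed with dd or plain file I/O.
func volumeModeProbes(block bool, path string) (modeCheck, rwCheck string) {
	if block {
		modeCheck = fmt.Sprintf("test -b %s", path)
		rwCheck = fmt.Sprintf("dd if=/dev/zero of=%s bs=64 count=1 && dd if=%s bs=64 count=1 of=/dev/null", path, path)
	} else {
		modeCheck = fmt.Sprintf("test -d %s", path)
		rwCheck = fmt.Sprintf("echo hello > %s/f && cat %s/f", path, path)
	}
	return modeCheck, rwCheck
}

func main() {
	mode, rw := volumeModeProbes(true, "/mnt/volume1")
	fmt.Println(mode)
	fmt.Println(rw)
}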

View File

@@ -81,13 +81,6 @@ func (t *volumesTestSuite) getTestSuiteInfo() TestSuiteInfo {
 func (t *volumesTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
 }
 
-func skipPersistenceTest(driver TestDriver) {
-	dInfo := driver.GetDriverInfo()
-	if !dInfo.Capabilities[CapPersistence] {
-		framework.Skipf("Driver %q does not provide persistency - skipping", dInfo.Name)
-	}
-}
-
 func skipExecTest(driver TestDriver) {
 	dInfo := driver.GetDriverInfo()
 	if !dInfo.Capabilities[CapExec] {
@@ -149,8 +142,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
 	}
 
-	ginkgo.It("should persist data across different pods", func() {
-		skipPersistenceTest(driver)
+	ginkgo.It("should store data", func() {
 		if pattern.VolMode == v1.PersistentVolumeBlock {
 			skipBlockTest(driver)
 		}
@@ -181,8 +173,12 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		// local), plugin skips setting fsGroup if volume is already mounted
 		// and we don't have reliable way to detect volumes are unmounted or
 		// not before starting the second pod.
-		volume.InjectContent(f.ClientSet, config, fsGroup, tests)
-		volume.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests)
+		volume.InjectContent(f.ClientSet, config, fsGroup, pattern.FsType, tests)
+		if driver.GetDriverInfo().Capabilities[CapPersistence] {
+			volume.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests)
+		} else {
+			ginkgo.By("Skipping persistence check for non-persistent volume")
+		}
 	})
 
 	// Exec works only on filesystem volumes
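
The new gate above reads the driver's capability map instead of skipping the whole test. A simplified, self-contained model of that mechanism; in the real tree DriverInfo and the Cap* constants live in test/e2e/storage/testsuites, and the string values below are assumptions:

package main

import "fmt"

// Capability mirrors the testsuites capability mechanism in simplified form.
type Capability string

const (
	CapPersistence Capability = "persistence" // data survives across pods
	CapExec        Capability = "exec"        // files on the volume can be executed
)

type DriverInfo struct {
	Name         string
	Capabilities map[Capability]bool
}

func main() {
	d := DriverInfo{
		Name:         "example-driver",
		Capabilities: map[Capability]bool{CapPersistence: true, CapExec: false},
	}

	// Mirrors the new logic: every driver gets the same-pod write check via
	// InjectContent; only persistent drivers get the fresh-pod read-back.
	fmt.Println("InjectContent self-check: always")
	if d.Capabilities[CapPersistence] {
		fmt.Println("TestVolumeClient persistence check: run")
	} else {
		fmt.Println("persistence check: skipped for non-persistent volume")
	}
}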