Merge pull request #79730 from jsafrane/add-block-tests

Consolidate block and filesystem tests
This commit is contained in:
Kubernetes Prow Robot 2019-07-15 20:27:23 -07:00 committed by GitHub
commit 40edce1235
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 169 additions and 217 deletions

View File

@ -10,11 +10,12 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library", "//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library", "//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/pod:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library", "//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library",

View File

@ -42,18 +42,18 @@ package volume
import ( import (
"fmt" "fmt"
"path/filepath" "path/filepath"
"strconv" "strconv"
"time" "time"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors" apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
@ -126,7 +126,9 @@ type TestConfig struct {
// Test contains a volume to mount into a client pod and its // Test contains a volume to mount into a client pod and its
// expected content. // expected content.
type Test struct { type Test struct {
Volume v1.VolumeSource Volume v1.VolumeSource
Mode v1.PersistentVolumeMode
// Name of file to read/write in FileSystem mode
File string File string
ExpectedContent string ExpectedContent string
} }
@ -424,19 +426,15 @@ func TestCleanup(f *framework.Framework, config TestConfig) {
} }
} }
// TestVolumeClient start a client pod using given VolumeSource (exported by startVolumeServer()) func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix string, fsGroup *int64, tests []Test) (*v1.Pod, error) {
// and check that the pod sees expected data, e.g. from the server pod. ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-", podSuffix))
// Multiple Tests can be specified to mount multiple volumes to a single
// pod.
func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-client"))
var gracePeriod int64 = 1 var gracePeriod int64 = 1
var command string var command string
if !framework.NodeOSDistroIs("windows") { if !framework.NodeOSDistroIs("windows") {
command = "while true ; do cat /opt/0/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done " command = "while true ; do sleep 2; done "
} else { } else {
command = "while(1) {cat /opt/0/index.html ; sleep 2 ; ls /opt/; sleep 2}" command = "while(1) {sleep 2}"
} }
seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"} seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"}
clientPod := &v1.Pod{ clientPod := &v1.Pod{
@ -445,18 +443,18 @@ func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *in
APIVersion: "v1", APIVersion: "v1",
}, },
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-client", Name: config.Prefix + "-" + podSuffix,
Labels: map[string]string{ Labels: map[string]string{
"role": config.Prefix + "-client", "role": config.Prefix + "-" + podSuffix,
}, },
}, },
Spec: v1.PodSpec{ Spec: v1.PodSpec{
Containers: []v1.Container{ Containers: []v1.Container{
{ {
Name: config.Prefix + "-client", Name: config.Prefix + "-" + podSuffix,
Image: GetTestImage(framework.BusyBoxImage), Image: GetTestImage(framework.BusyBoxImage),
WorkingDir: "/opt", WorkingDir: "/opt",
// An imperative and easily debuggable container which reads vol contents for // An imperative and easily debuggable container which reads/writes vol contents for
// us to scan in the tests or by eye. // us to scan in the tests or by eye.
// We expect that /opt is empty in the minimal containers which we use in this test. // We expect that /opt is empty in the minimal containers which we use in this test.
Command: GenerateScriptCmd(command), Command: GenerateScriptCmd(command),
@ -470,105 +468,128 @@ func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *in
NodeSelector: config.NodeSelector, NodeSelector: config.NodeSelector,
}, },
} }
podsNamespacer := client.CoreV1().Pods(config.Namespace)
for i, test := range tests { for i, test := range tests {
volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i) volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{ if test.Mode == v1.PersistentVolumeBlock {
Name: volumeName, clientPod.Spec.Containers[0].VolumeDevices = append(clientPod.Spec.Containers[0].VolumeDevices, v1.VolumeDevice{
MountPath: fmt.Sprintf("/opt/%d", i), Name: volumeName,
}) DevicePath: fmt.Sprintf("/opt/%d", i),
})
} else {
clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
Name: volumeName,
MountPath: fmt.Sprintf("/opt/%d", i),
})
}
clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{ clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
Name: volumeName, Name: volumeName,
VolumeSource: test.Volume, VolumeSource: test.Volume,
}) })
} }
podsNamespacer := client.CoreV1().Pods(config.Namespace)
clientPod, err := podsNamespacer.Create(clientPod) clientPod, err := podsNamespacer.Create(clientPod)
if err != nil { if err != nil {
e2elog.Failf("Failed to create %s pod: %v", clientPod.Name, err) return nil, err
} }
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, clientPod)) err = e2epod.WaitForPodRunningInNamespace(client, clientPod)
if err != nil {
e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
return nil, err
}
return clientPod, nil
}
func testVolumeContent(client clientset.Interface, pod *v1.Pod, fsGroup *int64, fsType string, tests []Test) {
ginkgo.By("Checking that text file contents are perfect.") ginkgo.By("Checking that text file contents are perfect.")
for i, test := range tests { for i, test := range tests {
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File) if test.Mode == v1.PersistentVolumeBlock {
commands := GenerateReadFileCmd(fileName) // Block: check content
_, err = framework.LookForStringInPodExec(config.Namespace, clientPod.Name, commands, test.ExpectedContent, time.Minute) deviceName := fmt.Sprintf("/opt/%d", i)
framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName) commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
} _, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
if !framework.NodeOSDistroIs("windows") { framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
if fsGroup != nil {
ginkgo.By("Checking fsGroup is correct.")
_, err = framework.LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
}
if fsType != "" { // Check that it's a real block device
ginkgo.By("Checking fsType is correct.") utils.CheckVolumeModeOfPath(pod, test.Mode, deviceName)
_, err = framework.LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"grep", " /opt/0 ", "/proc/mounts"}, fsType, time.Minute) } else {
framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType) // Filesystem: check content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands := GenerateReadFileCmd(fileName)
_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
// Check that a directory has been mounted
dirName := filepath.Dir(fileName)
utils.CheckVolumeModeOfPath(pod, test.Mode, dirName)
if !framework.NodeOSDistroIs("windows") {
// Filesystem: check fsgroup
if fsGroup != nil {
ginkgo.By("Checking fsGroup is correct.")
_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
}
// Filesystem: check fsType
if fsType != "" {
ginkgo.By("Checking fsType is correct.")
_, err = framework.LookForStringInPodExec(pod.Namespace, pod.Name, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
}
}
} }
} }
} }
// InjectHTML inserts index.html with given content into given volume. It does so by // TestVolumeClient start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees expected data, e.g. from the server pod.
// Multiple Tests can be specified to mount multiple volumes to a single
// pod.
// TestVolumeClient starts a client pod that mounts the volumes described by
// the given tests, waits for the pod to reach the Running state, and then
// verifies that every volume presents the expected content via
// testVolumeContent. Multiple Tests can be specified to mount multiple
// volumes into the single client pod.
// The test is aborted (Failf) if the pod cannot be created or started.
func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
clientPod, err := runVolumeTesterPod(client, config, "client", fsGroup, tests)
if err != nil {
// Failf aborts the current test immediately; no pod cleanup is attempted here.
e2elog.Failf("Failed to create client pod: %v", err)
}
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, clientPod))
testVolumeContent(client, clientPod, fsGroup, fsType, tests)
}
// InjectContent inserts index.html with given content into given volume. It does so by
// starting and auxiliary pod which writes the file there. // starting and auxiliary pod which writes the file there.
// The volume must be writable. // The volume must be writable.
func InjectHTML(client clientset.Interface, config TestConfig, fsGroup *int64, volume v1.VolumeSource, content string) { func InjectContent(client clientset.Interface, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
ginkgo.By(fmt.Sprint("starting ", config.Prefix, " injector")) injectorPod, err := runVolumeTesterPod(client, config, "injector", fsGroup, tests)
podClient := client.CoreV1().Pods(config.Namespace) if err != nil {
podName := fmt.Sprintf("%s-injector-%s", config.Prefix, rand.String(4)) e2elog.Failf("Failed to create injector pod: %v", err)
volMountName := fmt.Sprintf("%s-volume-%s", config.Prefix, rand.String(4)) return
fileName := "/mnt/index.html"
injectPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{
"role": config.Prefix + "-injector",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: config.Prefix + "-injector",
Image: GetTestImage(framework.BusyBoxImage),
Command: GenerateWriteFileCmd(content, fileName),
VolumeMounts: []v1.VolumeMount{
{
Name: volMountName,
MountPath: "/mnt",
},
},
SecurityContext: GenerateSecurityContext(true),
},
},
SecurityContext: GeneratePodSecurityContext(fsGroup, nil),
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: volMountName,
VolumeSource: volume,
},
},
NodeName: config.ClientNodeName,
NodeSelector: config.NodeSelector,
},
} }
defer func() { defer func() {
podClient.Delete(podName, nil) e2epod.DeletePodOrFail(client, injectorPod.Namespace, injectorPod.Name)
e2epod.WaitForPodToDisappear(client, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
}() }()
injectPod, err := podClient.Create(injectPod) ginkgo.By("Writing text file contents in the container.")
framework.ExpectNoError(err, "Failed to create injector pod: %v", err) for i, test := range tests {
err = e2epod.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace) commands := []string{"exec", injectorPod.Name, fmt.Sprintf("--namespace=%v", injectorPod.Namespace), "--"}
framework.ExpectNoError(err) if test.Mode == v1.PersistentVolumeBlock {
// Block: write content
deviceName := fmt.Sprintf("/opt/%d", i)
commands = append(commands, GenerateWriteBlockCmd(test.ExpectedContent, deviceName)...)
} else {
// Filesystem: write content
fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
commands = append(commands, GenerateWriteFileCmd(test.ExpectedContent, fileName)...)
}
out, err := framework.RunKubectl(commands...)
framework.ExpectNoError(err, "failed: writing the contents: %s", out)
}
// Check that the data have been really written in this pod.
// This tests non-persistent volume types
testVolumeContent(client, injectorPod, fsGroup, fsType, tests)
} }
// CreateGCEVolume creates PersistentVolumeSource for GCEVolume. // CreateGCEVolume creates PersistentVolumeSource for GCEVolume.
@ -596,6 +617,18 @@ func GenerateScriptCmd(command string) []string {
return commands return commands
} }
// GenerateWriteBlockCmd generates the command line that writes the given
// content to the block device at fullPath. Linux nodes get a /bin/sh
// invocation, Windows nodes a powershell one.
func GenerateWriteBlockCmd(content, fullPath string) []string {
	if framework.NodeOSDistroIs("windows") {
		return []string{"powershell", "/c", "echo '" + content + "' > " + fullPath}
	}
	return []string{"/bin/sh", "-c", "echo '" + content + "' > " + fullPath}
}
// GenerateWriteFileCmd generates the corresponding command lines to write a file with the given content and file path. // GenerateWriteFileCmd generates the corresponding command lines to write a file with the given content and file path.
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh // Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
func GenerateWriteFileCmd(content, fullPath string) []string { func GenerateWriteFileCmd(content, fullPath string) []string {
@ -620,6 +653,19 @@ func GenerateReadFileCmd(fullPath string) []string {
return commands return commands
} }
// GenerateReadBlockCmd generates the command line that reads the first
// numberOfCharacters bytes from the block device at fullPath. Linux nodes
// use `head -c`; Windows nodes fall back to powershell's `type`, which
// reads the whole device.
func GenerateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
	if framework.NodeOSDistroIs("windows") {
		// TODO: is there a way on windows to get the first X bytes from a device?
		return []string{"powershell", "/c", "type " + fullPath}
	}
	return []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
}
// GenerateWriteandExecuteScriptFileCmd generates the corresponding command lines to write a file with the given file path // GenerateWriteandExecuteScriptFileCmd generates the corresponding command lines to write a file with the given file path
// and also execute this file. // and also execute this file.
// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh // Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh

View File

@ -151,16 +151,6 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
} }
ginkgo.It("should provision storage with defaults", func() {
init()
defer cleanup()
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
PVWriteReadSingleNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
}
l.testCase.TestDynamicProvisioning()
})
ginkgo.It("should provision storage with mount options", func() { ginkgo.It("should provision storage with mount options", func() {
if dInfo.SupportedMountOption == nil { if dInfo.SupportedMountOption == nil {
framework.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name) framework.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name)
@ -176,29 +166,6 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning()
}) })
ginkgo.It("should access volume from different nodes", func() {
init()
defer cleanup()
// The assumption is that if the test hasn't been
// locked onto a single node, then the driver is
// usable on all of them *and* supports accessing a volume
// from any node.
if l.config.ClientNodeName != "" {
framework.Skipf("Driver %q only supports testing on one node - skipping", dInfo.Name)
}
// Ensure that we actually have more than one node.
nodes := framework.GetReadySchedulableNodesOrDie(l.cs)
if len(nodes.Items) <= 1 {
framework.Skipf("need more than one node - skipping")
}
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
PVMultiNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
}
l.testCase.TestDynamicProvisioning()
})
ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() { ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
if !dInfo.Capabilities[CapDataSource] { if !dInfo.Capabilities[CapDataSource] {
framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name) framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)

View File

@ -28,7 +28,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/utils"
) )
const ( const (
@ -196,45 +195,9 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
}() }()
framework.ExpectError(err) framework.ExpectError(err)
}) })
} else {
ginkgo.It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
init()
defer cleanup()
var err error
ginkgo.By("Creating sc")
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
framework.ExpectNoError(err)
ginkgo.By("Creating pv and pvc")
l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
framework.ExpectNoError(err)
// Prebind pv
l.pvc.Spec.VolumeName = l.pv.Name
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
ginkgo.By("Creating pod")
pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
}()
framework.ExpectNoError(err)
ginkgo.By("Checking if persistent volume exists as expected volume mode")
utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
ginkgo.By("Checking if read/write to persistent volume works properly")
utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1")
})
// TODO(mkimuram): Add more tests // TODO(mkimuram): Add more tests
} }
case testpatterns.DynamicPV: case testpatterns.DynamicPV:
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow]", func() { ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow]", func() {
@ -254,45 +217,6 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectError(err) framework.ExpectError(err)
}) })
} else {
ginkgo.It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
init()
defer cleanup()
var err error
ginkgo.By("Creating sc")
l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
framework.ExpectNoError(err)
ginkgo.By("Creating pv and pvc")
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
framework.ExpectNoError(err)
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Get(l.pvc.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
l.pv, err = l.cs.CoreV1().PersistentVolumes().Get(l.pvc.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err)
ginkgo.By("Creating pod")
pod, err := framework.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
false, "", false, false, framework.SELinuxLabel,
nil, framework.NodeSelection{Name: l.config.ClientNodeName}, framework.PodStartTimeout)
defer func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
}()
framework.ExpectNoError(err)
ginkgo.By("Checking if persistent volume exists as expected volume mode")
utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
ginkgo.By("Checking if read/write to persistent volume works properly")
utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1")
})
// TODO(mkimuram): Add more tests // TODO(mkimuram): Add more tests
} }
default: default:

View File

@ -66,6 +66,9 @@ func InitVolumesTestSuite() TestSuite {
testpatterns.NtfsInlineVolume, testpatterns.NtfsInlineVolume,
testpatterns.NtfsPreprovisionedPV, testpatterns.NtfsPreprovisionedPV,
testpatterns.NtfsDynamicPV, testpatterns.NtfsDynamicPV,
// block volumes
testpatterns.BlockVolModePreprovisionedPV,
testpatterns.BlockVolModeDynamicPV,
}, },
}, },
} }
@ -78,13 +81,6 @@ func (t *volumesTestSuite) getTestSuiteInfo() TestSuiteInfo {
func (t *volumesTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) { func (t *volumesTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
} }
func skipPersistenceTest(driver TestDriver) {
dInfo := driver.GetDriverInfo()
if !dInfo.Capabilities[CapPersistence] {
framework.Skipf("Driver %q does not provide persistency - skipping", dInfo.Name)
}
}
func skipExecTest(driver TestDriver) { func skipExecTest(driver TestDriver) {
dInfo := driver.GetDriverInfo() dInfo := driver.GetDriverInfo()
if !dInfo.Capabilities[CapExec] { if !dInfo.Capabilities[CapExec] {
@ -92,6 +88,13 @@ func skipExecTest(driver TestDriver) {
} }
} }
// skipBlockTest skips the current test when the driver does not advertise
// the raw block capability (CapBlock).
func skipBlockTest(driver TestDriver) {
	info := driver.GetDriverInfo()
	if info.Capabilities[CapBlock] {
		return
	}
	framework.Skipf("Driver %q does not provide raw block - skipping", info.Name)
}
func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
type local struct { type local struct {
config *PerTestConfig config *PerTestConfig
@ -139,8 +142,11 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps) validateMigrationVolumeOpCounts(f.ClientSet, dInfo.InTreePluginName, l.intreeOps, l.migratedOps)
} }
ginkgo.It("should be mountable", func() { ginkgo.It("should store data", func() {
skipPersistenceTest(driver) if pattern.VolMode == v1.PersistentVolumeBlock {
skipBlockTest(driver)
}
init() init()
defer func() { defer func() {
volume.TestCleanup(f, convertTestConfig(l.config)) volume.TestCleanup(f, convertTestConfig(l.config))
@ -150,6 +156,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
tests := []volume.Test{ tests := []volume.Test{
{ {
Volume: *l.resource.volSource, Volume: *l.resource.volSource,
Mode: pattern.VolMode,
File: "index.html", File: "index.html",
// Must match content // Must match content
ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s", ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s",
@ -166,17 +173,24 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
// local), plugin skips setting fsGroup if volume is already mounted // local), plugin skips setting fsGroup if volume is already mounted
// and we don't have reliable way to detect volumes are unmounted or // and we don't have reliable way to detect volumes are unmounted or
// not before starting the second pod. // not before starting the second pod.
volume.InjectHTML(f.ClientSet, config, fsGroup, tests[0].Volume, tests[0].ExpectedContent) volume.InjectContent(f.ClientSet, config, fsGroup, pattern.FsType, tests)
volume.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests) if driver.GetDriverInfo().Capabilities[CapPersistence] {
volume.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests)
} else {
ginkgo.By("Skipping persistence check for non-persistent volume")
}
}) })
ginkgo.It("should allow exec of files on the volume", func() { // Exec works only on filesystem volumes
skipExecTest(driver) if pattern.VolMode != v1.PersistentVolumeBlock {
init() ginkgo.It("should allow exec of files on the volume", func() {
defer cleanup() skipExecTest(driver)
init()
defer cleanup()
testScriptInPod(f, l.resource.volType, l.resource.volSource, l.config) testScriptInPod(f, l.resource.volType, l.resource.volSource, l.config)
}) })
}
} }
func testScriptInPod( func testScriptInPod(