Merge pull request #33837 from jsafrane/e2e-configmap

Automatic merge from submit-queue (batch tested with PRs 34353, 33837, 38878)

Add e2e test for configmap volume

There are two patches:
- refactor e2e volume tests to allow multiple volumes mounted into single pod
- add a test for ConfigMap volume mounted twice to test #28502
This commit is contained in:
Kubernetes Submit Queue 2016-12-19 06:42:58 -08:00 committed by GitHub
commit a366ca3732

View File

@ -72,7 +72,15 @@ type VolumeTestConfig struct {
serverArgs []string
// Volumes needed to be mounted to the server container from the host
// map <host (source) path> -> <container (dst.) path>
volumes map[string]string
serverVolumes map[string]string
}
// VolumeTest contains a volume to mount into a client pod and its
// expected content.
type VolumeTest struct {
volume v1.VolumeSource
file string
expectedContent string
}
// Starts a container specified by config.serverImage and exports all
@ -94,12 +102,12 @@ func startVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.
}
}
volumeCount := len(config.volumes)
volumeCount := len(config.serverVolumes)
volumes := make([]v1.Volume, volumeCount)
mounts := make([]v1.VolumeMount, volumeCount)
i := 0
for src, dst := range config.volumes {
for src, dst := range config.serverVolumes {
mountName := fmt.Sprintf("path%d", i)
volumes[i].Name = mountName
volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
@ -193,8 +201,10 @@ func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
}
// Start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees the data from the server pod.
func testVolumeClient(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, fsGroup *int64, expectedContent string) {
// and check that the pod sees expected data, e.g. from the server pod.
// Multiple VolumeTests can be specified to mount multiple volumes to a single
// pod.
func testVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGroup *int64, tests []VolumeTest) {
By(fmt.Sprint("starting ", config.prefix, " client"))
clientPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
@ -219,14 +229,9 @@ func testVolumeClient(client clientset.Interface, config VolumeTestConfig, volum
Command: []string{
"/bin/sh",
"-c",
"while true ; do cat /opt/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
},
VolumeMounts: []v1.VolumeMount{
{
Name: config.prefix + "-volume",
MountPath: "/opt/",
},
"while true ; do cat /opt/0/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
},
VolumeMounts: []v1.VolumeMount{},
},
},
SecurityContext: &v1.PodSecurityContext{
@ -234,12 +239,7 @@ func testVolumeClient(client clientset.Interface, config VolumeTestConfig, volum
Level: "s0:c0,c1",
},
},
Volumes: []v1.Volume{
{
Name: config.prefix + "-volume",
VolumeSource: volume,
},
},
Volumes: []v1.Volume{},
},
}
podsNamespacer := client.Core().Pods(config.namespace)
@ -247,6 +247,18 @@ func testVolumeClient(client clientset.Interface, config VolumeTestConfig, volum
if fsGroup != nil {
clientPod.Spec.SecurityContext.FSGroup = fsGroup
}
for i, test := range tests {
volumeName := fmt.Sprintf("%s-%s-%d", config.prefix, "volume", i)
clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
Name: volumeName,
MountPath: fmt.Sprintf("/opt/%d", i),
})
clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
Name: volumeName,
VolumeSource: test.volume,
})
}
clientPod, err := podsNamespacer.Create(clientPod)
if err != nil {
framework.Failf("Failed to create %s pod: %v", clientPod.Name, err)
@ -254,13 +266,15 @@ func testVolumeClient(client clientset.Interface, config VolumeTestConfig, volum
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, clientPod))
By("Checking that text file contents are perfect.")
_, err = framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"cat", "/opt/index.html"}, expectedContent, time.Minute)
Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file.")
for i, test := range tests {
fileName := fmt.Sprintf("/opt/%d/%s", i, test.file)
_, err = framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"cat", fileName}, test.expectedContent, time.Minute)
Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file %s.", fileName)
}
if fsGroup != nil {
By("Checking fsGroup is correct.")
_, err = framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt"}, strconv.Itoa(int(*fsGroup)), time.Minute)
_, err = framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup))
}
}
@ -383,15 +397,21 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
serverIP := pod.Status.PodIP
framework.Logf("NFS server IP address: %v", serverIP)
volume := v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: true,
tests := []VolumeTest{
{
volume: v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: true,
},
},
file: "index.html",
// Must match content of test/images/volumes-tester/nfs/index.html
expectedContent: "Hello from NFS!",
},
}
// Must match content of test/images/volumes-tester/nfs/index.html
testVolumeClient(cs, config, volume, nil, "Hello from NFS!")
testVolumeClient(cs, config, nil, tests)
})
})
@ -456,16 +476,22 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
framework.Failf("Failed to create endpoints for Gluster server: %v", err)
}
volume := v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: config.prefix + "-server",
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: true,
tests := []VolumeTest{
{
volume: v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: config.prefix + "-server",
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: true,
},
},
file: "index.html",
// Must match content of test/images/volumes-tester/gluster/index.html
expectedContent: "Hello from GlusterFS!",
},
}
// Must match content of test/images/volumes-tester/gluster/index.html
testVolumeClient(cs, config, volume, nil, "Hello from GlusterFS!")
testVolumeClient(cs, config, nil, tests)
})
})
@ -485,7 +511,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
prefix: "iscsi",
serverImage: "gcr.io/google_containers/volume-iscsi:0.1",
serverPorts: []int{3260},
volumes: map[string]string{
serverVolumes: map[string]string{
// iSCSI container needs to insert modules from the host
"/lib/modules": "/lib/modules",
},
@ -500,19 +526,24 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
serverIP := pod.Status.PodIP
framework.Logf("iSCSI server IP address: %v", serverIP)
volume := v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: serverIP + ":3260",
// from test/images/volumes-tester/iscsi/initiatorname.iscsi
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
Lun: 0,
FSType: "ext2",
tests := []VolumeTest{
{
volume: v1.VolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: serverIP + ":3260",
// from test/images/volumes-tester/iscsi/initiatorname.iscsi
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
Lun: 0,
FSType: "ext2",
},
},
file: "index.html",
// Must match content of test/images/volumes-tester/iscsi/block.tar.gz
expectedContent: "Hello from iSCSI",
},
}
fsGroup := int64(1234)
// Must match content of test/images/volumes-tester/iscsi/block.tar.gz
testVolumeClient(cs, config, volume, &fsGroup, "Hello from iSCSI")
testVolumeClient(cs, config, &fsGroup, tests)
})
})
@ -527,7 +558,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
prefix: "rbd",
serverImage: "gcr.io/google_containers/volume-rbd:0.1",
serverPorts: []int{6789},
volumes: map[string]string{
serverVolumes: map[string]string{
// RBD container needs to insert modules from the host
"/lib/modules": "/lib/modules",
"/sys": "/sys",
@ -571,23 +602,27 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
framework.Failf("Failed to create secrets for Ceph RBD: %v", err)
}
volume := v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: config.prefix + "-secret",
tests := []VolumeTest{
{
volume: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: config.prefix + "-secret",
},
FSType: "ext2",
},
},
FSType: "ext2",
file: "index.html",
// Must match content of test/images/volumes-tester/rbd/create_block.sh
expectedContent: "Hello from RBD",
},
}
fsGroup := int64(1234)
// Must match content of test/images/volumes-tester/gluster/index.html
testVolumeClient(cs, config, volume, &fsGroup, "Hello from RBD")
testVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
@ -644,16 +679,22 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
volume := v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{serverIP + ":6789"},
User: "kube",
SecretRef: &v1.LocalObjectReference{Name: config.prefix + "-secret"},
ReadOnly: true,
tests := []VolumeTest{
{
volume: v1.VolumeSource{
CephFS: &v1.CephFSVolumeSource{
Monitors: []string{serverIP + ":6789"},
User: "kube",
SecretRef: &v1.LocalObjectReference{Name: config.prefix + "-secret"},
ReadOnly: true,
},
},
file: "index.html",
// Must match content of test/images/volumes-tester/ceph/index.html
expectedContent: "Hello Ceph!",
},
}
// Must match content of contrib/for-tests/volumes-ceph/ceph/index.html
testVolumeClient(cs, config, volume, nil, "Hello Ceph!")
testVolumeClient(cs, config, nil, tests)
})
})
@ -714,21 +755,27 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
volumeTestCleanup(f, config)
}
}()
volume := v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: volumeID,
FSType: "ext3",
ReadOnly: false,
tests := []VolumeTest{
{
volume: v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: volumeID,
FSType: "ext3",
ReadOnly: false,
},
},
file: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
expectedContent: "Hello from Cinder from namespace " + volumeName,
},
}
// Insert index.html into the test volume with some random content
// to make sure we don't see the content from previous test runs.
content := "Hello from Cinder from namespace " + volumeName
injectHtml(cs, config, volume, content)
injectHtml(cs, config, tests[0].volume, tests[0].expectedContent)
fsGroup := int64(1234)
testVolumeClient(cs, config, volume, &fsGroup, content)
testVolumeClient(cs, config, &fsGroup, tests)
})
})
@ -758,21 +805,105 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
volumeTestCleanup(f, config)
}
}()
volume := v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: volumeName,
FSType: "ext3",
ReadOnly: false,
tests := []VolumeTest{
{
volume: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: volumeName,
FSType: "ext3",
ReadOnly: false,
},
},
file: "index.html",
// Randomize index.html to make sure we don't see the
// content from previous test runs.
expectedContent: "Hello from GCE from namespace " + volumeName,
},
}
// Insert index.html into the test volume with some random content
// to make sure we don't see the content from previous test runs.
content := "Hello from GCE PD from namespace " + volumeName
injectHtml(cs, config, volume, content)
injectHtml(cs, config, tests[0].volume, tests[0].expectedContent)
fsGroup := int64(1234)
testVolumeClient(cs, config, volume, &fsGroup, content)
testVolumeClient(cs, config, &fsGroup, tests)
})
})
////////////////////////////////////////////////////////////////////////
// ConfigMap
////////////////////////////////////////////////////////////////////////
framework.KubeDescribe("ConfigMap", func() {
It("should be mountable", func() {
config := VolumeTestConfig{
namespace: namespace.Name,
prefix: "configmap",
}
defer func() {
if clean {
volumeTestCleanup(f, config)
}
}()
configMap := &v1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-map",
},
Data: map[string]string{
"first": "this is the first file",
"second": "this is the second file",
"third": "this is the third file",
},
}
if _, err := cs.Core().ConfigMaps(namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configmap: %v", err)
}
defer func() {
_ = cs.Core().ConfigMaps(namespace.Name).Delete(configMap.Name, nil)
}()
// Test one ConfigMap mounted several times to test #28502
tests := []VolumeTest{
{
volume: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: config.prefix + "-map",
},
Items: []v1.KeyToPath{
{
Key: "first",
Path: "firstfile",
},
},
},
},
file: "firstfile",
expectedContent: "this is the first file",
},
{
volume: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: config.prefix + "-map",
},
Items: []v1.KeyToPath{
{
Key: "second",
Path: "secondfile",
},
},
},
},
file: "secondfile",
expectedContent: "this is the second file",
},
}
testVolumeClient(cs, config, nil, tests)
})
})
})