Extract volume test utils into framework.

Moved the remaining util functions into the framework. Moved the Cinder-specific function back to volumes.go; it will have to be extracted later, when a Cinder e2e package is created. Removed duplicated code from common/volumes.go. Moved the [Volume] tags to KubeDescribe.

parent 879f8e0c9c
commit cef9edcdee
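The hunks below all follow one pattern: the per-suite helpers (startVolumeServer, volumeTestCleanup, testVolumeClient, injectHtml) become exported framework functions, and the VolumeTestConfig/VolumeTest fields are capitalized. As a sketch only — condensed from the NFS hunks in this commit, with the enclosing Ginkgo It() body, f (*framework.Framework), cs (clientset.Interface) and namespace assumed to be in scope — a caller now looks like this:

    // Post-refactor call pattern, condensed from the NFS test hunks below.
    config := framework.VolumeTestConfig{
        Namespace:   namespace.Name,
        Prefix:      "nfs",
        ServerImage: framework.NfsServerImage, // exported image constant from volume_util.go
        ServerPorts: []int{2049},
    }
    defer framework.VolumeTestCleanup(f, config) // deletes the client and server pods

    pod := framework.StartVolumeServer(cs, config) // privileged NFS server pod
    tests := []framework.VolumeTest{
        {
            Volume: v1.VolumeSource{
                NFS: &v1.NFSVolumeSource{Server: pod.Status.PodIP, Path: "/", ReadOnly: true},
            },
            File:            "index.html",
            ExpectedContent: "Hello from NFS!", // must match the server image's index.html
        },
    }
    framework.TestVolumeClient(cs, config, nil, tests) // nil fsGroup skips the fsGroup check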
@@ -51,7 +51,6 @@ go_library(
         "//vendor:github.com/onsi/ginkgo",
         "//vendor:github.com/onsi/gomega",
         "//vendor:golang.org/x/net/websocket",
-        "//vendor:k8s.io/apimachinery/pkg/api/errors",
         "//vendor:k8s.io/apimachinery/pkg/api/resource",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/fields",
@@ -40,307 +40,14 @@ limitations under the License.
 package common
 
 import (
-    "fmt"
-    "os/exec"
-    "regexp"
-    "strconv"
-    "strings"
-    "time"
-
-    apierrs "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
     "k8s.io/kubernetes/test/e2e/framework"
-
-    "github.com/golang/glog"
     . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
 )
-
-// Configuration of one tests. The test consist of:
-// - server pod - runs serverImage, exports ports[]
-// - client pod - does not need any special configuration
-type VolumeTestConfig struct {
-    namespace string
-    // Prefix of all pods. Typically the test name.
-    prefix string
-    // Name of container image for the server pod.
-    serverImage string
-    // Ports to export from the server pod. TCP only.
-    serverPorts []int
-    // Arguments to pass to the container image.
-    serverArgs []string
-    // Volumes needed to be mounted to the server container from the host
-    // map <host (source) path> -> <container (dst.) path>
-    volumes map[string]string
-}
-
-// Starts a container specified by config.serverImage and exports all
-// config.serverPorts from it. The returned pod should be used to get the server
-// IP address and create appropriate VolumeSource.
-func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *v1.Pod {
-    podClient := f.PodClient()
-
-    portCount := len(config.serverPorts)
-    serverPodPorts := make([]v1.ContainerPort, portCount)
-
-    for i := 0; i < portCount; i++ {
-        portName := fmt.Sprintf("%s-%d", config.prefix, i)
-
-        serverPodPorts[i] = v1.ContainerPort{
-            Name:          portName,
-            ContainerPort: int32(config.serverPorts[i]),
-            Protocol:      v1.ProtocolTCP,
-        }
-    }
-
-    volumeCount := len(config.volumes)
-    volumes := make([]v1.Volume, volumeCount)
-    mounts := make([]v1.VolumeMount, volumeCount)
-
-    i := 0
-    for src, dst := range config.volumes {
-        mountName := fmt.Sprintf("path%d", i)
-        volumes[i].Name = mountName
-        volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
-            Path: src,
-        }
-
-        mounts[i].Name = mountName
-        mounts[i].ReadOnly = false
-        mounts[i].MountPath = dst
-
-        i++
-    }
-
-    By(fmt.Sprint("creating ", config.prefix, " server pod"))
-    privileged := new(bool)
-    *privileged = true
-    serverPod := &v1.Pod{
-        TypeMeta: metav1.TypeMeta{
-            Kind:       "Pod",
-            APIVersion: "v1",
-        },
-        ObjectMeta: metav1.ObjectMeta{
-            Name: config.prefix + "-server",
-            Labels: map[string]string{
-                "role": config.prefix + "-server",
-            },
-        },
-
-        Spec: v1.PodSpec{
-            Containers: []v1.Container{
-                {
-                    Name:  config.prefix + "-server",
-                    Image: config.serverImage,
-                    SecurityContext: &v1.SecurityContext{
-                        Privileged: privileged,
-                    },
-                    Args:         config.serverArgs,
-                    Ports:        serverPodPorts,
-                    VolumeMounts: mounts,
-                },
-            },
-            Volumes: volumes,
-        },
-    }
-    serverPod = podClient.CreateSync(serverPod)
-
-    By("locating the server pod")
-    pod, err := podClient.Get(serverPod.Name, metav1.GetOptions{})
-    framework.ExpectNoError(err, "Cannot locate the server pod %v: %v", serverPod.Name, err)
-
-    By("sleeping a bit to give the server time to start")
-    time.Sleep(20 * time.Second)
-    return pod
-}
-
-// Clean both server and client pods.
-func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
-    By(fmt.Sprint("cleaning the environment after ", config.prefix))
-
-    defer GinkgoRecover()
-
-    podClient := f.PodClient()
-
-    err := podClient.Delete(config.prefix+"-client", nil)
-    if err != nil {
-        // Log the error before failing test: if the test has already failed,
-        // framework.ExpectNoError() won't print anything to logs!
-        glog.Warningf("Failed to delete client pod: %v", err)
-        framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
-    }
-
-    if config.serverImage != "" {
-        if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
-            framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
-        }
-        // See issue #24100.
-        // Prevent umount errors by making sure making sure the client pod exits cleanly *before* the volume server pod exits.
-        By("sleeping a bit so client can stop and unmount")
-        time.Sleep(20 * time.Second)
-
-        err = podClient.Delete(config.prefix+"-server", nil)
-        if err != nil {
-            glog.Warningf("Failed to delete server pod: %v", err)
-            framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
-        }
-    }
-}
-
-// Start a client pod using given VolumeSource (exported by startVolumeServer())
-// and check that the pod sees the data from the server pod.
-func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume v1.VolumeSource, fsGroup *int64, expectedContent string) {
-    By(fmt.Sprint("starting ", config.prefix, " client"))
-    clientPod := &v1.Pod{
-        TypeMeta: metav1.TypeMeta{
-            Kind:       "Pod",
-            APIVersion: "v1",
-        },
-        ObjectMeta: metav1.ObjectMeta{
-            Name: config.prefix + "-client",
-            Labels: map[string]string{
-                "role": config.prefix + "-client",
-            },
-        },
-        Spec: v1.PodSpec{
-            Containers: []v1.Container{
-                {
-                    Name:       config.prefix + "-client",
-                    Image:      "gcr.io/google_containers/busybox:1.24",
-                    WorkingDir: "/opt",
-                    // An imperative and easily debuggable container which reads vol contents for
-                    // us to scan in the tests or by eye.
-                    // We expect that /opt is empty in the minimal containers which we use in this test.
-                    Command: []string{
-                        "/bin/sh",
-                        "-c",
-                        "while true ; do cat /opt/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
-                    },
-                    VolumeMounts: []v1.VolumeMount{
-                        {
-                            Name:      config.prefix + "-volume",
-                            MountPath: "/opt/",
-                        },
-                    },
-                },
-            },
-            SecurityContext: &v1.PodSecurityContext{
-                SELinuxOptions: &v1.SELinuxOptions{
-                    Level: "s0:c0,c1",
-                },
-            },
-            Volumes: []v1.Volume{
-                {
-                    Name:         config.prefix + "-volume",
-                    VolumeSource: volume,
-                },
-            },
-        },
-    }
-    podClient := f.PodClient()
-
-    if fsGroup != nil {
-        clientPod.Spec.SecurityContext.FSGroup = fsGroup
-    }
-    clientPod = podClient.CreateSync(clientPod)
-
-    By("Checking that text file contents are perfect.")
-    result := f.ExecCommandInPod(clientPod.Name, "cat", "/opt/index.html")
-    var err error
-    if !strings.Contains(result, expectedContent) {
-        err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedContent, result)
-    }
-    Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file.")
-
-    if fsGroup != nil {
-
-        By("Checking fsGroup is correct.")
-        _, err := framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt"}, strconv.Itoa(int(*fsGroup)), time.Minute)
-        Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup))
-    }
-}
-
-// Insert index.html with given content into given volume. It does so by
-// starting and auxiliary pod which writes the file there.
-// The volume must be writable.
-func injectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
-    By(fmt.Sprint("starting ", config.prefix, " injector"))
-    podClient := client.Core().Pods(config.namespace)
-
-    injectPod := &v1.Pod{
-        TypeMeta: metav1.TypeMeta{
-            Kind:       "Pod",
-            APIVersion: "v1",
-        },
-        ObjectMeta: metav1.ObjectMeta{
-            Name: config.prefix + "-injector",
-            Labels: map[string]string{
-                "role": config.prefix + "-injector",
-            },
-        },
-        Spec: v1.PodSpec{
-            Containers: []v1.Container{
-                {
-                    Name:    config.prefix + "-injector",
-                    Image:   "gcr.io/google_containers/busybox:1.24",
-                    Command: []string{"/bin/sh"},
-                    Args:    []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
-                    VolumeMounts: []v1.VolumeMount{
-                        {
-                            Name:      config.prefix + "-volume",
-                            MountPath: "/mnt",
-                        },
-                    },
-                },
-            },
-            SecurityContext: &v1.PodSecurityContext{
-                SELinuxOptions: &v1.SELinuxOptions{
-                    Level: "s0:c0,c1",
-                },
-            },
-            RestartPolicy: v1.RestartPolicyNever,
-            Volumes: []v1.Volume{
-                {
-                    Name:         config.prefix + "-volume",
-                    VolumeSource: volume,
-                },
-            },
-        },
-    }
-
-    defer func() {
-        podClient.Delete(config.prefix+"-injector", nil)
-    }()
-
-    injectPod, err := podClient.Create(injectPod)
-    framework.ExpectNoError(err, "Failed to create injector pod: %v", err)
-    err = framework.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
-    Expect(err).NotTo(HaveOccurred())
-}
-
-func deleteCinderVolume(name string) error {
-    // Try to delete the volume for several seconds - it takes
-    // a while for the plugin to detach it.
-    var output []byte
-    var err error
-    timeout := time.Second * 120
-
-    framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
-    for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
-        output, err = exec.Command("cinder", "delete", name).CombinedOutput()
-        if err == nil {
-            framework.Logf("Cinder volume %s deleted", name)
-            return nil
-        } else {
-            framework.Logf("Failed to delete volume %s: %v", name, err)
-        }
-    }
-    framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
-    return err
-}
-
 // These tests need privileged containers, which are disabled by default. Run
 // the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
 var _ = framework.KubeDescribe("GCP Volumes", func() {
@@ -351,12 +58,11 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
     clean := true
     // filled in BeforeEach
     var namespace *v1.Namespace
+    var c clientset.Interface
 
     BeforeEach(func() {
-        if !isTestEnabled(f.ClientSet) {
-            framework.Skipf("NFS tests are not supported for this distro")
-        }
         namespace = f.Namespace
+        c = f.ClientSet
     })
 
     ////////////////////////////////////////////////////////////////////////
@@ -365,61 +71,75 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
 
     framework.KubeDescribe("NFSv4", func() {
         It("should be mountable for NFSv4 [Volume]", func() {
-            config := VolumeTestConfig{
-                namespace:   namespace.Name,
-                prefix:      "nfs",
-                serverImage: "gcr.io/google_containers/volume-nfs:0.8",
-                serverPorts: []int{2049},
+            config := framework.VolumeTestConfig{
+                Namespace:   namespace.Name,
+                Prefix:      "nfs",
+                ServerImage: framework.NfsServerImage,
+                ServerPorts: []int{2049},
             }
 
             defer func() {
                 if clean {
-                    volumeTestCleanup(f, config)
+                    framework.VolumeTestCleanup(f, config)
                 }
             }()
-            pod := startVolumeServer(f, config)
+            pod := framework.StartVolumeServer(c, config)
             serverIP := pod.Status.PodIP
             framework.Logf("NFS server IP address: %v", serverIP)
 
-            volume := v1.VolumeSource{
-                NFS: &v1.NFSVolumeSource{
-                    Server:   serverIP,
-                    Path:     "/",
-                    ReadOnly: true,
+            tests := []framework.VolumeTest{
+                {
+                    Volume: v1.VolumeSource{
+                        NFS: &v1.NFSVolumeSource{
+                            Server:   serverIP,
+                            Path:     "/",
+                            ReadOnly: true,
+                        },
+                    },
+                    File:            "index.html",
+                    ExpectedContent: "Hello from NFS!",
                 },
             }
 
             // Must match content of test/images/volumes-tester/nfs/index.html
-            testVolumeClient(f, config, volume, nil, "Hello from NFS!")
+            framework.TestVolumeClient(c, config, nil, tests)
         })
     })
 
     framework.KubeDescribe("NFSv3", func() {
         It("should be mountable for NFSv3 [Volume]", func() {
-            config := VolumeTestConfig{
-                namespace:   namespace.Name,
-                prefix:      "nfs",
-                serverImage: "gcr.io/google_containers/volume-nfs:0.8",
-                serverPorts: []int{2049},
+            config := framework.VolumeTestConfig{
+                Namespace:   namespace.Name,
+                Prefix:      "nfs",
+                ServerImage: framework.NfsServerImage,
+                ServerPorts: []int{2049},
             }
 
             defer func() {
                 if clean {
-                    volumeTestCleanup(f, config)
+                    framework.VolumeTestCleanup(f, config)
                 }
             }()
-            pod := startVolumeServer(f, config)
+            pod := framework.StartVolumeServer(c, config)
             serverIP := pod.Status.PodIP
             framework.Logf("NFS server IP address: %v", serverIP)
 
-            volume := v1.VolumeSource{
-                NFS: &v1.NFSVolumeSource{
-                    Server:   serverIP,
-                    Path:     "/exports",
-                    ReadOnly: true,
+            tests := []framework.VolumeTest{
+                {
+                    Volume: v1.VolumeSource{
+                        NFS: &v1.NFSVolumeSource{
+                            Server:   serverIP,
+                            Path:     "/",
+                            ReadOnly: true,
+                        },
+                    },
+                    File:            "index.html",
+                    ExpectedContent: "Hello from NFS!",
                 },
             }
             // Must match content of test/images/volume-tester/nfs/index.html
-            testVolumeClient(f, config, volume, nil, "Hello from NFS!")
+            framework.TestVolumeClient(c, config, nil, tests)
         })
     })
 
@@ -429,19 +149,19 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
 
     framework.KubeDescribe("GlusterFS", func() {
         It("should be mountable [Volume]", func() {
-            config := VolumeTestConfig{
-                namespace:   namespace.Name,
-                prefix:      "gluster",
-                serverImage: "gcr.io/google_containers/volume-gluster:0.2",
-                serverPorts: []int{24007, 24008, 49152},
+            config := framework.VolumeTestConfig{
+                Namespace:   namespace.Name,
+                Prefix:      "gluster",
+                ServerImage: framework.GlusterfsServerImage,
+                ServerPorts: []int{24007, 24008, 49152},
             }
 
             defer func() {
                 if clean {
-                    volumeTestCleanup(f, config)
+                    framework.VolumeTestCleanup(f, config)
                 }
             }()
-            pod := startVolumeServer(f, config)
+            pod := framework.StartVolumeServer(c, config)
             serverIP := pod.Status.PodIP
             framework.Logf("Gluster server IP address: %v", serverIP)
 
@@ -452,7 +172,7 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
                 APIVersion: "v1",
                 },
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: config.prefix + "-server",
+                    Name: config.Prefix + "-server",
                 },
                 Subsets: []v1.EndpointSubset{
                     {
@@ -472,11 +192,11 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
                     },
                 }
 
-            endClient := f.ClientSet.Core().Endpoints(config.namespace)
+            endClient := f.ClientSet.CoreV1().Endpoints(config.Namespace)
 
             defer func() {
                 if clean {
-                    endClient.Delete(config.prefix+"-server", nil)
+                    endClient.Delete(config.Prefix+"-server", nil)
                 }
             }()
 
@@ -484,46 +204,22 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
                framework.Failf("Failed to create endpoints for Gluster server: %v", err)
             }
 
-            volume := v1.VolumeSource{
-                Glusterfs: &v1.GlusterfsVolumeSource{
-                    EndpointsName: config.prefix + "-server",
-                    // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
-                    Path:     "test_vol",
-                    ReadOnly: true,
+            tests := []framework.VolumeTest{
+                {
+                    Volume: v1.VolumeSource{
+                        Glusterfs: &v1.GlusterfsVolumeSource{
+                            EndpointsName: config.Prefix + "-server",
+                            // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
+                            Path:     "test_vol",
+                            ReadOnly: true,
+                        },
+                    },
+                    File: "index.html",
+                    // Must match content of test/images/volumes-tester/gluster/index.html
+                    ExpectedContent: "Hello from GlusterFS!",
                 },
             }
-            // Must match content of test/images/volumes-tester/gluster/index.html
-            testVolumeClient(f, config, volume, nil, "Hello from GlusterFS!")
+            framework.TestVolumeClient(c, config, nil, tests)
         })
     })
 })
 
-func isTestEnabled(c clientset.Interface) bool {
-    // Enable the test on node e2e if the node image is GCI.
-    nodeName := framework.TestContext.NodeName
-    if nodeName != "" {
-        if strings.Contains(nodeName, "-gci-dev-") {
-            gciVersionRe := regexp.MustCompile("-gci-dev-([0-9]+)-")
-            matches := gciVersionRe.FindStringSubmatch(framework.TestContext.NodeName)
-            if len(matches) == 2 {
-                version, err := strconv.Atoi(matches[1])
-                if err != nil {
-                    glog.Errorf("Error parsing GCI version from NodeName %q: %v", nodeName, err)
-                    return false
-                }
-                return version >= 54
-            }
-        }
-        return false
-    }
-
-    // For cluster e2e test, because nodeName is empty, retrieve the node objects from api server
-    // and check their images. Only run NFS and GlusterFS tests if nodes are using GCI image for now.
-    nodes := framework.GetReadySchedulableNodesOrDie(c)
-    for _, node := range nodes.Items {
-        if !strings.Contains(node.Status.NodeInfo.OSImage, "Container-Optimized OS") {
-            return false
-        }
-    }
-    return true
-}
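Context for the GlusterFS hunks above: GlusterfsVolumeSource takes no server IP, so the test creates an Endpoints object by hand and references it through EndpointsName. The page truncates the Subsets block, so the following sketch is a reconstruction under assumptions — only the object name, the endClient calls, and the Failf line appear in the hunks; the address/port values are illustrative (24007 is glusterd's port from ServerPorts):

    // Illustrative reconstruction of the endpoints the Gluster test creates.
    // Name, namespace, and error handling come from the hunks above; the
    // Addresses and Ports values are assumptions, since the diff elides them.
    endClient := f.ClientSet.CoreV1().Endpoints(config.Namespace)
    endpoints := &v1.Endpoints{
        TypeMeta:   metav1.TypeMeta{Kind: "Endpoints", APIVersion: "v1"},
        ObjectMeta: metav1.ObjectMeta{Name: config.Prefix + "-server"},
        Subsets: []v1.EndpointSubset{
            {
                Addresses: []v1.EndpointAddress{{IP: serverIP}}, // Gluster server pod IP
                Ports:     []v1.EndpointPort{{Name: "gluster", Port: 24007, Protocol: v1.ProtocolTCP}},
            },
        },
    }
    if _, err := endClient.Create(endpoints); err != nil {
        framework.Failf("Failed to create endpoints for Gluster server: %v", err)
    }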
@@ -35,6 +35,7 @@ go_library(
         "test_context.go",
         "upgrade_util.go",
         "util.go",
+        "volume_util.go",
     ],
     tags = ["automanaged"],
     deps = [
358 test/e2e/framework/volume_util.go (new file)
@@ -0,0 +1,358 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+ * This test checks that various VolumeSources are working.
+ *
+ * There are two ways to test the volumes:
+ * 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
+ * The test creates a server pod, exporting simple 'index.html' file.
+ * Then it uses appropriate VolumeSource to import this file into a client pod
+ * and checks that the pod can see the file. It does so by importing the file
+ * into web server root and loading the index.html from it.
+ *
+ * These tests work only when privileged containers are allowed, exporting
+ * various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
+ * other privileged magic in the server pod.
+ *
+ * Note that the server containers are for testing purposes only and should not
+ * be used in production.
+ *
+ * 2) With server outside of Kubernetes (Cinder, ...)
+ * Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
+ * the tested Kubernetes cluster. The test itself creates a new volume,
+ * and checks, that Kubernetes can use it as a volume.
+ */
+
+package framework
+
+import (
+    "fmt"
+    "strconv"
+    "time"
+
+    apierrs "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/kubernetes/pkg/api/v1"
+    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+
+    "github.com/golang/glog"
+    . "github.com/onsi/ginkgo"
+    . "github.com/onsi/gomega"
+)
+
+// Current supported images for e2e volume testing to be assigned to VolumeTestConfig.ServerImage
+const (
+    NfsServerImage       string = "gcr.io/google_containers/volume-nfs:0.8"
+    IscsiServerImage     string = "gcr.io/google_containers/volume-iscsi:0.1"
+    GlusterfsServerImage string = "gcr.io/google_containers/volume-gluster:0.2"
+    CephServerImage      string = "gcr.io/google_containers/volume-ceph:0.1"
+    RbdServerImage       string = "gcr.io/google_containers/volume-rbd:0.1"
+    BusyBoxImage         string = "gcr.io/google_containers/busybox:1.24"
+)
+
+// Configuration of one test. The test consists of:
+// - server pod - runs serverImage, exports ports[]
+// - client pod - does not need any special configuration
+type VolumeTestConfig struct {
+    Namespace string
+    // Prefix of all pods. Typically the test name.
+    Prefix string
+    // Name of container image for the server pod.
+    ServerImage string
+    // Ports to export from the server pod. TCP only.
+    ServerPorts []int
+    // Arguments to pass to the container image.
+    ServerArgs []string
+    // Volumes needed to be mounted to the server container from the host
+    // map <host (source) path> -> <container (dst.) path>
+    ServerVolumes map[string]string
+}
+
+// VolumeTest contains a volume to mount into a client pod and its
+// expected content.
+type VolumeTest struct {
+    Volume          v1.VolumeSource
+    File            string
+    ExpectedContent string
+}
+
+// Starts a container specified by config.ServerImage and exports all
+// config.ServerPorts from it. The returned pod should be used to get the server
+// IP address and create appropriate VolumeSource.
+func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
+    podClient := client.CoreV1().Pods(config.Namespace)
+
+    portCount := len(config.ServerPorts)
+    serverPodPorts := make([]v1.ContainerPort, portCount)
+
+    for i := 0; i < portCount; i++ {
+        portName := fmt.Sprintf("%s-%d", config.Prefix, i)
+
+        serverPodPorts[i] = v1.ContainerPort{
+            Name:          portName,
+            ContainerPort: int32(config.ServerPorts[i]),
+            Protocol:      v1.ProtocolTCP,
+        }
+    }
+
+    volumeCount := len(config.ServerVolumes)
+    volumes := make([]v1.Volume, volumeCount)
+    mounts := make([]v1.VolumeMount, volumeCount)
+
+    i := 0
+    for src, dst := range config.ServerVolumes {
+        mountName := fmt.Sprintf("path%d", i)
+        volumes[i].Name = mountName
+        volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
+            Path: src,
+        }
+
+        mounts[i].Name = mountName
+        mounts[i].ReadOnly = false
+        mounts[i].MountPath = dst
+
+        i++
+    }
+
+    serverPodName := fmt.Sprintf("%s-server", config.Prefix)
+    By(fmt.Sprint("creating ", serverPodName, " pod"))
+    privileged := new(bool)
+    *privileged = true
+    serverPod := &v1.Pod{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "Pod",
+            APIVersion: "v1",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Name: serverPodName,
+            Labels: map[string]string{
+                "role": serverPodName,
+            },
+        },
+
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{
+                {
+                    Name:  serverPodName,
+                    Image: config.ServerImage,
+                    SecurityContext: &v1.SecurityContext{
+                        Privileged: privileged,
+                    },
+                    Args:         config.ServerArgs,
+                    Ports:        serverPodPorts,
+                    VolumeMounts: mounts,
+                },
+            },
+            Volumes: volumes,
+        },
+    }
+
+    var pod *v1.Pod
+    serverPod, err := podClient.Create(serverPod)
+    // ok if the server pod already exists. TODO: make this controllable by callers
+    if err != nil {
+        if apierrs.IsAlreadyExists(err) {
+            Logf("Ignore \"already-exists\" error, re-get pod...")
+            By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
+            serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{})
+            ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
+            pod = serverPod
+        } else {
+            ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
+        }
+    }
+    ExpectNoError(WaitForPodRunningInNamespace(client, serverPod))
+
+    if pod == nil {
+        By(fmt.Sprintf("locating the %q server pod", serverPodName))
+        pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
+        ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
+    }
+
+    return pod
+}
+
+// Clean both server and client pods.
+func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
+    By(fmt.Sprint("cleaning the environment after ", config.Prefix))
+
+    defer GinkgoRecover()
+
+    client := f.ClientSet
+    podClient := client.CoreV1().Pods(config.Namespace)
+
+    err := podClient.Delete(config.Prefix+"-client", nil)
+    if err != nil {
+        // Log the error before failing test: if the test has already failed,
+        // framework.ExpectNoError() won't print anything to logs!
+        glog.Warningf("Failed to delete client pod: %v", err)
+        ExpectNoError(err, "Failed to delete client pod: %v", err)
+    }
+
+    if config.ServerImage != "" {
+        if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
+            ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
+        }
+        // See issue #24100.
+        // Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
+        By("sleeping a bit so client can stop and unmount")
+        time.Sleep(20 * time.Second)
+
+        err = podClient.Delete(config.Prefix+"-server", nil)
+        if err != nil {
+            glog.Warningf("Failed to delete server pod: %v", err)
+            ExpectNoError(err, "Failed to delete server pod: %v", err)
+        }
+    }
+}
+
+// Start a client pod using given VolumeSource (exported by StartVolumeServer())
+// and check that the pod sees expected data, e.g. from the server pod.
+// Multiple VolumeTests can be specified to mount multiple volumes to a single
+// pod.
+func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGroup *int64, tests []VolumeTest) {
+    By(fmt.Sprint("starting ", config.Prefix, " client"))
+    clientPod := &v1.Pod{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "Pod",
+            APIVersion: "v1",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Name: config.Prefix + "-client",
+            Labels: map[string]string{
+                "role": config.Prefix + "-client",
+            },
+        },
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{
+                {
+                    Name:       config.Prefix + "-client",
+                    Image:      BusyBoxImage,
+                    WorkingDir: "/opt",
+                    // An imperative and easily debuggable container which reads vol contents for
+                    // us to scan in the tests or by eye.
+                    // We expect that /opt is empty in the minimal containers which we use in this test.
+                    Command: []string{
+                        "/bin/sh",
+                        "-c",
+                        "while true ; do cat /opt/0/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
+                    },
+                    VolumeMounts: []v1.VolumeMount{},
+                },
+            },
+            SecurityContext: &v1.PodSecurityContext{
+                SELinuxOptions: &v1.SELinuxOptions{
+                    Level: "s0:c0,c1",
+                },
+            },
+            Volumes: []v1.Volume{},
+        },
+    }
+    podsNamespacer := client.CoreV1().Pods(config.Namespace)
+
+    if fsGroup != nil {
+        clientPod.Spec.SecurityContext.FSGroup = fsGroup
+    }
+
+    for i, test := range tests {
+        volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
+        clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
+            Name:      volumeName,
+            MountPath: fmt.Sprintf("/opt/%d", i),
+        })
+        clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
+            Name:         volumeName,
+            VolumeSource: test.Volume,
+        })
+    }
+    clientPod, err := podsNamespacer.Create(clientPod)
+    if err != nil {
+        Failf("Failed to create %s pod: %v", clientPod.Name, err)
+    }
+    ExpectNoError(WaitForPodRunningInNamespace(client, clientPod))
+
+    By("Checking that text file contents are perfect.")
+    for i, test := range tests {
+        fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
+        _, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"cat", fileName}, test.ExpectedContent, time.Minute)
+        Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file %s.", fileName)
+    }
+
+    if fsGroup != nil {
+        By("Checking fsGroup is correct.")
+        _, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
+        Expect(err).NotTo(HaveOccurred(), "failed: getting the right privileges in the file %v", int(*fsGroup))
+    }
+}
+
+// Insert index.html with given content into given volume. It does so by
+// starting an auxiliary pod which writes the file there.
+// The volume must be writable.
+func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
+    By(fmt.Sprint("starting ", config.Prefix, " injector"))
+    podClient := client.CoreV1().Pods(config.Namespace)
+
+    injectPod := &v1.Pod{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "Pod",
+            APIVersion: "v1",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Name: config.Prefix + "-injector",
+            Labels: map[string]string{
+                "role": config.Prefix + "-injector",
+            },
+        },
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{
+                {
+                    Name:    config.Prefix + "-injector",
+                    Image:   "gcr.io/google_containers/busybox:1.24",
+                    Command: []string{"/bin/sh"},
+                    Args:    []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
+                    VolumeMounts: []v1.VolumeMount{
+                        {
+                            Name:      config.Prefix + "-volume",
+                            MountPath: "/mnt",
+                        },
+                    },
+                },
+            },
+            SecurityContext: &v1.PodSecurityContext{
+                SELinuxOptions: &v1.SELinuxOptions{
+                    Level: "s0:c0,c1",
+                },
+            },
+            RestartPolicy: v1.RestartPolicyNever,
+            Volumes: []v1.Volume{
+                {
+                    Name:         config.Prefix + "-volume",
+                    VolumeSource: volume,
+                },
+            },
+        },
+    }
+
+    defer func() {
+        podClient.Delete(config.Prefix+"-injector", nil)
+    }()
+
+    injectPod, err := podClient.Create(injectPod)
+    ExpectNoError(err, "Failed to create injector pod: %v", err)
+    err = WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
+    Expect(err).NotTo(HaveOccurred())
+}
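The exported TestVolumeClient changes the mount convention relative to the old single-volume helper: each VolumeTest i is mounted at /opt/<i> and verified by reading /opt/<i>/<File> and comparing against ExpectedContent, so one client pod can exercise several volumes at once. A sketch only — nfsSource and glusterSource are assumed, pre-built v1.VolumeSource values like those in the tests elsewhere in this commit:

    // Sketch of the new multi-volume convention: entry i is mounted at /opt/<i>.
    // nfsSource and glusterSource are assumed, pre-built v1.VolumeSource values.
    tests := []framework.VolumeTest{
        {Volume: nfsSource, File: "index.html", ExpectedContent: "Hello from NFS!"},           // checked at /opt/0/index.html
        {Volume: glusterSource, File: "index.html", ExpectedContent: "Hello from GlusterFS!"}, // checked at /opt/1/index.html
    }
    framework.TestVolumeClient(cs, config, nil /*fsGroup*/, tests)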
@@ -134,9 +134,9 @@ func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRem
 // ip address.
 // Note: startVolumeServer() waits for the nfs-server pod to be Running and sleeps some
 // so that the nfs server can start up.
-func createNfsServerPod(c clientset.Interface, config VolumeTestConfig) (*v1.Pod, string) {
+func createNfsServerPod(c clientset.Interface, config framework.VolumeTestConfig) (*v1.Pod, string) {
 
-    pod := startVolumeServer(c, config)
+    pod := framework.StartVolumeServer(c, config)
     Expect(pod).NotTo(BeNil())
     ip := pod.Status.PodIP
     Expect(len(ip)).NotTo(BeZero())
@@ -387,7 +387,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
     var (
         nfsServerPod *v1.Pod
         nfsIP        string
-        NFSconfig    VolumeTestConfig
+        NFSconfig    framework.VolumeTestConfig
        pod          *v1.Pod // client pod
     )
 
@@ -404,12 +404,12 @@ var _ = framework.KubeDescribe("kubelet", func() {
     }
 
     BeforeEach(func() {
-        NFSconfig = VolumeTestConfig{
-            namespace:   ns,
-            prefix:      "nfs",
-            serverImage: NfsServerImage,
-            serverPorts: []int{2049},
-            serverArgs:  []string{"-G", "777", "/exports"},
+        NFSconfig = framework.VolumeTestConfig{
+            Namespace:   ns,
+            Prefix:      "nfs",
+            ServerImage: framework.NfsServerImage,
+            ServerPorts: []int{2049},
+            ServerArgs:  []string{"-G", "777", "/exports"},
         }
         nfsServerPod, nfsIP = createNfsServerPod(c, NFSconfig)
     })
@@ -14,10 +14,6 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-
-// This test references
-// persistent_volumes.go
-// volumes.go
 
 package e2e
 
 import (
@@ -77,12 +77,12 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
 // initNFSserverPod wraps volumes.go's startVolumeServer to return a running nfs host pod
 // commonly used by persistent volume testing
 func initNFSserverPod(c clientset.Interface, ns string) *v1.Pod {
-    return startVolumeServer(c, VolumeTestConfig{
-        namespace:   ns,
-        prefix:      "nfs",
-        serverImage: NfsServerImage,
-        serverPorts: []int{2049},
-        serverArgs:  []string{"-G", "777", "/exports"},
+    return framework.StartVolumeServer(c, framework.VolumeTestConfig{
+        Namespace:   ns,
+        Prefix:      "nfs",
+        ServerImage: framework.NfsServerImage,
+        ServerPorts: []int{2049},
+        ServerArgs:  []string{"-G", "777", "/exports"},
     })
 }
 
@ -40,327 +40,21 @@ limitations under the License.
|
|||||||
package e2e
|
package e2e
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/kubernetes/pkg/api/v1"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||||
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
|
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
|
||||||
"k8s.io/kubernetes/test/e2e/framework"
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
|
||||||
. "github.com/onsi/ginkgo"
|
. "github.com/onsi/ginkgo"
|
||||||
. "github.com/onsi/gomega"
|
. "github.com/onsi/gomega"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Configuration of one tests. The test consist of:
|
func DeleteCinderVolume(name string) error {
|
||||||
// - server pod - runs serverImage, exports ports[]
|
|
||||||
// - client pod - does not need any special configuration
|
|
||||||
type VolumeTestConfig struct {
|
|
||||||
namespace string
|
|
||||||
// Prefix of all pods. Typically the test name.
|
|
||||||
prefix string
|
|
||||||
// Name of container image for the server pod.
|
|
||||||
serverImage string
|
|
||||||
// Ports to export from the server pod. TCP only.
|
|
||||||
serverPorts []int
|
|
||||||
// Arguments to pass to the container image.
|
|
||||||
serverArgs []string
|
|
||||||
// Volumes needed to be mounted to the server container from the host
|
|
||||||
// map <host (source) path> -> <container (dst.) path>
|
|
||||||
serverVolumes map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// VolumeTest contains a volumes to mount into a client pod and its
|
|
||||||
// expected content.
|
|
||||||
type VolumeTest struct {
|
|
||||||
volume v1.VolumeSource
|
|
||||||
file string
|
|
||||||
expectedContent string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Current supported images for e2e volume testing to be assigned to VolumeTestConfig.serverImage
|
|
||||||
const (
|
|
||||||
NfsServerImage string = "gcr.io/google_containers/volume-nfs:0.8"
|
|
||||||
IscsiServerImage string = "gcr.io/google_containers/volume-iscsi:0.1"
|
|
||||||
GlusterfsServerImage string = "gcr.io/google_containers/volume-gluster:0.2"
|
|
||||||
CephServerImage string = "gcr.io/google_containers/volume-ceph:0.1"
|
|
||||||
RbdServerImage string = "gcr.io/google_containers/volume-rbd:0.1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Starts a container specified by config.serverImage and exports all
|
|
||||||
// config.serverPorts from it. The returned pod should be used to get the server
|
|
||||||
// IP address and create appropriate VolumeSource.
|
|
||||||
func startVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
|
|
||||||
podClient := client.Core().Pods(config.namespace)
|
|
||||||
|
|
||||||
portCount := len(config.serverPorts)
|
|
||||||
serverPodPorts := make([]v1.ContainerPort, portCount)
|
|
||||||
|
|
||||||
for i := 0; i < portCount; i++ {
|
|
||||||
portName := fmt.Sprintf("%s-%d", config.prefix, i)
|
|
||||||
|
|
||||||
serverPodPorts[i] = v1.ContainerPort{
|
|
||||||
Name: portName,
|
|
||||||
ContainerPort: int32(config.serverPorts[i]),
|
|
||||||
Protocol: v1.ProtocolTCP,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
volumeCount := len(config.serverVolumes)
|
|
||||||
volumes := make([]v1.Volume, volumeCount)
|
|
||||||
mounts := make([]v1.VolumeMount, volumeCount)
|
|
||||||
|
|
||||||
i := 0
|
|
||||||
for src, dst := range config.serverVolumes {
|
|
||||||
mountName := fmt.Sprintf("path%d", i)
|
|
||||||
volumes[i].Name = mountName
|
|
||||||
volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
|
|
||||||
Path: src,
|
|
||||||
}
|
|
||||||
|
|
||||||
mounts[i].Name = mountName
|
|
||||||
mounts[i].ReadOnly = false
|
|
||||||
mounts[i].MountPath = dst
|
|
||||||
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
|
|
||||||
serverPodName := fmt.Sprintf("%s-server", config.prefix)
|
|
||||||
By(fmt.Sprint("creating ", serverPodName, " pod"))
|
|
||||||
privileged := new(bool)
|
|
||||||
*privileged = true
|
|
||||||
serverPod := &v1.Pod{
|
|
||||||
TypeMeta: metav1.TypeMeta{
|
|
||||||
Kind: "Pod",
|
|
||||||
APIVersion: "v1",
|
|
||||||
},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: serverPodName,
|
|
||||||
Labels: map[string]string{
|
|
||||||
"role": serverPodName,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
|
|
||||||
Spec: v1.PodSpec{
|
|
||||||
Containers: []v1.Container{
|
|
||||||
{
|
|
||||||
Name: serverPodName,
|
|
||||||
Image: config.serverImage,
|
|
||||||
SecurityContext: &v1.SecurityContext{
|
|
||||||
Privileged: privileged,
|
|
||||||
},
|
|
||||||
Args: config.serverArgs,
|
|
||||||
Ports: serverPodPorts,
|
|
||||||
VolumeMounts: mounts,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Volumes: volumes,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
var pod *v1.Pod
|
|
||||||
serverPod, err := podClient.Create(serverPod)
|
|
||||||
// ok if the server pod already exists. TODO: make this controllable by callers
|
|
||||||
if err != nil {
|
|
||||||
if apierrs.IsAlreadyExists(err) {
|
|
||||||
framework.Logf("Ignore \"already-exists\" error, re-get pod...")
|
|
||||||
By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
|
|
||||||
serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{})
|
|
||||||
framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
|
|
||||||
pod = serverPod
|
|
||||||
} else {
|
|
||||||
framework.ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, serverPod))
|
|
||||||
|
|
||||||
if pod == nil {
|
|
||||||
By(fmt.Sprintf("locating the %q server pod", serverPodName))
|
|
||||||
pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
|
|
||||||
framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return pod
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean both server and client pods.
|
|
||||||
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
|
|
||||||
By(fmt.Sprint("cleaning the environment after ", config.prefix))
|
|
||||||
|
|
||||||
defer GinkgoRecover()
|
|
||||||
|
|
||||||
client := f.ClientSet
|
|
||||||
podClient := client.Core().Pods(config.namespace)
|
|
||||||
|
|
||||||
err := podClient.Delete(config.prefix+"-client", nil)
|
|
||||||
if err != nil {
|
|
||||||
// Log the error before failing test: if the test has already failed,
|
|
||||||
// framework.ExpectNoError() won't print anything to logs!
|
|
||||||
glog.Warningf("Failed to delete client pod: %v", err)
|
|
||||||
framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if config.serverImage != "" {
|
|
||||||
if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
|
|
||||||
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
|
|
||||||
}
|
|
||||||
// See issue #24100.
|
|
||||||
// Prevent umount errors by making sure making sure the client pod exits cleanly *before* the volume server pod exits.
|
|
||||||
By("sleeping a bit so client can stop and unmount")
|
|
||||||
time.Sleep(20 * time.Second)
|
|
||||||
|
|
||||||
err = podClient.Delete(config.prefix+"-server", nil)
|
|
||||||
if err != nil {
|
|
||||||
glog.Warningf("Failed to delete server pod: %v", err)
|
|
||||||
framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start a client pod using given VolumeSource (exported by startVolumeServer())
|
|
||||||
// and check that the pod sees expected data, e.g. from the server pod.
|
|
||||||
// Multiple VolumeTests can be specified to mount multiple volumes to a single
|
|
||||||
// pod.
|
|
||||||
func testVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGroup *int64, tests []VolumeTest) {
|
|
||||||
By(fmt.Sprint("starting ", config.prefix, " client"))
|
|
||||||
clientPod := &v1.Pod{
|
|
||||||
TypeMeta: metav1.TypeMeta{
|
|
||||||
Kind: "Pod",
|
|
||||||
APIVersion: "v1",
|
|
||||||
},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: config.prefix + "-client",
|
|
||||||
Labels: map[string]string{
|
|
||||||
"role": config.prefix + "-client",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Spec: v1.PodSpec{
|
|
||||||
Containers: []v1.Container{
|
|
||||||
{
|
|
||||||
Name: config.prefix + "-client",
|
|
||||||
Image: "gcr.io/google_containers/busybox:1.24",
|
|
||||||
WorkingDir: "/opt",
|
|
||||||
// An imperative and easily debuggable container which reads vol contents for
|
|
||||||
// us to scan in the tests or by eye.
|
|
||||||
// We expect that /opt is empty in the minimal containers which we use in this test.
|
|
||||||
Command: []string{
|
|
||||||
"/bin/sh",
|
|
||||||
"-c",
|
|
||||||
"while true ; do cat /opt/0/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
|
|
||||||
},
|
|
||||||
VolumeMounts: []v1.VolumeMount{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
SecurityContext: &v1.PodSecurityContext{
|
|
||||||
SELinuxOptions: &v1.SELinuxOptions{
|
|
||||||
Level: "s0:c0,c1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Volumes: []v1.Volume{},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
podsNamespacer := client.Core().Pods(config.namespace)
|
|
||||||
|
|
||||||
if fsGroup != nil {
|
|
||||||
clientPod.Spec.SecurityContext.FSGroup = fsGroup
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, test := range tests {
|
|
||||||
volumeName := fmt.Sprintf("%s-%s-%d", config.prefix, "volume", i)
|
|
||||||
clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
|
|
||||||
Name: volumeName,
|
|
||||||
MountPath: fmt.Sprintf("/opt/%d", i),
|
|
||||||
})
|
|
||||||
clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
|
|
||||||
Name: volumeName,
|
|
||||||
VolumeSource: test.volume,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
clientPod, err := podsNamespacer.Create(clientPod)
|
|
||||||
if err != nil {
|
|
||||||
framework.Failf("Failed to create %s pod: %v", clientPod.Name, err)
|
|
||||||
}
|
|
||||||
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, clientPod))
|
|
||||||
|
|
||||||
By("Checking that text file contents are perfect.")
|
|
||||||
for i, test := range tests {
|
|
||||||
fileName := fmt.Sprintf("/opt/%d/%s", i, test.file)
|
|
||||||
_, err = framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"cat", fileName}, test.expectedContent, time.Minute)
|
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file %s.", fileName)
|
|
||||||
}
|
|
||||||
|
|
||||||
if fsGroup != nil {
|
|
||||||
By("Checking fsGroup is correct.")
|
|
||||||
_, err = framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
|
|
||||||
Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert index.html with given content into given volume. It does so by
|
|
||||||
// starting and auxiliary pod which writes the file there.
|
|
||||||
// The volume must be writable.
|
|
||||||
func injectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
|
|
||||||
By(fmt.Sprint("starting ", config.prefix, " injector"))
|
|
||||||
podClient := client.Core().Pods(config.namespace)
|
|
||||||
|
|
||||||
injectPod := &v1.Pod{
|
|
||||||
TypeMeta: metav1.TypeMeta{
|
|
||||||
Kind: "Pod",
|
|
||||||
APIVersion: "v1",
|
|
||||||
},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Name: config.prefix + "-injector",
|
|
||||||
Labels: map[string]string{
|
|
||||||
"role": config.prefix + "-injector",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Spec: v1.PodSpec{
|
|
||||||
Containers: []v1.Container{
|
|
||||||
{
|
|
||||||
Name: config.prefix + "-injector",
|
|
||||||
Image: "gcr.io/google_containers/busybox:1.24",
|
|
||||||
Command: []string{"/bin/sh"},
|
|
||||||
Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
|
|
||||||
VolumeMounts: []v1.VolumeMount{
|
|
||||||
{
|
|
||||||
Name: config.prefix + "-volume",
|
|
||||||
MountPath: "/mnt",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
SecurityContext: &v1.PodSecurityContext{
|
|
||||||
SELinuxOptions: &v1.SELinuxOptions{
|
|
||||||
Level: "s0:c0,c1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
RestartPolicy: v1.RestartPolicyNever,
|
|
||||||
Volumes: []v1.Volume{
|
|
||||||
{
|
|
||||||
Name: config.prefix + "-volume",
|
|
||||||
VolumeSource: volume,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
podClient.Delete(config.prefix+"-injector", nil)
|
|
||||||
}()
|
|
||||||
|
|
||||||
injectPod, err := podClient.Create(injectPod)
|
|
||||||
framework.ExpectNoError(err, "Failed to create injector pod: %v", err)
|
|
||||||
err = framework.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
|
|
||||||
Expect(err).NotTo(HaveOccurred())
|
|
||||||
}
|
|
||||||
|
|
||||||
func deleteCinderVolume(name string) error {
|
|
||||||
// Try to delete the volume for several seconds - it takes
|
// Try to delete the volume for several seconds - it takes
|
||||||
// a while for the plugin to detach it.
|
// a while for the plugin to detach it.
|
||||||
var output []byte
|
var output []byte
|
||||||
@ -382,7 +76,7 @@ func deleteCinderVolume(name string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// These tests need privileged containers, which are disabled by default.
|
// These tests need privileged containers, which are disabled by default.
|
||||||
var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
var _ = framework.KubeDescribe("Volumes [Volume][Volume]", func() {
|
||||||
f := framework.NewDefaultFramework("volume")
|
f := framework.NewDefaultFramework("volume")
|
||||||
|
|
||||||
// If 'false', the test won't clear its volumes upon completion. Useful for debugging,
|
// If 'false', the test won't clear its volumes upon completion. Useful for debugging,
|
||||||
@@ -403,37 +97,37 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
 
     framework.KubeDescribe("NFS", func() {
         It("should be mountable", func() {
-            config := VolumeTestConfig{
-                namespace:   namespace.Name,
-                prefix:      "nfs",
-                serverImage: NfsServerImage,
-                serverPorts: []int{2049},
+            config := framework.VolumeTestConfig{
+                Namespace:   namespace.Name,
+                Prefix:      "nfs",
+                ServerImage: framework.NfsServerImage,
+                ServerPorts: []int{2049},
             }
 
             defer func() {
                 if clean {
-                    volumeTestCleanup(f, config)
+                    framework.VolumeTestCleanup(f, config)
                 }
             }()
-            pod := startVolumeServer(cs, config)
+            pod := framework.StartVolumeServer(cs, config)
             serverIP := pod.Status.PodIP
             framework.Logf("NFS server IP address: %v", serverIP)
 
-            tests := []VolumeTest{
+            tests := []framework.VolumeTest{
                 {
-                    volume: v1.VolumeSource{
+                    Volume: v1.VolumeSource{
                         NFS: &v1.NFSVolumeSource{
                             Server:   serverIP,
                             Path:     "/",
                             ReadOnly: true,
                         },
                     },
-                    file: "index.html",
+                    File: "index.html",
                     // Must match content of test/images/volumes-tester/nfs/index.html
-                    expectedContent: "Hello from NFS!",
+                    ExpectedContent: "Hello from NFS!",
                 },
             }
-            testVolumeClient(cs, config, nil, tests)
+            framework.TestVolumeClient(cs, config, nil, tests)
         })
     })
 
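The renames in this hunk are the mechanical consequence of the extraction: Go exports an identifier outside its package only if it starts with an upper-case letter, so once VolumeTestConfig, VolumeTest, and the helper functions moved into the framework package, every field and function the tests touch had to be capitalized. A minimal illustration of the rule (not the real struct):

package framework

type VolumeTestConfig struct {
    Namespace string // exported: tests in other packages can set it
    prefix    string // unexported: setting it outside framework is a compile error
}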
@@ -443,19 +137,19 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
 
     framework.KubeDescribe("GlusterFS [Feature:Volumes]", func() {
         It("should be mountable", func() {
-            config := VolumeTestConfig{
-                namespace:   namespace.Name,
-                prefix:      "gluster",
-                serverImage: GlusterfsServerImage,
-                serverPorts: []int{24007, 24008, 49152},
+            config := framework.VolumeTestConfig{
+                Namespace:   namespace.Name,
+                Prefix:      "gluster",
+                ServerImage: framework.GlusterfsServerImage,
+                ServerPorts: []int{24007, 24008, 49152},
             }
 
             defer func() {
                 if clean {
-                    volumeTestCleanup(f, config)
+                    framework.VolumeTestCleanup(f, config)
                 }
             }()
-            pod := startVolumeServer(cs, config)
+            pod := framework.StartVolumeServer(cs, config)
             serverIP := pod.Status.PodIP
             framework.Logf("Gluster server IP address: %v", serverIP)
 
@@ -466,7 +160,7 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                     APIVersion: "v1",
                 },
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: config.prefix + "-server",
+                    Name: config.Prefix + "-server",
                 },
                 Subsets: []v1.EndpointSubset{
                     {
@@ -486,11 +180,11 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                 },
             }
 
-            endClient := cs.Core().Endpoints(config.namespace)
+            endClient := cs.Core().Endpoints(config.Namespace)
 
             defer func() {
                 if clean {
-                    endClient.Delete(config.prefix+"-server", nil)
+                    endClient.Delete(config.Prefix+"-server", nil)
                 }
             }()
 
@@ -498,22 +192,22 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                 framework.Failf("Failed to create endpoints for Gluster server: %v", err)
             }
 
-            tests := []VolumeTest{
+            tests := []framework.VolumeTest{
                 {
-                    volume: v1.VolumeSource{
+                    Volume: v1.VolumeSource{
                         Glusterfs: &v1.GlusterfsVolumeSource{
-                            EndpointsName: config.prefix + "-server",
+                            EndpointsName: config.Prefix + "-server",
                             // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
                             Path:     "test_vol",
                             ReadOnly: true,
                         },
                     },
-                    file: "index.html",
+                    File: "index.html",
                     // Must match content of test/images/volumes-tester/gluster/index.html
-                    expectedContent: "Hello from GlusterFS!",
+                    ExpectedContent: "Hello from GlusterFS!",
                 },
             }
-            testVolumeClient(cs, config, nil, tests)
+            framework.TestVolumeClient(cs, config, nil, tests)
         })
     })
 
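The Subsets content is elided from the hunk above; it points the Glusterfs volume plugin at the server pod, since the volume source references the endpoints by name rather than by IP. For orientation, a hedged sketch of the shape of such an Endpoints object (the address and port values are illustrative, taken from the serverIP and ServerPorts used in this test):

// Illustrative only: the real Subsets lines are not shown in this diff.
endpoints := &v1.Endpoints{
    ObjectMeta: metav1.ObjectMeta{Name: config.Prefix + "-server"},
    Subsets: []v1.EndpointSubset{
        {
            Addresses: []v1.EndpointAddress{{IP: serverIP}},
            Ports:     []v1.EndpointPort{{Name: "gluster", Port: 24007, Protocol: v1.ProtocolTCP}},
        },
    },
}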
@@ -528,12 +222,12 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
 
     framework.KubeDescribe("iSCSI [Feature:Volumes]", func() {
         It("should be mountable", func() {
-            config := VolumeTestConfig{
-                namespace:   namespace.Name,
-                prefix:      "iscsi",
-                serverImage: IscsiServerImage,
-                serverPorts: []int{3260},
-                serverVolumes: map[string]string{
+            config := framework.VolumeTestConfig{
+                Namespace:   namespace.Name,
+                Prefix:      "iscsi",
+                ServerImage: framework.IscsiServerImage,
+                ServerPorts: []int{3260},
+                ServerVolumes: map[string]string{
                     // iSCSI container needs to insert modules from the host
                     "/lib/modules": "/lib/modules",
                 },
@@ -541,16 +235,16 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
 
             defer func() {
                 if clean {
-                    volumeTestCleanup(f, config)
+                    framework.VolumeTestCleanup(f, config)
                 }
             }()
-            pod := startVolumeServer(cs, config)
+            pod := framework.StartVolumeServer(cs, config)
             serverIP := pod.Status.PodIP
             framework.Logf("iSCSI server IP address: %v", serverIP)
 
-            tests := []VolumeTest{
+            tests := []framework.VolumeTest{
                 {
-                    volume: v1.VolumeSource{
+                    Volume: v1.VolumeSource{
                         ISCSI: &v1.ISCSIVolumeSource{
                             TargetPortal: serverIP + ":3260",
                             // from test/images/volumes-tester/iscsi/initiatorname.iscsi
@@ -559,13 +253,13 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                             FSType: "ext2",
                         },
                     },
-                    file: "index.html",
+                    File: "index.html",
                     // Must match content of test/images/volumes-tester/iscsi/block.tar.gz
-                    expectedContent: "Hello from iSCSI",
+                    ExpectedContent: "Hello from iSCSI",
                 },
             }
             fsGroup := int64(1234)
-            testVolumeClient(cs, config, &fsGroup, tests)
+            framework.TestVolumeClient(cs, config, &fsGroup, tests)
         })
     })
 
@@ -575,12 +269,12 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
 
     framework.KubeDescribe("Ceph RBD [Feature:Volumes]", func() {
         It("should be mountable", func() {
-            config := VolumeTestConfig{
-                namespace:   namespace.Name,
-                prefix:      "rbd",
-                serverImage: RbdServerImage,
-                serverPorts: []int{6789},
-                serverVolumes: map[string]string{
+            config := framework.VolumeTestConfig{
+                Namespace:   namespace.Name,
+                Prefix:      "rbd",
+                ServerImage: framework.RbdServerImage,
+                ServerPorts: []int{6789},
+                ServerVolumes: map[string]string{
                     // iSCSI container needs to insert modules from the host
                     "/lib/modules": "/lib/modules",
                     "/sys":         "/sys",
@@ -589,10 +283,10 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
 
             defer func() {
                 if clean {
-                    volumeTestCleanup(f, config)
+                    framework.VolumeTestCleanup(f, config)
                 }
             }()
-            pod := startVolumeServer(cs, config)
+            pod := framework.StartVolumeServer(cs, config)
             serverIP := pod.Status.PodIP
             framework.Logf("Ceph server IP address: %v", serverIP)
 
@@ -603,7 +297,7 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                     APIVersion: "v1",
                 },
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: config.prefix + "-secret",
+                    Name: config.Prefix + "-secret",
                 },
                 Data: map[string][]byte{
                     // from test/images/volumes-tester/rbd/keyring
@@ -612,11 +306,11 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                 Type: "kubernetes.io/rbd",
             }
 
-            secClient := cs.Core().Secrets(config.namespace)
+            secClient := cs.Core().Secrets(config.Namespace)
 
             defer func() {
                 if clean {
-                    secClient.Delete(config.prefix+"-secret", nil)
+                    secClient.Delete(config.Prefix+"-secret", nil)
                 }
             }()
 
@@ -624,27 +318,27 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                 framework.Failf("Failed to create secrets for Ceph RBD: %v", err)
             }
 
-            tests := []VolumeTest{
+            tests := []framework.VolumeTest{
                 {
-                    volume: v1.VolumeSource{
+                    Volume: v1.VolumeSource{
                         RBD: &v1.RBDVolumeSource{
                             CephMonitors: []string{serverIP},
                             RBDPool:      "rbd",
                             RBDImage:     "foo",
                             RadosUser:    "admin",
                             SecretRef: &v1.LocalObjectReference{
-                                Name: config.prefix + "-secret",
+                                Name: config.Prefix + "-secret",
                             },
                             FSType: "ext2",
                         },
                     },
-                    file: "index.html",
+                    File: "index.html",
                     // Must match content of test/images/volumes-tester/rbd/create_block.sh
-                    expectedContent: "Hello from RBD",
+                    ExpectedContent: "Hello from RBD",
                 },
             }
             fsGroup := int64(1234)
-            testVolumeClient(cs, config, &fsGroup, tests)
+            framework.TestVolumeClient(cs, config, &fsGroup, tests)
         })
     })
 ////////////////////////////////////////////////////////////////////////
@@ -653,19 +347,19 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
 
     framework.KubeDescribe("CephFS [Feature:Volumes]", func() {
        It("should be mountable", func() {
-            config := VolumeTestConfig{
-                namespace:   namespace.Name,
-                prefix:      "cephfs",
-                serverImage: CephServerImage,
-                serverPorts: []int{6789},
+            config := framework.VolumeTestConfig{
+                Namespace:   namespace.Name,
+                Prefix:      "cephfs",
+                ServerImage: framework.CephServerImage,
+                ServerPorts: []int{6789},
             }
 
             defer func() {
                 if clean {
-                    volumeTestCleanup(f, config)
+                    framework.VolumeTestCleanup(f, config)
                 }
             }()
-            pod := startVolumeServer(cs, config)
+            pod := framework.StartVolumeServer(cs, config)
             serverIP := pod.Status.PodIP
             framework.Logf("Ceph server IP address: %v", serverIP)
             By("sleeping a bit to give ceph server time to initialize")
@@ -678,7 +372,7 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                     APIVersion: "v1",
                 },
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: config.prefix + "-secret",
+                    Name: config.Prefix + "-secret",
                 },
                 // Must use the ceph keyring at contrib/for-tests/volumes-ceph/ceph/init.sh
                 // and encode in base64
@@ -701,22 +395,22 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                 framework.Failf("unable to create test secret %s: %v", secret.Name, err)
             }
 
-            tests := []VolumeTest{
+            tests := []framework.VolumeTest{
                 {
-                    volume: v1.VolumeSource{
+                    Volume: v1.VolumeSource{
                         CephFS: &v1.CephFSVolumeSource{
                             Monitors: []string{serverIP + ":6789"},
                             User:     "kube",
-                            SecretRef: &v1.LocalObjectReference{Name: config.prefix + "-secret"},
+                            SecretRef: &v1.LocalObjectReference{Name: config.Prefix + "-secret"},
                             ReadOnly: true,
                         },
                     },
-                    file: "index.html",
+                    File: "index.html",
                     // Must match content of test/images/volumes-tester/ceph/index.html
-                    expectedContent: "Hello Ceph!",
+                    ExpectedContent: "Hello Ceph!",
                 },
             }
-            testVolumeClient(cs, config, nil, tests)
+            framework.TestVolumeClient(cs, config, nil, tests)
         })
     })
 
@@ -732,9 +426,9 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
     framework.KubeDescribe("Cinder [Feature:Volumes]", func() {
         It("should be mountable", func() {
             framework.SkipUnlessProviderIs("openstack")
-            config := VolumeTestConfig{
-                namespace: namespace.Name,
-                prefix:    "cinder",
+            config := framework.VolumeTestConfig{
+                Namespace: namespace.Name,
+                Prefix:    "cinder",
             }
 
             // We assume that namespace.Name is a random string
@@ -748,7 +442,7 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
             defer func() {
                 // Ignore any cleanup errors, there is not much we can do about
                 // them. They were already logged.
-                deleteCinderVolume(volumeName)
+                DeleteCinderVolume(volumeName)
             }()
 
             // Parse 'id'' from stdout. Expected format:
@@ -774,30 +468,30 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
             defer func() {
                 if clean {
                     framework.Logf("Running volumeTestCleanup")
-                    volumeTestCleanup(f, config)
+                    framework.VolumeTestCleanup(f, config)
                 }
             }()
 
-            tests := []VolumeTest{
+            tests := []framework.VolumeTest{
                 {
-                    volume: v1.VolumeSource{
+                    Volume: v1.VolumeSource{
                         Cinder: &v1.CinderVolumeSource{
                             VolumeID: volumeID,
                             FSType:   "ext3",
                             ReadOnly: false,
                         },
                     },
-                    file: "index.html",
+                    File: "index.html",
                     // Randomize index.html to make sure we don't see the
                     // content from previous test runs.
-                    expectedContent: "Hello from Cinder from namespace " + volumeName,
+                    ExpectedContent: "Hello from Cinder from namespace " + volumeName,
                 },
             }
 
-            injectHtml(cs, config, tests[0].volume, tests[0].expectedContent)
+            framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
 
             fsGroup := int64(1234)
-            testVolumeClient(cs, config, &fsGroup, tests)
+            framework.TestVolumeClient(cs, config, &fsGroup, tests)
         })
     })
 
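The expected output format mentioned in the "Parse 'id'' from stdout" comment is elided from this page. For orientation, a hedged sketch of the kind of parsing that comment describes, assuming the classic OpenStack CLI ASCII table in which the id row splits into exactly five whitespace-separated fields ("|", "id", "|", the ID, "|"):

// Sketch only: scans `cinder create` output for the id row.
volumeID := ""
for _, line := range strings.Split(string(output), "\n") {
    fields := strings.Fields(line)
    if len(fields) != 5 || fields[1] != "id" {
        continue
    }
    volumeID = fields[3] // the value column of the id row
    break
}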
@@ -808,9 +502,9 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
     framework.KubeDescribe("PD", func() {
         It("should be mountable", func() {
             framework.SkipUnlessProviderIs("gce", "gke")
-            config := VolumeTestConfig{
-                namespace: namespace.Name,
-                prefix:    "pd",
+            config := framework.VolumeTestConfig{
+                Namespace: namespace.Name,
+                Prefix:    "pd",
             }
 
             By("creating a test gce pd volume")
@@ -824,30 +518,30 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
             defer func() {
                 if clean {
                     framework.Logf("Running volumeTestCleanup")
-                    volumeTestCleanup(f, config)
+                    framework.VolumeTestCleanup(f, config)
                 }
             }()
 
-            tests := []VolumeTest{
+            tests := []framework.VolumeTest{
                 {
-                    volume: v1.VolumeSource{
+                    Volume: v1.VolumeSource{
                         GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                             PDName:   volumeName,
                             FSType:   "ext3",
                             ReadOnly: false,
                         },
                     },
-                    file: "index.html",
+                    File: "index.html",
                     // Randomize index.html to make sure we don't see the
                     // content from previous test runs.
-                    expectedContent: "Hello from GCE from namespace " + volumeName,
+                    ExpectedContent: "Hello from GCE from namespace " + volumeName,
                 },
             }
 
-            injectHtml(cs, config, tests[0].volume, tests[0].expectedContent)
+            framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
 
             fsGroup := int64(1234)
-            testVolumeClient(cs, config, &fsGroup, tests)
+            framework.TestVolumeClient(cs, config, &fsGroup, tests)
         })
     })
 
@@ -857,14 +551,14 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
 
     framework.KubeDescribe("ConfigMap", func() {
         It("should be mountable", func() {
-            config := VolumeTestConfig{
-                namespace: namespace.Name,
-                prefix:    "configmap",
+            config := framework.VolumeTestConfig{
+                Namespace: namespace.Name,
+                Prefix:    "configmap",
             }
 
             defer func() {
                 if clean {
-                    volumeTestCleanup(f, config)
+                    framework.VolumeTestCleanup(f, config)
                 }
             }()
             configMap := &v1.ConfigMap{
@@ -873,7 +567,7 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                     APIVersion: "v1",
                 },
                 ObjectMeta: metav1.ObjectMeta{
-                    Name: config.prefix + "-map",
+                    Name: config.Prefix + "-map",
                 },
                 Data: map[string]string{
                     "first": "this is the first file",
@@ -889,12 +583,12 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
             }()
 
             // Test one ConfigMap mounted several times to test #28502
-            tests := []VolumeTest{
+            tests := []framework.VolumeTest{
                 {
-                    volume: v1.VolumeSource{
+                    Volume: v1.VolumeSource{
                         ConfigMap: &v1.ConfigMapVolumeSource{
                             LocalObjectReference: v1.LocalObjectReference{
-                                Name: config.prefix + "-map",
+                                Name: config.Prefix + "-map",
                             },
                             Items: []v1.KeyToPath{
                                 {
@@ -904,14 +598,14 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                                 },
                             },
                         },
                     },
-                    file:            "firstfile",
-                    expectedContent: "this is the first file",
+                    File:            "firstfile",
+                    ExpectedContent: "this is the first file",
                 },
                 {
-                    volume: v1.VolumeSource{
+                    Volume: v1.VolumeSource{
                         ConfigMap: &v1.ConfigMapVolumeSource{
                             LocalObjectReference: v1.LocalObjectReference{
-                                Name: config.prefix + "-map",
+                                Name: config.Prefix + "-map",
                             },
                             Items: []v1.KeyToPath{
                                 {
@@ -921,11 +615,11 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                                 },
                             },
                         },
                     },
-                    file:            "secondfile",
-                    expectedContent: "this is the second file",
+                    File:            "secondfile",
+                    ExpectedContent: "this is the second file",
                 },
             }
-            testVolumeClient(cs, config, nil, tests)
+            framework.TestVolumeClient(cs, config, nil, tests)
         })
     })
 
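The two VolumeTest entries above mount the same ConfigMap twice with different Items projections, which is exactly the regression scenario of kubernetes issue #28502. A hedged sketch of the pod-spec shape this ends up producing (the volume names are hypothetical; the real wiring happens inside framework.TestVolumeClient):

// Illustrative only: one ConfigMap backing two pod volumes, each
// projecting a different key to a different file.
volumes := []v1.Volume{
    {
        Name: "configmap-0", // hypothetical
        VolumeSource: v1.VolumeSource{
            ConfigMap: &v1.ConfigMapVolumeSource{
                LocalObjectReference: v1.LocalObjectReference{Name: config.Prefix + "-map"},
                Items:                []v1.KeyToPath{{Key: "first", Path: "firstfile"}},
            },
        },
    },
    {
        Name: "configmap-1", // hypothetical
        VolumeSource: v1.VolumeSource{
            ConfigMap: &v1.ConfigMapVolumeSource{
                LocalObjectReference: v1.LocalObjectReference{Name: config.Prefix + "-map"},
                Items:                []v1.KeyToPath{{Key: "second", Path: "secondfile"}},
            },
        },
    },
}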
@@ -939,9 +633,9 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
             var (
                 volumePath string
             )
-            config := VolumeTestConfig{
-                namespace: namespace.Name,
-                prefix:    "vsphere",
+            config := framework.VolumeTestConfig{
+                Namespace: namespace.Name,
+                Prefix:    "vsphere",
             }
             By("creating a test vsphere volume")
             vsp, err := vsphere.GetVSphere()
@@ -957,29 +651,29 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
             defer func() {
                 if clean {
                     framework.Logf("Running volumeTestCleanup")
-                    volumeTestCleanup(f, config)
+                    framework.VolumeTestCleanup(f, config)
                 }
             }()
 
-            tests := []VolumeTest{
+            tests := []framework.VolumeTest{
                 {
-                    volume: v1.VolumeSource{
+                    Volume: v1.VolumeSource{
                         VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
                             VolumePath: volumePath,
                             FSType:     "ext4",
                         },
                     },
-                    file: "index.html",
+                    File: "index.html",
                     // Randomize index.html to make sure we don't see the
                     // content from previous test runs.
-                    expectedContent: "Hello from vSphere from namespace " + namespace.Name,
+                    ExpectedContent: "Hello from vSphere from namespace " + namespace.Name,
                 },
             }
 
-            injectHtml(cs, config, tests[0].volume, tests[0].expectedContent)
+            framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
 
             fsGroup := int64(1234)
-            testVolumeClient(cs, config, &fsGroup, tests)
+            framework.TestVolumeClient(cs, config, &fsGroup, tests)
         })
     })
 })