Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 04:33:26 +00:00)
Merge pull request #42363 from copejon/extract-volume-utils
Automatic merge from submit-queue

Extract e2e utility code into framework

**What this PR does / why we need it**: There's a growing dependency on the volume e2e utilities for creating and testing against NFS volumes. For this reason, it's useful to relocate the relevant functions to the `framework` package; doing so makes these utility functions available to e2e tests outside the `e2e` package. This PR only moves code from the `e2e` package to `framework` and handles the resulting changes to call sites. It does not change any logic.

```release-note
NONE
```

@jingxu97 I think there's value here in reducing duplicate code in the `common` package, given that these functions have been copied down to it. However, there's been some divergence. Can you PTAL and let me know if there's any reason we can't remove the duplicate `common` code? cc @jeffvance
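The shape of the change at a call site, condensed from the NFSv4 hunk below (a sketch, not new code from this PR: `f`, `c`, `namespace`, `volume`, and `tests` come from the surrounding Ginkgo test):

```go
// Before: unexported helpers, visible only inside their own package.
config := VolumeTestConfig{
    namespace:   namespace.Name,
    prefix:      "nfs",
    serverImage: "gcr.io/google_containers/volume-nfs:0.8",
    serverPorts: []int{2049},
}
pod := startVolumeServer(f, config)
framework.Logf("NFS server IP address: %v", pod.Status.PodIP)
testVolumeClient(f, config, volume, nil, "Hello from NFS!")
```

```go
// After: exported from test/e2e/framework, callable from any e2e package.
config := framework.VolumeTestConfig{
    Namespace:   namespace.Name,
    Prefix:      "nfs",
    ServerImage: framework.NfsServerImage,
    ServerPorts: []int{2049},
}
pod := framework.StartVolumeServer(c, config)
framework.Logf("NFS server IP address: %v", pod.Status.PodIP)
framework.TestVolumeClient(c, config, nil, tests)
```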
This commit is contained in: commit c2f3c483a1
@@ -51,7 +51,6 @@ go_library(
        "//vendor:github.com/onsi/ginkgo",
        "//vendor:github.com/onsi/gomega",
        "//vendor:golang.org/x/net/websocket",
        "//vendor:k8s.io/apimachinery/pkg/api/errors",
        "//vendor:k8s.io/apimachinery/pkg/api/resource",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/fields",
@@ -37,310 +37,20 @@ limitations under the License.
 * and checks, that Kubernetes can use it as a volume.
 */

// GlusterFS test is duplicated from test/e2e/volumes.go. Any changes made there
// should be duplicated here

package common

import (
    "fmt"
    "os/exec"
    "regexp"
    "strconv"
    "strings"
    "time"

    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    "k8s.io/kubernetes/test/e2e/framework"

    "github.com/golang/glog"
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

// Configuration of one tests. The test consist of:
// - server pod - runs serverImage, exports ports[]
// - client pod - does not need any special configuration
type VolumeTestConfig struct {
    namespace string
    // Prefix of all pods. Typically the test name.
    prefix string
    // Name of container image for the server pod.
    serverImage string
    // Ports to export from the server pod. TCP only.
    serverPorts []int
    // Arguments to pass to the container image.
    serverArgs []string
    // Volumes needed to be mounted to the server container from the host
    // map <host (source) path> -> <container (dst.) path>
    volumes map[string]string
}

// Starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *v1.Pod {
    podClient := f.PodClient()

    portCount := len(config.serverPorts)
    serverPodPorts := make([]v1.ContainerPort, portCount)

    for i := 0; i < portCount; i++ {
        portName := fmt.Sprintf("%s-%d", config.prefix, i)

        serverPodPorts[i] = v1.ContainerPort{
            Name: portName,
            ContainerPort: int32(config.serverPorts[i]),
            Protocol: v1.ProtocolTCP,
        }
    }

    volumeCount := len(config.volumes)
    volumes := make([]v1.Volume, volumeCount)
    mounts := make([]v1.VolumeMount, volumeCount)

    i := 0
    for src, dst := range config.volumes {
        mountName := fmt.Sprintf("path%d", i)
        volumes[i].Name = mountName
        volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
            Path: src,
        }

        mounts[i].Name = mountName
        mounts[i].ReadOnly = false
        mounts[i].MountPath = dst

        i++
    }

    By(fmt.Sprint("creating ", config.prefix, " server pod"))
    privileged := new(bool)
    *privileged = true
    serverPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind: "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: config.prefix + "-server",
            Labels: map[string]string{
                "role": config.prefix + "-server",
            },
        },

        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: config.prefix + "-server",
                    Image: config.serverImage,
                    SecurityContext: &v1.SecurityContext{
                        Privileged: privileged,
                    },
                    Args: config.serverArgs,
                    Ports: serverPodPorts,
                    VolumeMounts: mounts,
                },
            },
            Volumes: volumes,
        },
    }
    serverPod = podClient.CreateSync(serverPod)

    By("locating the server pod")
    pod, err := podClient.Get(serverPod.Name, metav1.GetOptions{})
    framework.ExpectNoError(err, "Cannot locate the server pod %v: %v", serverPod.Name, err)

    By("sleeping a bit to give the server time to start")
    time.Sleep(20 * time.Second)
    return pod
}

// Clean both server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
    By(fmt.Sprint("cleaning the environment after ", config.prefix))

    defer GinkgoRecover()

    podClient := f.PodClient()

    err := podClient.Delete(config.prefix+"-client", nil)
    if err != nil {
        // Log the error before failing test: if the test has already failed,
        // framework.ExpectNoError() won't print anything to logs!
        glog.Warningf("Failed to delete client pod: %v", err)
        framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
    }

    if config.serverImage != "" {
        if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
            framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
        }
        // See issue #24100.
        // Prevent umount errors by making sure making sure the client pod exits cleanly *before* the volume server pod exits.
        By("sleeping a bit so client can stop and unmount")
        time.Sleep(20 * time.Second)

        err = podClient.Delete(config.prefix+"-server", nil)
        if err != nil {
            glog.Warningf("Failed to delete server pod: %v", err)
            framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
        }
    }
}

// Start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees the data from the server pod.
func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume v1.VolumeSource, fsGroup *int64, expectedContent string) {
    By(fmt.Sprint("starting ", config.prefix, " client"))
    clientPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind: "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: config.prefix + "-client",
            Labels: map[string]string{
                "role": config.prefix + "-client",
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: config.prefix + "-client",
                    Image: "gcr.io/google_containers/busybox:1.24",
                    WorkingDir: "/opt",
                    // An imperative and easily debuggable container which reads vol contents for
                    // us to scan in the tests or by eye.
                    // We expect that /opt is empty in the minimal containers which we use in this test.
                    Command: []string{
                        "/bin/sh",
                        "-c",
                        "while true ; do cat /opt/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
                    },
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name: config.prefix + "-volume",
                            MountPath: "/opt/",
                        },
                    },
                },
            },
            SecurityContext: &v1.PodSecurityContext{
                SELinuxOptions: &v1.SELinuxOptions{
                    Level: "s0:c0,c1",
                },
            },
            Volumes: []v1.Volume{
                {
                    Name: config.prefix + "-volume",
                    VolumeSource: volume,
                },
            },
        },
    }
    podClient := f.PodClient()

    if fsGroup != nil {
        clientPod.Spec.SecurityContext.FSGroup = fsGroup
    }
    clientPod = podClient.CreateSync(clientPod)

    By("Checking that text file contents are perfect.")
    result := f.ExecCommandInPod(clientPod.Name, "cat", "/opt/index.html")
    var err error
    if !strings.Contains(result, expectedContent) {
        err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedContent, result)
    }
    Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file.")

    if fsGroup != nil {

        By("Checking fsGroup is correct.")
        _, err := framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt"}, strconv.Itoa(int(*fsGroup)), time.Minute)
        Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup))
    }
}

// Insert index.html with given content into given volume. It does so by
// starting and auxiliary pod which writes the file there.
// The volume must be writable.
func injectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
    By(fmt.Sprint("starting ", config.prefix, " injector"))
    podClient := client.Core().Pods(config.namespace)

    injectPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind: "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: config.prefix + "-injector",
            Labels: map[string]string{
                "role": config.prefix + "-injector",
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: config.prefix + "-injector",
                    Image: "gcr.io/google_containers/busybox:1.24",
                    Command: []string{"/bin/sh"},
                    Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name: config.prefix + "-volume",
                            MountPath: "/mnt",
                        },
                    },
                },
            },
            SecurityContext: &v1.PodSecurityContext{
                SELinuxOptions: &v1.SELinuxOptions{
                    Level: "s0:c0,c1",
                },
            },
            RestartPolicy: v1.RestartPolicyNever,
            Volumes: []v1.Volume{
                {
                    Name: config.prefix + "-volume",
                    VolumeSource: volume,
                },
            },
        },
    }

    defer func() {
        podClient.Delete(config.prefix+"-injector", nil)
    }()

    injectPod, err := podClient.Create(injectPod)
    framework.ExpectNoError(err, "Failed to create injector pod: %v", err)
    err = framework.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
    Expect(err).NotTo(HaveOccurred())
}

func deleteCinderVolume(name string) error {
    // Try to delete the volume for several seconds - it takes
    // a while for the plugin to detach it.
    var output []byte
    var err error
    timeout := time.Second * 120

    framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
    for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
        output, err = exec.Command("cinder", "delete", name).CombinedOutput()
        if err == nil {
            framework.Logf("Cinder volume %s deleted", name)
            return nil
        } else {
            framework.Logf("Failed to delete volume %s: %v", name, err)
        }
    }
    framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
    return err
}

// These tests need privileged containers, which are disabled by default. Run
// the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
var _ = framework.KubeDescribe("GCP Volumes", func() {
@@ -351,12 +61,13 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
    clean := true
    // filled in BeforeEach
    var namespace *v1.Namespace
    var c clientset.Interface

    BeforeEach(func() {
        if !isTestEnabled(f.ClientSet) {
            framework.Skipf("NFS tests are not supported for this distro")
        }
        framework.SkipUnlessNodeOSDistroIs("gci")

        namespace = f.Namespace
        c = f.ClientSet
    })

    ////////////////////////////////////////////////////////////////////////
@@ -365,61 +76,75 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {

    framework.KubeDescribe("NFSv4", func() {
        It("should be mountable for NFSv4 [Volume]", func() {
            config := VolumeTestConfig{
                namespace: namespace.Name,
                prefix: "nfs",
                serverImage: "gcr.io/google_containers/volume-nfs:0.8",
                serverPorts: []int{2049},

            config := framework.VolumeTestConfig{
                Namespace: namespace.Name,
                Prefix: "nfs",
                ServerImage: framework.NfsServerImage,
                ServerPorts: []int{2049},
            }

            defer func() {
                if clean {
                    volumeTestCleanup(f, config)
                    framework.VolumeTestCleanup(f, config)
                }
            }()
            pod := startVolumeServer(f, config)

            pod := framework.StartVolumeServer(c, config)
            serverIP := pod.Status.PodIP
            framework.Logf("NFS server IP address: %v", serverIP)

            volume := v1.VolumeSource{
                NFS: &v1.NFSVolumeSource{
                    Server: serverIP,
                    Path: "/",
                    ReadOnly: true,
            tests := []framework.VolumeTest{
                {
                    Volume: v1.VolumeSource{
                        NFS: &v1.NFSVolumeSource{
                            Server: serverIP,
                            Path: "/",
                            ReadOnly: true,
                        },
                    },
                    File: "index.html",
                    ExpectedContent: "Hello from NFS!",
                },
            }

            // Must match content of test/images/volumes-tester/nfs/index.html
            testVolumeClient(f, config, volume, nil, "Hello from NFS!")
            framework.TestVolumeClient(c, config, nil, tests)
        })
    })

    framework.KubeDescribe("NFSv3", func() {
        It("should be mountable for NFSv3 [Volume]", func() {
            config := VolumeTestConfig{
                namespace: namespace.Name,
                prefix: "nfs",
                serverImage: "gcr.io/google_containers/volume-nfs:0.8",
                serverPorts: []int{2049},
            config := framework.VolumeTestConfig{
                Namespace: namespace.Name,
                Prefix: "nfs",
                ServerImage: framework.NfsServerImage,
                ServerPorts: []int{2049},
            }

            defer func() {
                if clean {
                    volumeTestCleanup(f, config)
                    framework.VolumeTestCleanup(f, config)
                }
            }()
            pod := startVolumeServer(f, config)
            pod := framework.StartVolumeServer(c, config)
            serverIP := pod.Status.PodIP
            framework.Logf("NFS server IP address: %v", serverIP)

            volume := v1.VolumeSource{
                NFS: &v1.NFSVolumeSource{
                    Server: serverIP,
                    Path: "/exports",
                    ReadOnly: true,
            tests := []framework.VolumeTest{
                {
                    Volume: v1.VolumeSource{
                        NFS: &v1.NFSVolumeSource{
                            Server: serverIP,
                            Path: "/exports",
                            ReadOnly: true,
                        },
                    },
                    File: "index.html",
                    ExpectedContent: "Hello from NFS!",
                },
            }
            // Must match content of test/images/volume-tester/nfs/index.html
            testVolumeClient(f, config, volume, nil, "Hello from NFS!")
            framework.TestVolumeClient(c, config, nil, tests)
        })
    })
@@ -429,19 +154,20 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {

    framework.KubeDescribe("GlusterFS", func() {
        It("should be mountable [Volume]", func() {
            config := VolumeTestConfig{
                namespace: namespace.Name,
                prefix: "gluster",
                serverImage: "gcr.io/google_containers/volume-gluster:0.2",
                serverPorts: []int{24007, 24008, 49152},
            config := framework.VolumeTestConfig{
                Namespace: namespace.Name,
                Prefix: "gluster",
                ServerImage: framework.GlusterfsServerImage,
                ServerPorts: []int{24007, 24008, 49152},
            }

            defer func() {
                if clean {
                    volumeTestCleanup(f, config)
                    framework.VolumeTestCleanup(f, config)
                }
            }()
            pod := startVolumeServer(f, config)

            pod := framework.StartVolumeServer(c, config)
            serverIP := pod.Status.PodIP
            framework.Logf("Gluster server IP address: %v", serverIP)

@@ -452,7 +178,7 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
                APIVersion: "v1",
            },
            ObjectMeta: metav1.ObjectMeta{
                Name: config.prefix + "-server",
                Name: config.Prefix + "-server",
            },
            Subsets: []v1.EndpointSubset{
                {
@@ -472,11 +198,11 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
                },
            }

            endClient := f.ClientSet.Core().Endpoints(config.namespace)
            endClient := f.ClientSet.CoreV1().Endpoints(config.Namespace)

            defer func() {
                if clean {
                    endClient.Delete(config.prefix+"-server", nil)
                    endClient.Delete(config.Prefix+"-server", nil)
                }
            }()

@@ -484,46 +210,22 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
                framework.Failf("Failed to create endpoints for Gluster server: %v", err)
            }

            volume := v1.VolumeSource{
                Glusterfs: &v1.GlusterfsVolumeSource{
                    EndpointsName: config.prefix + "-server",
                    // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
                    Path: "test_vol",
                    ReadOnly: true,
            tests := []framework.VolumeTest{
                {
                    Volume: v1.VolumeSource{
                        Glusterfs: &v1.GlusterfsVolumeSource{
                            EndpointsName: config.Prefix + "-server",
                            // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
                            Path: "test_vol",
                            ReadOnly: true,
                        },
                    },
                    File: "index.html",
                    // Must match content of test/images/volumes-tester/gluster/index.html
                    ExpectedContent: "Hello from GlusterFS!",
                },
            }
            // Must match content of test/images/volumes-tester/gluster/index.html
            testVolumeClient(f, config, volume, nil, "Hello from GlusterFS!")
            framework.TestVolumeClient(c, config, nil, tests)
        })
    })
})

func isTestEnabled(c clientset.Interface) bool {
    // Enable the test on node e2e if the node image is GCI.
    nodeName := framework.TestContext.NodeName
    if nodeName != "" {
        if strings.Contains(nodeName, "-gci-dev-") {
            gciVersionRe := regexp.MustCompile("-gci-dev-([0-9]+)-")
            matches := gciVersionRe.FindStringSubmatch(framework.TestContext.NodeName)
            if len(matches) == 2 {
                version, err := strconv.Atoi(matches[1])
                if err != nil {
                    glog.Errorf("Error parsing GCI version from NodeName %q: %v", nodeName, err)
                    return false
                }
                return version >= 54
            }
        }
        return false
    }

    // For cluster e2e test, because nodeName is empty, retrieve the node objects from api server
    // and check their images. Only run NFS and GlusterFS tests if nodes are using GCI image for now.
    nodes := framework.GetReadySchedulableNodesOrDie(c)
    for _, node := range nodes.Items {
        if !strings.Contains(node.Status.NodeInfo.OSImage, "Container-Optimized OS") {
            return false
        }
    }
    return true
}
@@ -35,6 +35,7 @@ go_library(
        "test_context.go",
        "upgrade_util.go",
        "util.go",
        "volume_util.go",
    ],
    tags = ["automanaged"],
    deps = [
test/e2e/framework/volume_util.go (new file, 357 lines)
@@ -0,0 +1,357 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
 * This test checks that various VolumeSources are working.
 *
 * There are two ways, how to test the volumes:
 * 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
 * The test creates a server pod, exporting simple 'index.html' file.
 * Then it uses appropriate VolumeSource to import this file into a client pod
 * and checks that the pod can see the file. It does so by importing the file
 * into web server root and loadind the index.html from it.
 *
 * These tests work only when privileged containers are allowed, exporting
 * various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
 * other privileged magic in the server pod.
 *
 * Note that the server containers are for testing purposes only and should not
 * be used in production.
 *
 * 2) With server outside of Kubernetes (Cinder, ...)
 * Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
 * the tested Kubernetes cluster. The test itself creates a new volume,
 * and checks, that Kubernetes can use it as a volume.
 */

package framework

import (
    "fmt"
    "strconv"
    "time"

    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"

    "github.com/golang/glog"
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

// Current supported images for e2e volume testing to be assigned to VolumeTestConfig.serverImage
const (
    NfsServerImage string = "gcr.io/google_containers/volume-nfs:0.8"
    IscsiServerImage string = "gcr.io/google_containers/volume-iscsi:0.1"
    GlusterfsServerImage string = "gcr.io/google_containers/volume-gluster:0.2"
    CephServerImage string = "gcr.io/google_containers/volume-ceph:0.1"
    RbdServerImage string = "gcr.io/google_containers/volume-rbd:0.1"
    BusyBoxImage string = "gcr.io/google_containers/busybox:1.24"
)

// Configuration of one tests. The test consist of:
// - server pod - runs serverImage, exports ports[]
// - client pod - does not need any special configuration
type VolumeTestConfig struct {
    Namespace string
    // Prefix of all pods. Typically the test name.
    Prefix string
    // Name of container image for the server pod.
    ServerImage string
    // Ports to export from the server pod. TCP only.
    ServerPorts []int
    // Arguments to pass to the container image.
    ServerArgs []string
    // Volumes needed to be mounted to the server container from the host
    // map <host (source) path> -> <container (dst.) path>
    ServerVolumes map[string]string
}

// VolumeTest contains a volume to mount into a client pod and its
// expected content.
type VolumeTest struct {
    Volume v1.VolumeSource
    File string
    ExpectedContent string
}

// Starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
    podClient := client.CoreV1().Pods(config.Namespace)

    portCount := len(config.ServerPorts)
    serverPodPorts := make([]v1.ContainerPort, portCount)

    for i := 0; i < portCount; i++ {
        portName := fmt.Sprintf("%s-%d", config.Prefix, i)

        serverPodPorts[i] = v1.ContainerPort{
            Name: portName,
            ContainerPort: int32(config.ServerPorts[i]),
            Protocol: v1.ProtocolTCP,
        }
    }

    volumeCount := len(config.ServerVolumes)
    volumes := make([]v1.Volume, volumeCount)
    mounts := make([]v1.VolumeMount, volumeCount)

    i := 0
    for src, dst := range config.ServerVolumes {
        mountName := fmt.Sprintf("path%d", i)
        volumes[i].Name = mountName
        volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
            Path: src,
        }

        mounts[i].Name = mountName
        mounts[i].ReadOnly = false
        mounts[i].MountPath = dst

        i++
    }

    serverPodName := fmt.Sprintf("%s-server", config.Prefix)
    By(fmt.Sprint("creating ", serverPodName, " pod"))
    privileged := new(bool)
    *privileged = true
    serverPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind: "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: serverPodName,
            Labels: map[string]string{
                "role": serverPodName,
            },
        },

        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: serverPodName,
                    Image: config.ServerImage,
                    SecurityContext: &v1.SecurityContext{
                        Privileged: privileged,
                    },
                    Args: config.ServerArgs,
                    Ports: serverPodPorts,
                    VolumeMounts: mounts,
                },
            },
            Volumes: volumes,
        },
    }

    var pod *v1.Pod
    serverPod, err := podClient.Create(serverPod)
    // ok if the server pod already exists. TODO: make this controllable by callers
    if err != nil {
        if apierrs.IsAlreadyExists(err) {
            Logf("Ignore \"already-exists\" error, re-get pod...")
            By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
            serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{})
            ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
            pod = serverPod
        } else {
            ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
        }
    }
    ExpectNoError(WaitForPodRunningInNamespace(client, serverPod))

    if pod == nil {
        By(fmt.Sprintf("locating the %q server pod", serverPodName))
        pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
        ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
    }
    return pod
}

// Clean both server and client pods.
func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
    By(fmt.Sprint("cleaning the environment after ", config.Prefix))

    defer GinkgoRecover()

    client := f.ClientSet
    podClient := client.CoreV1().Pods(config.Namespace)

    err := podClient.Delete(config.Prefix+"-client", nil)
    if err != nil {
        // Log the error before failing test: if the test has already failed,
        // framework.ExpectNoError() won't print anything to logs!
        glog.Warningf("Failed to delete client pod: %v", err)
        ExpectNoError(err, "Failed to delete client pod: %v", err)
    }

    if config.ServerImage != "" {
        if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
            ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
        }
        // See issue #24100.
        // Prevent umount errors by making sure making sure the client pod exits cleanly *before* the volume server pod exits.
        By("sleeping a bit so client can stop and unmount")
        time.Sleep(20 * time.Second)

        err = podClient.Delete(config.Prefix+"-server", nil)
        if err != nil {
            glog.Warningf("Failed to delete server pod: %v", err)
            ExpectNoError(err, "Failed to delete server pod: %v", err)
        }
    }
}

// Start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees expected data, e.g. from the server pod.
// Multiple VolumeTests can be specified to mount multiple volumes to a single
// pod.
func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGroup *int64, tests []VolumeTest) {
    By(fmt.Sprint("starting ", config.Prefix, " client"))
    clientPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind: "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: config.Prefix + "-client",
            Labels: map[string]string{
                "role": config.Prefix + "-client",
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: config.Prefix + "-client",
                    Image: BusyBoxImage,
                    WorkingDir: "/opt",
                    // An imperative and easily debuggable container which reads vol contents for
                    // us to scan in the tests or by eye.
                    // We expect that /opt is empty in the minimal containers which we use in this test.
                    Command: []string{
                        "/bin/sh",
                        "-c",
                        "while true ; do cat /opt/0/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
                    },
                    VolumeMounts: []v1.VolumeMount{},
                },
            },
            SecurityContext: &v1.PodSecurityContext{
                SELinuxOptions: &v1.SELinuxOptions{
                    Level: "s0:c0,c1",
                },
            },
            Volumes: []v1.Volume{},
        },
    }
    podsNamespacer := client.CoreV1().Pods(config.Namespace)

    if fsGroup != nil {
        clientPod.Spec.SecurityContext.FSGroup = fsGroup
    }

    for i, test := range tests {
        volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
        clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
            Name: volumeName,
            MountPath: fmt.Sprintf("/opt/%d", i),
        })
        clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
            Name: volumeName,
            VolumeSource: test.Volume,
        })
    }
    clientPod, err := podsNamespacer.Create(clientPod)
    if err != nil {
        Failf("Failed to create %s pod: %v", clientPod.Name, err)
    }
    ExpectNoError(WaitForPodRunningInNamespace(client, clientPod))

    By("Checking that text file contents are perfect.")
    for i, test := range tests {
        fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
        _, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"cat", fileName}, test.ExpectedContent, time.Minute)
        Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file %s.", fileName)
    }

    if fsGroup != nil {
        By("Checking fsGroup is correct.")
        _, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
        Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup))
    }
}

// Insert index.html with given content into given volume. It does so by
// starting and auxiliary pod which writes the file there.
// The volume must be writable.
func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
    By(fmt.Sprint("starting ", config.Prefix, " injector"))
    podClient := client.CoreV1().Pods(config.Namespace)

    injectPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind: "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: config.Prefix + "-injector",
            Labels: map[string]string{
                "role": config.Prefix + "-injector",
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: config.Prefix + "-injector",
                    Image: "gcr.io/google_containers/busybox:1.24",
                    Command: []string{"/bin/sh"},
                    Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name: config.Prefix + "-volume",
                            MountPath: "/mnt",
                        },
                    },
                },
            },
            SecurityContext: &v1.PodSecurityContext{
                SELinuxOptions: &v1.SELinuxOptions{
                    Level: "s0:c0,c1",
                },
            },
            RestartPolicy: v1.RestartPolicyNever,
            Volumes: []v1.Volume{
                {
                    Name: config.Prefix + "-volume",
                    VolumeSource: volume,
                },
            },
        },
    }

    defer func() {
        podClient.Delete(config.Prefix+"-injector", nil)
    }()

    injectPod, err := podClient.Create(injectPod)
    ExpectNoError(err, "Failed to create injector pod: %v", err)
    err = WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
    Expect(err).NotTo(HaveOccurred())
}
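Taken together, a test outside the `e2e` package can now drive a containerized volume server end to end with only these exported helpers. A minimal sketch, mirroring the NFS test below and assuming a Ginkgo suite with a `framework.Framework` named `f` in scope and the `v1` API package imported:

```go
// Configure and start an NFS server pod, then verify a client pod sees its data.
config := framework.VolumeTestConfig{
    Namespace:   f.Namespace.Name,
    Prefix:      "nfs",
    ServerImage: framework.NfsServerImage,
    ServerPorts: []int{2049},
}
defer framework.VolumeTestCleanup(f, config)

// Start the NFS server pod and point a VolumeSource at it.
pod := framework.StartVolumeServer(f.ClientSet, config)
tests := []framework.VolumeTest{
    {
        Volume: v1.VolumeSource{
            NFS: &v1.NFSVolumeSource{
                Server:   pod.Status.PodIP,
                Path:     "/",
                ReadOnly: true,
            },
        },
        File:            "index.html",
        ExpectedContent: "Hello from NFS!",
    },
}

// Mount the volume in a client pod and assert on the file contents.
framework.TestVolumeClient(f.ClientSet, config, nil, tests)
```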
@@ -134,9 +134,9 @@ func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRem
// ip address.
// Note: startVolumeServer() waits for the nfs-server pod to be Running and sleeps some
// so that the nfs server can start up.
func createNfsServerPod(c clientset.Interface, config VolumeTestConfig) (*v1.Pod, string) {
func createNfsServerPod(c clientset.Interface, config framework.VolumeTestConfig) (*v1.Pod, string) {

    pod := startVolumeServer(c, config)
    pod := framework.StartVolumeServer(c, config)
    Expect(pod).NotTo(BeNil())
    ip := pod.Status.PodIP
    Expect(len(ip)).NotTo(BeZero())
@@ -387,7 +387,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
    var (
        nfsServerPod *v1.Pod
        nfsIP string
        NFSconfig VolumeTestConfig
        NFSconfig framework.VolumeTestConfig
        pod *v1.Pod // client pod
    )

@@ -404,12 +404,12 @@ var _ = framework.KubeDescribe("kubelet", func() {
    }

    BeforeEach(func() {
        NFSconfig = VolumeTestConfig{
            namespace: ns,
            prefix: "nfs",
            serverImage: NfsServerImage,
            serverPorts: []int{2049},
            serverArgs: []string{"-G", "777", "/exports"},
        NFSconfig = framework.VolumeTestConfig{
            Namespace: ns,
            Prefix: "nfs",
            ServerImage: framework.NfsServerImage,
            ServerPorts: []int{2049},
            ServerArgs: []string{"-G", "777", "/exports"},
        }
        nfsServerPod, nfsIP = createNfsServerPod(c, NFSconfig)
    })
@@ -14,10 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// This test references
// persistent_volumes.go
// volumes.go

package e2e

import (
@@ -77,12 +77,12 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
// initNFSserverPod wraps volumes.go's startVolumeServer to return a running nfs host pod
// commonly used by persistent volume testing
func initNFSserverPod(c clientset.Interface, ns string) *v1.Pod {
    return startVolumeServer(c, VolumeTestConfig{
        namespace: ns,
        prefix: "nfs",
        serverImage: NfsServerImage,
        serverPorts: []int{2049},
        serverArgs: []string{"-G", "777", "/exports"},
    return framework.StartVolumeServer(c, framework.VolumeTestConfig{
        Namespace: ns,
        Prefix: "nfs",
        ServerImage: framework.NfsServerImage,
        ServerPorts: []int{2049},
        ServerArgs: []string{"-G", "777", "/exports"},
    })
}
@@ -37,330 +37,27 @@ limitations under the License.
 * and checks, that Kubernetes can use it as a volume.
 */

// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this
// test should be made there as well.

package e2e

import (
    "fmt"
    "os/exec"
    "strconv"
    "strings"
    "time"

    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
    "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
    "k8s.io/kubernetes/test/e2e/framework"

    "github.com/golang/glog"
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

// Configuration of one tests. The test consist of:
// - server pod - runs serverImage, exports ports[]
// - client pod - does not need any special configuration
type VolumeTestConfig struct {
    namespace string
    // Prefix of all pods. Typically the test name.
    prefix string
    // Name of container image for the server pod.
    serverImage string
    // Ports to export from the server pod. TCP only.
    serverPorts []int
    // Arguments to pass to the container image.
    serverArgs []string
    // Volumes needed to be mounted to the server container from the host
    // map <host (source) path> -> <container (dst.) path>
    serverVolumes map[string]string
}

// VolumeTest contains a volumes to mount into a client pod and its
// expected content.
type VolumeTest struct {
    volume v1.VolumeSource
    file string
    expectedContent string
}

// Current supported images for e2e volume testing to be assigned to VolumeTestConfig.serverImage
const (
    NfsServerImage string = "gcr.io/google_containers/volume-nfs:0.8"
    IscsiServerImage string = "gcr.io/google_containers/volume-iscsi:0.1"
    GlusterfsServerImage string = "gcr.io/google_containers/volume-gluster:0.2"
    CephServerImage string = "gcr.io/google_containers/volume-ceph:0.1"
    RbdServerImage string = "gcr.io/google_containers/volume-rbd:0.1"
)

// Starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
func startVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
    podClient := client.Core().Pods(config.namespace)

    portCount := len(config.serverPorts)
    serverPodPorts := make([]v1.ContainerPort, portCount)

    for i := 0; i < portCount; i++ {
        portName := fmt.Sprintf("%s-%d", config.prefix, i)

        serverPodPorts[i] = v1.ContainerPort{
            Name: portName,
            ContainerPort: int32(config.serverPorts[i]),
            Protocol: v1.ProtocolTCP,
        }
    }

    volumeCount := len(config.serverVolumes)
    volumes := make([]v1.Volume, volumeCount)
    mounts := make([]v1.VolumeMount, volumeCount)

    i := 0
    for src, dst := range config.serverVolumes {
        mountName := fmt.Sprintf("path%d", i)
        volumes[i].Name = mountName
        volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
            Path: src,
        }

        mounts[i].Name = mountName
        mounts[i].ReadOnly = false
        mounts[i].MountPath = dst

        i++
    }

    serverPodName := fmt.Sprintf("%s-server", config.prefix)
    By(fmt.Sprint("creating ", serverPodName, " pod"))
    privileged := new(bool)
    *privileged = true
    serverPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind: "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: serverPodName,
            Labels: map[string]string{
                "role": serverPodName,
            },
        },

        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: serverPodName,
                    Image: config.serverImage,
                    SecurityContext: &v1.SecurityContext{
                        Privileged: privileged,
                    },
                    Args: config.serverArgs,
                    Ports: serverPodPorts,
                    VolumeMounts: mounts,
                },
            },
            Volumes: volumes,
        },
    }

    var pod *v1.Pod
    serverPod, err := podClient.Create(serverPod)
    // ok if the server pod already exists. TODO: make this controllable by callers
    if err != nil {
        if apierrs.IsAlreadyExists(err) {
            framework.Logf("Ignore \"already-exists\" error, re-get pod...")
            By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
            serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{})
            framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
            pod = serverPod
        } else {
            framework.ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
        }
    }
    framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, serverPod))

    if pod == nil {
        By(fmt.Sprintf("locating the %q server pod", serverPodName))
        pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
        framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
    }

    return pod
}

// Clean both server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
    By(fmt.Sprint("cleaning the environment after ", config.prefix))

    defer GinkgoRecover()

    client := f.ClientSet
    podClient := client.Core().Pods(config.namespace)

    err := podClient.Delete(config.prefix+"-client", nil)
    if err != nil {
        // Log the error before failing test: if the test has already failed,
        // framework.ExpectNoError() won't print anything to logs!
        glog.Warningf("Failed to delete client pod: %v", err)
        framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
    }

    if config.serverImage != "" {
        if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
            framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
        }
        // See issue #24100.
        // Prevent umount errors by making sure making sure the client pod exits cleanly *before* the volume server pod exits.
        By("sleeping a bit so client can stop and unmount")
        time.Sleep(20 * time.Second)

        err = podClient.Delete(config.prefix+"-server", nil)
        if err != nil {
            glog.Warningf("Failed to delete server pod: %v", err)
            framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
        }
    }
}

// Start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees expected data, e.g. from the server pod.
// Multiple VolumeTests can be specified to mount multiple volumes to a single
// pod.
func testVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGroup *int64, tests []VolumeTest) {
    By(fmt.Sprint("starting ", config.prefix, " client"))
    clientPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind: "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: config.prefix + "-client",
            Labels: map[string]string{
                "role": config.prefix + "-client",
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: config.prefix + "-client",
                    Image: "gcr.io/google_containers/busybox:1.24",
                    WorkingDir: "/opt",
                    // An imperative and easily debuggable container which reads vol contents for
                    // us to scan in the tests or by eye.
                    // We expect that /opt is empty in the minimal containers which we use in this test.
                    Command: []string{
                        "/bin/sh",
                        "-c",
                        "while true ; do cat /opt/0/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
                    },
                    VolumeMounts: []v1.VolumeMount{},
                },
            },
            SecurityContext: &v1.PodSecurityContext{
                SELinuxOptions: &v1.SELinuxOptions{
                    Level: "s0:c0,c1",
                },
            },
            Volumes: []v1.Volume{},
        },
    }
    podsNamespacer := client.Core().Pods(config.namespace)

    if fsGroup != nil {
        clientPod.Spec.SecurityContext.FSGroup = fsGroup
    }

    for i, test := range tests {
        volumeName := fmt.Sprintf("%s-%s-%d", config.prefix, "volume", i)
        clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
            Name: volumeName,
            MountPath: fmt.Sprintf("/opt/%d", i),
        })
        clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
            Name: volumeName,
            VolumeSource: test.volume,
        })
    }
    clientPod, err := podsNamespacer.Create(clientPod)
    if err != nil {
        framework.Failf("Failed to create %s pod: %v", clientPod.Name, err)
    }
    framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, clientPod))

    By("Checking that text file contents are perfect.")
    for i, test := range tests {
        fileName := fmt.Sprintf("/opt/%d/%s", i, test.file)
        _, err = framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"cat", fileName}, test.expectedContent, time.Minute)
        Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file %s.", fileName)
    }

    if fsGroup != nil {
        By("Checking fsGroup is correct.")
        _, err = framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
        Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup))
    }
}

// Insert index.html with given content into given volume. It does so by
// starting and auxiliary pod which writes the file there.
// The volume must be writable.
func injectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
    By(fmt.Sprint("starting ", config.prefix, " injector"))
    podClient := client.Core().Pods(config.namespace)

    injectPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind: "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: config.prefix + "-injector",
            Labels: map[string]string{
                "role": config.prefix + "-injector",
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: config.prefix + "-injector",
                    Image: "gcr.io/google_containers/busybox:1.24",
                    Command: []string{"/bin/sh"},
                    Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name: config.prefix + "-volume",
                            MountPath: "/mnt",
                        },
                    },
                },
            },
            SecurityContext: &v1.PodSecurityContext{
                SELinuxOptions: &v1.SELinuxOptions{
                    Level: "s0:c0,c1",
                },
            },
            RestartPolicy: v1.RestartPolicyNever,
            Volumes: []v1.Volume{
                {
                    Name: config.prefix + "-volume",
                    VolumeSource: volume,
                },
            },
        },
    }

    defer func() {
        podClient.Delete(config.prefix+"-injector", nil)
    }()

    injectPod, err := podClient.Create(injectPod)
    framework.ExpectNoError(err, "Failed to create injector pod: %v", err)
    err = framework.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
    Expect(err).NotTo(HaveOccurred())
}

func deleteCinderVolume(name string) error {
func DeleteCinderVolume(name string) error {
    // Try to delete the volume for several seconds - it takes
    // a while for the plugin to detach it.
    var output []byte
@@ -403,37 +100,37 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {

    framework.KubeDescribe("NFS", func() {
        It("should be mountable", func() {
            config := VolumeTestConfig{
                namespace: namespace.Name,
                prefix: "nfs",
                serverImage: NfsServerImage,
                serverPorts: []int{2049},
            config := framework.VolumeTestConfig{
                Namespace: namespace.Name,
                Prefix: "nfs",
                ServerImage: framework.NfsServerImage,
                ServerPorts: []int{2049},
            }

            defer func() {
                if clean {
                    volumeTestCleanup(f, config)
                    framework.VolumeTestCleanup(f, config)
                }
            }()
            pod := startVolumeServer(cs, config)
            pod := framework.StartVolumeServer(cs, config)
            serverIP := pod.Status.PodIP
            framework.Logf("NFS server IP address: %v", serverIP)

            tests := []VolumeTest{
            tests := []framework.VolumeTest{
                {
                    volume: v1.VolumeSource{
                    Volume: v1.VolumeSource{
                        NFS: &v1.NFSVolumeSource{
                            Server: serverIP,
                            Path: "/",
                            ReadOnly: true,
                        },
                    },
                    file: "index.html",
                    File: "index.html",
                    // Must match content of test/images/volumes-tester/nfs/index.html
                    expectedContent: "Hello from NFS!",
                    ExpectedContent: "Hello from NFS!",
                },
            }
            testVolumeClient(cs, config, nil, tests)
            framework.TestVolumeClient(cs, config, nil, tests)
        })
    })

@@ -443,19 +140,22 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {

    framework.KubeDescribe("GlusterFS [Feature:Volumes]", func() {
        It("should be mountable", func() {
            config := VolumeTestConfig{
                namespace: namespace.Name,
                prefix: "gluster",
                serverImage: GlusterfsServerImage,
                serverPorts: []int{24007, 24008, 49152},
            //TODO (copejon) GFS is not supported on debian image.
            framework.SkipUnlessNodeOSDistroIs("gci")

            config := framework.VolumeTestConfig{
                Namespace: namespace.Name,
                Prefix: "gluster",
                ServerImage: framework.GlusterfsServerImage,
                ServerPorts: []int{24007, 24008, 49152},
            }

            defer func() {
                if clean {
                    volumeTestCleanup(f, config)
                    framework.VolumeTestCleanup(f, config)
                }
            }()
            pod := startVolumeServer(cs, config)
            pod := framework.StartVolumeServer(cs, config)
            serverIP := pod.Status.PodIP
            framework.Logf("Gluster server IP address: %v", serverIP)

@@ -466,7 +166,7 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                APIVersion: "v1",
            },
            ObjectMeta: metav1.ObjectMeta{
                Name: config.prefix + "-server",
                Name: config.Prefix + "-server",
            },
            Subsets: []v1.EndpointSubset{
                {
@@ -486,11 +186,11 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                },
            }

            endClient := cs.Core().Endpoints(config.namespace)
            endClient := cs.Core().Endpoints(config.Namespace)

            defer func() {
                if clean {
                    endClient.Delete(config.prefix+"-server", nil)
                    endClient.Delete(config.Prefix+"-server", nil)
                }
            }()

@@ -498,22 +198,22 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                framework.Failf("Failed to create endpoints for Gluster server: %v", err)
            }

            tests := []VolumeTest{
            tests := []framework.VolumeTest{
                {
                    volume: v1.VolumeSource{
                    Volume: v1.VolumeSource{
                        Glusterfs: &v1.GlusterfsVolumeSource{
                            EndpointsName: config.prefix + "-server",
                            EndpointsName: config.Prefix + "-server",
                            // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
                            Path: "test_vol",
                            ReadOnly: true,
                        },
                    },
                    file: "index.html",
                    File: "index.html",
                    // Must match content of test/images/volumes-tester/gluster/index.html
                    expectedContent: "Hello from GlusterFS!",
                    ExpectedContent: "Hello from GlusterFS!",
                },
            }
            testVolumeClient(cs, config, nil, tests)
            framework.TestVolumeClient(cs, config, nil, tests)
        })
    })

@@ -528,12 +228,12 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {

    framework.KubeDescribe("iSCSI [Feature:Volumes]", func() {
        It("should be mountable", func() {
            config := VolumeTestConfig{
                namespace: namespace.Name,
                prefix: "iscsi",
                serverImage: IscsiServerImage,
                serverPorts: []int{3260},
                serverVolumes: map[string]string{
            config := framework.VolumeTestConfig{
                Namespace: namespace.Name,
                Prefix: "iscsi",
                ServerImage: framework.IscsiServerImage,
                ServerPorts: []int{3260},
                ServerVolumes: map[string]string{
                    // iSCSI container needs to insert modules from the host
                    "/lib/modules": "/lib/modules",
                },
@@ -541,16 +241,16 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {

            defer func() {
                if clean {
                    volumeTestCleanup(f, config)
                    framework.VolumeTestCleanup(f, config)
                }
            }()
            pod := startVolumeServer(cs, config)
            pod := framework.StartVolumeServer(cs, config)
            serverIP := pod.Status.PodIP
            framework.Logf("iSCSI server IP address: %v", serverIP)

            tests := []VolumeTest{
            tests := []framework.VolumeTest{
                {
                    volume: v1.VolumeSource{
                    Volume: v1.VolumeSource{
                        ISCSI: &v1.ISCSIVolumeSource{
                            TargetPortal: serverIP + ":3260",
                            // from test/images/volumes-tester/iscsi/initiatorname.iscsi
@@ -559,13 +259,13 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                            FSType: "ext2",
                        },
                    },
                    file: "index.html",
                    File: "index.html",
                    // Must match content of test/images/volumes-tester/iscsi/block.tar.gz
                    expectedContent: "Hello from iSCSI",
                    ExpectedContent: "Hello from iSCSI",
                },
            }
            fsGroup := int64(1234)
            testVolumeClient(cs, config, &fsGroup, tests)
            framework.TestVolumeClient(cs, config, &fsGroup, tests)
        })
    })
@@ -575,12 +275,12 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {

    framework.KubeDescribe("Ceph RBD [Feature:Volumes]", func() {
        It("should be mountable", func() {
            config := VolumeTestConfig{
                namespace: namespace.Name,
                prefix: "rbd",
                serverImage: RbdServerImage,
                serverPorts: []int{6789},
                serverVolumes: map[string]string{
            config := framework.VolumeTestConfig{
                Namespace: namespace.Name,
                Prefix: "rbd",
                ServerImage: framework.RbdServerImage,
                ServerPorts: []int{6789},
                ServerVolumes: map[string]string{
                    // iSCSI container needs to insert modules from the host
                    "/lib/modules": "/lib/modules",
                    "/sys": "/sys",
@@ -589,10 +289,10 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {

            defer func() {
                if clean {
                    volumeTestCleanup(f, config)
                    framework.VolumeTestCleanup(f, config)
                }
            }()
            pod := startVolumeServer(cs, config)
            pod := framework.StartVolumeServer(cs, config)
            serverIP := pod.Status.PodIP
            framework.Logf("Ceph server IP address: %v", serverIP)

@@ -603,7 +303,7 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                APIVersion: "v1",
            },
            ObjectMeta: metav1.ObjectMeta{
                Name: config.prefix + "-secret",
                Name: config.Prefix + "-secret",
            },
            Data: map[string][]byte{
                // from test/images/volumes-tester/rbd/keyring
@@ -612,11 +312,11 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                Type: "kubernetes.io/rbd",
            }

            secClient := cs.Core().Secrets(config.namespace)
            secClient := cs.Core().Secrets(config.Namespace)

            defer func() {
                if clean {
                    secClient.Delete(config.prefix+"-secret", nil)
                    secClient.Delete(config.Prefix+"-secret", nil)
                }
            }()

@@ -624,27 +324,27 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                framework.Failf("Failed to create secrets for Ceph RBD: %v", err)
            }

            tests := []VolumeTest{
            tests := []framework.VolumeTest{
                {
                    volume: v1.VolumeSource{
                    Volume: v1.VolumeSource{
                        RBD: &v1.RBDVolumeSource{
                            CephMonitors: []string{serverIP},
                            RBDPool: "rbd",
                            RBDImage: "foo",
                            RadosUser: "admin",
                            SecretRef: &v1.LocalObjectReference{
                                Name: config.prefix + "-secret",
                                Name: config.Prefix + "-secret",
                            },
                            FSType: "ext2",
                        },
                    },
                    file: "index.html",
                    File: "index.html",
                    // Must match content of test/images/volumes-tester/rbd/create_block.sh
                    expectedContent: "Hello from RBD",
                    ExpectedContent: "Hello from RBD",
                },
            }
            fsGroup := int64(1234)
            testVolumeClient(cs, config, &fsGroup, tests)
            framework.TestVolumeClient(cs, config, &fsGroup, tests)
        })
    })
    ////////////////////////////////////////////////////////////////////////
@@ -653,19 +353,19 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {

    framework.KubeDescribe("CephFS [Feature:Volumes]", func() {
        It("should be mountable", func() {
            config := VolumeTestConfig{
                namespace: namespace.Name,
                prefix: "cephfs",
                serverImage: CephServerImage,
                serverPorts: []int{6789},
            config := framework.VolumeTestConfig{
                Namespace: namespace.Name,
                Prefix: "cephfs",
                ServerImage: framework.CephServerImage,
                ServerPorts: []int{6789},
            }

            defer func() {
                if clean {
                    volumeTestCleanup(f, config)
                    framework.VolumeTestCleanup(f, config)
                }
            }()
            pod := startVolumeServer(cs, config)
            pod := framework.StartVolumeServer(cs, config)
            serverIP := pod.Status.PodIP
            framework.Logf("Ceph server IP address: %v", serverIP)
            By("sleeping a bit to give ceph server time to initialize")

@@ -678,7 +378,7 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                APIVersion: "v1",
            },
            ObjectMeta: metav1.ObjectMeta{
                Name: config.prefix + "-secret",
                Name: config.Prefix + "-secret",
            },
            // Must use the ceph keyring at contrib/for-tests/volumes-ceph/ceph/init.sh
            // and encode in base64
@@ -701,22 +401,22 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
                framework.Failf("unable to create test secret %s: %v", secret.Name, err)
            }

            tests := []VolumeTest{
            tests := []framework.VolumeTest{
                {
                    volume: v1.VolumeSource{
                    Volume: v1.VolumeSource{
                        CephFS: &v1.CephFSVolumeSource{
                            Monitors: []string{serverIP + ":6789"},
                            User: "kube",
                            SecretRef: &v1.LocalObjectReference{Name: config.prefix + "-secret"},
                            SecretRef: &v1.LocalObjectReference{Name: config.Prefix + "-secret"},
                            ReadOnly: true,
|
||||
},
|
||||
},
|
||||
file: "index.html",
|
||||
File: "index.html",
|
||||
// Must match content of test/images/volumes-tester/ceph/index.html
|
||||
expectedContent: "Hello Ceph!",
|
||||
ExpectedContent: "Hello Ceph!",
|
||||
},
|
||||
}
|
||||
testVolumeClient(cs, config, nil, tests)
|
||||
framework.TestVolumeClient(cs, config, nil, tests)
|
||||
})
|
||||
})
|
||||
|
||||
@ -732,9 +432,9 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
||||
framework.KubeDescribe("Cinder [Feature:Volumes]", func() {
|
||||
It("should be mountable", func() {
|
||||
framework.SkipUnlessProviderIs("openstack")
|
||||
config := VolumeTestConfig{
|
||||
namespace: namespace.Name,
|
||||
prefix: "cinder",
|
||||
config := framework.VolumeTestConfig{
|
||||
Namespace: namespace.Name,
|
||||
Prefix: "cinder",
|
||||
}
|
||||
|
||||
// We assume that namespace.Name is a random string
|
||||
@ -748,7 +448,7 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
||||
defer func() {
|
||||
// Ignore any cleanup errors, there is not much we can do about
|
||||
// them. They were already logged.
|
||||
deleteCinderVolume(volumeName)
|
||||
DeleteCinderVolume(volumeName)
|
||||
}()
|
||||
|
||||
// Parse 'id'' from stdout. Expected format:
|
||||
@ -774,30 +474,30 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
||||
defer func() {
|
||||
if clean {
|
||||
framework.Logf("Running volumeTestCleanup")
|
||||
volumeTestCleanup(f, config)
|
||||
framework.VolumeTestCleanup(f, config)
|
||||
}
|
||||
}()
|
||||
|
||||
tests := []VolumeTest{
|
||||
tests := []framework.VolumeTest{
|
||||
{
|
||||
volume: v1.VolumeSource{
|
||||
Volume: v1.VolumeSource{
|
||||
Cinder: &v1.CinderVolumeSource{
|
||||
VolumeID: volumeID,
|
||||
FSType: "ext3",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
file: "index.html",
|
||||
File: "index.html",
|
||||
// Randomize index.html to make sure we don't see the
|
||||
// content from previous test runs.
|
||||
expectedContent: "Hello from Cinder from namespace " + volumeName,
|
||||
ExpectedContent: "Hello from Cinder from namespace " + volumeName,
|
||||
},
|
||||
}
|
||||
|
||||
injectHtml(cs, config, tests[0].volume, tests[0].expectedContent)
|
||||
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
|
||||
|
||||
fsGroup := int64(1234)
|
||||
testVolumeClient(cs, config, &fsGroup, tests)
|
||||
framework.TestVolumeClient(cs, config, &fsGroup, tests)
|
||||
})
|
||||
})
|
||||
|
||||
@ -808,9 +508,9 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
||||
framework.KubeDescribe("PD", func() {
|
||||
It("should be mountable", func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
config := VolumeTestConfig{
|
||||
namespace: namespace.Name,
|
||||
prefix: "pd",
|
||||
config := framework.VolumeTestConfig{
|
||||
Namespace: namespace.Name,
|
||||
Prefix: "pd",
|
||||
}
|
||||
|
||||
By("creating a test gce pd volume")
|
||||
@ -824,30 +524,30 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
||||
defer func() {
|
||||
if clean {
|
||||
framework.Logf("Running volumeTestCleanup")
|
||||
volumeTestCleanup(f, config)
|
||||
framework.VolumeTestCleanup(f, config)
|
||||
}
|
||||
}()
|
||||
|
||||
tests := []VolumeTest{
|
||||
tests := []framework.VolumeTest{
|
||||
{
|
||||
volume: v1.VolumeSource{
|
||||
Volume: v1.VolumeSource{
|
||||
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
|
||||
PDName: volumeName,
|
||||
FSType: "ext3",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
file: "index.html",
|
||||
File: "index.html",
|
||||
// Randomize index.html to make sure we don't see the
|
||||
// content from previous test runs.
|
||||
expectedContent: "Hello from GCE from namespace " + volumeName,
|
||||
ExpectedContent: "Hello from GCE from namespace " + volumeName,
|
||||
},
|
||||
}
|
||||
|
||||
injectHtml(cs, config, tests[0].volume, tests[0].expectedContent)
|
||||
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
|
||||
|
||||
fsGroup := int64(1234)
|
||||
testVolumeClient(cs, config, &fsGroup, tests)
|
||||
framework.TestVolumeClient(cs, config, &fsGroup, tests)
|
||||
})
|
||||
})
|
||||
|
||||
@ -857,14 +557,14 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
||||
|
||||
framework.KubeDescribe("ConfigMap", func() {
|
||||
It("should be mountable", func() {
|
||||
config := VolumeTestConfig{
|
||||
namespace: namespace.Name,
|
||||
prefix: "configmap",
|
||||
config := framework.VolumeTestConfig{
|
||||
Namespace: namespace.Name,
|
||||
Prefix: "configmap",
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if clean {
|
||||
volumeTestCleanup(f, config)
|
||||
framework.VolumeTestCleanup(f, config)
|
||||
}
|
||||
}()
|
||||
configMap := &v1.ConfigMap{
|
||||
@ -873,7 +573,7 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: config.prefix + "-map",
|
||||
Name: config.Prefix + "-map",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"first": "this is the first file",
|
||||
@ -889,12 +589,12 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
||||
}()
|
||||
|
||||
// Test one ConfigMap mounted several times to test #28502
|
||||
tests := []VolumeTest{
|
||||
tests := []framework.VolumeTest{
|
||||
{
|
||||
volume: v1.VolumeSource{
|
||||
Volume: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: config.prefix + "-map",
|
||||
Name: config.Prefix + "-map",
|
||||
},
|
||||
Items: []v1.KeyToPath{
|
||||
{
|
||||
@ -904,14 +604,14 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
||||
},
|
||||
},
|
||||
},
|
||||
file: "firstfile",
|
||||
expectedContent: "this is the first file",
|
||||
File: "firstfile",
|
||||
ExpectedContent: "this is the first file",
|
||||
},
|
||||
{
|
||||
volume: v1.VolumeSource{
|
||||
Volume: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: config.prefix + "-map",
|
||||
Name: config.Prefix + "-map",
|
||||
},
|
||||
Items: []v1.KeyToPath{
|
||||
{
|
||||
@ -921,11 +621,11 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
||||
},
|
||||
},
|
||||
},
|
||||
file: "secondfile",
|
||||
expectedContent: "this is the second file",
|
||||
File: "secondfile",
|
||||
ExpectedContent: "this is the second file",
|
||||
},
|
||||
}
|
||||
testVolumeClient(cs, config, nil, tests)
|
||||
framework.TestVolumeClient(cs, config, nil, tests)
|
||||
})
|
||||
})
|
||||
|
||||
@ -939,9 +639,9 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
||||
var (
|
||||
volumePath string
|
||||
)
|
||||
config := VolumeTestConfig{
|
||||
namespace: namespace.Name,
|
||||
prefix: "vsphere",
|
||||
config := framework.VolumeTestConfig{
|
||||
Namespace: namespace.Name,
|
||||
Prefix: "vsphere",
|
||||
}
|
||||
By("creating a test vsphere volume")
|
||||
vsp, err := vsphere.GetVSphere()
|
||||
@ -957,29 +657,29 @@ var _ = framework.KubeDescribe("Volumes [Volume]", func() {
|
||||
defer func() {
|
||||
if clean {
|
||||
framework.Logf("Running volumeTestCleanup")
|
||||
volumeTestCleanup(f, config)
|
||||
framework.VolumeTestCleanup(f, config)
|
||||
}
|
||||
}()
|
||||
|
||||
tests := []VolumeTest{
|
||||
tests := []framework.VolumeTest{
|
||||
{
|
||||
volume: v1.VolumeSource{
|
||||
Volume: v1.VolumeSource{
|
||||
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
|
||||
VolumePath: volumePath,
|
||||
FSType: "ext4",
|
||||
},
|
||||
},
|
||||
file: "index.html",
|
||||
File: "index.html",
|
||||
// Randomize index.html to make sure we don't see the
|
||||
// content from previous test runs.
|
||||
expectedContent: "Hello from vSphere from namespace " + namespace.Name,
|
||||
ExpectedContent: "Hello from vSphere from namespace " + namespace.Name,
|
||||
},
|
||||
}
|
||||
|
||||
injectHtml(cs, config, tests[0].volume, tests[0].expectedContent)
|
||||
framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent)
|
||||
|
||||
fsGroup := int64(1234)
|
||||
testVolumeClient(cs, config, &fsGroup, tests)
|
||||
framework.TestVolumeClient(cs, config, &fsGroup, tests)
|
||||
})
|
||||
})
|
||||
})
|
||||
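For e2e authors consuming the relocated helpers from outside the `e2e` package, the flow distilled from the hunks above looks roughly like the sketch below. This is an illustration, not code from this commit: `framework.NfsServerImage`, the NFS export path, and the expected file content are assumptions modeled on the iSCSI/RBD/Ceph cases in the diff.

```go
// Sketch: driving the relocated framework volume helpers from a test
// outside the e2e package. Assumes the usual framework fixtures are in
// scope (f *framework.Framework, cs clientset.Interface,
// namespace *v1.Namespace, clean bool), mirroring the tests above.
It("should be mountable", func() {
	config := framework.VolumeTestConfig{
		Namespace:   namespace.Name,
		Prefix:      "nfs",
		ServerImage: framework.NfsServerImage, // assumed constant, by analogy with framework.IscsiServerImage
		ServerPorts: []int{2049},
	}
	defer func() {
		if clean {
			framework.VolumeTestCleanup(f, config) // tears down server and client pods
		}
	}()

	// Start the server pod and read its IP to build the VolumeSource.
	pod := framework.StartVolumeServer(cs, config)
	serverIP := pod.Status.PodIP

	tests := []framework.VolumeTest{
		{
			Volume: v1.VolumeSource{
				NFS: &v1.NFSVolumeSource{
					Server:   serverIP,
					Path:     "/", // export path is an assumption
					ReadOnly: true,
				},
			},
			File:            "index.html",
			ExpectedContent: "Hello from NFS!", // must match what the server image serves
		},
	}
	// nil fsGroup runs the client pod without an fsGroup; pass &fsGroup
	// (as the iSCSI/RBD cases above do) to exercise fsGroup handling.
	framework.TestVolumeClient(cs, config, nil, tests)
})
```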