Local persistent volume basic e2e

Michelle Au 2017-04-24 20:41:40 -07:00
parent 61de4870de
commit 1a280993a9
4 changed files with 295 additions and 18 deletions

View File

@@ -35,6 +35,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/v1/helper"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
@@ -73,11 +74,13 @@ type PVCMap map[types.NamespacedName]pvcval
// },
// }
type PersistentVolumeConfig struct {
PVSource v1.PersistentVolumeSource
Prebind *v1.PersistentVolumeClaim
ReclaimPolicy v1.PersistentVolumeReclaimPolicy
NamePrefix string
Labels labels.Set
StorageClassName string
NodeAffinity *v1.NodeAffinity
}
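For context, here is a minimal sketch of how a caller might populate the two new fields. The class name, path, and node value are illustrative placeholders; the local-volume test added below builds essentially this config in makeLocalPVConfig.

pvConfig := framework.PersistentVolumeConfig{
	NamePrefix:       "local-pv",
	StorageClassName: "example-storageclass", // placeholder class name
	PVSource: v1.PersistentVolumeSource{
		Local: &v1.LocalVolumeSource{
			Path: "/tmp/example-local-volume", // placeholder host path
		},
	},
	// Pin the PV to a single node via a hostname-based selector.
	NodeAffinity: &v1.NodeAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{{
				MatchExpressions: []v1.NodeSelectorRequirement{{
					Key:      "kubernetes.io/hostname",
					Operator: v1.NodeSelectorOpIn,
					Values:   []string{"example-node"}, // placeholder node name
				}},
			}},
		},
	},
}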
// PersistentVolumeClaimConfig is consumed by MakePersistentVolumeClaim() to generate a PVC object.
@@ -85,9 +88,10 @@ type PersistentVolumeConfig struct {
// (+optional) Annotations defines the PVC's annotations
type PersistentVolumeClaimConfig struct {
AccessModes []v1.PersistentVolumeAccessMode
Annotations map[string]string
Selector *metav1.LabelSelector
StorageClassName *string
}
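StorageClassName is a *string so that nil can leave the PVC's storageClassName unset, which is distinct from explicitly requesting the empty class. A minimal sketch of requesting a class (the name is a placeholder); makeLocalPVCConfig below does the same:

sc := "example-storageclass" // placeholder
pvcConfig := framework.PersistentVolumeClaimConfig{
	AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
	StorageClassName: &sc, // nil would leave the field unset on the PVC
}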
// Clean up a pv and pvc in a single pv/pvc test case.
@@ -561,7 +565,7 @@ func makePvcKey(ns, name string) types.NamespacedName {
// is assigned, assumes "Retain". Specs are expected to match the test's PVC.
// Note: the passed-in claim does not have a name until it is created and thus the PV's
// ClaimRef cannot be completely filled-in in this func. Therefore, the ClaimRef's name
// is added later in CreatePVCPV.
func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume {
var claimRef *v1.ObjectReference
// If the reclaimPolicy is not provided, assume Retain
@@ -575,7 +579,7 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
Namespace: pvConfig.Prebind.Namespace,
}
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pvConfig.NamePrefix,
Labels: pvConfig.Labels,
@@ -594,9 +598,16 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
v1.ReadOnlyMany,
v1.ReadWriteMany,
},
ClaimRef: claimRef,
StorageClassName: pvConfig.StorageClassName,
},
}
err := helper.StorageNodeAffinityToAlphaAnnotation(pv.Annotations, pvConfig.NodeAffinity)
if err != nil {
Logf("Setting storage node affinity failed: %v", err)
return nil
}
return pv
}
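PV node affinity was alpha at this point, so it travels as an annotation rather than a first-class field. The helper's behavior is roughly the following sketch; this assumes the alpha annotation key volume.alpha.kubernetes.io/node-affinity and is an approximation, not the helper's actual code (which lives in k8s.io/kubernetes/pkg/api/v1/helper).

// Approximation of StorageNodeAffinityToAlphaAnnotation; requires
// "encoding/json" and "k8s.io/kubernetes/pkg/api/v1".
func setAlphaNodeAffinity(pv *v1.PersistentVolume, affinity *v1.NodeAffinity) error {
	if affinity == nil {
		return nil
	}
	data, err := json.Marshal(*affinity) // serialize the affinity as JSON
	if err != nil {
		return err
	}
	if pv.Annotations == nil {
		pv.Annotations = map[string]string{}
	}
	// Assumed alpha annotation key for node affinity on PVs.
	pv.Annotations["volume.alpha.kubernetes.io/node-affinity"] = string(data)
	return nil
}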
// Returns a PVC definition based on the namespace.
@@ -625,6 +636,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.PersistentVolumeClaim {
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},
},
StorageClassName: cfg.StorageClassName,
},
}
}

View File

@@ -76,11 +76,18 @@ type VolumeTestConfig struct {
ServerImage string
// Ports to export from the server pod. TCP only.
ServerPorts []int
// Commands to run in the container image.
ServerCmds []string
// Arguments to pass to the container image.
ServerArgs []string
// Volumes to be mounted to the server container from the host
// map <host (source) path> -> <container (dst.) path>
ServerVolumes map[string]string
// Wait for the pod to terminate successfully.
// False indicates that the pod is long-running.
WaitForCompletion bool
// NodeName to run pod on. Default is any node.
NodeName string
}
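A minimal sketch of a run-to-completion configuration using the two new fields; the namespace, command, and node name are placeholders. runLocalUtil in the new test below uses this exact pattern.

utilConfig := framework.VolumeTestConfig{
	Namespace:     "example-ns", // placeholder
	Prefix:        "util",
	ServerImage:   "gcr.io/google_containers/busybox:1.24",
	ServerCmds:    []string{"/bin/sh"},
	ServerArgs:    []string{"-c", "echo hello > /myvol/file"}, // placeholder command
	ServerVolumes: map[string]string{"/tmp": "/myvol"},
	// Run once with RestartPolicyNever instead of serving long-term.
	WaitForCompletion: true,
	// Pin to a specific node; empty lets the scheduler choose.
	NodeName: "example-node", // placeholder
}

Note that when WaitForCompletion is set, StartVolumeServer waits for the pod to succeed and then deletes it, so the returned pod reference is not useful in that mode.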
// VolumeTest contains a volume to mount into a client pod and its
@@ -133,6 +140,11 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
By(fmt.Sprint("creating ", serverPodName, " pod"))
privileged := new(bool)
*privileged = true
restartPolicy := v1.RestartPolicyAlways
if config.WaitForCompletion {
restartPolicy = v1.RestartPolicyNever
}
serverPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@@ -153,12 +165,15 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
SecurityContext: &v1.SecurityContext{
Privileged: privileged,
},
Command: config.ServerCmds,
Args: config.ServerArgs,
Ports: serverPodPorts,
VolumeMounts: mounts,
},
},
Volumes: volumes,
RestartPolicy: restartPolicy,
NodeName: config.NodeName,
},
}
@@ -176,12 +191,16 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
}
}
if config.WaitForCompletion {
ExpectNoError(WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
ExpectNoError(podClient.Delete(serverPod.Name, nil))
} else {
ExpectNoError(WaitForPodRunningInNamespace(client, serverPod))
if pod == nil {
By(fmt.Sprintf("locating the %q server pod", serverPodName))
pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
}
}
return pod
}

View File

@@ -14,6 +14,7 @@ go_library(
"persistent_volumes.go",
"persistent_volumes-disruptive.go",
"persistent_volumes-gce.go",
"persistent_volumes-local.go",
"persistent_volumes-vsphere.go",
"pv_reclaimpolicy.go",
"pvc_label_selector.go",

View File

@@ -0,0 +1,245 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"path/filepath"
. "github.com/onsi/ginkgo"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
)
type localTestConfig struct {
ns string
nodes []v1.Node
client clientset.Interface
}
type localTestVolume struct {
// Node that the volume is on
node *v1.Node
// Path to the volume on the host node
hostDir string
// Path to the volume in the local util container
containerDir string
// PVC for this volume
pvc *v1.PersistentVolumeClaim
// PV for this volume
pv *v1.PersistentVolume
}
const (
// TODO: This may not be available/writable on all images.
hostBase = "/tmp"
containerBase = "/myvol"
testFile = "test-file"
testContents = "testdata"
testSC = "local-test-storageclass"
)
var _ = framework.KubeDescribe("[Volume] PersistentVolumes-local [Feature:LocalPersistentVolumes] [Serial]", func() {
f := framework.NewDefaultFramework("persistent-local-volumes-test")
var (
config *localTestConfig
)
BeforeEach(func() {
config = &localTestConfig{
ns: f.Namespace.Name,
client: f.ClientSet,
nodes: []v1.Node{},
}
// Get all the schedulable nodes
nodes, err := config.client.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to get nodes: %v", err)
}
for _, node := range nodes.Items {
if !node.Spec.Unschedulable {
// TODO: does this need to be a deep copy?
config.nodes = append(config.nodes, node)
}
}
if len(config.nodes) == 0 {
framework.Failf("No available nodes for scheduling")
}
})
Context("when one pod requests one prebound PVC", func() {
var (
testVol *localTestVolume
node *v1.Node
)
BeforeEach(func() {
// Choose the first node
node = &config.nodes[0]
})
AfterEach(func() {
cleanupLocalVolume(config, testVol)
testVol = nil
})
It("should be able to mount and read from the volume", func() {
By("Initializing test volume")
testVol = setupLocalVolume(config, node)
By("Creating local PVC and PV")
createLocalPVCPV(config, testVol)
By("Creating a pod to consume the PV")
readCmd := fmt.Sprintf("cat /mnt/volume1/%s", testFile)
podSpec := createLocalPod(config, testVol, readCmd)
f.TestContainerOutput("pod consumes PV", podSpec, 0, []string{testContents})
})
It("should be able to mount and write to the volume", func() {
By("Initializing test volume")
testVol = setupLocalVolume(config, node)
By("Creating local PVC and PV")
createLocalPVCPV(config, testVol)
By("Creating a pod to write to the PV")
testFilePath := filepath.Join("/mnt/volume1", testFile)
cmd := fmt.Sprintf("echo %s > %s; cat %s", testVol.hostDir, testFilePath, testFilePath)
podSpec := createLocalPod(config, testVol, cmd)
f.TestContainerOutput("pod writes to PV", podSpec, 0, []string{testVol.hostDir})
})
})
})
// Launches a pod with hostpath volume on a specific node to set up a directory to use
// for the local PV
func setupLocalVolume(config *localTestConfig, node *v1.Node) *localTestVolume {
testDirName := "local-volume-test-" + string(uuid.NewUUID())
testDir := filepath.Join(containerBase, testDirName)
hostDir := filepath.Join(hostBase, testDirName)
testFilePath := filepath.Join(testDir, testFile)
writeCmd := fmt.Sprintf("mkdir %s; echo %s > %s", testDir, testContents, testFilePath)
framework.Logf("Creating local volume on node %q at path %q", node.Name, hostDir)
runLocalUtil(config, node.Name, writeCmd)
return &localTestVolume{
node: node,
hostDir: hostDir,
containerDir: testDir,
}
}
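To make the path mapping concrete, a tiny standalone sketch (a fixed name stands in for the uuid-suffixed directory the test generates):

package main

import (
	"fmt"
	"path/filepath"
)

const (
	hostBase      = "/tmp"
	containerBase = "/myvol"
)

func main() {
	testDirName := "local-volume-test-1234" // placeholder for the uuid-based name
	// The node's view of the directory; this is what the PV's Local.Path uses.
	fmt.Println(filepath.Join(hostBase, testDirName)) // /tmp/local-volume-test-1234
	// The utility pod's view, through the hostBase -> containerBase hostPath mount.
	fmt.Println(filepath.Join(containerBase, testDirName)) // /myvol/local-volume-test-1234
}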
// Deletes the PVC/PV, and launches a pod with hostpath volume to remove the test directory
func cleanupLocalVolume(config *localTestConfig, volume *localTestVolume) {
if volume == nil {
return
}
By("Cleaning up PVC and PV")
errs := framework.PVPVCCleanup(config.client, config.ns, volume.pv, volume.pvc)
if len(errs) > 0 {
framework.Logf("AfterEach: Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
}
By("Removing the test directory")
writeCmd := fmt.Sprintf("rm -r %s", volume.containerDir)
runLocalUtil(config, volume.node.Name, writeCmd)
}
func runLocalUtil(config *localTestConfig, nodeName, cmd string) {
framework.StartVolumeServer(config.client, framework.VolumeTestConfig{
Namespace: config.ns,
Prefix: "local-volume-init",
ServerImage: "gcr.io/google_containers/busybox:1.24",
ServerCmds: []string{"/bin/sh"},
ServerArgs: []string{"-c", cmd},
ServerVolumes: map[string]string{
hostBase: containerBase,
},
WaitForCompletion: true,
NodeName: nodeName,
})
}
func makeLocalPVCConfig() framework.PersistentVolumeClaimConfig {
sc := testSC
return framework.PersistentVolumeClaimConfig{
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
StorageClassName: &sc,
}
}
func makeLocalPVConfig(volume *localTestVolume) framework.PersistentVolumeConfig {
// TODO: hostname may not be the best option
nodeKey := "kubernetes.io/hostname"
if volume.node.Labels == nil {
framework.Failf("Node does not have labels")
}
nodeValue, found := volume.node.Labels[nodeKey]
if !found {
framework.Failf("Node does not have required label %q", nodeKey)
}
return framework.PersistentVolumeConfig{
PVSource: v1.PersistentVolumeSource{
Local: &v1.LocalVolumeSource{
Path: volume.hostDir,
},
},
NamePrefix: "local-pv",
StorageClassName: testSC,
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: nodeKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{nodeValue},
},
},
},
},
},
},
}
}
// Creates a PVC and PV with prebinding
func createLocalPVCPV(config *localTestConfig, volume *localTestVolume) {
pvcConfig := makeLocalPVCConfig()
pvConfig := makeLocalPVConfig(volume)
var err error
volume.pv, volume.pvc, err = framework.CreatePVPVC(config.client, pvConfig, pvcConfig, config.ns, true)
framework.ExpectNoError(err)
framework.WaitOnPVandPVC(config.client, config.ns, volume.pv, volume.pvc)
}
func createLocalPod(config *localTestConfig, volume *localTestVolume, cmd string) *v1.Pod {
return framework.MakePod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, cmd)
}