Mirror of https://github.com/k3s-io/kubernetes.git
Add basic local volume provisioner e2e tests
commit 59c54805ae
parent 04c863cb1d
@@ -17,14 +17,20 @@ limitations under the License.
package storage

import (
	"encoding/json"
	"fmt"
	"path"
	"path/filepath"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"k8s.io/api/core/v1"
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/uuid"
	clientset "k8s.io/client-go/kubernetes"
@@ -34,7 +40,9 @@ import (
type localTestConfig struct {
	ns     string
	nodes  *v1.NodeList
	node0  *v1.Node
	client clientset.Interface
	scName string
}

type localTestVolume struct {
@@ -54,15 +62,36 @@ const (
	// TODO: This may not be available/writable on all images.
	hostBase      = "/tmp"
	containerBase = "/myvol"
	// 'hostBase + discoveryDir' is the path for volume discovery.
	discoveryDir = "disks"
	// Path to the first volume in the test containers
	// created via createLocalPod or makeLocalPod
	// leveraging pv_util.MakePod
	volumeDir = "/mnt/volume1"
	// testFile created in setupLocalVolume
	testFile = "test-file"
	// testFileContent writtent into testFile
	// testFileContent written into testFile
	testFileContent = "test-file-content"
	testSC          = "local-test-storageclass"
	testSCPrefix    = "local-volume-test-storageclass"

	// Following are constants used for provisioner e2e tests.
	//
	// testServiceAccount is the service account for bootstrapper
	testServiceAccount = "local-storage-bootstrapper"
	// testRoleBinding is the cluster-admin rolebinding for bootstrapper
	testRoleBinding = "local-storage:bootstrapper"
	// volumeConfigName is the configmap passed to bootstrapper and provisioner
	volumeConfigName = "local-volume-config"
	// bootstrapper and provisioner images used for e2e tests
	bootstrapperImageName = "quay.io/external_storage/local-volume-provisioner-bootstrap:v1.0.0"
	provisionerImageName  = "quay.io/external_storage/local-volume-provisioner:v1.0.0"
	// provisioner daemonSetName name, must match the one defined in bootstrapper
	daemonSetName = "local-volume-provisioner"
	// provisioner node/pv cluster role binding, must match the one defined in bootstrapper
	nodeBindingName = "local-storage:provisioner-node-binding"
	pvBindingName   = "local-storage:provisioner-pv-binding"
	// A sample request size
	testRequestSize = "10Mi"
)

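Taken together, these constants describe the on-node layout the provisioner tests rely on: the provisioner watches hostBase/discoveryDir ("/tmp/disks"), and every subdirectory created there is discovered and turned into a local PV. A minimal standalone sketch of how the tests derive those paths (the "vol-" prefix matches the BeforeEach further down; the snippet itself is illustrative and not part of the commit):

package main

import (
	"fmt"
	"path"

	"k8s.io/apimachinery/pkg/util/uuid"
)

const (
	hostBase     = "/tmp"
	discoveryDir = "disks"
)

func main() {
	// Base path the provisioner is configured to scan.
	discoveryPath := path.Join(hostBase, discoveryDir) // "/tmp/disks"
	// Each test volume is a uniquely named subdirectory under that path;
	// the provisioner discovers it and creates a matching local PV.
	volumePath := path.Join(discoveryPath, fmt.Sprintf("vol-%v", string(uuid.NewUUID())))
	fmt.Println(volumePath)
}
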
var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [Serial]", func() {
@@ -71,22 +100,24 @@ var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [S
	var (
		config *localTestConfig
		node0  *v1.Node
		scName string
	)

	BeforeEach(func() {

		// Get all the schedulable nodes
		nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		Expect(len(nodes.Items)).NotTo(BeZero(), "No available nodes for scheduling")
		scName = fmt.Sprintf("%v-%v", testSCPrefix, f.Namespace.Name)
		// Choose the first node
		node0 = &config.nodes.Items[0]

		config = &localTestConfig{
			ns:     f.Namespace.Name,
			client: f.ClientSet,
			nodes:  nodes,
			node0:  node0,
			scName: scName,
		}

		// Choose the first node
		node0 = &config.nodes.Items[0]
	})

	Context("when one pod requests one prebound PVC", func() {
@@ -128,7 +159,7 @@ var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [S
			Expect(pod1NodeName).To(Equal(node0.Name))

			By("Reading in pod1")
			//testFileContent was written during setupLocalVolume
			// testFileContent was written during setupLocalVolume
			_, readCmd := createWriteAndReadCmds(volumeDir, testFile, "" /*writeTestFileContent*/)
			readOut := podRWCmdExec(pod1, readCmd)
			Expect(readOut).To(ContainSubstring(testFileContent)) /*aka writeTestFileContents*/
@@ -266,6 +297,60 @@ var _ = SIGDescribe("PersistentVolumes-local [Feature:LocalPersistentVolumes] [S
			framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
		})
	})

	Context("when using local volume provisioner", func() {
		var (
			volumePath string
		)

		BeforeEach(func() {
			setupLocalVolumeProvisioner(config)
			volumePath = path.Join(
				hostBase, discoveryDir, fmt.Sprintf("vol-%v", string(uuid.NewUUID())))
		})

		AfterEach(func() {
			cleanupLocalVolumeProvisioner(config, volumePath)
		})

		It("should create and recreate local persistent volume", func() {
			By("Creating bootstrapper pod to start provisioner daemonset")
			createBootstrapperPod(config)
			kind := schema.GroupKind{Group: "extensions", Kind: "DaemonSet"}
			framework.WaitForControlledPodsRunning(config.client, config.ns, daemonSetName, kind)

			By("Creating a directory under discovery path")
			framework.Logf("creating local volume under path %q", volumePath)
			mkdirCmd := fmt.Sprintf("mkdir %v -m 777", volumePath)
			_, err := framework.NodeExec(node0.Name, mkdirCmd)
			Expect(err).NotTo(HaveOccurred())
			oldPV, err := waitForLocalPersistentVolume(config.client, volumePath)
			Expect(err).NotTo(HaveOccurred())

			// Create a persistent volume claim for local volume: the above volume will be bound.
			By("Creating a persistent volume claim")
			claim, err := config.client.Core().PersistentVolumeClaims(config.ns).Create(newLocalClaim(config))
			Expect(err).NotTo(HaveOccurred())
			err = framework.WaitForPersistentVolumeClaimPhase(
				v1.ClaimBound, config.client, claim.Namespace, claim.Name, framework.Poll, 1*time.Minute)
			Expect(claim.Spec.VolumeName).To(Equal(oldPV.Name))
			Expect(err).NotTo(HaveOccurred())

			// Delete the persistent volume claim: file will be cleaned up and volume be re-created.
			By("Deleting the persistent volume claim to clean up persistent volume and re-create one")
			writeCmd, readCmd := createWriteAndReadCmds(volumePath, testFile, testFileContent)
			_, err = framework.NodeExec(node0.Name, writeCmd)
			Expect(err).NotTo(HaveOccurred())
			err = config.client.Core().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, &metav1.DeleteOptions{})
			Expect(err).NotTo(HaveOccurred())
			newPV, err := waitForLocalPersistentVolume(config.client, volumePath)
			Expect(err).NotTo(HaveOccurred())
			Expect(newPV.UID).NotTo(Equal(oldPV.UID))
			result, err := framework.NodeExec(node0.Name, readCmd)
			Expect(err).NotTo(HaveOccurred())
			Expect(result.Code).NotTo(BeZero(), "file should be deleted across local pv recreation, but exists")
		})
	})
})

// podNode wraps RunKubectl to get node where pod is running
@@ -274,19 +359,19 @@ func podNodeName(config *localTestConfig, pod *v1.Pod) (string, error) {
	return runtimePod.Spec.NodeName, runtimePodErr
}

// Launches a pod with hostpath volume on a specific node to setup a directory to use
// for the local PV
func setupLocalVolume(config *localTestConfig, node *v1.Node) *localTestVolume {
// setupLocalVolume sets up a directory to use for the local PV
func setupLocalVolume(config *localTestConfig) *localTestVolume {
	testDirName := "local-volume-test-" + string(uuid.NewUUID())
	testDir := filepath.Join(containerBase, testDirName)
	hostDir := filepath.Join(hostBase, testDirName)
	//populate volume with testFile containing testFileContent
	// populate volume with testFile containing testFileContent
	writeCmd, _ := createWriteAndReadCmds(testDir, testFile, testFileContent)
	By(fmt.Sprintf("Creating local volume on node %q at path %q", node.Name, hostDir))
	By(fmt.Sprintf("Creating local volume on node %q at path %q", config.node0.Name, hostDir))

	runLocalUtil(config, node.Name, writeCmd)
	_, err := framework.NodeExec(config.node0.Name, writeCmd)
	Expect(err).NotTo(HaveOccurred())
	return &localTestVolume{
		node:         node,
		node:         config.node0,
		hostDir:      hostDir,
		containerDir: testDir,
	}
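The change in this hunk and the next replaces the pod-based runLocalUtil helper with framework.NodeExec, which runs a command directly on the node and returns a result whose Code field carries the remote exit status. A short fragment (test-context only; the command string is illustrative, not from the commit) showing the pattern the new code uses:

	// Run a shell command on the chosen node and check that it succeeded.
	result, err := framework.NodeExec(config.node0.Name, "ls /tmp/disks")
	Expect(err).NotTo(HaveOccurred())
	Expect(result.Code).To(BeZero(), "command failed on node")
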
@@ -306,33 +391,18 @@ func cleanupLocalVolume(config *localTestConfig, volume *localTestVolume) {

	By("Removing the test directory")
	removeCmd := fmt.Sprintf("rm -r %s", volume.containerDir)
	runLocalUtil(config, volume.node.Name, removeCmd)
	_, err := framework.NodeExec(config.node0.Name, removeCmd)
	Expect(err).NotTo(HaveOccurred())
}

func runLocalUtil(config *localTestConfig, nodeName, cmd string) {
	framework.StartVolumeServer(config.client, framework.VolumeTestConfig{
		Namespace:   config.ns,
		Prefix:      "local-volume-init",
		ServerImage: framework.BusyBoxImage,
		ServerCmds:  []string{"/bin/sh"},
		ServerArgs:  []string{"-c", cmd},
		ServerVolumes: map[string]string{
			hostBase: containerBase,
		},
		WaitForCompletion: true,
		NodeName:          nodeName,
	})
}

func makeLocalPVCConfig() framework.PersistentVolumeClaimConfig {
	sc := testSC
func makeLocalPVCConfig(config *localTestConfig) framework.PersistentVolumeClaimConfig {
	return framework.PersistentVolumeClaimConfig{
		AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
		StorageClassName: &sc,
		StorageClassName: &config.scName,
	}
}

func makeLocalPVConfig(volume *localTestVolume) framework.PersistentVolumeConfig {
func makeLocalPVConfig(config *localTestConfig, volume *localTestVolume) framework.PersistentVolumeConfig {
	// TODO: hostname may not be the best option
	nodeKey := "kubernetes.io/hostname"
	if volume.node.Labels == nil {
@@ -350,7 +420,7 @@ func makeLocalPVConfig(volume *localTestVolume) framework.PersistentVolumeConfig
			},
		},
		NamePrefix:       "local-pv",
		StorageClassName: testSC,
		StorageClassName: config.scName,
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{
@@ -371,8 +441,8 @@ func makeLocalPVConfig(volume *localTestVolume) framework.PersistentVolumeConfig

// Creates a PVC and PV with prebinding
func createLocalPVCPV(config *localTestConfig, volume *localTestVolume) {
	pvcConfig := makeLocalPVCConfig()
	pvConfig := makeLocalPVConfig(volume)
	pvcConfig := makeLocalPVCConfig(config)
	pvConfig := makeLocalPVConfig(config, volume)

	var err error
	volume.pv, volume.pvc, err = framework.CreatePVPVC(config.client, pvConfig, pvcConfig, config.ns, true)
@@ -409,10 +479,216 @@ func podRWCmdExec(pod *v1.Pod, cmd string) string {
// and create local PVC and PV
func setupLocalVolumePVCPV(config *localTestConfig, node *v1.Node) *localTestVolume {
	By("Initializing test volume")
	testVol := setupLocalVolume(config, node)
	testVol := setupLocalVolume(config)

	By("Creating local PVC and PV")
	createLocalPVCPV(config, testVol)

	return testVol
}

func setupLocalVolumeProvisioner(config *localTestConfig) {
	By("Bootstrapping local volume provisioner")
	createServiceAccount(config)
	createClusterRoleBinding(config)
	createVolumeConfigMap(config)

	By("Initializing local volume discovery base path")
	mkdirCmd := fmt.Sprintf("mkdir %v -m 777", path.Join(hostBase, discoveryDir))
	_, err := framework.NodeExec(config.node0.Name, mkdirCmd)
	Expect(err).NotTo(HaveOccurred())
}

func cleanupLocalVolumeProvisioner(config *localTestConfig, volumePath string) {
	By("Cleaning up cluster role binding")
	deleteClusterRoleBinding(config)

	By("Removing the test directory")
	removeCmd := fmt.Sprintf("rm -r %s", path.Join(hostBase, discoveryDir))
	_, err := framework.NodeExec(config.node0.Name, removeCmd)
	Expect(err).NotTo(HaveOccurred())

	By("Cleaning up persistent volume")
	pv, err := findLocalPersistentVolume(config.client, volumePath)
	Expect(err).NotTo(HaveOccurred())
	err = config.client.Core().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{})
	Expect(err).NotTo(HaveOccurred())
}

func createServiceAccount(config *localTestConfig) {
	serviceAccount := v1.ServiceAccount{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ServiceAccount"},
		ObjectMeta: metav1.ObjectMeta{Name: testServiceAccount, Namespace: config.ns},
	}
	_, err := config.client.CoreV1().ServiceAccounts(config.ns).Create(&serviceAccount)
	Expect(err).NotTo(HaveOccurred())
}

func createClusterRoleBinding(config *localTestConfig) {
	subjects := []rbacv1beta1.Subject{
		{
			Kind:      rbacv1beta1.ServiceAccountKind,
			Name:      testServiceAccount,
			Namespace: config.ns,
		},
	}

	binding := rbacv1beta1.ClusterRoleBinding{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "rbac.authorization.k8s.io/v1beta1",
			Kind:       "ClusterRoleBinding",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: testRoleBinding,
		},
		RoleRef: rbacv1beta1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     "cluster-admin",
		},
		Subjects: subjects,
	}

	_, err := config.client.RbacV1beta1().ClusterRoleBindings().Create(&binding)
	Expect(err).NotTo(HaveOccurred())
}

func deleteClusterRoleBinding(config *localTestConfig) {
	err := config.client.RbacV1beta1().ClusterRoleBindings().Delete(testRoleBinding, metav1.NewDeleteOptions(0))
	Expect(err).NotTo(HaveOccurred())
	// These role bindings are created by the provisioner; we just ensure
	// they are deleted and do not panic on error.
	config.client.RbacV1beta1().ClusterRoleBindings().Delete(nodeBindingName, metav1.NewDeleteOptions(0))
	config.client.RbacV1beta1().ClusterRoleBindings().Delete(pvBindingName, metav1.NewDeleteOptions(0))
}

func createVolumeConfigMap(config *localTestConfig) {
	mountConfig := struct {
		HostDir string `json:"hostDir"`
	}{
		HostDir: path.Join(hostBase, discoveryDir),
	}
	data, err := json.Marshal(&mountConfig)
	Expect(err).NotTo(HaveOccurred())

	configMap := v1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "ConfigMap",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      volumeConfigName,
			Namespace: config.ns,
		},
		Data: map[string]string{
			config.scName: string(data),
		},
	}
	_, err = config.client.CoreV1().ConfigMaps(config.ns).Create(&configMap)
	Expect(err).NotTo(HaveOccurred())
}

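For reference, the ConfigMap built by createVolumeConfigMap holds one entry per storage class whose value is the JSON-encoded mount config. A standalone sketch of the data it produces (not part of the commit; the printed storage class name is illustrative, in the test it is testSCPrefix plus the namespace):

package main

import (
	"encoding/json"
	"fmt"
	"path"
)

func main() {
	mountConfig := struct {
		HostDir string `json:"hostDir"`
	}{
		HostDir: path.Join("/tmp", "disks"), // hostBase + discoveryDir
	}
	data, err := json.Marshal(&mountConfig)
	if err != nil {
		panic(err)
	}
	// Prints the ConfigMap entry the provisioner reads, e.g.:
	//   local-volume-test-storageclass-<namespace>: {"hostDir":"/tmp/disks"}
	fmt.Printf("local-volume-test-storageclass-<namespace>: %s\n", data)
}
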
func createBootstrapperPod(config *localTestConfig) {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "local-volume-tester-",
		},
		Spec: v1.PodSpec{
			RestartPolicy:      v1.RestartPolicyNever,
			ServiceAccountName: testServiceAccount,
			Containers: []v1.Container{
				{
					Name:  "volume-tester",
					Image: bootstrapperImageName,
					Env: []v1.EnvVar{
						{
							Name: "MY_NAMESPACE",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									FieldPath: "metadata.namespace",
								},
							},
						},
					},
					Args: []string{
						fmt.Sprintf("--image=%v", provisionerImageName),
						fmt.Sprintf("--volume-config=%v", volumeConfigName),
					},
				},
			},
		},
	}
	pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForPodSuccessInNamespace(config.client, pod.Name, pod.Namespace)
	Expect(err).NotTo(HaveOccurred())
}

// newLocalClaim creates a new persistent volume claim.
func newLocalClaim(config *localTestConfig) *v1.PersistentVolumeClaim {
	claim := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "local-pvc-",
			Namespace:    config.ns,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &config.scName,
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse(testRequestSize),
				},
			},
		},
	}

	return &claim
}

// waitForLocalPersistentVolume waits for a local persistent volume with 'volumePath' to be available.
func waitForLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1.PersistentVolume, error) {
	var pv *v1.PersistentVolume
	for start := time.Now(); time.Since(start) < 10*time.Minute && pv == nil; time.Sleep(5 * time.Second) {
		pvs, err := c.Core().PersistentVolumes().List(metav1.ListOptions{})
		if err != nil {
			return nil, err
		}
		if len(pvs.Items) == 0 {
			continue
		}
		for _, p := range pvs.Items {
			if p.Spec.PersistentVolumeSource.Local == nil || p.Spec.PersistentVolumeSource.Local.Path != volumePath {
				continue
			}
			if p.Status.Phase != v1.VolumeAvailable {
				continue
			}
			pv = &p
			break
		}
	}
	if pv == nil {
		return nil, fmt.Errorf("Timeout while waiting for local persistent volume with path %v to be available", volumePath)
	}
	return pv, nil
}

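The loop above is hand-rolled; an equivalent written with the polling helpers from k8s.io/apimachinery/pkg/util/wait would look roughly like the sketch below. It is not part of the commit, only an illustration of the same logic, and it assumes a wait import is added alongside the existing ones.

// waitForLocalPV is a hypothetical variant of waitForLocalPersistentVolume
// built on wait.PollImmediate instead of a manual timing loop.
func waitForLocalPV(c clientset.Interface, volumePath string) (*v1.PersistentVolume, error) {
	var pv *v1.PersistentVolume
	err := wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) {
		pvs, err := c.Core().PersistentVolumes().List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range pvs.Items {
			p := &pvs.Items[i]
			if p.Spec.PersistentVolumeSource.Local != nil &&
				p.Spec.PersistentVolumeSource.Local.Path == volumePath &&
				p.Status.Phase == v1.VolumeAvailable {
				pv = p
				return true, nil
			}
		}
		return false, nil
	})
	return pv, err
}
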
// findLocalPersistentVolume finds the persistent volume whose 'spec.local.path' equals 'volumePath'.
func findLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1.PersistentVolume, error) {
	pvs, err := c.Core().PersistentVolumes().List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	for _, p := range pvs.Items {
		if p.Spec.PersistentVolumeSource.Local != nil && p.Spec.PersistentVolumeSource.Local.Path == volumePath {
			return &p, nil
		}
	}
	return nil, fmt.Errorf("Unable to find local persistent volume with path %v", volumePath)
}