Merge pull request #72617 from cofyc/clean_local_volume_provisioner_e2e
Clean up local volume provisioner e2e tests
commit 7376001870
@@ -60,6 +60,7 @@ go_library(
 "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library",
+"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
 "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
@@ -82,7 +83,6 @@ go_library(
 "//vendor/github.com/onsi/gomega:go_default_library",
 "//vendor/github.com/prometheus/common/model:go_default_library",
 "//vendor/google.golang.org/api/googleapi:go_default_library",
-"//vendor/sigs.k8s.io/yaml:go_default_library",
 ],
 )

@@ -18,7 +18,6 @@ package storage

 import (
 "fmt"
-"path"
 "path/filepath"
 "strconv"
 "strings"
@@ -27,19 +26,18 @@ import (

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
-"sigs.k8s.io/yaml"

 appsv1 "k8s.io/api/apps/v1"
 "k8s.io/api/core/v1"
-rbacv1beta1 "k8s.io/api/rbac/v1beta1"
 storagev1 "k8s.io/api/storage/v1"
+apierrors "k8s.io/apimachinery/pkg/api/errors"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/runtime/schema"
 utilerrors "k8s.io/apimachinery/pkg/util/errors"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/apimachinery/pkg/util/wait"
+"k8s.io/apimachinery/pkg/watch"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -137,24 +135,6 @@ const (
 testFileContent = "test-file-content"
 testSCPrefix = "local-volume-test-storageclass"

-// Following are constants used for provisioner e2e tests.
-//
-// testServiceAccount is the service account for bootstrapper
-testServiceAccount = "local-storage-admin"
-// volumeConfigName is the configmap passed to bootstrapper and provisioner
-volumeConfigName = "local-volume-config"
-// provisioner image used for e2e tests
-provisionerImageName = "quay.io/external_storage/local-volume-provisioner:v2.1.0"
-// provisioner daemonSetName name
-daemonSetName = "local-volume-provisioner"
-// provisioner default mount point folder
-provisionerDefaultMountRoot = "/mnt/local-storage"
-// provisioner node/pv cluster role binding
-nodeBindingName = "local-storage:provisioner-node-binding"
-pvBindingName = "local-storage:provisioner-pv-binding"
-systemRoleNode = "system:node"
-systemRolePVProvisioner = "system:persistent-volume-provisioner"
-
 // A sample request size
 testRequestSize = "10Mi"

@@ -398,107 +378,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 })
 })

-Context("Local volume provisioner [Serial]", func() {
-var volumePath string
-
-BeforeEach(func() {
-setupStorageClass(config, &immediateMode)
-setupLocalVolumeProvisioner(config)
-volumePath = path.Join(config.discoveryDir, fmt.Sprintf("vol-%v", string(uuid.NewUUID())))
-setupLocalVolumeProvisionerMountPoint(config, volumePath, config.node0)
-})
-
-AfterEach(func() {
-cleanupLocalVolumeProvisionerMountPoint(config, volumePath, config.node0)
-cleanupLocalVolumeProvisioner(config)
-cleanupStorageClass(config)
-})
-
-It("should create and recreate local persistent volume", func() {
-By("Starting a provisioner daemonset")
-createProvisionerDaemonset(config)
-
-By("Waiting for a PersistentVolume to be created")
-oldPV, err := waitForLocalPersistentVolume(config.client, volumePath)
-Expect(err).NotTo(HaveOccurred())
-
-// Create a persistent volume claim for local volume: the above volume will be bound.
-By("Creating a persistent volume claim")
-claim, err := config.client.CoreV1().PersistentVolumeClaims(config.ns).Create(newLocalClaim(config))
-Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForPersistentVolumeClaimPhase(
-v1.ClaimBound, config.client, claim.Namespace, claim.Name, framework.Poll, 1*time.Minute)
-Expect(err).NotTo(HaveOccurred())
-
-claim, err = config.client.CoreV1().PersistentVolumeClaims(config.ns).Get(claim.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
-Expect(claim.Spec.VolumeName).To(Equal(oldPV.Name))
-
-// Delete the persistent volume claim: file will be cleaned up and volume be re-created.
-By("Deleting the persistent volume claim to clean up persistent volume and re-create one")
-writeCmd := createWriteCmd(volumePath, testFile, testFileContent, DirectoryLocalVolumeType)
-err = issueNodeCommand(config, writeCmd, config.node0)
-Expect(err).NotTo(HaveOccurred())
-err = config.client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, &metav1.DeleteOptions{})
-Expect(err).NotTo(HaveOccurred())
-
-By("Waiting for a new PersistentVolume to be re-created")
-newPV, err := waitForLocalPersistentVolume(config.client, volumePath)
-Expect(err).NotTo(HaveOccurred())
-Expect(newPV.UID).NotTo(Equal(oldPV.UID))
-fileDoesntExistCmd := createFileDoesntExistCmd(volumePath, testFile)
-err = issueNodeCommand(config, fileDoesntExistCmd, config.node0)
-Expect(err).NotTo(HaveOccurred())
-
-By("Deleting provisioner daemonset")
-deleteProvisionerDaemonset(config)
-})
-It("should not create local persistent volume for filesystem volume that was not bind mounted", func() {
-
-directoryPath := filepath.Join(config.discoveryDir, "notbindmount")
-By("Creating a directory, not bind mounted, in discovery directory")
-mkdirCmd := fmt.Sprintf("mkdir -p %v -m 777", directoryPath)
-err := issueNodeCommand(config, mkdirCmd, config.node0)
-Expect(err).NotTo(HaveOccurred())
-
-By("Starting a provisioner daemonset")
-createProvisionerDaemonset(config)
-
-By("Allowing provisioner to run for 30s and discover potential local PVs")
-time.Sleep(30 * time.Second)
-
-By("Examining provisioner logs for not an actual mountpoint message")
-provisionerPodName := findProvisionerDaemonsetPodName(config)
-logs, err := framework.GetPodLogs(config.client, config.ns, provisionerPodName, "" /*containerName*/)
-Expect(err).NotTo(HaveOccurred(),
-"Error getting logs from pod %s in namespace %s", provisionerPodName, config.ns)
-
-expectedLogMessage := "Path \"/mnt/local-storage/notbindmount\" is not an actual mountpoint"
-Expect(strings.Contains(logs, expectedLogMessage)).To(BeTrue())
-
-By("Deleting provisioner daemonset")
-deleteProvisionerDaemonset(config)
-})
-It("should discover dynamically created local persistent volume mountpoint in discovery directory", func() {
-By("Starting a provisioner daemonset")
-createProvisionerDaemonset(config)
-
-By("Creating a volume in discovery directory")
-dynamicVolumePath := path.Join(config.discoveryDir, fmt.Sprintf("vol-%v", string(uuid.NewUUID())))
-setupLocalVolumeProvisionerMountPoint(config, dynamicVolumePath, config.node0)
-
-By("Waiting for the PersistentVolume to be created")
-_, err := waitForLocalPersistentVolume(config.client, dynamicVolumePath)
-Expect(err).NotTo(HaveOccurred())
-
-By("Deleting provisioner daemonset")
-deleteProvisionerDaemonset(config)
-
-By("Deleting volume in discovery directory")
-cleanupLocalVolumeProvisionerMountPoint(config, dynamicVolumePath, config.node0)
-})
-})
-
 Context("StatefulSet with pod affinity [Slow]", func() {
 var testVols map[string][]*localTestVolume
 const (
@@ -556,8 +435,13 @@
 })
 })

-Context("Stress with local volume provisioner [Serial]", func() {
-var testVols [][]string
+Context("Stress with local volumes [Serial]", func() {
+var (
+allLocalVolumes = make(map[string][]*localTestVolume)
+volType = TmpfsLocalVolumeType
+stopCh = make(chan struct{})
+wg sync.WaitGroup
+)

 const (
 volsPerNode = 10 // Make this non-divisable by volsPerPod to increase changes of partial binding failure
@@ -567,38 +451,79 @@

 BeforeEach(func() {
 setupStorageClass(config, &waitMode)
-setupLocalVolumeProvisioner(config)
-testVols = [][]string{}
-for i, node := range config.nodes {
-By(fmt.Sprintf("Setting up local volumes on node %q", node.Name))
-paths := []string{}
-for j := 0; j < volsPerNode; j++ {
-volumePath := path.Join(config.discoveryDir, fmt.Sprintf("vol-%v", string(uuid.NewUUID())))
-setupLocalVolumeProvisionerMountPoint(config, volumePath, &config.nodes[i])
-paths = append(paths, volumePath)
+for _, node := range config.nodes {
+By(fmt.Sprintf("Setting up %d local volumes on node %q", volsPerNode, node.Name))
+allLocalVolumes[node.Name] = setupLocalVolumes(config, volType, &node, volsPerNode)
 }
-testVols = append(testVols, paths)
+By(fmt.Sprintf("Create %d PVs", volsPerNode*len(config.nodes)))
+var err error
+for _, localVolumes := range allLocalVolumes {
+for _, localVolume := range localVolumes {
+pvConfig := makeLocalPVConfig(config, localVolume)
+localVolume.pv, err = framework.CreatePV(config.client, framework.MakePersistentVolume(pvConfig))
+framework.ExpectNoError(err)
 }
-By("Starting the local volume provisioner")
-createProvisionerDaemonset(config)
+}
+By("Start a goroutine to recycle unbound PVs")
+wg.Add(1)
+go func() {
+defer wg.Done()
+w, err := config.client.CoreV1().PersistentVolumes().Watch(metav1.ListOptions{})
+framework.ExpectNoError(err)
+if w == nil {
+return
+}
+defer w.Stop()
+for {
+select {
+case event := <-w.ResultChan():
+if event.Type != watch.Modified {
+continue
+}
+pv, ok := event.Object.(*v1.PersistentVolume)
+if !ok {
+continue
+}
+if pv.Status.Phase == v1.VolumeBound || pv.Status.Phase == v1.VolumeAvailable {
+continue
+}
+pv, err = config.client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
+if apierrors.IsNotFound(err) {
+continue
+}
+// Delete and create a new PV for same local volume storage
+By(fmt.Sprintf("Delete %q and create a new PV for same local volume storage", pv.Name))
+for _, localVolumes := range allLocalVolumes {
+for _, localVolume := range localVolumes {
+if localVolume.pv.Name != pv.Name {
+continue
+}
+err = config.client.CoreV1().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{})
+framework.ExpectNoError(err)
+pvConfig := makeLocalPVConfig(config, localVolume)
+localVolume.pv, err = framework.CreatePV(config.client, framework.MakePersistentVolume(pvConfig))
+framework.ExpectNoError(err)
+}
+}
+case <-stopCh:
+return
+}
+}
+}()
 })

 AfterEach(func() {
-By("Deleting provisioner daemonset")
-deleteProvisionerDaemonset(config)
-for i, paths := range testVols {
-for _, volumePath := range paths {
-cleanupLocalVolumeProvisionerMountPoint(config, volumePath, &config.nodes[i])
+for nodeName, localVolumes := range allLocalVolumes {
+By(fmt.Sprintf("Cleaning up %d local volumes on node %q", len(localVolumes), nodeName))
+cleanupLocalVolumes(config, localVolumes)
 }
-}
-cleanupLocalVolumeProvisioner(config)
 cleanupStorageClass(config)
+By("Wait for recycle goroutine to finish")
+close(stopCh)
+wg.Wait()
 })

-It("should use be able to process many pods and reuse local volumes", func() {
+It("should be able to process many pods and reuse local volumes", func() {
 var (
 podsLock sync.Mutex
 // Have one extra pod pending
@@ -611,7 +536,6 @@

 // Create pods gradually instead of all at once because scheduler has
 // exponential backoff
-// TODO: this is still a bit slow because of the provisioner polling period
 By(fmt.Sprintf("Creating %v pods periodically", numConcurrentPods))
 stop := make(chan struct{})
 go wait.Until(func() {
@@ -631,7 +555,7 @@
 for i := 0; i < numConcurrentPods; i++ {
 pvcs := []*v1.PersistentVolumeClaim{}
 for j := 0; j < volsPerPod; j++ {
-pvc := framework.MakePersistentVolumeClaim(makeLocalPVCConfig(config, DirectoryLocalVolumeType), config.ns)
+pvc := framework.MakePersistentVolumeClaim(makeLocalPVCConfig(config, volType), config.ns)
 pvc, err := framework.CreatePVC(config.client, config.ns, pvc)
 framework.ExpectNoError(err)
 pvcs = append(pvcs, pvc)
@@ -1226,18 +1150,6 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod
 }
 }

-func makeLocalPod(config *localTestConfig, volume *localTestVolume, cmd string) *v1.Pod {
-pod := framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, cmd, false, false, selinuxLabel, nil)
-if pod == nil {
-return pod
-}
-if volume.localVolumeType == BlockLocalVolumeType {
-// Block e2e tests require utilities for writing to block devices (e.g. dd), and nginx has this utilities.
-pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
-}
-return pod
-}
-
 func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
 pod = framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, nil)
 if pod == nil {
@@ -1285,16 +1197,6 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume,
 return
 }

-// createSecPod should be used when Pod requires non default SELinux labels
-func createSecPod(config *localTestConfig, volume *localTestVolume, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) (*v1.Pod, error) {
-pod, err := framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", hostIPC, hostPID, seLinuxLabel, nil, framework.PodStartShortTimeout)
-podNodeName, podNodeNameErr := podNodeName(config, pod)
-Expect(podNodeNameErr).NotTo(HaveOccurred())
-framework.Logf("Security Context POD %q created on Node %q", pod.Name, podNodeName)
-Expect(podNodeName).To(Equal(config.node0.Name))
-return pod, err
-}
-
 func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) {
 By("Creating a pod")
 return framework.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, false, "", false, false, selinuxLabel, fsGroup, framework.PodStartShortTimeout)
@@ -1368,13 +1270,6 @@ func testReadFileContent(testFileDir string, testFile string, testFileContent st
 Expect(readOut).To(ContainSubstring(testFileContent))
 }

-// Create command to verify that the file doesn't exist
-// to be executed via hostexec Pod on the node with the local PV
-func createFileDoesntExistCmd(testFileDir string, testFile string) string {
-testFilePath := filepath.Join(testFileDir, testFile)
-return fmt.Sprintf("[ ! -e %s ]", testFilePath)
-}
-
 // Execute a read or write command in a pod.
 // Fail on error
 func podRWCmdExec(pod *v1.Pod, cmd string) string {
@@ -1402,332 +1297,6 @@ func setupLocalVolumesPVCsPVs(
 return testVols
 }

-func setupLocalVolumeProvisioner(config *localTestConfig) {
-By("Bootstrapping local volume provisioner")
-createServiceAccount(config)
-createProvisionerClusterRoleBinding(config)
-utils.PrivilegedTestPSPClusterRoleBinding(config.client, config.ns, false /* teardown */, []string{testServiceAccount})
-createVolumeConfigMap(config)
-
-for _, node := range config.nodes {
-By(fmt.Sprintf("Initializing local volume discovery base path on node %v", node.Name))
-mkdirCmd := fmt.Sprintf("mkdir -p %v -m 777", config.discoveryDir)
-err := issueNodeCommand(config, mkdirCmd, &node)
-Expect(err).NotTo(HaveOccurred())
-}
-}
-
-func cleanupLocalVolumeProvisioner(config *localTestConfig) {
-By("Cleaning up cluster role binding")
-deleteClusterRoleBinding(config)
-utils.PrivilegedTestPSPClusterRoleBinding(config.client, config.ns, true /* teardown */, []string{testServiceAccount})
-
-for _, node := range config.nodes {
-By(fmt.Sprintf("Removing the test discovery directory on node %v", node.Name))
-removeCmd := fmt.Sprintf("[ ! -e %v ] || rm -r %v", config.discoveryDir, config.discoveryDir)
-err := issueNodeCommand(config, removeCmd, &node)
-Expect(err).NotTo(HaveOccurred())
-}
-}
-
-func setupLocalVolumeProvisionerMountPoint(config *localTestConfig, volumePath string, node *v1.Node) {
-By(fmt.Sprintf("Creating local directory at path %q", volumePath))
-mkdirCmd := fmt.Sprintf("mkdir %v -m 777", volumePath)
-err := issueNodeCommand(config, mkdirCmd, node)
-Expect(err).NotTo(HaveOccurred())
-
-By(fmt.Sprintf("Mounting local directory at path %q", volumePath))
-mntCmd := fmt.Sprintf("sudo mount --bind %v %v", volumePath, volumePath)
-err = issueNodeCommand(config, mntCmd, node)
-Expect(err).NotTo(HaveOccurred())
-}
-
-func cleanupLocalVolumeProvisionerMountPoint(config *localTestConfig, volumePath string, node *v1.Node) {
-By(fmt.Sprintf("Unmounting the test mount point from %q", volumePath))
-umountCmd := fmt.Sprintf("[ ! -e %v ] || sudo umount %v", volumePath, volumePath)
-err := issueNodeCommand(config, umountCmd, node)
-Expect(err).NotTo(HaveOccurred())
-
-By("Removing the test mount point")
-removeCmd := fmt.Sprintf("[ ! -e %v ] || rm -r %v", volumePath, volumePath)
-err = issueNodeCommand(config, removeCmd, node)
-Expect(err).NotTo(HaveOccurred())
-
-By("Cleaning up persistent volume")
-pv, err := findLocalPersistentVolume(config.client, volumePath)
-Expect(err).NotTo(HaveOccurred())
-if pv != nil {
-err = config.client.CoreV1().PersistentVolumes().Delete(pv.Name, &metav1.DeleteOptions{})
-Expect(err).NotTo(HaveOccurred())
-}
-}
-
-func createServiceAccount(config *localTestConfig) {
-serviceAccount := v1.ServiceAccount{
-TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ServiceAccount"},
-ObjectMeta: metav1.ObjectMeta{Name: testServiceAccount, Namespace: config.ns},
-}
-_, err := config.client.CoreV1().ServiceAccounts(config.ns).Create(&serviceAccount)
-Expect(err).NotTo(HaveOccurred())
-}
-
-// createProvisionerClusterRoleBinding creates two cluster role bindings for local volume provisioner's
-// service account: systemRoleNode and systemRolePVProvisioner. These are required for
-// provisioner to get node information and create persistent volumes.
-func createProvisionerClusterRoleBinding(config *localTestConfig) {
-subjects := []rbacv1beta1.Subject{
-{
-Kind: rbacv1beta1.ServiceAccountKind,
-Name: testServiceAccount,
-Namespace: config.ns,
-},
-}
-
-pvBinding := rbacv1beta1.ClusterRoleBinding{
-TypeMeta: metav1.TypeMeta{
-APIVersion: "rbac.authorization.k8s.io/v1beta1",
-Kind: "ClusterRoleBinding",
-},
-ObjectMeta: metav1.ObjectMeta{
-Name: pvBindingName,
-},
-RoleRef: rbacv1beta1.RoleRef{
-APIGroup: "rbac.authorization.k8s.io",
-Kind: "ClusterRole",
-Name: systemRolePVProvisioner,
-},
-Subjects: subjects,
-}
-nodeBinding := rbacv1beta1.ClusterRoleBinding{
-TypeMeta: metav1.TypeMeta{
-APIVersion: "rbac.authorization.k8s.io/v1beta1",
-Kind: "ClusterRoleBinding",
-},
-ObjectMeta: metav1.ObjectMeta{
-Name: nodeBindingName,
-},
-RoleRef: rbacv1beta1.RoleRef{
-APIGroup: "rbac.authorization.k8s.io",
-Kind: "ClusterRole",
-Name: systemRoleNode,
-},
-Subjects: subjects,
-}
-
-deleteClusterRoleBinding(config)
-_, err := config.client.RbacV1beta1().ClusterRoleBindings().Create(&pvBinding)
-Expect(err).NotTo(HaveOccurred())
-_, err = config.client.RbacV1beta1().ClusterRoleBindings().Create(&nodeBinding)
-Expect(err).NotTo(HaveOccurred())
-}
-
-func deleteClusterRoleBinding(config *localTestConfig) {
-// These role bindings are created in provisioner; we just ensure it's
-// deleted and do not panic on error.
-config.client.RbacV1beta1().ClusterRoleBindings().Delete(nodeBindingName, metav1.NewDeleteOptions(0))
-config.client.RbacV1beta1().ClusterRoleBindings().Delete(pvBindingName, metav1.NewDeleteOptions(0))
-}
-
-func createVolumeConfigMap(config *localTestConfig) {
-// MountConfig and ProvisionerConfiguration from
-// https://github.com/kubernetes-incubator/external-storage/blob/master/local-volume/provisioner/pkg/common/common.go
-type MountConfig struct {
-// The hostpath directory
-HostDir string `json:"hostDir" yaml:"hostDir"`
-MountDir string `json:"mountDir" yaml:"mountDir"`
-}
-type ProvisionerConfiguration struct {
-// StorageClassConfig defines configuration of Provisioner's storage classes
-StorageClassConfig map[string]MountConfig `json:"storageClassMap" yaml:"storageClassMap"`
-}
-var provisionerConfig ProvisionerConfiguration
-provisionerConfig.StorageClassConfig = map[string]MountConfig{
-config.scName: {
-HostDir: config.discoveryDir,
-MountDir: provisionerDefaultMountRoot,
-},
-}
-
-data, err := yaml.Marshal(&provisionerConfig.StorageClassConfig)
-Expect(err).NotTo(HaveOccurred())
-
-configMap := v1.ConfigMap{
-TypeMeta: metav1.TypeMeta{
-APIVersion: "v1",
-Kind: "ConfigMap",
-},
-ObjectMeta: metav1.ObjectMeta{
-Name: volumeConfigName,
-Namespace: config.ns,
-},
-Data: map[string]string{
-"storageClassMap": string(data),
-},
-}
-
-_, err = config.client.CoreV1().ConfigMaps(config.ns).Create(&configMap)
-Expect(err).NotTo(HaveOccurred())
-}
-
-func createProvisionerDaemonset(config *localTestConfig) {
-provisionerPrivileged := true
-mountProp := v1.MountPropagationHostToContainer
-
-provisioner := &appsv1.DaemonSet{
-TypeMeta: metav1.TypeMeta{
-Kind: "DaemonSet",
-APIVersion: "apps/v1",
-},
-ObjectMeta: metav1.ObjectMeta{
-Name: daemonSetName,
-},
-Spec: appsv1.DaemonSetSpec{
-Selector: &metav1.LabelSelector{
-MatchLabels: map[string]string{"app": "local-volume-provisioner"},
-},
-Template: v1.PodTemplateSpec{
-ObjectMeta: metav1.ObjectMeta{
-Labels: map[string]string{"app": "local-volume-provisioner"},
-},
-Spec: v1.PodSpec{
-ServiceAccountName: testServiceAccount,
-Containers: []v1.Container{
-{
-Name: "provisioner",
-Image: provisionerImageName,
-ImagePullPolicy: "Always",
-SecurityContext: &v1.SecurityContext{
-Privileged: &provisionerPrivileged,
-},
-Env: []v1.EnvVar{
-{
-Name: "MY_NODE_NAME",
-ValueFrom: &v1.EnvVarSource{
-FieldRef: &v1.ObjectFieldSelector{
-FieldPath: "spec.nodeName",
-},
-},
-},
-{
-Name: "MY_NAMESPACE",
-ValueFrom: &v1.EnvVarSource{
-FieldRef: &v1.ObjectFieldSelector{
-FieldPath: "metadata.namespace",
-},
-},
-},
-},
-VolumeMounts: []v1.VolumeMount{
-{
-Name: volumeConfigName,
-MountPath: "/etc/provisioner/config/",
-},
-{
-Name: "local-disks",
-MountPath: provisionerDefaultMountRoot,
-MountPropagation: &mountProp,
-},
-},
-},
-},
-Volumes: []v1.Volume{
-{
-Name: volumeConfigName,
-VolumeSource: v1.VolumeSource{
-ConfigMap: &v1.ConfigMapVolumeSource{
-LocalObjectReference: v1.LocalObjectReference{
-Name: volumeConfigName,
-},
-},
-},
-},
-{
-Name: "local-disks",
-VolumeSource: v1.VolumeSource{
-HostPath: &v1.HostPathVolumeSource{
-Path: config.discoveryDir,
-},
-},
-},
-},
-},
-},
-},
-}
-_, err := config.client.AppsV1().DaemonSets(config.ns).Create(provisioner)
-Expect(err).NotTo(HaveOccurred())
-
-kind := schema.GroupKind{Group: "extensions", Kind: "DaemonSet"}
-framework.WaitForControlledPodsRunning(config.client, config.ns, daemonSetName, kind)
-}
-
-func findProvisionerDaemonsetPodName(config *localTestConfig) string {
-podList, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{})
-if err != nil {
-framework.Failf("could not get the pod list: %v", err)
-return ""
-}
-pods := podList.Items
-for _, pod := range pods {
-if strings.HasPrefix(pod.Name, daemonSetName) && pod.Spec.NodeName == config.node0.Name {
-return pod.Name
-}
-}
-framework.Failf("Unable to find provisioner daemonset pod on node0")
-return ""
-}
-
-func deleteProvisionerDaemonset(config *localTestConfig) {
-ds, err := config.client.AppsV1().DaemonSets(config.ns).Get(daemonSetName, metav1.GetOptions{})
-if ds == nil {
-return
-}
-
-err = config.client.AppsV1().DaemonSets(config.ns).Delete(daemonSetName, nil)
-Expect(err).NotTo(HaveOccurred())
-
-err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
-pods, err := config.client.CoreV1().Pods(config.ns).List(metav1.ListOptions{})
-if err != nil {
-return false, err
-}
-
-for _, pod := range pods.Items {
-if metav1.IsControlledBy(&pod, ds) {
-// DaemonSet pod still exists
-return false, nil
-}
-}
-
-// All DaemonSet pods are deleted
-return true, nil
-})
-Expect(err).NotTo(HaveOccurred())
-}
-
-// newLocalClaim creates a new persistent volume claim.
-func newLocalClaim(config *localTestConfig) *v1.PersistentVolumeClaim {
-claim := v1.PersistentVolumeClaim{
-ObjectMeta: metav1.ObjectMeta{
-GenerateName: "local-pvc-",
-Namespace: config.ns,
-},
-Spec: v1.PersistentVolumeClaimSpec{
-StorageClassName: &config.scName,
-AccessModes: []v1.PersistentVolumeAccessMode{
-v1.ReadWriteOnce,
-},
-Resources: v1.ResourceRequirements{
-Requests: v1.ResourceList{
-v1.ResourceName(v1.ResourceStorage): resource.MustParse(testRequestSize),
-},
-},
-},
-}
-
-return &claim
-}
-
 // newLocalClaim creates a new persistent volume claim.
 func newLocalClaimWithName(config *localTestConfig, name string) *v1.PersistentVolumeClaim {
 claim := v1.PersistentVolumeClaim{
@@ -1751,50 +1320,6 @@ func newLocalClaimWithName(config *localTestConfig, name string) *v1.PersistentV
 return &claim
 }

-// waitForLocalPersistentVolume waits a local persistent volume with 'volumePath' to be available.
-func waitForLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1.PersistentVolume, error) {
-var pv *v1.PersistentVolume
-
-for start := time.Now(); time.Since(start) < 10*time.Minute && pv == nil; time.Sleep(5 * time.Second) {
-pvs, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
-if err != nil {
-return nil, err
-}
-if len(pvs.Items) == 0 {
-continue
-}
-for _, p := range pvs.Items {
-if p.Spec.PersistentVolumeSource.Local == nil || p.Spec.PersistentVolumeSource.Local.Path != volumePath {
-continue
-}
-if p.Status.Phase != v1.VolumeAvailable {
-continue
-}
-pv = &p
-break
-}
-}
-if pv == nil {
-return nil, fmt.Errorf("Timeout while waiting for local persistent volume with path %v to be available", volumePath)
-}
-return pv, nil
-}
-
-// findLocalPersistentVolume finds persistent volume with 'spec.local.path' equals 'volumePath'.
-func findLocalPersistentVolume(c clientset.Interface, volumePath string) (*v1.PersistentVolume, error) {
-pvs, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
-if err != nil {
-return nil, err
-}
-for _, p := range pvs.Items {
-if p.Spec.PersistentVolumeSource.Local != nil && p.Spec.PersistentVolumeSource.Local.Path == volumePath {
-return &p, nil
-}
-}
-// Doesn't exist, that's fine, it could be invoked by early cleanup
-return nil, nil
-}
-
 func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount int, anti, parallel bool) *appsv1.StatefulSet {
 mounts := []v1.VolumeMount{}
 claims := []v1.PersistentVolumeClaim{}