Fix provisioning tests to allow delayed binding
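With delayed binding (volumeBindingMode: WaitForFirstConsumer), a claim stays Pending and no PV exists until a pod that uses the claim is scheduled. The PvCheck callback therefore can no longer receive the provisioned *v1.PersistentVolume up front. This change narrows PvCheck to func(claim *v1.PersistentVolumeClaim), runs the checker before the bound-PV assertions (now collected in a new checkProvisioning helper), and has PVWriteReadSingleNodeCheck look up the bound PV itself and return it.

For context, a minimal sketch of a StorageClass that triggers delayed binding (the class name and provisioner here are hypothetical, not taken from this commit):

	// With WaitForFirstConsumer, provisioning and binding are deferred
	// until a pod using the claim is scheduled, so no PV exists at the
	// time PvCheck starts running.
	mode := storagev1.VolumeBindingWaitForFirstConsumer
	class := &storagev1.StorageClass{
		ObjectMeta:        metav1.ObjectMeta{Name: "example-delayed"}, // hypothetical
		Provisioner:       "example.com/test-driver",                  // hypothetical
		VolumeBindingMode: &mode,
	}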
@@ -23,8 +23,9 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
-	"k8s.io/api/core/v1"
-	storage "k8s.io/api/storage/v1"
+	v1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -50,7 +51,7 @@ type StorageClassTest struct {
 	DelayBinding         bool
 	ClaimSize            string
 	ExpectedSize         string
-	PvCheck              func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume)
+	PvCheck              func(claim *v1.PersistentVolumeClaim)
 	VolumeMode           *v1.PersistentVolumeMode
 	AllowVolumeExpansion bool
 }
@@ -146,6 +147,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 		init()
 		defer cleanup()
 
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+			PVWriteReadSingleNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
+		}
 		l.testCase.TestDynamicProvisioning()
 	})
 
@@ -158,6 +162,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 		defer cleanup()
 
 		l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+			PVWriteReadSingleNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
+		}
 		l.testCase.TestDynamicProvisioning()
 	})
 
@@ -178,26 +185,12 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 		if len(nodes.Items) <= 1 {
 			framework.Skipf("need more than one node - skipping")
 		}
-		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-			PVMultiNodeCheck(l.cs, claim, volume, framework.NodeSelection{Name: l.config.ClientNodeName})
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+			PVMultiNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
 		}
 		l.testCase.TestDynamicProvisioning()
 	})
 
-	It("should create and delete block persistent volumes", func() {
-		if !dInfo.Capabilities[CapBlock] {
-			framework.Skipf("Driver %q does not support BlockVolume - skipping", dInfo.Name)
-		}
-
-		init()
-		defer cleanup()
-
-		block := v1.PersistentVolumeBlock
-		l.testCase.VolumeMode = &block
-		l.pvc.Spec.VolumeMode = &block
-		l.testCase.TestDynamicProvisioning()
-	})
-
 	It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
 		if !dInfo.Capabilities[CapDataSource] {
 			framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
@@ -217,7 +210,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
 		defer cleanupFunc()
 
 		l.pvc.Spec.DataSource = dataSource
-		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+		l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
 			By("checking whether the created volume has the pre-populated data")
 			command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
 			RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, framework.NodeSelection{Name: l.config.ClientNodeName})
@@ -261,16 +254,39 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 			framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
 		}
 	}()
-	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
+
+	// Run the checker
+	if t.PvCheck != nil {
+		t.PvCheck(claim)
+	}
+
+	pv := t.checkProvisioning(client, claim, class)
+
+	By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
+	framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
+
+	// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
+	// Retain, there's no use waiting because the PV won't be auto-deleted and
+	// it's expected for the caller to do it.) Technically, the first few delete
+	// attempts may fail, as the volume is still attached to a node because
+	// kubelet is slowly cleaning up the previous pod, however it should succeed
+	// in a couple of minutes. Wait 20 minutes to recover from random cloud
+	// hiccups.
+	if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
+		By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
+		framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
+	}
+
+	return pv
+}
+
+// checkProvisioning verifies that the claim is bound and has the correct properties
+func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
+	err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
 	Expect(err).NotTo(HaveOccurred())
 
 	By("checking the claim")
 	// Get new copy of the claim
 	claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 
 	// Get the bound PV
-	pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
+	pv, err := framework.GetBoundPV(client, claim)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Check sizes
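The framework.GetBoundPV call above replaces the direct PersistentVolumes().Get the old code used. A sketch of what such a helper must do (an assumption about its behavior, not the framework's actual implementation): refresh the claim so that Spec.VolumeName is populated after binding, then fetch that PV, using the same client calls as the surrounding code:

	// getBoundPV: hypothetical stand-in for framework.GetBoundPV.
	// After binding, claim.Spec.VolumeName names the PV, so re-read
	// the claim first; with delayed binding the field stays empty
	// until a pod has consumed the claim.
	func getBoundPV(client clientset.Interface, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
		claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		return client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
	}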
@@ -284,8 +300,20 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 
 	// Check PV properties
 	By("checking the PV")
-	expectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
-	Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
+
+	// Every access mode in PV should be in PVC
+	Expect(pv.Spec.AccessModes).NotTo(BeZero())
+	for _, pvMode := range pv.Spec.AccessModes {
+		found := false
+		for _, pvcMode := range claim.Spec.AccessModes {
+			if pvMode == pvcMode {
+				found = true
+				break
+			}
+		}
+		Expect(found).To(BeTrue())
+	}
+
 	Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
 	Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))
 	if class == nil {
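Note on the access-mode change above: the old assertion hardcoded []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, which only holds for drivers that provision RWO volumes. The replacement checks a driver-neutral invariant instead: the PV reports at least one access mode, and every mode on the PV also appears on the claim.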
@@ -294,35 +322,15 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 		Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(*class.ReclaimPolicy))
 		Expect(pv.Spec.MountOptions).To(Equal(class.MountOptions))
 	}
-	if t.VolumeMode != nil {
+	if claim.Spec.VolumeMode != nil {
 		Expect(pv.Spec.VolumeMode).NotTo(BeNil())
-		Expect(*pv.Spec.VolumeMode).To(Equal(*t.VolumeMode))
+		Expect(*pv.Spec.VolumeMode).To(Equal(*claim.Spec.VolumeMode))
 	}
-
-	// Run the checker
-	if t.PvCheck != nil {
-		t.PvCheck(claim, pv)
-	}
-
-	By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
-	framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
-
-	// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
-	// Retain, there's no use waiting because the PV won't be auto-deleted and
-	// it's expected for the caller to do it.) Technically, the first few delete
-	// attempts may fail, as the volume is still attached to a node because
-	// kubelet is slowly cleaning up the previous pod, however it should succeed
-	// in a couple of minutes. Wait 20 minutes to recover from random cloud
-	// hiccups.
-	if pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
-		By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
-		framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
-	}
-
 	return pv
 }
 
-// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node.
+// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node
+// and returns the PV.
 //
 // It starts two pods:
 // - The first pod writes 'hello world' to the /mnt/test (= the volume) on one node.
@@ -335,16 +343,9 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
 // persistent across pods.
 //
 // This is a common test that can be called from a StorageClassTest.PvCheck.
-func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node framework.NodeSelection) {
-	By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
+func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) *v1.PersistentVolume {
+	By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
 	command := "echo 'hello world' > /mnt/test/data"
-	// We give the first pod the secondary responsibility of checking the volume has
-	// been mounted with the PV's mount options, if the PV was provisioned with any
-	for _, option := range volume.Spec.MountOptions {
-		// Get entry, get mount options at 6th word, replace brackets with commas
-		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
-	}
-	command += " || (mount | grep 'on /mnt/test'; false)"
 	pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
 	defer func() {
 		// pod might be nil now.
@@ -357,9 +358,24 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
 	StopPod(client, pod)
 	pod = nil // Don't stop twice.
 
-	By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %q", actualNodeName))
+	// Get a new copy of the PV
+	volume, err := framework.GetBoundPV(client, claim)
+	Expect(err).NotTo(HaveOccurred())
+
+	By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
 	command = "grep 'hello world' /mnt/test/data"
+
+	// We give the second pod the additional responsibility of checking the volume has
+	// been mounted with the PV's mount options, if the PV was provisioned with any
+	for _, option := range volume.Spec.MountOptions {
+		// Get entry, get mount options at 6th word, replace brackets with commas
+		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
+	}
+	command += " || (mount | grep 'on /mnt/test'; false)"
+
 	RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, framework.NodeSelection{Name: actualNodeName})
+
+	return volume
 }
 
 // PVMultiNodeCheck checks that a PV retains data when moved between nodes.
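A worked example of the mount-option pipeline that moves into the reader pod (the mount line is illustrative): given a mount table entry such as

	/dev/sdb on /mnt/test type ext4 (rw,relatime,discard)

grep 'on /mnt/test' selects the entry, awk '{print $6}' extracts the sixth field (rw,relatime,discard), and the sed expression rewrites the enclosing brackets to commas, yielding ,rw,relatime,discard,. Matching each expected option as ,%s, with commas on both sides keeps, for example, discard from false-positively matching nodiscard. If any option is missing, the trailing || (mount | grep 'on /mnt/test'; false) prints the actual mount entry and fails the pod.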
@@ -376,7 +392,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
 // persistent across pods and across nodes.
 //
 // This is a common test that can be called from a StorageClassTest.PvCheck.
-func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node framework.NodeSelection) {
+func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) {
 	Expect(node.Name).To(Equal(""), "this test only works when not locked onto a single node")
 
 	var pod *v1.Pod
@@ -385,7 +401,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
 		StopPod(client, pod)
 	}()
 
-	By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
+	By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
 	command := "echo 'hello world' > /mnt/test/data"
 	pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
 	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
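Taken together, a driver suite wires the new single-argument callback like this (a minimal sketch assuming the surrounding testsuites context; the diff itself never consumes the returned PV, so the log line is illustrative):

	l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
		// The helper now fetches the bound PV itself and returns it,
		// so callers can make additional assertions against it.
		pv := PVWriteReadSingleNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
		framework.Logf("provisioned PV %q with mount options %v", pv.Name, pv.Spec.MountOptions)
	}
	l.testCase.TestDynamicProvisioning()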