Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 03:41:45 +00:00)
Merge pull request #52350 from wongma7/reclaim-e2e
Automatic merge from submit-queue (batch tested with PRs 52350, 52659). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add e2e test for storageclass.reclaimpolicy

**What this PR does / why we need it**: Adds another dynamic provisioning test where the storageclass.reclaimpolicy == retain. Because the PV is not auto-deleted in that case, the test has to delete it manually at the end.

**Which issue this PR fixes**: https://github.com/kubernetes/kubernetes/issues/52138

**Special notes for your reviewer**: I have not tested it yet, but it's ready for review; I will comment and edit this once I've verified it actually works.

**Release note**:
```release-note
NONE
```
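For context, here is a minimal, self-contained sketch (not part of the PR) of what a StorageClass with a non-default reclaim policy looks like when built with the Kubernetes Go client types. The object name and parameters are illustrative only; the e2e helpers that the actual diff below relies on (`newStorageClass`, `testDynamicProvisioning`) are omitted. The point is the `ReclaimPolicy` field: with `Retain`, the controller leaves the released PV and its backing disk in place, which is why the new test cleans them up itself.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Retain means the PV is not auto-deleted when its claim goes away; the
	// caller (here, the e2e test) must delete the PV and the backing disk.
	retain := v1.PersistentVolumeReclaimRetain

	sc := &storagev1.StorageClass{
		ObjectMeta:    metav1.ObjectMeta{Name: "reclaimpolicy-retain"}, // illustrative name
		Provisioner:   "kubernetes.io/gce-pd",
		Parameters:    map[string]string{"type": "pd-standard"},
		ReclaimPolicy: &retain, // the field exercised by the new test
	}

	fmt.Printf("StorageClass %q: provisioner=%s reclaimPolicy=%s\n",
		sc.Name, sc.Provisioner, *sc.ReclaimPolicy)
}
```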
Commit 0ff165f647
@@ -63,7 +63,7 @@ const (
 	externalPluginName = "example.com/nfs"
 )
 
-func testDynamicProvisioning(t storageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) {
+func testDynamicProvisioning(t storageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) *v1.PersistentVolume {
 	var err error
 	if class != nil {
 		By("creating a StorageClass " + class.Name)
@@ -109,7 +109,7 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
 
 	// Check PV properties
 	By("checking the PV")
-	Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete))
+	Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(*class.ReclaimPolicy))
 	expectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
 	Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
 	Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
@@ -135,13 +135,19 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
 	By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
 	framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
 
-	// Wait for the PV to get deleted. Technically, the first few delete
+	// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
+	// Retain, there's no use waiting because the PV won't be auto-deleted and
+	// it's expected for the caller to do it.) Technically, the first few delete
 	// attempts may fail, as the volume is still attached to a node because
 	// kubelet is slowly cleaning up the previous pod, however it should succeed
 	// in a couple of minutes. Wait 20 minutes to recover from random cloud
 	// hiccups.
-	By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
-	framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
+	if pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
+		By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
+		framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
+	}
+
+	return pv
 }
 
 // checkAWSEBS checks properties of an AWS EBS. Test framework does not
@@ -408,6 +414,40 @@ var _ = SIGDescribe("Dynamic Provisioning", func() {
 			}
 		})
 
+		It("should provision storage with non-default reclaim policy Retain", func() {
+			framework.SkipUnlessProviderIs("gce", "gke")
+
+			test := storageClassTest{
+				"HDD PD on GCE/GKE",
+				[]string{"gce", "gke"},
+				"kubernetes.io/gce-pd",
+				map[string]string{
+					"type": "pd-standard",
+				},
+				"1Gi",
+				"1Gi",
+				func(volume *v1.PersistentVolume) error {
+					return checkGCEPD(volume, "pd-standard")
+				},
+			}
+			class := newStorageClass(test, ns, "reclaimpolicy")
+			retain := v1.PersistentVolumeReclaimRetain
+			class.ReclaimPolicy = &retain
+			claim := newClaim(test, ns, "reclaimpolicy")
+			claim.Spec.StorageClassName = &class.Name
+			pv := testDynamicProvisioning(test, c, claim, class)
+
+			By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
+			framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
+
+			By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name))
+			framework.ExpectNoError(framework.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName))
+
+			By(fmt.Sprintf("deleting the PV %q", pv.Name))
+			framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
+			framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
+		})
+
 		// NOTE: Slow! The test will wait up to 5 minutes (framework.ClaimProvisionTimeout)
 		// when there is no regression.
 		It("should not provision a volume in an unmanaged GCE zone. [Slow]", func() {
|
||||
|
Loading…
Reference in New Issue
Block a user