Merge pull request #123151 from carlory/honor-pv-reclaim-policy-e2e

add e2e test for HonorPVReclaimPolicy
Kubernetes Prow Robot authored on 2024-05-13 00:51:28 -07:00; committed by GitHub
commit c7c4039093
6 changed files with 378 additions and 4 deletions

@@ -139,6 +139,20 @@ var (
// TODO: document the feature (owning SIG, when to use this feature for a test)
HPA = framework.WithFeature(framework.ValidFeatures.Add("HPA"))
// owning-sig: sig-storage
// kep: https://kep.k8s.io/2680
// test-infra jobs:
// - pull-kubernetes-e2e-storage-kind-alpha-features (needs manual trigger)
// - ci-kubernetes-e2e-storage-kind-alpha-features
//
// When this label is added to a test, it means that the cluster must be created
// with the feature-gate "HonorPVReclaimPolicy=true".
//
// Once the feature is stable, this label should be removed and these tests will
// run by default on any cluster. The test-infra jobs should also be updated to
// no longer focus on this feature.
HonorPVReclaimPolicy = framework.WithFeature(framework.ValidFeatures.Add("HonorPVReclaimPolicy"))
// TODO: document the feature (owning SIG, when to use this feature for a test)
HugePages = framework.WithFeature(framework.ValidFeatures.Add("HugePages"))
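
A minimal illustration of how a suite consumes this label, mirroring the new csimock test added later in this diff (the suite name here is hypothetical):

var _ = utils.SIGDescribe("example suite", feature.HonorPVReclaimPolicy,
	framework.WithFeatureGate(features.HonorPVReclaimPolicy), func() {
	// Test bodies here run only on clusters started with HonorPVReclaimPolicy=true.
})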

@@ -93,6 +93,9 @@ type PersistentVolumeConfig struct {
// [Optional] Labels contains information used to organize and categorize
// objects
Labels labels.Set
// [Optional] Annotations contains information used to organize and categorize
// objects
Annotations map[string]string
// PVSource contains the details of the underlying volume and must be set
PVSource v1.PersistentVolumeSource
// [Optional] Prebind lets you specify a PVC to bind this PV to before
@@ -595,13 +598,18 @@ func MakePersistentVolume(pvConfig PersistentVolumeConfig) *v1.PersistentVolume
}
}
annotations := map[string]string{
volumeGidAnnotationKey: "777",
}
for k, v := range pvConfig.Annotations {
annotations[k] = v
}
return &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pvConfig.NamePrefix,
Labels: pvConfig.Labels,
-		Annotations: map[string]string{
-			volumeGidAnnotationKey: "777",
-		},
+		Annotations: annotations,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: pvConfig.ReclaimPolicy,
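
Because the defaults are written first and pvConfig.Annotations is copied over them, caller-supplied keys win on conflict. A hypothetical sketch of the new field in use (all values illustrative):

pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
	NamePrefix:  "example-",
	Annotations: map[string]string{"example.io/purpose": "e2e"},
	PVSource:    v1.PersistentVolumeSource{ /* driver-specific source */ },
})
// pv.Annotations now contains both the default volumeGidAnnotationKey ("777")
// and "example.io/purpose".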

@@ -48,6 +48,7 @@ import (
"k8s.io/kubernetes/test/e2e/storage/utils"
"k8s.io/kubernetes/test/utils/format"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/ptr"
)
const (
@@ -101,7 +102,9 @@ type testParameters struct {
fsGroupPolicy *storagev1.FSGroupPolicy
enableSELinuxMount *bool
enableRecoverExpansionFailure bool
enableHonorPVReclaimPolicy bool
enableCSINodeExpandSecret bool
reclaimPolicy *v1.PersistentVolumeReclaimPolicy
}
type mockDriverSetup struct {
@@ -109,6 +112,7 @@ type mockDriverSetup struct {
config *storageframework.PerTestConfig
pods []*v1.Pod
pvcs []*v1.PersistentVolumeClaim
pvs []*v1.PersistentVolume
sc map[string]*storagev1.StorageClass
vsc map[string]*unstructured.Unstructured
driver drivers.MockCSITestDriver
@@ -171,6 +175,7 @@ func (m *mockDriverSetup) init(ctx context.Context, tp testParameters) {
FSGroupPolicy: tp.fsGroupPolicy,
EnableSELinuxMount: tp.enableSELinuxMount,
EnableRecoverExpansionFailure: tp.enableRecoverExpansionFailure,
EnableHonorPVReclaimPolicy: tp.enableHonorPVReclaimPolicy,
}
// At the moment, only tests which need hooks are
@@ -235,6 +240,11 @@ func (m *mockDriverSetup) cleanup(ctx context.Context) {
}
}
for _, pv := range m.pvs {
ginkgo.By(fmt.Sprintf("Deleting pv %s", pv.Name))
errs = append(errs, e2epv.DeletePersistentVolume(ctx, cs, pv.Name))
}
for _, sc := range m.sc {
ginkgo.By(fmt.Sprintf("Deleting storageclass %s", sc.Name))
cs.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
@@ -286,6 +296,7 @@ func (m *mockDriverSetup) createPod(ctx context.Context, withVolume volumeType)
ExpectedSize: "1Gi",
DelayBinding: m.tp.lateBinding,
AllowVolumeExpansion: m.tp.enableResizing,
ReclaimPolicy: m.tp.reclaimPolicy,
}
// The mock driver only works when everything runs on a single node.
@@ -319,6 +330,92 @@ func (m *mockDriverSetup) createPod(ctx context.Context, withVolume volumeType)
return // result variables set above
}
func (m *mockDriverSetup) createPVC(ctx context.Context) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim) {
ginkgo.By("Creating pvc")
f := m.f
sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "")
if m.tp.enableCSINodeExpandSecret {
if sc.Parameters == nil {
parameters := map[string]string{
csiNodeExpandSecretKey: "test-secret",
csiNodeExpandSecretNamespaceKey: f.Namespace.Name,
}
sc.Parameters = parameters
} else {
sc.Parameters[csiNodeExpandSecretKey] = "test-secret"
sc.Parameters[csiNodeExpandSecretNamespaceKey] = f.Namespace.Name
}
}
scTest := testsuites.StorageClassTest{
Name: m.driver.GetDriverInfo().Name,
Timeouts: f.Timeouts,
Provisioner: sc.Provisioner,
Parameters: sc.Parameters,
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
DelayBinding: m.tp.lateBinding,
AllowVolumeExpansion: m.tp.enableResizing,
ReclaimPolicy: m.tp.reclaimPolicy,
}
// The mock driver only works when everything runs on a single node.
nodeSelection := m.config.ClientNodeSelection
class, claim = createClaim(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, nil)
if class != nil {
m.sc[class.Name] = class
}
if claim != nil {
m.pvcs = append(m.pvcs, claim)
}
return class, claim
}
func (m *mockDriverSetup) createPVPVC(ctx context.Context) (class *storagev1.StorageClass, volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) {
ginkgo.By("Creating the PV and PVC manually")
f := m.f
sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "")
if m.tp.enableCSINodeExpandSecret {
if sc.Parameters == nil {
parameters := map[string]string{
csiNodeExpandSecretKey: "test-secret",
csiNodeExpandSecretNamespaceKey: f.Namespace.Name,
}
sc.Parameters = parameters
} else {
sc.Parameters[csiNodeExpandSecretKey] = "test-secret"
sc.Parameters[csiNodeExpandSecretNamespaceKey] = f.Namespace.Name
}
}
scTest := testsuites.StorageClassTest{
Name: m.driver.GetDriverInfo().Name,
Timeouts: f.Timeouts,
Provisioner: sc.Provisioner,
Parameters: sc.Parameters,
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
DelayBinding: m.tp.lateBinding,
AllowVolumeExpansion: m.tp.enableResizing,
ReclaimPolicy: m.tp.reclaimPolicy,
}
// The mock driver only works when everything runs on a single node.
nodeSelection := m.config.ClientNodeSelection
class, volume, claim = createVolumeAndClaim(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, nil)
if class != nil {
m.sc[class.Name] = class
}
if volume != nil {
m.pvs = append(m.pvs, volume)
}
if claim != nil {
m.pvcs = append(m.pvcs, claim)
}
return class, volume, claim
}
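
Intended usage, as exercised by the static-provisioning tests added below; the returned objects are recorded in m.sc, m.pvs, and m.pvcs so that cleanup() deletes them:

m.init(ctx, testParameters{
	registerDriver:             true,
	enableHonorPVReclaimPolicy: true,
	reclaimPolicy:              ptr.To(v1.PersistentVolumeReclaimDelete),
})
ginkgo.DeferCleanup(m.cleanup)
sc, pv, pvc := m.createPVPVC(ctx)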
func (m *mockDriverSetup) createPodWithPVC(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
f := m.f
@@ -344,6 +441,7 @@ func (m *mockDriverSetup) createPodWithFSGroup(ctx context.Context, fsGroup *int
ExpectedSize: "1Gi",
DelayBinding: m.tp.lateBinding,
AllowVolumeExpansion: m.tp.enableResizing,
ReclaimPolicy: m.tp.reclaimPolicy,
}
class, claim, pod := startBusyBoxPod(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, fsGroup)
@@ -375,6 +473,7 @@ func (m *mockDriverSetup) createPodWithSELinux(ctx context.Context, accessModes
DelayBinding: m.tp.lateBinding,
AllowVolumeExpansion: m.tp.enableResizing,
MountOptions: mountOptions,
ReclaimPolicy: m.tp.reclaimPolicy,
}
class, claim := createClaim(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, accessModes)
pod, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, seLinuxOpts)
@@ -441,7 +540,7 @@ func newStorageClass(t testsuites.StorageClassTest, ns string, prefix string) *s
}
}
-	sc := getStorageClass(pluginName, t.Parameters, &bindingMode, t.MountOptions, ns, prefix)
+	sc := getStorageClass(pluginName, t.Parameters, &bindingMode, t.MountOptions, t.ReclaimPolicy, ns, prefix)
if t.AllowVolumeExpansion {
sc.AllowVolumeExpansion = &t.AllowVolumeExpansion
}
@@ -453,6 +552,7 @@ func getStorageClass(
parameters map[string]string,
bindingMode *storagev1.VolumeBindingMode,
mountOptions []string,
reclaimPolicy *v1.PersistentVolumeReclaimPolicy,
ns string,
prefix string,
) *storagev1.StorageClass {
@@ -472,6 +572,7 @@ func getStorageClass(
Parameters: parameters,
VolumeBindingMode: bindingMode,
MountOptions: mountOptions,
ReclaimPolicy: reclaimPolicy,
}
}
@@ -523,6 +624,44 @@ func createClaim(ctx context.Context, cs clientset.Interface, t testsuites.Stora
return class, claim
}
func createVolumeAndClaim(ctx context.Context, cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, accessModes []v1.PersistentVolumeAccessMode) (*storagev1.StorageClass, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
class := createSC(cs, t, scName, ns)
volumeMode := v1.PersistentVolumeFilesystem
if t.VolumeMode != "" {
volumeMode = t.VolumeMode
}
pvConfig := e2epv.PersistentVolumeConfig{
Capacity: t.ClaimSize,
StorageClassName: class.Name,
VolumeMode: &volumeMode,
AccessModes: accessModes,
ReclaimPolicy: ptr.Deref(class.ReclaimPolicy, v1.PersistentVolumeReclaimDelete),
PVSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: class.Provisioner,
VolumeHandle: "test-volume-handle",
},
},
}
pvcConfig := e2epv.PersistentVolumeClaimConfig{
ClaimSize: t.ClaimSize,
StorageClassName: &(class.Name),
VolumeMode: &volumeMode,
AccessModes: accessModes,
}
volume, claim, err := e2epv.CreatePVPVC(ctx, cs, t.Timeouts, pvConfig, pvcConfig, ns, true)
framework.ExpectNoError(err, "Failed to create PV and PVC")
err = e2epv.WaitOnPVandPVC(ctx, cs, t.Timeouts, ns, volume, claim)
framework.ExpectNoError(err, "Failed waiting for PV and PVC to be bound each other")
return class, volume, claim
}
func startPausePod(ctx context.Context, cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
class, claim := createClaim(ctx, cs, t, node, scName, ns, nil)

@@ -0,0 +1,192 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package csimock
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
storagehelpers "k8s.io/component-helpers/storage/volume"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
admissionapi "k8s.io/pod-security-admission/api"
"k8s.io/utils/ptr"
)
var _ = utils.SIGDescribe("CSI Mock honor pv reclaim policy", feature.HonorPVReclaimPolicy, framework.WithFeatureGate(features.HonorPVReclaimPolicy), func() {
f := framework.NewDefaultFramework("csi-mock-honor-pv-reclaim-policy")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
m := newMockDriverSetup(f)
ginkgo.Context("CSI honor pv reclaim policy using mock driver", func() {
ginkgo.It("Dynamic provisioning should honor pv delete reclaim policy", func(ctx context.Context) {
m.init(ctx, testParameters{
registerDriver: true,
enableHonorPVReclaimPolicy: true,
reclaimPolicy: ptr.To(v1.PersistentVolumeReclaimDelete),
})
ginkgo.DeferCleanup(m.cleanup)
_, pvc := m.createPVC(ctx)
ginkgo.By(fmt.Sprintf("Waiting for PVC %s to be bound", pvc.Name))
pvs, err := e2epv.WaitForPVClaimBoundPhase(ctx, f.ClientSet, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "failed to wait for PVC to be bound")
gomega.Expect(pvs).To(gomega.HaveLen(1), "expected 1 PV to be bound to PVC, got %d", len(pvs))
pv := pvs[0]
ginkgo.By(fmt.Sprintf("PVC %s is bound to PV %s", pvc.Name, pv.Name))
gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(v1.PersistentVolumeReclaimDelete),
"expected PV %s to have reclaim policy %s, got %s", pv.Name, v1.PersistentVolumeReclaimDelete, pv.Spec.PersistentVolumeReclaimPolicy)
// For dynamic provisioning, the PV should be created with the deletion protection finalizer.
gomega.Expect(pv.Finalizers).To(gomega.ContainElement(storagehelpers.PVDeletionProtectionFinalizer),
"expected PV %s to have finalizer %s", pv.Name, storagehelpers.PVDeletionProtectionFinalizer)
ginkgo.By(fmt.Sprintf("Deleting PV %s", pv.Name))
err = f.ClientSet.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete PV %s", pv.Name)
ginkgo.By(fmt.Sprintf("Deleting PVC %s", pvc.Name))
err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete PVC %s", pvc.Name)
ginkgo.By(fmt.Sprintf("Waiting for PV %s to be deleted", pv.Name))
err = e2epv.WaitForPersistentVolumeDeleted(ctx, f.ClientSet, pv.Name, framework.Poll, 2*time.Minute)
framework.ExpectNoError(err, "failed to wait for PV to be deleted")
ginkgo.By(fmt.Sprintf("Verifying that the driver received DeleteVolume call for PV %s", pv.Name))
gomega.Expect(m.driver.GetCalls(ctx)).To(gomega.ContainElement(gomega.HaveField("Method", gomega.Equal("DeleteVolume"))))
})
ginkgo.It("Dynamic provisioning should honor pv retain reclaim policy", func(ctx context.Context) {
m.init(ctx, testParameters{
registerDriver: true,
enableHonorPVReclaimPolicy: true,
reclaimPolicy: ptr.To(v1.PersistentVolumeReclaimRetain),
})
ginkgo.DeferCleanup(m.cleanup)
_, pvc := m.createPVC(ctx)
ginkgo.By(fmt.Sprintf("Waiting for PVC %s to be bound", pvc.Name))
pvs, err := e2epv.WaitForPVClaimBoundPhase(ctx, f.ClientSet, []*v1.PersistentVolumeClaim{pvc}, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err, "failed to wait for PVC to be bound")
gomega.Expect(pvs).To(gomega.HaveLen(1), "expected 1 PV to be bound to PVC, got %d", len(pvs))
pv := pvs[0]
ginkgo.By(fmt.Sprintf("PVC %s is bound to PV %s", pvc.Name, pv.Name))
gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(v1.PersistentVolumeReclaimRetain),
"expected PV %s to have reclaim policy %s, got %s", pv.Name, v1.PersistentVolumeReclaimRetain, pv.Spec.PersistentVolumeReclaimPolicy)
ginkgo.By(fmt.Sprintf("Verifying that the PV %s does not have finalizer %s after creation", pv.Name, storagehelpers.PVDeletionProtectionFinalizer))
gomega.Consistently(ctx, framework.GetObject(f.ClientSet.CoreV1().PersistentVolumes().Get, pv.Name, metav1.GetOptions{})).
WithPolling(framework.Poll).WithTimeout(framework.ClaimProvisionTimeout).ShouldNot(gomega.HaveField("Finalizers",
gomega.ContainElement(storagehelpers.PVDeletionProtectionFinalizer)), "pv unexpectedly has the finalizer %s", storagehelpers.PVDeletionProtectionFinalizer)
ginkgo.By(fmt.Sprintf("Deleting PV %s", pv.Name))
err = f.ClientSet.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete PV %s", pv.Name)
ginkgo.By(fmt.Sprintf("Deleting PVC %s", pvc.Name))
err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete PVC %s", pvc.Name)
ginkgo.By(fmt.Sprintf("Waiting for PV %s to be deleted", pv.Name))
err = e2epv.WaitForPersistentVolumeDeleted(ctx, f.ClientSet, pv.Name, framework.Poll, 2*time.Minute)
framework.ExpectNoError(err, "failed to wait for PV to be deleted")
ginkgo.By(fmt.Sprintf("Verifying that the driver did not receive DeleteVolume call for PV %s", pv.Name))
gomega.Expect(m.driver.GetCalls(ctx)).NotTo(gomega.ContainElement(gomega.HaveField("Method", gomega.Equal("DeleteVolume"))))
})
ginkgo.It("Static provisioning should honor pv delete reclaim policy", func(ctx context.Context) {
m.init(ctx, testParameters{
registerDriver: true,
enableHonorPVReclaimPolicy: true,
reclaimPolicy: ptr.To(v1.PersistentVolumeReclaimDelete),
})
ginkgo.DeferCleanup(m.cleanup)
sc, pv, pvc := m.createPVPVC(ctx)
gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(v1.PersistentVolumeReclaimDelete),
"expected PV %s to have reclaim policy %s, got %s", pv.Name, v1.PersistentVolumeReclaimDelete, pv.Spec.PersistentVolumeReclaimPolicy)
gomega.Expect(pv.Annotations).NotTo(gomega.HaveKeyWithValue(storagehelpers.AnnDynamicallyProvisioned, sc.Provisioner), "expected PV %s to not have annotation %s", pv.Name, storagehelpers.AnnDynamicallyProvisioned)
ginkgo.By(fmt.Sprintf("Verifying that the PV %s has finalizer %s after creation", pv.Name, storagehelpers.PVDeletionProtectionFinalizer))
gomega.Eventually(ctx, framework.GetObject(f.ClientSet.CoreV1().PersistentVolumes().Get, pv.Name, metav1.GetOptions{})).
WithPolling(framework.Poll).WithTimeout(framework.ClaimProvisionTimeout).Should(gomega.HaveField("Finalizers",
gomega.ContainElement(storagehelpers.PVDeletionProtectionFinalizer)), "failed to wait for PV to have finalizer %s", storagehelpers.PVDeletionProtectionFinalizer)
ginkgo.By(fmt.Sprintf("Deleting PV %s", pv.Name))
err := f.ClientSet.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete PV %s", pv.Name)
ginkgo.By(fmt.Sprintf("Deleting PVC %s", pvc.Name))
err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete PVC %s", pvc.Name)
ginkgo.By(fmt.Sprintf("Waiting for PV %s to be deleted", pv.Name))
err = e2epv.WaitForPersistentVolumeDeleted(ctx, f.ClientSet, pv.Name, framework.Poll, 2*time.Minute)
framework.ExpectNoError(err, "failed to wait for PV to be deleted")
ginkgo.By(fmt.Sprintf("Verifying that the driver received DeleteVolume call for PV %s", pv.Name))
gomega.Expect(m.driver.GetCalls(ctx)).To(gomega.ContainElement(gomega.HaveField("Method", gomega.Equal("DeleteVolume"))))
})
ginkgo.It("Static provisioning should honor pv retain reclaim policy", func(ctx context.Context) {
m.init(ctx, testParameters{
registerDriver: true,
enableHonorPVReclaimPolicy: true,
reclaimPolicy: ptr.To(v1.PersistentVolumeReclaimRetain),
})
ginkgo.DeferCleanup(m.cleanup)
sc, pv, pvc := m.createPVPVC(ctx)
gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(v1.PersistentVolumeReclaimRetain),
"expected PV %s to have reclaim policy %s, got %s", pv.Name, v1.PersistentVolumeReclaimRetain, pv.Spec.PersistentVolumeReclaimPolicy)
gomega.Expect(pv.Annotations).NotTo(gomega.HaveKeyWithValue(storagehelpers.AnnDynamicallyProvisioned, sc.Provisioner), "expected PV %s to not have annotation %s", pv.Name, storagehelpers.AnnDynamicallyProvisioned)
ginkgo.By(fmt.Sprintf("Verifying that the PV %s does not have finalizer %s after creation", pv.Name, storagehelpers.PVDeletionProtectionFinalizer))
gomega.Consistently(ctx, framework.GetObject(f.ClientSet.CoreV1().PersistentVolumes().Get, pv.Name, metav1.GetOptions{})).
WithPolling(framework.Poll).WithTimeout(framework.ClaimProvisionTimeout).ShouldNot(gomega.HaveField("Finalizers",
gomega.ContainElement(storagehelpers.PVDeletionProtectionFinalizer)), "pv unexpectedly has the finalizer %s", storagehelpers.PVDeletionProtectionFinalizer)
ginkgo.By(fmt.Sprintf("Deleting PV %s", pv.Name))
err := f.ClientSet.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete PV %s", pv.Name)
ginkgo.By(fmt.Sprintf("Deleting PVC %s", pvc.Name))
err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete PVC %s", pvc.Name)
ginkgo.By(fmt.Sprintf("Waiting for PV %s to be deleted", pv.Name))
err = e2epv.WaitForPersistentVolumeDeleted(ctx, f.ClientSet, pv.Name, framework.Poll, 2*time.Minute)
framework.ExpectNoError(err, "failed to wait for PV to be deleted")
ginkgo.By(fmt.Sprintf("Verifying that the driver did not receive DeleteVolume call for PV %s", pv.Name))
gomega.Expect(m.driver.GetCalls(ctx)).NotTo(gomega.ContainElement(gomega.HaveField("Method", gomega.Equal("DeleteVolume"))))
})
})
})
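
For context: storagehelpers.PVDeletionProtectionFinalizer is the external-provisioner's deletion-protection finalizer, "external-provisioner.volume.kubernetes.io/finalizer" (value per k8s.io/component-helpers/storage/volume). Taken together, the four cases above assert that the finalizer is added only when the reclaim policy is Delete, and that the driver receives a DeleteVolume call only in that case.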

@@ -61,6 +61,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -311,6 +312,7 @@ type mockCSIDriver struct {
embeddedCSIDriver *mockdriver.CSIDriver
enableSELinuxMount *bool
enableRecoverExpansionFailure bool
enableHonorPVReclaimPolicy bool
// Additional values set during PrepareTest
clientSet clientset.Interface
@@ -359,6 +361,7 @@ type CSIMockDriverOpts struct {
FSGroupPolicy *storagev1.FSGroupPolicy
EnableSELinuxMount *bool
EnableRecoverExpansionFailure bool
EnableHonorPVReclaimPolicy bool
// Embedded defines whether the CSI mock driver runs
// inside the cluster (false, the default) or just a proxy
@@ -513,6 +516,7 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) MockCSITestDriver {
enableVolumeMountGroup: driverOpts.EnableVolumeMountGroup,
enableSELinuxMount: driverOpts.EnableSELinuxMount,
enableRecoverExpansionFailure: driverOpts.EnableRecoverExpansionFailure,
enableHonorPVReclaimPolicy: driverOpts.EnableHonorPVReclaimPolicy,
embedded: driverOpts.Embedded,
hooks: driverOpts.Hooks,
}
@@ -673,6 +677,10 @@ func (m *mockCSIDriver) PrepareTest(ctx context.Context, f *framework.Framework)
if m.enableRecoverExpansionFailure {
o.Features["csi-resizer"] = []string{"RecoverVolumeExpansionFailure=true"}
}
if m.enableHonorPVReclaimPolicy {
o.Features["csi-provisioner"] = append(o.Features["csi-provisioner"], fmt.Sprintf("%s=true", features.HonorPVReclaimPolicy))
}
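// Assumption about the manifest patcher: entries in o.Features are rendered into
// the named sidecar's --feature-gates argument, so the append above results in the
// csi-provisioner container running with --feature-gates=HonorPVReclaimPolicy=true.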
err = utils.CreateFromManifests(ctx, f, m.driverNamespace, func(item interface{}) error {
if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil {
return err
@@ -690,6 +698,18 @@ func (m *mockCSIDriver) PrepareTest(ctx context.Context, f *framework.Framework)
Verbs: []string{"get", "list"},
})
}
if m.enableHonorPVReclaimPolicy && strings.HasPrefix(item.Name, "external-provisioner-runner") {
// The update verb is needed for testing the HonorPVReclaimPolicy feature gate.
// The feature gate is in alpha and not enabled by default, so the verb
// is not included in the default RBAC manifest.
// TODO: Remove this once the feature gate is promoted to beta or stable and the
// verb is added to the default RBAC manifest in the external-provisioner.
item.Rules = append(item.Rules, rbacv1.PolicyRule{
APIGroups: []string{""},
Resources: []string{"persistentvolumes"},
Verbs: []string{"update"},
})
}
}
return nil

@@ -70,6 +70,7 @@ type StorageClassTest struct {
AllowVolumeExpansion bool
NodeSelection e2epod.NodeSelection
MountOptions []string
ReclaimPolicy *v1.PersistentVolumeReclaimPolicy
}
type provisioningTestSuite struct {