From 3a6a4830df003d8efcad7bd8e4f0cce609aee2ca Mon Sep 17 00:00:00 2001 From: carlory Date: Wed, 15 Nov 2023 17:20:58 +0800 Subject: [PATCH] pvc bind pv with vac --- pkg/apis/core/validation/validation.go | 4 +- pkg/apis/core/validation/validation_test.go | 14 ++ .../volume/persistentvolume/binder_test.go | 74 ++++++- .../volume/persistentvolume/framework_test.go | 34 ++++ .../volume/persistentvolume/index.go | 4 +- .../volume/persistentvolume/index_test.go | 47 +++++ .../volume/persistentvolume/pv_controller.go | 41 +++- .../core/persistentvolume/strategy.go | 2 + .../framework/plugins/volumebinding/binder.go | 2 +- .../storage/volume/pv_helpers.go | 23 ++- .../storage/volume/pv_helpers_test.go | 137 ++++++++++--- .../volume/persistent_volumes_test.go | 189 ++++++++++++++++++ 12 files changed, 534 insertions(+), 37 deletions(-) diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index e0841d0c21b..c22b02267b5 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -2391,7 +2391,9 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeCl newPvcClone.Spec.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"] // +k8s:verify-mutation:reason=clone } // lets make sure volume attributes class name is same. - newPvcClone.Spec.VolumeAttributesClassName = oldPvcClone.Spec.VolumeAttributesClassName // +k8s:verify-mutation:reason=clone + if newPvc.Status.Phase == core.ClaimBound && newPvcClone.Spec.VolumeAttributesClassName != nil { + newPvcClone.Spec.VolumeAttributesClassName = oldPvcClone.Spec.VolumeAttributesClassName // +k8s:verify-mutation:reason=clone + } oldSize := oldPvc.Spec.Resources.Requests["storage"] newSize := newPvc.Spec.Resources.Requests["storage"] diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index 65d5a2236e3..ada31301bca 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -3007,6 +3007,20 @@ func TestValidatePersistentVolumeClaimUpdate(t *testing.T) { enableVolumeAttributesClass: true, isExpectedFailure: true, }, + "invalid-update-volume-attributes-class-when-claim-not-bound": { + oldClaim: func() *core.PersistentVolumeClaim { + clone := validClaimVolumeAttributesClass1.DeepCopy() + clone.Status.Phase = core.ClaimPending + return clone + }(), + newClaim: func() *core.PersistentVolumeClaim { + clone := validClaimVolumeAttributesClass2.DeepCopy() + clone.Status.Phase = core.ClaimPending + return clone + }(), + enableVolumeAttributesClass: true, + isExpectedFailure: true, + }, "invalid-update-volume-attributes-class-to-nil-without-featuregate-enabled": { oldClaim: validClaimVolumeAttributesClass1, newClaim: validClaimNilVolumeAttributesClass, diff --git a/pkg/controller/volume/persistentvolume/binder_test.go b/pkg/controller/volume/persistentvolume/binder_test.go index 0891d6e6439..08b280f2190 100644 --- a/pkg/controller/volume/persistentvolume/binder_test.go +++ b/pkg/controller/volume/persistentvolume/binder_test.go @@ -17,13 +17,17 @@ limitations under the License. 
package persistentvolume import ( + "fmt" "testing" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/component-helpers/storage/volume" "k8s.io/klog/v2/ktesting" + "k8s.io/kubernetes/pkg/features" ) // Test single call to syncClaim and syncVolume methods. @@ -750,13 +754,73 @@ func TestSync(t *testing.T) { test: testSyncClaim, }, } - _, ctx := ktesting.NewTestContext(t) - runSyncTests(t, ctx, tests, []*storage.StorageClass{ + + // Once the feature-gate VolumeAttributesClass is promoted to GA, merge it with the above tests + whenFeatureGateEnabled := []controllerTest{ { - ObjectMeta: metav1.ObjectMeta{Name: classWait}, - VolumeBindingMode: &modeWait, + // syncClaim with claim pre-bound to a PV that exists and is + // unbound, but its volume attributes class does not match. Check that the claim status is reset to Pending + name: "2-11 - claim prebound to unbound volume that volume attributes class is different", + initialVolumes: volumesWithVAC(classGold, newVolumeArray("volume2-11", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), + expectedVolumes: volumesWithVAC(classGold, newVolumeArray("volume2-11", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), + initialClaims: newClaimArray("claim2-11", "uid2-11", "1Gi", "volume2-11", v1.ClaimBound, nil), + expectedClaims: newClaimArray("claim2-11", "uid2-11", "1Gi", "volume2-11", v1.ClaimPending, nil), + expectedEvents: []string{"Warning VolumeMismatch"}, + errors: noerrors, + test: testSyncClaim, }, - }, []*v1.Pod{}) + { + // syncClaim with claim pre-bound to a PV that exists and is + // unbound, they have the same volume attributes class. Check it gets bound and no volume.AnnBoundByController is set. + name: "2-12 - claim prebound to unbound volume that volume attributes class is same", + initialVolumes: volumesWithVAC(classGold, newVolumeArray("volume2-12", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), + expectedVolumes: volumesWithVAC(classGold, newVolumeArray("volume2-12", "1Gi", "uid2-12", "claim2-12", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, volume.AnnBoundByController)), + initialClaims: claimWithVAC(&classGold, newClaimArray("claim2-12", "uid2-12", "1Gi", "volume2-12", v1.ClaimPending, nil)), + expectedClaims: withExpectedVAC(&classGold, claimWithVAC(&classGold, newClaimArray("claim2-12", "uid2-12", "1Gi", "volume2-12", v1.ClaimBound, nil, volume.AnnBindCompleted))), + expectedEvents: noevents, + errors: noerrors, + test: testSyncClaim, + }, + } + + // Once the feature-gate VolumeAttributesClass is promoted to GA, remove it + whenFeatureGateDisabled := []controllerTest{ + { + // syncClaim with claim pre-bound to a PV that exists and is + // unbound, they have the same volume attributes class but the feature-gate is disabled. 
Check that the claim status is reset to Pending + name: "2-13 - claim prebound to unbound volume that volume attributes class is same", + initialVolumes: volumesWithVAC(classGold, newVolumeArray("volume2-13", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), + expectedVolumes: volumesWithVAC(classGold, newVolumeArray("volume2-13", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), + initialClaims: withExpectedVAC(&classGold, claimWithVAC(&classGold, newClaimArray("claim2-13", "uid2-13", "1Gi", "volume2-13", v1.ClaimBound, nil))), + expectedClaims: claimWithVAC(&classGold, newClaimArray("claim2-13", "uid2-13", "1Gi", "volume2-13", v1.ClaimPending, nil)), + expectedEvents: []string{"Warning VolumeMismatch"}, + errors: noerrors, + test: testSyncClaim, + }, + } + + for _, isEnabled := range []bool{true, false} { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeAttributesClass, isEnabled) + + allTests := tests + if isEnabled { + allTests = append(allTests, whenFeatureGateEnabled...) + } else { + allTests = append(allTests, whenFeatureGateDisabled...) + } + + for i := range allTests { + allTests[i].name = fmt.Sprintf("features.VolumeAttributesClass=%v %s", isEnabled, allTests[i].name) + } + + _, ctx := ktesting.NewTestContext(t) + runSyncTests(t, ctx, allTests, []*storage.StorageClass{ + { + ObjectMeta: metav1.ObjectMeta{Name: classWait}, + VolumeBindingMode: &modeWait, + }, + }, []*v1.Pod{}) + } } func TestSyncBlockVolume(t *testing.T) { diff --git a/pkg/controller/volume/persistentvolume/framework_test.go b/pkg/controller/volume/persistentvolume/framework_test.go index 90d95927c4d..ff223910b44 100644 --- a/pkg/controller/volume/persistentvolume/framework_test.go +++ b/pkg/controller/volume/persistentvolume/framework_test.go @@ -409,6 +409,14 @@ func withExpectedCapacity(capacity string, claims []*v1.PersistentVolumeClaim) [ return claims } +// withExpectedVAC sets the claim.Status.CurrentVolumeAttributesClassName of the first claim in the +// array to given value and returns the array. Meant to be used to compose +// claims specified inline in a test. +func withExpectedVAC(vacName *string, claims []*v1.PersistentVolumeClaim) []*v1.PersistentVolumeClaim { + claims[0].Status.CurrentVolumeAttributesClassName = vacName + return claims +} + // withMessage saves given message into volume.Status.Message of the first // volume in the array and returns the array. Meant to be used to compose // volumes specified inline in a test. @@ -419,6 +427,7 @@ func withMessage(message string, volumes []*v1.PersistentVolume) []*v1.Persisten // newVolumeArray returns array with a single volume that would be returned by // newVolume() with the same parameters. 
+// TODO: make the newVolumeArray function accept volume attributes class name as an input parameter func newVolumeArray(name, capacity, boundToClaimUID, boundToClaimName string, phase v1.PersistentVolumePhase, reclaimPolicy v1.PersistentVolumeReclaimPolicy, class string, annotations ...string) []*v1.PersistentVolume { return []*v1.PersistentVolume{ newVolume(name, capacity, boundToClaimUID, boundToClaimName, phase, reclaimPolicy, class, annotations...), @@ -489,6 +498,9 @@ func newClaim(name, claimUID, capacity, boundToVolume string, phase v1.Persisten // For most of the tests it's enough to copy claim's requested capacity, // individual tests can adjust it using withExpectedCapacity() claim.Status.Capacity = claim.Spec.Resources.Requests + // For most of the tests it's enough to copy claim's requested vac, + // individual tests can adjust it using withExpectedVAC() + claim.Status.CurrentVolumeAttributesClassName = claim.Spec.VolumeAttributesClassName } return &claim @@ -496,12 +508,18 @@ func newClaim(name, claimUID, capacity, boundToVolume string, phase v1.Persisten // newClaimArray returns array with a single claim that would be returned by // newClaim() with the same parameters. +// TODO: make the newClaimArray function accept volume attributes class name as an input parameter func newClaimArray(name, claimUID, capacity, boundToVolume string, phase v1.PersistentVolumeClaimPhase, class *string, annotations ...string) []*v1.PersistentVolumeClaim { return []*v1.PersistentVolumeClaim{ newClaim(name, claimUID, capacity, boundToVolume, phase, class, annotations...), } } +func claimWithVAC(vacName *string, claims []*v1.PersistentVolumeClaim) []*v1.PersistentVolumeClaim { + claims[0].Spec.VolumeAttributesClassName = vacName + return claims +} + // claimWithAnnotation saves given annotation into given claims. Meant to be // used to compose claims specified inline in a test. // TODO(refactor): This helper function (and other helpers related to claim @@ -536,6 +554,22 @@ func annotateClaim(claim *v1.PersistentVolumeClaim, ann map[string]string) *v1.P return claim } +// volumeWithVAC saves given vac into given volume. +// Meant to be used to compose volume specified inline in a test. +func volumeWithVAC(vacName string, volume *v1.PersistentVolume) *v1.PersistentVolume { + volume.Spec.VolumeAttributesClassName = &vacName + return volume +} + +// volumesWithVAC saves given vac into given volumes. +// Meant to be used to compose volumes specified inline in a test. +func volumesWithVAC(vacName string, volumes []*v1.PersistentVolume) []*v1.PersistentVolume { + for _, volume := range volumes { + volumeWithVAC(vacName, volume) + } + return volumes +} + // volumeWithAnnotation saves given annotation into given volume. // Meant to be used to compose volume specified inline in a test. 
func volumeWithAnnotation(name, value string, volume *v1.PersistentVolume) *v1.PersistentVolume { diff --git a/pkg/controller/volume/persistentvolume/index.go b/pkg/controller/volume/persistentvolume/index.go index 0e6ecbeee8d..9eaf1b918cc 100644 --- a/pkg/controller/volume/persistentvolume/index.go +++ b/pkg/controller/volume/persistentvolume/index.go @@ -21,9 +21,11 @@ import ( "sort" v1 "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/cache" "k8s.io/component-helpers/storage/volume" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume/util" ) @@ -92,7 +94,7 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *v1.PersistentVol return nil, err } - bestVol, err := volume.FindMatchingVolume(claim, volumes, nil /* node for topology binding*/, nil /* exclusion map */, delayBinding) + bestVol, err := volume.FindMatchingVolume(claim, volumes, nil /* node for topology binding*/, nil /* exclusion map */, delayBinding, utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass)) if err != nil { return nil, err } diff --git a/pkg/controller/volume/persistentvolume/index_test.go b/pkg/controller/volume/persistentvolume/index_test.go index 46b9746d413..25b50d9e06b 100644 --- a/pkg/controller/volume/persistentvolume/index_test.go +++ b/pkg/controller/volume/persistentvolume/index_test.go @@ -23,9 +23,12 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/kubernetes/scheme" ref "k8s.io/client-go/tools/reference" + featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/component-helpers/storage/volume" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume/util" ) @@ -75,6 +78,9 @@ func makeVolumeModePVC(size string, mode *v1.PersistentVolumeMode, modfn func(*v } func TestMatchVolume(t *testing.T) { + // Default enable the VolumeAttributesClass feature gate. 
+ featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeAttributesClass, true) + volList := newPersistentVolumeOrderedIndex() for _, pv := range createTestVolumes() { volList.store.Add(pv) @@ -134,6 +140,24 @@ func TestMatchVolume(t *testing.T) { pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} }), }, + "successful-match-with-empty-vac": { + expectedMatch: "gce-pd-10", + claim: makePVC("8G", func(pvc *v1.PersistentVolumeClaim) { + pvc.Spec.VolumeAttributesClassName = &classEmpty + }), + }, + "successful-match-with-vac": { + expectedMatch: "gce-pd-vac-silver1", + claim: makePVC("1G", func(pvc *v1.PersistentVolumeClaim) { + pvc.Spec.VolumeAttributesClassName = &classSilver + }), + }, + "successful-no-match-vac-nonexisting": { + expectedMatch: "", + claim: makePVC("1G", func(pvc *v1.PersistentVolumeClaim) { + pvc.Spec.VolumeAttributesClassName = &classNonExisting + }), + }, "successful-match-with-class": { expectedMatch: "gce-pd-silver1", claim: makePVC("1G", func(pvc *v1.PersistentVolumeClaim) { @@ -962,6 +986,29 @@ func createTestVolumes() []*v1.PersistentVolume { VolumeMode: &fs, }, }, + { + ObjectMeta: metav1.ObjectMeta{ + UID: "gce-pd-vac-silver1", + Name: "gce-pd-vac-silver1", + }, + Spec: v1.PersistentVolumeSpec{ + Capacity: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse("100G"), + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}, + }, + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + v1.ReadOnlyMany, + }, + VolumeAttributesClassName: &classSilver, + VolumeMode: &fs, + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, + }, } } diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index 5e756e0b675..9020966eaff 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -26,6 +26,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/slice" + "k8s.io/utils/ptr" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" @@ -274,6 +275,20 @@ func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVo return fmt.Errorf("storageClassName does not match") } + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass) { + requestedVAC := ptr.Deref(claim.Spec.VolumeAttributesClassName, "") + volumeVAC := ptr.Deref(volume.Spec.VolumeAttributesClassName, "") + if requestedVAC != volumeVAC { + return fmt.Errorf("volumeAttributesClassName does not match") + } + } else { + requestedVAC := ptr.Deref(claim.Spec.VolumeAttributesClassName, "") + volumeVAC := ptr.Deref(volume.Spec.VolumeAttributesClassName, "") + if requestedVAC != "" || volumeVAC != "" { + return fmt.Errorf("volumeAttributesClassName is not supported when the feature-gate VolumeAttributesClass is disabled") + } + } + if storagehelpers.CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) { return fmt.Errorf("incompatible volumeMode") } @@ -764,7 +779,7 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * // // claim - claim to update // phase - phase to set -// volume - volume which Capacity is set into claim.Status.Capacity +// volume - volume which Capacity is set into claim.Status.Capacity and VolumeAttributesClassName is set into 
claim.Status.CurrentVolumeAttributesClassName func (ctrl *PersistentVolumeController) updateClaimStatus(ctx context.Context, claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) { logger := klog.FromContext(ctx) logger.V(4).Info("Updating PersistentVolumeClaim status", "PVC", klog.KObj(claim), "setPhase", phase) @@ -778,7 +793,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(ctx context.Context, c } if volume == nil { - // Need to reset AccessModes and Capacity + // Need to reset AccessModes, Capacity and CurrentVolumeAttributesClassName if claim.Status.AccessModes != nil { claimClone.Status.AccessModes = nil dirty = true @@ -787,8 +802,12 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(ctx context.Context, c claimClone.Status.Capacity = nil dirty = true } + if claim.Status.CurrentVolumeAttributesClassName != nil { + claimClone.Status.CurrentVolumeAttributesClassName = nil + dirty = true + } } else { - // Need to update AccessModes and Capacity + // Need to update AccessModes, Capacity and CurrentVolumeAttributesClassName if !reflect.DeepEqual(claim.Status.AccessModes, volume.Spec.AccessModes) { claimClone.Status.AccessModes = volume.Spec.AccessModes dirty = true @@ -821,6 +840,20 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(ctx context.Context, c dirty = true } } + + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass) { + // There are two components to updating the current vac name, this controller and external-resizer. + // The controller ensures that the field is set properly when the volume is statically provisioned. + // It is safer for the controller to only set this field during binding, but not after. Without this + // constraint, there is a potential race condition where the resizer sets the field after the controller + // has set it, or vice versa. Afterwards, it should be handled only by the resizer, or if an admin wants + // to override. 
+ if claim.Status.Phase == v1.ClaimPending && phase == v1.ClaimBound && + !reflect.DeepEqual(claim.Status.CurrentVolumeAttributesClassName, volume.Spec.VolumeAttributesClassName) { + claimClone.Status.CurrentVolumeAttributesClassName = volume.Spec.VolumeAttributesClassName + dirty = true + } + } } if !dirty { @@ -850,7 +883,7 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(ctx context.Context, c // // claim - claim to update // phase - phase to set -// volume - volume which Capacity is set into claim.Status.Capacity +// volume - volume which Capacity is set into claim.Status.Capacity and VolumeAttributesClassName is set into claim.Status.CurrentVolumeAttributesClassName // eventtype, reason, message - event to send, see EventRecorder.Event() func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(ctx context.Context, claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume, eventtype, reason, message string) (*v1.PersistentVolumeClaim, error) { logger := klog.FromContext(ctx) diff --git a/pkg/registry/core/persistentvolume/strategy.go b/pkg/registry/core/persistentvolume/strategy.go index 144092b93e5..d3ef0c81dc5 100644 --- a/pkg/registry/core/persistentvolume/strategy.go +++ b/pkg/registry/core/persistentvolume/strategy.go @@ -66,6 +66,7 @@ func (persistentvolumeStrategy) GetResetFields() map[fieldpath.APIVersion]*field func (persistentvolumeStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { pv := obj.(*api.PersistentVolume) pv.Status = api.PersistentVolumeStatus{} + pvutil.DropDisabledSpecFields(&pv.Spec, nil) pv.Status.Phase = api.VolumePending now := NowFunc() @@ -97,6 +98,7 @@ func (persistentvolumeStrategy) PrepareForUpdate(ctx context.Context, obj, old r newPv := obj.(*api.PersistentVolume) oldPv := old.(*api.PersistentVolume) newPv.Status = oldPv.Status + pvutil.DropDisabledSpecFields(&newPv.Spec, &oldPv.Spec) } func (persistentvolumeStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder.go b/pkg/scheduler/framework/plugins/volumebinding/binder.go index ed98ba7da14..57a24312c91 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/binder.go +++ b/pkg/scheduler/framework/plugins/volumebinding/binder.go @@ -914,7 +914,7 @@ func (b *volumeBinder) findMatchingVolumes(logger klog.Logger, pod *v1.Pod, clai pvs := unboundVolumesDelayBinding[storageClassName] // Find a matching PV - pv, err := volume.FindMatchingVolume(pvc, pvs, node, chosenPVs, true) + pv, err := volume.FindMatchingVolume(pvc, pvs, node, chosenPVs, true, utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass)) if err != nil { return false, nil, nil, err } diff --git a/staging/src/k8s.io/component-helpers/storage/volume/pv_helpers.go b/staging/src/k8s.io/component-helpers/storage/volume/pv_helpers.go index f927b72314a..7912276ca3d 100644 --- a/staging/src/k8s.io/component-helpers/storage/volume/pv_helpers.go +++ b/staging/src/k8s.io/component-helpers/storage/volume/pv_helpers.go @@ -28,6 +28,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" storagelisters "k8s.io/client-go/listers/storage/v1" "k8s.io/client-go/tools/reference" + "k8s.io/utils/ptr" ) const ( @@ -187,7 +188,15 @@ func FindMatchingVolume( volumes []*v1.PersistentVolume, node *v1.Node, excludedVolumes map[string]*v1.PersistentVolume, - delayBinding bool) (*v1.PersistentVolume, error) { + delayBinding bool, + vacEnabled bool) (*v1.PersistentVolume, error) { + 
+ if !vacEnabled { + claimVAC := ptr.Deref(claim.Spec.VolumeAttributesClassName, "") + if claimVAC != "" { + return nil, fmt.Errorf("unsupported volumeAttributesClassName is set on claim %s when the feature-gate VolumeAttributesClass is disabled", claimToClaimKey(claim)) + } + } var smallestVolume *v1.PersistentVolume var smallestVolumeQty resource.Quantity @@ -227,6 +236,18 @@ func FindMatchingVolume( continue } + claimVAC := ptr.Deref(claim.Spec.VolumeAttributesClassName, "") + volumeVAC := ptr.Deref(volume.Spec.VolumeAttributesClassName, "") + + // filter out mismatching volumeAttributesClassName + if vacEnabled && claimVAC != volumeVAC { + continue + } + if !vacEnabled && volumeVAC != "" { + // when the feature gate is disabled, the PV object has VAC set, then we should not bind at all. + continue + } + // check if PV's DeletionTimeStamp is set, if so, skip this volume. if volume.ObjectMeta.DeletionTimestamp != nil { continue diff --git a/staging/src/k8s.io/component-helpers/storage/volume/pv_helpers_test.go b/staging/src/k8s.io/component-helpers/storage/volume/pv_helpers_test.go index 4d73bd6f048..35ab8f933d5 100644 --- a/staging/src/k8s.io/component-helpers/storage/volume/pv_helpers_test.go +++ b/staging/src/k8s.io/component-helpers/storage/volume/pv_helpers_test.go @@ -17,6 +17,7 @@ limitations under the License. package volume import ( + "fmt" "testing" v1 "k8s.io/api/core/v1" @@ -33,6 +34,8 @@ var ( classNoMode = "no-mode" classImmediateMode = "immediate-mode" classWaitMode = "wait-mode" + classGold = "gold" + classSilver = "silver" modeImmediate = storagev1.VolumeBindingImmediate modeWait = storagev1.VolumeBindingWaitForFirstConsumer @@ -167,6 +170,15 @@ func TestFindMatchVolumeWithNode(t *testing.T) { }), } + var volumesWithVAC = func(name string, input []*v1.PersistentVolume) []*v1.PersistentVolume { + output := make([]*v1.PersistentVolume, len(input)) + for i, volume := range input { + output[i] = volume.DeepCopy() + output[i].Spec.VolumeAttributesClassName = &name + } + return output + } + node1 := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"key1": "value1"}, @@ -190,80 +202,151 @@ func TestFindMatchVolumeWithNode(t *testing.T) { scenarios := map[string]struct { expectedMatch string + expectErr bool claim *v1.PersistentVolumeClaim node *v1.Node + volumes []*v1.PersistentVolume excludedVolumes map[string]*v1.PersistentVolume + vacEnabled []bool }{ "success-match": { expectedMatch: "affinity-pv", - claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}), + volumes: volumes, + claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node1, + vacEnabled: []bool{true, false}, }, "success-prebound": { expectedMatch: "affinity-prebound", - claim: makeTestPersistentVolumeClaim("claim02", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}), + volumes: volumes, + claim: makeTestPersistentVolumeClaim("claim02", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node1, + vacEnabled: []bool{true, false}, }, "success-exclusion": { expectedMatch: "affinity-pv2", - claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}), + volumes: volumes, + claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node1, excludedVolumes: map[string]*v1.PersistentVolume{"affinity001": nil}, + vacEnabled: []bool{true, false}, }, 
"fail-exclusion": { expectedMatch: "", - claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}), + volumes: volumes, + claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node1, - excludedVolumes: map[string]*v1.PersistentVolume{"affinity001": nil, "affinity002": nil}, + excludedVolumes: map[string]*v1.PersistentVolume{"affinity001": nil, "affinity002": nil, "affinity002-vac": nil}, + vacEnabled: []bool{true, false}, }, "fail-accessmode": { expectedMatch: "", - claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}), + volumes: volumes, + claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, nil), node: node1, + vacEnabled: []bool{true, false}, }, "fail-nodeaffinity": { expectedMatch: "", - claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}), + volumes: volumes, + claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node2, + vacEnabled: []bool{true, false}, }, "fail-prebound-node-affinity": { expectedMatch: "", - claim: makeTestPersistentVolumeClaim("claim02", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}), + volumes: volumes, + claim: makeTestPersistentVolumeClaim("claim02", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node3, + vacEnabled: []bool{true, false}, }, "fail-nonavaliable": { expectedMatch: "", - claim: makeTestPersistentVolumeClaim("claim04", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}), + volumes: volumes, + claim: makeTestPersistentVolumeClaim("claim04", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node4, + vacEnabled: []bool{true, false}, }, "success-bad-and-good-node-affinity": { expectedMatch: "affinity-pv3", - claim: makeTestPersistentVolumeClaim("claim03", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}), + volumes: volumes, + claim: makeTestPersistentVolumeClaim("claim03", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), node: node3, + vacEnabled: []bool{true, false}, + }, + "success-match-with-vac": { + expectedMatch: "affinity-pv", + volumes: volumesWithVAC(classGold, volumes), + claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, func(pvc *v1.PersistentVolumeClaim) { + pvc.Spec.VolumeAttributesClassName = &classGold + }), + node: node1, + vacEnabled: []bool{true}, + }, + "fail-vac": { // claim has a given vac and volumes don't have the same vac. + expectedMatch: "", + volumes: volumes, + claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, func(pvc *v1.PersistentVolumeClaim) { + pvc.Spec.VolumeAttributesClassName = &classSilver + }), + node: node1, + vacEnabled: []bool{true}, + }, + "fail-prebound-vac": { // claim has a given vac and volume name but the given volume has a different vac. + expectedMatch: "", + volumes: volumesWithVAC(classGold, volumes), + claim: makeTestPersistentVolumeClaim("claim02", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, func(pvc *v1.PersistentVolumeClaim) { + pvc.Spec.VolumeAttributesClassName = &classSilver + }), + node: node1, + vacEnabled: []bool{true}, + }, + "fail-on-error": { // claim has a given vac when feature-gate is disabled. 
+ expectedMatch: "", + expectErr: true, + volumes: volumes, + claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, func(pvc *v1.PersistentVolumeClaim) { + pvc.Spec.VolumeAttributesClassName = &classGold + }), + node: node1, + vacEnabled: []bool{false}, + }, + "fail-volumes-vac": { // claim has no vac and all volumes have vac when feature-gate is disabled. + expectedMatch: "", + volumes: volumesWithVAC(classGold, volumes), + claim: makeTestPersistentVolumeClaim("claim01", "100G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, nil), + node: node1, + vacEnabled: []bool{false}, }, } for name, scenario := range scenarios { - volume, err := FindMatchingVolume(scenario.claim, volumes, scenario.node, scenario.excludedVolumes, true) - if err != nil { - t.Errorf("Unexpected error matching volume by claim: %v", err) - } - if len(scenario.expectedMatch) != 0 && volume == nil { - t.Errorf("Expected match but received nil volume for scenario: %s", name) - } - if len(scenario.expectedMatch) != 0 && volume != nil && string(volume.UID) != scenario.expectedMatch { - t.Errorf("Expected %s but got volume %s in scenario %s", scenario.expectedMatch, volume.UID, name) - } - if len(scenario.expectedMatch) == 0 && volume != nil { - t.Errorf("Unexpected match for scenario: %s, matched with %s instead", name, volume.UID) + for _, enabled := range scenario.vacEnabled { + name := fmt.Sprintf("[VolumeAttributiesClass: %v] %s", enabled, name) + volume, err := FindMatchingVolume(scenario.claim, scenario.volumes, scenario.node, scenario.excludedVolumes, true, enabled) + if scenario.expectErr && err == nil { + t.Errorf("Expected error for scenario: %s", name) + } + if !scenario.expectErr && err != nil { + t.Errorf("Unexpected error matching volume by claim: %v", err) + } + if len(scenario.expectedMatch) != 0 && volume == nil { + t.Errorf("Expected match but received nil volume for scenario: %s", name) + } + if len(scenario.expectedMatch) != 0 && volume != nil && string(volume.UID) != scenario.expectedMatch { + t.Errorf("Expected %s but got volume %s in scenario %s", scenario.expectedMatch, volume.UID, name) + } + if len(scenario.expectedMatch) == 0 && volume != nil { + t.Errorf("Unexpected match for scenario: %s, matched with %s instead", name, volume.UID) + } } } } -func makeTestPersistentVolumeClaim(name string, size string, accessMode []v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim { +func makeTestPersistentVolumeClaim(name string, size string, accessMode []v1.PersistentVolumeAccessMode, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentVolumeClaim { fs := v1.PersistentVolumeFilesystem sc := "wait" - return &v1.PersistentVolumeClaim{ + pvc := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "myns", @@ -279,6 +362,12 @@ func makeTestPersistentVolumeClaim(name string, size string, accessMode []v1.Per VolumeMode: &fs, }, } + + if modfn != nil { + modfn(pvc) + } + + return pvc } func makeTestVolume(uid types.UID, name string, capacity string, available bool, modfn func(*v1.PersistentVolume)) *v1.PersistentVolume { diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go index 47ad859370a..40355fd0130 100644 --- a/test/integration/volume/persistent_volumes_test.go +++ b/test/integration/volume/persistent_volumes_test.go @@ -30,13 +30,16 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" 
+ utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" ref "k8s.io/client-go/tools/reference" + featuregatetesting "k8s.io/component-base/featuregate/testing" kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" "k8s.io/kubernetes/pkg/api/legacyscheme" persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" "k8s.io/kubernetes/pkg/volume/util" @@ -569,6 +572,180 @@ func TestPersistentVolumeMultiPVs(t *testing.T) { t.Log("volumes released") } +// TestPersistentVolumeClaimVolumeAttirbutesClassName test binding using volume attributes +// class name. +func TestPersistentVolumeClaimVolumeAttirbutesClassName(t *testing.T) { + var ( + err error + modes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} + reclaim = v1.PersistentVolumeReclaimRetain + namespaceName = "pvc-volume-attributes-class-name" + + classEmpty = "" + classGold = "gold" + classSilver = "silver" + + pv = createCSIPV("pv", "1G", modes, reclaim) + pvGold = createCSIPV("pv-gold", "1G", modes, reclaim) + pv2Gold = createCSIPV("pv2-gold", "1G", modes, reclaim) + pvSilver = createCSIPV("pv-silver", "1G", modes, reclaim) + + pvc = createPVC("pvc", namespaceName, "1G", modes, "") + pvcEmpty = createPVC("pvc", namespaceName, "1G", modes, "") + pvcGold = createPVC("pvc-gold", namespaceName, "1G", modes, "") + ) + + // prepare PVs and PVCs + pv.Spec.VolumeAttributesClassName = nil + pvGold.Spec.VolumeAttributesClassName = &classGold + pv2Gold.Spec.VolumeAttributesClassName = &classGold + pvSilver.Spec.VolumeAttributesClassName = &classSilver + + pvc.Spec.VolumeAttributesClassName = nil + pvcEmpty.Spec.VolumeAttributesClassName = &classEmpty + pvcGold.Spec.VolumeAttributesClassName = &classGold + + testCases := []struct { + featureEnabled bool + name string + volumes []*v1.PersistentVolume + claim *v1.PersistentVolumeClaim + expectVolumeName string + }{ + { + featureEnabled: true, + name: "claim with nil class bind to a pv", + volumes: []*v1.PersistentVolume{pv, pvGold, pvSilver}, + claim: pvc, + expectVolumeName: pv.Name, + }, + { + featureEnabled: true, + name: "claim with empty class bind to a pv", + volumes: []*v1.PersistentVolume{pv, pvGold, pvSilver}, + claim: pvcEmpty, + expectVolumeName: pv.Name, + }, + { + featureEnabled: true, + name: "claim bind to a pv with same class name", + volumes: []*v1.PersistentVolume{pv, pvGold, pvSilver}, + claim: pvcGold, + expectVolumeName: pvGold.Name, + }, + { + featureEnabled: true, + name: "claim bind to a user-asked pv with same class name", + volumes: []*v1.PersistentVolume{pv, pvGold, pv2Gold, pvSilver}, + claim: func() *v1.PersistentVolumeClaim { + pvcGoldClone := pvcGold.DeepCopy() + pvcGoldClone.Spec.VolumeName = pv2Gold.Name + return pvcGoldClone + }(), + expectVolumeName: pv2Gold.Name, + }, + { + featureEnabled: false, + name: "claim bind to a pv due to class name is dropped by kube-apiserver", + volumes: []*v1.PersistentVolume{pvGold}, + claim: pvcGold, + expectVolumeName: pvGold.Name, + }, + { + featureEnabled: false, + name: "claim with nil class bind to a pv", + volumes: []*v1.PersistentVolume{pv}, + claim: pvc, + expectVolumeName: pv.Name, + }, + { + featureEnabled: false, + name: "claim with empty class bind to a pv", + volumes: []*v1.PersistentVolume{pv}, + claim: pvcEmpty, + expectVolumeName: pv.Name, + }, + } + + for _, 
tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeAttributesClass, tc.featureEnabled) + s := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount,StorageObjectInUseProtection"}, framework.SharedEtcd()) + defer s.TearDownFn() + + tCtx := ktesting.Init(t) + defer tCtx.Cancel("test has completed") + testClient, controller, informers, watchPV, watchPVC := createClients(tCtx, namespaceName, t, s, defaultSyncPeriod) + defer watchPV.Stop() + defer watchPVC.Stop() + + ns := framework.CreateNamespaceOrDie(testClient, namespaceName, t) + defer framework.DeleteNamespaceOrDie(testClient, ns, t) + + // NOTE: This test cannot run in parallel, because it is creating and deleting + // non-namespaced objects (PersistenceVolumes). + defer func() { + _ = testClient.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{}) + }() + + informers.Start(tCtx.Done()) + go controller.Run(tCtx) + + for _, volume := range tc.volumes { + _, err = testClient.CoreV1().PersistentVolumes().Create(context.TODO(), volume, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create PersistentVolume: %v", err) + } + } + t.Log("volumes created") + + _, err = testClient.CoreV1().PersistentVolumeClaims(ns.Name).Create(context.TODO(), tc.claim, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("Failed to create PersistentVolumeClaim: %v", err) + } + t.Log("claim created") + + waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound) + t.Log("volume bound") + + waitForPersistentVolumeClaimPhase(testClient, tc.claim.Name, ns.Name, watchPVC, v1.ClaimBound) + t.Log("claim bound") + + gotClaim, err := testClient.CoreV1().PersistentVolumeClaims(ns.Name).Get(context.TODO(), tc.claim.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Unexpected error getting pvc: %v", err) + } + if !tc.featureEnabled { + if gotClaim.Spec.VolumeAttributesClassName != nil || gotClaim.Status.CurrentVolumeAttributesClassName != nil { + t.Fatalf("unexpected volume class name on claim %q", gotClaim.Name) + } + } + + for _, volume := range tc.volumes { + gotVolume, err := testClient.CoreV1().PersistentVolumes().Get(context.TODO(), volume.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Unexpected error getting pv: %v", err) + } + if !tc.featureEnabled { + if gotVolume.Spec.VolumeAttributesClassName != nil { + t.Fatalf("unexpected volume class name on volume %q", gotVolume.Name) + } + } + if volume.Name == tc.expectVolumeName { + if gotVolume.Spec.ClaimRef == nil { + t.Fatalf("%s PV should be bound", volume.Name) + } + if gotVolume.Spec.ClaimRef.Namespace != tc.claim.Namespace || gotVolume.Spec.ClaimRef.Name != tc.claim.Name { + t.Fatalf("Bind mismatch! Expected %s/%s but got %s/%s", tc.claim.Namespace, tc.claim.Name, gotVolume.Spec.ClaimRef.Namespace, gotVolume.Spec.ClaimRef.Name) + } + } else if gotVolume.Spec.ClaimRef != nil { + t.Fatalf("%s PV shouldn't be bound", volume.Name) + } + } + }) + } +} + // TestPersistentVolumeMultiPVsPVCs tests binding of 100 PVC to 100 PVs. // This test is configurable by KUBE_INTEGRATION_PV_* variables. 
func TestPersistentVolumeMultiPVsPVCs(t *testing.T) { @@ -1433,3 +1610,15 @@ func createPVCWithNilStorageClass(name, namespace, cap string, mode []v1.Persist }, } } + +func createCSIPV(name, cap string, mode []v1.PersistentVolumeAccessMode, reclaim v1.PersistentVolumeReclaimPolicy) *v1.PersistentVolume { + return &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{CSI: &v1.CSIPersistentVolumeSource{Driver: "mock-driver", VolumeHandle: "volume-handle"}}, + Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse(cap)}, + AccessModes: mode, + PersistentVolumeReclaimPolicy: reclaim, + }, + } +}
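
A minimal sketch of how a caller consumes the updated FindMatchingVolume signature introduced above: the trailing boolean is the feature-gate decision that index.go and the scheduler volumebinding plugin now pass in. The package name example and the wrapper pickVolumeForClaim are illustrative only, not part of this patch; the helper and feature-gate calls are the ones the patch itself uses.

// pvmatch_example.go -- illustrative sketch, not part of this patch.
package example

import (
	v1 "k8s.io/api/core/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/component-helpers/storage/volume"
	"k8s.io/kubernetes/pkg/features"
)

// pickVolumeForClaim selects a PV for the claim the same way the updated
// in-tree callers do. When the VolumeAttributesClass gate is on, PVs whose
// spec.volumeAttributesClassName differs from the claim's are skipped; when
// it is off, FindMatchingVolume returns an error if the claim still sets a
// volumeAttributesClassName and skips any PV that sets one.
func pickVolumeForClaim(claim *v1.PersistentVolumeClaim, pvs []*v1.PersistentVolume) (*v1.PersistentVolume, error) {
	vacEnabled := utilfeature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass)
	// nil node and exclusion map, delayBinding=false: immediate, topology-unaware binding.
	return volume.FindMatchingVolume(claim, pvs, nil, nil, false, vacEnabled)
}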