From 5ca081416593aaf6fb33badb229112ece64c2d6a Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Wed, 3 Mar 2021 12:43:34 +0100
Subject: [PATCH] CSIStorageCapacity: use beta API

---
 .../volume/scheduling/scheduler_binder.go     | 12 +++----
 .../scheduling/scheduler_binder_test.go       | 32 +++++++++----------
 .../plugins/volumebinding/volume_binding.go   |  2 +-
 test/e2e/storage/csi_mock_volume.go           |  8 ++---
 test/integration/apiserver/print_test.go      |  1 +
 .../volumescheduling/volume_binding_test.go   |  8 ++---
 6 files changed, 32 insertions(+), 31 deletions(-)

diff --git a/pkg/controller/volume/scheduling/scheduler_binder.go b/pkg/controller/volume/scheduling/scheduler_binder.go
index 439c4b334ca..4281c4219e8 100644
--- a/pkg/controller/volume/scheduling/scheduler_binder.go
+++ b/pkg/controller/volume/scheduling/scheduler_binder.go
@@ -25,7 +25,7 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
-	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -35,11 +35,11 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	storageinformers "k8s.io/client-go/informers/storage/v1"
-	storageinformersv1alpha1 "k8s.io/client-go/informers/storage/v1alpha1"
+	storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1"
 	clientset "k8s.io/client-go/kubernetes"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	storagelisters "k8s.io/client-go/listers/storage/v1"
-	storagelistersv1alpha1 "k8s.io/client-go/listers/storage/v1alpha1"
+	storagelistersv1beta1 "k8s.io/client-go/listers/storage/v1beta1"
 	storagehelpers "k8s.io/component-helpers/storage/volume"
 	csitrans "k8s.io/csi-translation-lib"
 	csiplugins "k8s.io/csi-translation-lib/plugins"
@@ -210,7 +210,7 @@ type volumeBinder struct {
 
 	capacityCheckEnabled     bool
 	csiDriverLister          storagelisters.CSIDriverLister
-	csiStorageCapacityLister storagelistersv1alpha1.CSIStorageCapacityLister
+	csiStorageCapacityLister storagelistersv1beta1.CSIStorageCapacityLister
 }
 
 // CapacityCheck contains additional parameters for NewVolumeBinder that
@@ -218,7 +218,7 @@
 // capacity is desired.
 type CapacityCheck struct {
 	CSIDriverInformer          storageinformers.CSIDriverInformer
-	CSIStorageCapacityInformer storageinformersv1alpha1.CSIStorageCapacityInformer
+	CSIStorageCapacityInformer storageinformersv1beta1.CSIStorageCapacityInformer
 }
 
 // NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions.
@@ -989,7 +989,7 @@ func (b *volumeBinder) hasEnoughCapacity(provisioner string, claim *v1.Persisten
 	return false, nil
 }
 
-func (b *volumeBinder) nodeHasAccess(node *v1.Node, capacity *storagev1alpha1.CSIStorageCapacity) bool {
+func (b *volumeBinder) nodeHasAccess(node *v1.Node, capacity *storagev1beta1.CSIStorageCapacity) bool {
 	if capacity.NodeTopology == nil {
 		// Unavailable
 		return false
diff --git a/pkg/controller/volume/scheduling/scheduler_binder_test.go b/pkg/controller/volume/scheduling/scheduler_binder_test.go
index ce4694cc3b1..b092252271a 100644
--- a/pkg/controller/volume/scheduling/scheduler_binder_test.go
+++ b/pkg/controller/volume/scheduling/scheduler_binder_test.go
@@ -26,7 +26,7 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
-	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -37,7 +37,7 @@ import (
 	"k8s.io/client-go/informers"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	storageinformers "k8s.io/client-go/informers/storage/v1"
-	storageinformersv1alpha1 "k8s.io/client-go/informers/storage/v1alpha1"
+	storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	k8stesting "k8s.io/client-go/testing"
@@ -140,7 +140,7 @@ type testEnv struct {
 
 	// For CSIStorageCapacity feature testing:
 	internalCSIDriverInformer          storageinformers.CSIDriverInformer
-	internalCSIStorageCapacityInformer storageinformersv1alpha1.CSIStorageCapacityInformer
+	internalCSIStorageCapacityInformer storageinformersv1beta1.CSIStorageCapacityInformer
 }
 
 func newTestBinder(t *testing.T, stopCh <-chan struct{}, csiStorageCapacity ...bool) *testEnv {
@@ -164,7 +164,7 @@ func newTestBinder(t *testing.T, stopCh <-chan struct{}, csiStorageCapacity ...b
 	pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()
 	classInformer := informerFactory.Storage().V1().StorageClasses()
 	csiDriverInformer := informerFactory.Storage().V1().CSIDrivers()
-	csiStorageCapacityInformer := informerFactory.Storage().V1alpha1().CSIStorageCapacities()
+	csiStorageCapacityInformer := informerFactory.Storage().V1beta1().CSIStorageCapacities()
 	var capacityCheck *CapacityCheck
 	if len(csiStorageCapacity) > 0 && csiStorageCapacity[0] {
 		capacityCheck = &CapacityCheck{
@@ -302,7 +302,7 @@ func (env *testEnv) addCSIDriver(csiDriver *storagev1.CSIDriver) {
 	csiDriverInformer.GetIndexer().Add(csiDriver)
 }
 
-func (env *testEnv) addCSIStorageCapacities(capacities []*storagev1alpha1.CSIStorageCapacity) {
+func (env *testEnv) addCSIStorageCapacities(capacities []*storagev1beta1.CSIStorageCapacity) {
 	csiStorageCapacityInformer := env.internalCSIStorageCapacityInformer.Informer()
 	for _, capacity := range capacities {
 		csiStorageCapacityInformer.GetIndexer().Add(capacity)
@@ -743,8 +743,8 @@ func makeCSIDriver(name string, storageCapacity bool) *storagev1.CSIDriver {
 	}
 }
 
-func makeCapacity(name, storageClassName string, node *v1.Node, capacityStr string) *storagev1alpha1.CSIStorageCapacity {
-	c := &storagev1alpha1.CSIStorageCapacity{
+func makeCapacity(name, storageClassName string, node *v1.Node, capacityStr string) *storagev1beta1.CSIStorageCapacity {
+	c := &storagev1beta1.CSIStorageCapacity{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
 		},
@@ -2202,7 +2202,7 @@ func TestCapacity(t *testing.T) {
 	type scenarioType struct {
 		// Inputs
 		pvcs       []*v1.PersistentVolumeClaim
-		capacities []*storagev1alpha1.CSIStorageCapacity
+		capacities []*storagev1beta1.CSIStorageCapacity
 
 		// Expected return values
 		reasons ConflictReasons
@@ -2211,19 +2211,19 @@ func TestCapacity(t *testing.T) {
 	scenarios := map[string]scenarioType{
 		"network-attached": {
 			pvcs:       []*v1.PersistentVolumeClaim{provisionedPVC},
-			capacities: []*storagev1alpha1.CSIStorageCapacity{
+			capacities: []*storagev1beta1.CSIStorageCapacity{
 				makeCapacity("net", waitClassWithProvisioner, nil, "1Gi"),
 			},
 		},
 		"local-storage": {
 			pvcs:       []*v1.PersistentVolumeClaim{provisionedPVC},
-			capacities: []*storagev1alpha1.CSIStorageCapacity{
+			capacities: []*storagev1beta1.CSIStorageCapacity{
 				makeCapacity("net", waitClassWithProvisioner, node1, "1Gi"),
 			},
 		},
 		"multiple": {
 			pvcs:       []*v1.PersistentVolumeClaim{provisionedPVC},
-			capacities: []*storagev1alpha1.CSIStorageCapacity{
+			capacities: []*storagev1beta1.CSIStorageCapacity{
 				makeCapacity("net", waitClassWithProvisioner, nil, "1Gi"),
 				makeCapacity("net", waitClassWithProvisioner, node2, "1Gi"),
 				makeCapacity("net", waitClassWithProvisioner, node1, "1Gi"),
@@ -2235,35 +2235,35 @@ func TestCapacity(t *testing.T) {
 		},
 		"wrong-node": {
 			pvcs:       []*v1.PersistentVolumeClaim{provisionedPVC},
-			capacities: []*storagev1alpha1.CSIStorageCapacity{
+			capacities: []*storagev1beta1.CSIStorageCapacity{
 				makeCapacity("net", waitClassWithProvisioner, node2, "1Gi"),
 			},
 			reasons: ConflictReasons{ErrReasonNotEnoughSpace},
 		},
 		"wrong-storage-class": {
 			pvcs:       []*v1.PersistentVolumeClaim{provisionedPVC},
-			capacities: []*storagev1alpha1.CSIStorageCapacity{
+			capacities: []*storagev1beta1.CSIStorageCapacity{
 				makeCapacity("net", waitClass, node1, "1Gi"),
 			},
 			reasons: ConflictReasons{ErrReasonNotEnoughSpace},
 		},
 		"insufficient-storage": {
 			pvcs:       []*v1.PersistentVolumeClaim{provisionedPVC},
-			capacities: []*storagev1alpha1.CSIStorageCapacity{
+			capacities: []*storagev1beta1.CSIStorageCapacity{
 				makeCapacity("net", waitClassWithProvisioner, node1, "1Mi"),
 			},
 			reasons: ConflictReasons{ErrReasonNotEnoughSpace},
 		},
 		"zero-storage": {
 			pvcs:       []*v1.PersistentVolumeClaim{provisionedPVC},
-			capacities: []*storagev1alpha1.CSIStorageCapacity{
+			capacities: []*storagev1beta1.CSIStorageCapacity{
 				makeCapacity("net", waitClassWithProvisioner, node1, "0Mi"),
 			},
 			reasons: ConflictReasons{ErrReasonNotEnoughSpace},
 		},
 		"nil-storage": {
 			pvcs:       []*v1.PersistentVolumeClaim{provisionedPVC},
-			capacities: []*storagev1alpha1.CSIStorageCapacity{
+			capacities: []*storagev1beta1.CSIStorageCapacity{
 				makeCapacity("net", waitClassWithProvisioner, node1, ""),
 			},
 			reasons: ConflictReasons{ErrReasonNotEnoughSpace},
diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
index 6a290301fa9..c20784b5161 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
@@ -354,7 +354,7 @@ func New(plArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) {
 	if utilfeature.DefaultFeatureGate.Enabled(features.CSIStorageCapacity) {
 		capacityCheck = &scheduling.CapacityCheck{
 			CSIDriverInformer:          fh.SharedInformerFactory().Storage().V1().CSIDrivers(),
-			CSIStorageCapacityInformer: fh.SharedInformerFactory().Storage().V1alpha1().CSIStorageCapacities(),
+			CSIStorageCapacityInformer: fh.SharedInformerFactory().Storage().V1beta1().CSIStorageCapacities(),
 		}
 	}
 	binder := scheduling.NewVolumeBinder(fh.ClientSet(), podInformer, nodeInformer, csiNodeInformer, pvcInformer, pvInformer, storageClassInformer, capacityCheck, time.Duration(args.BindTimeoutSeconds)*time.Second)
diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go
index b785f5d416b..05f7a2bf3fd 100644
--- a/test/e2e/storage/csi_mock_volume.go
+++ b/test/e2e/storage/csi_mock_volume.go
@@ -31,7 +31,7 @@ import (
 	"google.golang.org/grpc/status"
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
-	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1188,7 +1188,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			// before adding CSIStorageCapacity objects for it.
 			for _, capacityStr := range test.capacities {
 				capacityQuantity := resource.MustParse(capacityStr)
-				capacity := &storagev1alpha1.CSIStorageCapacity{
+				capacity := &storagev1beta1.CSIStorageCapacity{
 					ObjectMeta: metav1.ObjectMeta{
 						GenerateName: "fake-capacity-",
 					},
@@ -1197,10 +1197,10 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 					NodeTopology: &metav1.LabelSelector{},
 					Capacity:     &capacityQuantity,
 				}
-				createdCapacity, err := f.ClientSet.StorageV1alpha1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
+				createdCapacity, err := f.ClientSet.StorageV1beta1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
 				framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity)
 				m.testCleanups = append(m.testCleanups, func() {
-					f.ClientSet.StorageV1alpha1().CSIStorageCapacities(f.Namespace.Name).Delete(context.Background(), createdCapacity.Name, metav1.DeleteOptions{})
+					f.ClientSet.StorageV1beta1().CSIStorageCapacities(f.Namespace.Name).Delete(context.Background(), createdCapacity.Name, metav1.DeleteOptions{})
 				})
 			}
diff --git a/test/integration/apiserver/print_test.go b/test/integration/apiserver/print_test.go
index 53f8da141bf..9375024a04a 100644
--- a/test/integration/apiserver/print_test.go
+++ b/test/integration/apiserver/print_test.go
@@ -157,6 +157,7 @@ func TestServerSidePrint(t *testing.T) {
 		{Group: "rbac.authorization.k8s.io", Version: "v1alpha1"},
 		{Group: "scheduling.k8s.io", Version: "v1"},
 		{Group: "storage.k8s.io", Version: "v1alpha1"},
+		{Group: "storage.k8s.io", Version: "v1beta1"},
 		{Group: "extensions", Version: "v1beta1"},
 		{Group: "node.k8s.io", Version: "v1"},
 		{Group: "node.k8s.io", Version: "v1alpha1"},
diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go
index 783c4144200..fd14ceaf217 100644
--- a/test/integration/volumescheduling/volume_binding_test.go
+++ b/test/integration/volumescheduling/volume_binding_test.go
@@ -31,7 +31,7 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
-	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
@@ -941,8 +941,8 @@ func TestCapacity(t *testing.T) {
 
 			// Create CSIStorageCapacity
 			if test.haveCapacity {
-				if _, err := config.client.StorageV1alpha1().CSIStorageCapacities("default").Create(context.TODO(),
-					&storagev1alpha1.CSIStorageCapacity{
+				if _, err := config.client.StorageV1beta1().CSIStorageCapacities("default").Create(context.TODO(),
+					&storagev1beta1.CSIStorageCapacity{
 						ObjectMeta: metav1.ObjectMeta{
 							GenerateName: "foo-",
 						},
@@ -1155,7 +1155,7 @@ func deleteTestObjects(client clientset.Interface, ns string, option metav1.Dele
 	client.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), option, metav1.ListOptions{})
 	client.StorageV1().StorageClasses().DeleteCollection(context.TODO(), option, metav1.ListOptions{})
 	client.StorageV1().CSIDrivers().DeleteCollection(context.TODO(), option, metav1.ListOptions{})
-	client.StorageV1alpha1().CSIStorageCapacities("default").DeleteCollection(context.TODO(), option, metav1.ListOptions{})
+	client.StorageV1beta1().CSIStorageCapacities("default").DeleteCollection(context.TODO(), option, metav1.ListOptions{})
 }
 
 func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
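
Note (not part of the patch to apply): the standalone sketch below shows the client-side shape of the new v1beta1 code path that the e2e and integration tests above exercise, using nothing but a plain clientset. The kubeconfig location, namespace ("default", as in the integration test), name prefix, storage class name, and capacity value are placeholders, not values taken from the patch; the only substantive difference from the pre-patch code is StorageV1alpha1() becoming StorageV1beta1().

package main

import (
	"context"
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a kubeconfig at the default location; in-cluster config works as well.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	ctx := context.Background()

	quantity := resource.MustParse("1Gi") // placeholder capacity value
	capacity := &storagev1beta1.CSIStorageCapacity{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "example-capacity-", // hypothetical name prefix
		},
		StorageClassName: "example-sc", // hypothetical storage class
		// An empty selector means the capacity applies to all nodes,
		// matching what the CSI mock volume test does.
		NodeTopology: &metav1.LabelSelector{},
		Capacity:     &quantity,
	}

	// Create through the beta API group, as the updated tests do.
	created, err := client.StorageV1beta1().CSIStorageCapacities("default").Create(ctx, capacity, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created CSIStorageCapacity", created.Name)

	// Clean up again, mirroring the deferred cleanup in the e2e test.
	if err := client.StorageV1beta1().CSIStorageCapacities("default").Delete(ctx, created.Name, metav1.DeleteOptions{}); err != nil {
		panic(err)
	}
}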