diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go
index 2884023b0e3..35c5433ddf7 100644
--- a/pkg/printers/internalversion/printers.go
+++ b/pkg/printers/internalversion/printers.go
@@ -40,7 +40,6 @@ import (
     rbacv1beta1 "k8s.io/api/rbac/v1beta1"
     schedulingv1 "k8s.io/api/scheduling/v1"
     storagev1 "k8s.io/api/storage/v1"
-    storagev1alpha1 "k8s.io/api/storage/v1alpha1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime"
@@ -530,8 +529,8 @@ func AddHandlers(h printers.PrintHandler) {
 
     csiStorageCapacityColumnDefinitions := []metav1.TableColumnDefinition{
         {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
-        {Name: "StorageClassName", Type: "string", Description: storagev1alpha1.CSIStorageCapacity{}.SwaggerDoc()["storageClassName"]},
-        {Name: "Capacity", Type: "string", Description: storagev1alpha1.CSIStorageCapacity{}.SwaggerDoc()["capacity"]},
+        {Name: "StorageClassName", Type: "string", Description: storagev1.CSIStorageCapacity{}.SwaggerDoc()["storageClassName"]},
+        {Name: "Capacity", Type: "string", Description: storagev1.CSIStorageCapacity{}.SwaggerDoc()["capacity"]},
     }
     h.TableHandler(csiStorageCapacityColumnDefinitions, printCSIStorageCapacity)
     h.TableHandler(csiStorageCapacityColumnDefinitions, printCSIStorageCapacityList)
diff --git a/pkg/scheduler/eventhandlers.go b/pkg/scheduler/eventhandlers.go
index 96ae9536af5..0e579ea1549 100644
--- a/pkg/scheduler/eventhandlers.go
+++ b/pkg/scheduler/eventhandlers.go
@@ -353,7 +353,7 @@ func addAllEventHandlers(
                 buildEvtResHandler(at, framework.CSIDriver, "CSIDriver"),
             )
         case framework.CSIStorageCapacity:
-            informerFactory.Storage().V1beta1().CSIStorageCapacities().Informer().AddEventHandler(
+            informerFactory.Storage().V1().CSIStorageCapacities().Informer().AddEventHandler(
                 buildEvtResHandler(at, framework.CSIStorageCapacity, "CSIStorageCapacity"),
             )
         case framework.PersistentVolume:
diff --git a/pkg/scheduler/eventhandlers_test.go b/pkg/scheduler/eventhandlers_test.go
index 81522ca9892..7b7d5ccf6b5 100644
--- a/pkg/scheduler/eventhandlers_test.go
+++ b/pkg/scheduler/eventhandlers_test.go
@@ -26,7 +26,7 @@ import (
     appsv1 "k8s.io/api/apps/v1"
     batchv1 "k8s.io/api/batch/v1"
     v1 "k8s.io/api/core/v1"
-    storagev1beta1 "k8s.io/api/storage/v1beta1"
+    storagev1 "k8s.io/api/storage/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -383,11 +383,11 @@ func TestAddAllEventHandlers(t *testing.T) {
                 "storage.k8s.io/CSIStorageCapacity": framework.Update,
             },
             expectStaticInformers: map[reflect.Type]bool{
-                reflect.TypeOf(&v1.Pod{}):                            true,
-                reflect.TypeOf(&v1.Node{}):                           true,
-                reflect.TypeOf(&v1.Namespace{}):                      true,
-                reflect.TypeOf(&v1.PersistentVolume{}):               true,
-                reflect.TypeOf(&storagev1beta1.CSIStorageCapacity{}): true,
+                reflect.TypeOf(&v1.Pod{}):                       true,
+                reflect.TypeOf(&v1.Node{}):                      true,
+                reflect.TypeOf(&v1.Namespace{}):                 true,
+                reflect.TypeOf(&v1.PersistentVolume{}):          true,
+                reflect.TypeOf(&storagev1.CSIStorageCapacity{}): true,
             },
             expectDynamicInformers: map[schema.GroupVersionResource]bool{},
         },
diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder.go b/pkg/scheduler/framework/plugins/volumebinding/binder.go
index e0f9973d876..4930cea4d56 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/binder.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/binder.go
@@ -25,7 +25,6 @@ import (
 
     v1 "k8s.io/api/core/v1"
     storagev1 "k8s.io/api/storage/v1"
-    storagev1beta1 "k8s.io/api/storage/v1beta1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
@@ -35,11 +34,9 @@ import (
     utilfeature "k8s.io/apiserver/pkg/util/feature"
    coreinformers "k8s.io/client-go/informers/core/v1"
     storageinformers "k8s.io/client-go/informers/storage/v1"
-    storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1"
     clientset "k8s.io/client-go/kubernetes"
     corelisters "k8s.io/client-go/listers/core/v1"
     storagelisters "k8s.io/client-go/listers/storage/v1"
-    storagelistersv1beta1 "k8s.io/client-go/listers/storage/v1beta1"
     "k8s.io/component-helpers/storage/ephemeral"
     storagehelpers "k8s.io/component-helpers/storage/volume"
     csitrans "k8s.io/csi-translation-lib"
@@ -210,7 +207,7 @@ type volumeBinder struct {
     translator InTreeToCSITranslator
 
     csiDriverLister          storagelisters.CSIDriverLister
-    csiStorageCapacityLister storagelistersv1beta1.CSIStorageCapacityLister
+    csiStorageCapacityLister storagelisters.CSIStorageCapacityLister
 }
 
 // CapacityCheck contains additional parameters for NewVolumeBinder that
@@ -218,7 +215,7 @@ type volumeBinder struct {
 // capacity is desired.
 type CapacityCheck struct {
     CSIDriverInformer          storageinformers.CSIDriverInformer
-    CSIStorageCapacityInformer storageinformersv1beta1.CSIStorageCapacityInformer
+    CSIStorageCapacityInformer storageinformers.CSIStorageCapacityInformer
 }
 
 // NewVolumeBinder sets up all the caches needed for the scheduler to make volume binding decisions.
@@ -963,7 +960,7 @@ func (b *volumeBinder) hasEnoughCapacity(provisioner string, claim *v1.Persisten
     return false, nil
 }
 
-func capacitySufficient(capacity *storagev1beta1.CSIStorageCapacity, sizeInBytes int64) bool {
+func capacitySufficient(capacity *storagev1.CSIStorageCapacity, sizeInBytes int64) bool {
     limit := capacity.Capacity
     if capacity.MaximumVolumeSize != nil {
         // Prefer MaximumVolumeSize if available, it is more precise.
@@ -972,7 +969,7 @@ func capacitySufficient(capacity *storagev1beta1.CSIStorageCapacity, sizeInBytes
     return limit != nil && limit.Value() >= sizeInBytes
 }
 
-func (b *volumeBinder) nodeHasAccess(node *v1.Node, capacity *storagev1beta1.CSIStorageCapacity) bool {
+func (b *volumeBinder) nodeHasAccess(node *v1.Node, capacity *storagev1.CSIStorageCapacity) bool {
     if capacity.NodeTopology == nil {
         // Unavailable
         return false
diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go
index 8e85b8485ba..555068a94a1 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go
@@ -27,7 +27,6 @@ import (
 
     v1 "k8s.io/api/core/v1"
     storagev1 "k8s.io/api/storage/v1"
-    storagev1beta1 "k8s.io/api/storage/v1beta1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
@@ -38,7 +37,6 @@ import (
     "k8s.io/client-go/informers"
     coreinformers "k8s.io/client-go/informers/core/v1"
     storageinformers "k8s.io/client-go/informers/storage/v1"
-    storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/fake"
     k8stesting "k8s.io/client-go/testing"
@@ -141,7 +139,7 @@ type testEnv struct {
 
     // For CSIStorageCapacity feature testing:
     internalCSIDriverInformer          storageinformers.CSIDriverInformer
-    internalCSIStorageCapacityInformer storageinformersv1beta1.CSIStorageCapacityInformer
+    internalCSIStorageCapacityInformer storageinformers.CSIStorageCapacityInformer
 }
 
 func newTestBinder(t *testing.T, stopCh <-chan struct{}) *testEnv {
@@ -165,7 +163,7 @@ func newTestBinder(t *testing.T, stopCh <-chan struct{}) *testEnv {
     pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()
     classInformer := informerFactory.Storage().V1().StorageClasses()
     csiDriverInformer := informerFactory.Storage().V1().CSIDrivers()
-    csiStorageCapacityInformer := informerFactory.Storage().V1beta1().CSIStorageCapacities()
+    csiStorageCapacityInformer := informerFactory.Storage().V1().CSIStorageCapacities()
     capacityCheck := CapacityCheck{
         CSIDriverInformer:          csiDriverInformer,
         CSIStorageCapacityInformer: csiStorageCapacityInformer,
@@ -301,7 +299,7 @@ func (env *testEnv) addCSIDriver(csiDriver *storagev1.CSIDriver) {
     csiDriverInformer.GetIndexer().Add(csiDriver)
 }
 
-func (env *testEnv) addCSIStorageCapacities(capacities []*storagev1beta1.CSIStorageCapacity) {
+func (env *testEnv) addCSIStorageCapacities(capacities []*storagev1.CSIStorageCapacity) {
     csiStorageCapacityInformer := env.internalCSIStorageCapacityInformer.Informer()
     for _, capacity := range capacities {
         csiStorageCapacityInformer.GetIndexer().Add(capacity)
@@ -736,8 +734,8 @@ func makeCSIDriver(name string, storageCapacity bool) *storagev1.CSIDriver {
     }
 }
 
-func makeCapacity(name, storageClassName string, node *v1.Node, capacityStr, maximumVolumeSizeStr string) *storagev1beta1.CSIStorageCapacity {
-    c := &storagev1beta1.CSIStorageCapacity{
+func makeCapacity(name, storageClassName string, node *v1.Node, capacityStr, maximumVolumeSizeStr string) *storagev1.CSIStorageCapacity {
+    c := &storagev1.CSIStorageCapacity{
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
         },
@@ -2159,7 +2157,7 @@ func TestCapacity(t *testing.T) {
     type scenarioType struct {
         // Inputs
         pvcs       []*v1.PersistentVolumeClaim
-        capacities []*storagev1beta1.CSIStorageCapacity
+        capacities []*storagev1.CSIStorageCapacity
 
         // Expected return values
         reasons ConflictReasons
@@ -2168,19 +2166,19 @@ func TestCapacity(t *testing.T) {
     scenarios := map[string]scenarioType{
         "network-attached": {
             pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
-            capacities: []*storagev1beta1.CSIStorageCapacity{
+            capacities: []*storagev1.CSIStorageCapacity{
                 makeCapacity("net", waitClassWithProvisioner, nil, "1Gi", ""),
             },
         },
         "local-storage": {
             pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
-            capacities: []*storagev1beta1.CSIStorageCapacity{
+            capacities: []*storagev1.CSIStorageCapacity{
                 makeCapacity("net", waitClassWithProvisioner, node1, "1Gi", ""),
             },
         },
         "multiple": {
             pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
-            capacities: []*storagev1beta1.CSIStorageCapacity{
+            capacities: []*storagev1.CSIStorageCapacity{
                 makeCapacity("net", waitClassWithProvisioner, nil, "1Gi", ""),
                 makeCapacity("net", waitClassWithProvisioner, node2, "1Gi", ""),
                 makeCapacity("net", waitClassWithProvisioner, node1, "1Gi", ""),
@@ -2192,49 +2190,49 @@ func TestCapacity(t *testing.T) {
         },
         "wrong-node": {
             pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
-            capacities: []*storagev1beta1.CSIStorageCapacity{
+            capacities: []*storagev1.CSIStorageCapacity{
                 makeCapacity("net", waitClassWithProvisioner, node2, "1Gi", ""),
             },
             reasons: ConflictReasons{ErrReasonNotEnoughSpace},
         },
         "wrong-storage-class": {
             pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
-            capacities: []*storagev1beta1.CSIStorageCapacity{
+            capacities: []*storagev1.CSIStorageCapacity{
                 makeCapacity("net", waitClass, node1, "1Gi", ""),
             },
             reasons: ConflictReasons{ErrReasonNotEnoughSpace},
         },
         "insufficient-storage": {
             pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
-            capacities: []*storagev1beta1.CSIStorageCapacity{
+            capacities: []*storagev1.CSIStorageCapacity{
                 makeCapacity("net", waitClassWithProvisioner, node1, "1Mi", ""),
             },
             reasons: ConflictReasons{ErrReasonNotEnoughSpace},
         },
         "insufficient-volume-size": {
             pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
-            capacities: []*storagev1beta1.CSIStorageCapacity{
+            capacities: []*storagev1.CSIStorageCapacity{
                 makeCapacity("net", waitClassWithProvisioner, node1, "1Gi", "1Mi"),
             },
             reasons: ConflictReasons{ErrReasonNotEnoughSpace},
         },
         "zero-storage": {
             pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
-            capacities: []*storagev1beta1.CSIStorageCapacity{
+            capacities: []*storagev1.CSIStorageCapacity{
                 makeCapacity("net", waitClassWithProvisioner, node1, "0Mi", ""),
             },
             reasons: ConflictReasons{ErrReasonNotEnoughSpace},
         },
         "zero-volume-size": {
             pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
-            capacities: []*storagev1beta1.CSIStorageCapacity{
+            capacities: []*storagev1.CSIStorageCapacity{
                 makeCapacity("net", waitClassWithProvisioner, node1, "", "0Mi"),
             },
             reasons: ConflictReasons{ErrReasonNotEnoughSpace},
         },
         "nil-storage": {
             pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
-            capacities: []*storagev1beta1.CSIStorageCapacity{
+            capacities: []*storagev1.CSIStorageCapacity{
                 makeCapacity("net", waitClassWithProvisioner, node1, "", ""),
             },
             reasons: ConflictReasons{ErrReasonNotEnoughSpace},
diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
index b1a30027f35..54b1a8b4780 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go
@@ -377,7 +377,7 @@ func New(plArgs runtime.Object, fh framework.Handle, fts feature.Features) (fram
     csiNodeInformer := fh.SharedInformerFactory().Storage().V1().CSINodes()
     capacityCheck := CapacityCheck{
         CSIDriverInformer:          fh.SharedInformerFactory().Storage().V1().CSIDrivers(),
-        CSIStorageCapacityInformer: fh.SharedInformerFactory().Storage().V1beta1().CSIStorageCapacities(),
+        CSIStorageCapacityInformer: fh.SharedInformerFactory().Storage().V1().CSIStorageCapacities(),
     }
     binder := NewVolumeBinder(fh.ClientSet(), podInformer, nodeInformer, csiNodeInformer, pvcInformer, pvInformer, storageClassInformer, capacityCheck, time.Duration(args.BindTimeoutSeconds)*time.Second)
diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go
index 8b14bbce45e..8233bddf6bc 100644
--- a/test/e2e/storage/csi_mock_volume.go
+++ b/test/e2e/storage/csi_mock_volume.go
@@ -31,7 +31,6 @@ import (
     "google.golang.org/grpc/status"
     v1 "k8s.io/api/core/v1"
     storagev1 "k8s.io/api/storage/v1"
-    storagev1beta1 "k8s.io/api/storage/v1beta1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1387,7 +1386,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
             // before adding CSIStorageCapacity objects for it.
             for _, capacityStr := range test.capacities {
                 capacityQuantity := resource.MustParse(capacityStr)
-                capacity := &storagev1beta1.CSIStorageCapacity{
+                capacity := &storagev1.CSIStorageCapacity{
                     ObjectMeta: metav1.ObjectMeta{
                         GenerateName: "fake-capacity-",
                     },
@@ -1396,10 +1395,10 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
                     NodeTopology:     &metav1.LabelSelector{},
                     Capacity:         &capacityQuantity,
                 }
-                createdCapacity, err := f.ClientSet.StorageV1beta1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
+                createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
                 framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity)
                 m.testCleanups = append(m.testCleanups, func() {
-                    f.ClientSet.StorageV1beta1().CSIStorageCapacities(f.Namespace.Name).Delete(context.Background(), createdCapacity.Name, metav1.DeleteOptions{})
+                    f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Delete(context.Background(), createdCapacity.Name, metav1.DeleteOptions{})
                 })
             }
diff --git a/test/e2e/storage/testsuites/capacity.go b/test/e2e/storage/testsuites/capacity.go
index ac223be8e59..8dcfe0747e7 100644
--- a/test/e2e/storage/testsuites/capacity.go
+++ b/test/e2e/storage/testsuites/capacity.go
@@ -27,7 +27,6 @@ import (
     "github.com/onsi/gomega/types"
 
     storagev1 "k8s.io/api/storage/v1"
-    storagev1beta1 "k8s.io/api/storage/v1beta1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -115,8 +114,8 @@ func (p *capacityTestSuite) DefineTests(driver storageframework.TestDriver, patt
     timeout := time.Minute
     pollInterval := time.Second
     matchSC := HaveCapacitiesForClass(sc.Name)
-    listAll := gomega.Eventually(func() (*storagev1beta1.CSIStorageCapacityList, error) {
-        return f.ClientSet.StorageV1beta1().CSIStorageCapacities("").List(context.Background(), metav1.ListOptions{})
+    listAll := gomega.Eventually(func() (*storagev1.CSIStorageCapacityList, error) {
+        return f.ClientSet.StorageV1().CSIStorageCapacities("").List(context.Background(), metav1.ListOptions{})
     }, timeout, pollInterval)
 
     // If we have further information about what storage
@@ -150,7 +149,7 @@ func (p *capacityTestSuite) DefineTests(driver storageframework.TestDriver, patt
     })
 }
 
-func formatCapacities(capacities []storagev1beta1.CSIStorageCapacity) []string {
+func formatCapacities(capacities []storagev1.CSIStorageCapacity) []string {
     lines := []string{}
     for _, capacity := range capacities {
         lines = append(lines, fmt.Sprintf(" %+v", capacity))
@@ -158,7 +157,7 @@ func formatCapacities(capacities []storagev1beta1.CSIStorageCapacity) []string {
     return lines
 }
 
-// MatchCapacities runs some kind of check against *storagev1beta1.CSIStorageCapacityList.
+// MatchCapacities runs some kind of check against *storagev1.CSIStorageCapacityList.
 // In case of failure, all actual objects are appended to the failure message.
 func MatchCapacities(match types.GomegaMatcher) types.GomegaMatcher {
     return matchCSIStorageCapacities{match: match}
@@ -183,7 +182,7 @@ func (m matchCSIStorageCapacities) NegatedFailureMessage(actual interface{}) (me
 }
 
 func (m matchCSIStorageCapacities) dump(actual interface{}) string {
-    capacities, ok := actual.(*storagev1beta1.CSIStorageCapacityList)
+    capacities, ok := actual.(*storagev1.CSIStorageCapacityList)
     if !ok || capacities == nil {
         return ""
     }
@@ -201,10 +200,10 @@ type CapacityMatcher interface {
     types.GomegaMatcher
     // MatchedCapacities returns all CSICapacityObjects which were
     // found during the preceding Match call.
-    MatchedCapacities() []storagev1beta1.CSIStorageCapacity
+    MatchedCapacities() []storagev1.CSIStorageCapacity
 }
 
-// HaveCapacitiesForClass filters all storage capacity objects in a *storagev1beta1.CSIStorageCapacityList
+// HaveCapacitiesForClass filters all storage capacity objects in a *storagev1.CSIStorageCapacityList
 // by storage class. Success is when when there is at least one.
 func HaveCapacitiesForClass(scName string) CapacityMatcher {
     return &haveCSIStorageCapacities{scName: scName}
@@ -212,15 +211,15 @@ func HaveCapacitiesForClass(scName string) CapacityMatcher {
 
 type haveCSIStorageCapacities struct {
     scName             string
-    matchingCapacities []storagev1beta1.CSIStorageCapacity
+    matchingCapacities []storagev1.CSIStorageCapacity
 }
 
 var _ CapacityMatcher = &haveCSIStorageCapacities{}
 
 func (h *haveCSIStorageCapacities) Match(actual interface{}) (success bool, err error) {
-    capacities, ok := actual.(*storagev1beta1.CSIStorageCapacityList)
+    capacities, ok := actual.(*storagev1.CSIStorageCapacityList)
     if !ok {
-        return false, fmt.Errorf("expected *storagev1beta1.CSIStorageCapacityList, got: %T", actual)
+        return false, fmt.Errorf("expected *storagev1.CSIStorageCapacityList, got: %T", actual)
     }
     h.matchingCapacities = nil
     for _, capacity := range capacities.Items {
@@ -231,7 +230,7 @@ func (h *haveCSIStorageCapacities) Match(actual interface{}) (success bool, err
     return len(h.matchingCapacities) > 0, nil
 }
 
-func (h *haveCSIStorageCapacities) MatchedCapacities() []storagev1beta1.CSIStorageCapacity {
+func (h *haveCSIStorageCapacities) MatchedCapacities() []storagev1.CSIStorageCapacity {
     return h.matchingCapacities
 }
 
@@ -264,8 +263,8 @@ type haveLocalStorageCapacities struct {
     topologyKey string
 
     matchSuccess          bool
-    expectedCapacities    []storagev1beta1.CSIStorageCapacity
-    unexpectedCapacities  []storagev1beta1.CSIStorageCapacity
+    expectedCapacities    []storagev1.CSIStorageCapacity
+    unexpectedCapacities  []storagev1.CSIStorageCapacity
     missingTopologyValues []string
 }
 
@@ -340,7 +339,7 @@ func (h *haveLocalStorageCapacities) Match(actual interface{}) (success bool, er
     return len(h.unexpectedCapacities) == 0 && len(h.missingTopologyValues) == 0, nil
 }
 
-func (h *haveLocalStorageCapacities) MatchedCapacities() []storagev1beta1.CSIStorageCapacity {
+func (h *haveLocalStorageCapacities) MatchedCapacities() []storagev1.CSIStorageCapacity {
     return h.match.MatchedCapacities()
 }
diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go
index aeb7ea0b960..99b2bd49f85 100644
--- a/test/integration/volumescheduling/volume_binding_test.go
+++ b/test/integration/volumescheduling/volume_binding_test.go
@@ -31,7 +31,6 @@ import (
 
     v1 "k8s.io/api/core/v1"
     storagev1 "k8s.io/api/storage/v1"
-    storagev1beta1 "k8s.io/api/storage/v1beta1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/rand"
@@ -929,8 +928,8 @@ func TestCapacity(t *testing.T) {
 
             // Create CSIStorageCapacity
             if test.haveCapacity {
-                if _, err := config.client.StorageV1beta1().CSIStorageCapacities("default").Create(context.TODO(),
-                    &storagev1beta1.CSIStorageCapacity{
+                if _, err := config.client.StorageV1().CSIStorageCapacities("default").Create(context.TODO(),
+                    &storagev1.CSIStorageCapacity{
                         ObjectMeta: metav1.ObjectMeta{
                             GenerateName: "foo-",
                         },
@@ -1143,7 +1142,7 @@ func deleteTestObjects(client clientset.Interface, ns string, option metav1.Dele
     client.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), option, metav1.ListOptions{})
     client.StorageV1().StorageClasses().DeleteCollection(context.TODO(), option, metav1.ListOptions{})
     client.StorageV1().CSIDrivers().DeleteCollection(context.TODO(), option, metav1.ListOptions{})
-    client.StorageV1beta1().CSIStorageCapacities("default").DeleteCollection(context.TODO(), option, metav1.ListOptions{})
+    client.StorageV1().CSIStorageCapacities("default").DeleteCollection(context.TODO(), option, metav1.ListOptions{})
 }
 
 func makeStorageClass(name string, mode *storagev1.VolumeBindingMode) *storagev1.StorageClass {
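
Illustrative usage note (not part of the patch): after this change, both the typed client and the shared informer for CSIStorageCapacity come from the storage.k8s.io/v1 group-version rather than v1beta1. The sketch below shows consumer-side access along the same call paths the patch switches to; it assumes an out-of-cluster clientset built from the default kubeconfig, and the package name, resync period, and printed fields are arbitrary choices for the example only.

package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a cluster reachable through the default kubeconfig; adjust for in-cluster use.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Direct client access: StorageV1() replaces the StorageV1beta1() call path removed by this patch.
	capacities, err := client.StorageV1().CSIStorageCapacities(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, c := range capacities.Items {
		fmt.Printf("%s/%s: class=%s capacity=%v\n", c.Namespace, c.Name, c.StorageClassName, c.Capacity)
	}

	// Informer access, mirroring the scheduler's switch from Storage().V1beta1() to Storage().V1().
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	capacityInformer := factory.Storage().V1().CSIStorageCapacities()
	_ = capacityInformer.Informer() // instantiate before Start so the factory actually runs it

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	// The lister serves the same storagev1.CSIStorageCapacity objects the volume binding plugin consumes.
	cached, err := capacityInformer.Lister().List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("informer cache holds %d CSIStorageCapacity objects\n", len(cached))
}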