Merge pull request #83568 from bertinatto/volume_limits_ga

Promote volume limits to GA

commit c580a12c8e
@@ -282,6 +282,8 @@ const (

 	// owner: @gnufied
 	// beta : v1.12
+	// GA : v1.17
+
 	//
 	// Add support for volume plugins to report node specific
 	// volume limits

@@ -523,7 +525,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	ExpandPersistentVolumes:      {Default: true, PreRelease: featuregate.Beta},
 	ExpandInUsePersistentVolumes: {Default: true, PreRelease: featuregate.Beta},
 	ExpandCSIVolumes:             {Default: true, PreRelease: featuregate.Beta},
-	AttachVolumeLimit:            {Default: true, PreRelease: featuregate.Beta},
+	AttachVolumeLimit:            {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19
 	CPUManager:                   {Default: true, PreRelease: featuregate.Beta},
 	CPUCFSQuotaPeriod:            {Default: false, PreRelease: featuregate.Alpha},
 	TopologyManager:              {Default: false, PreRelease: featuregate.Alpha},
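For readers unfamiliar with the gate mechanics: a GA entry with LockToDefault: true reports enabled and can no longer be switched off, which is why the hunks below delete every Enabled(features.AttachVolumeLimit) check as dead code. A minimal sketch of that behavior, using only k8s.io/component-base/featuregate (the gate name is reused from above; everything else is illustrative):

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const AttachVolumeLimit featuregate.Feature = "AttachVolumeLimit"

func main() {
	gate := featuregate.NewFeatureGate()
	// Register the gate the same way the GA entry above does.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		AttachVolumeLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
	}); err != nil {
		panic(err)
	}

	fmt.Println(gate.Enabled(AttachVolumeLimit)) // true

	// Flipping a locked gate is rejected rather than applied, so code
	// paths guarded by the gate can safely be inlined.
	err := gate.SetFromMap(map[string]bool{string(AttachVolumeLimit): false})
	fmt.Println(err != nil) // true: the gate is locked to its default
}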
@@ -31,12 +31,10 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	cloudprovider "k8s.io/cloud-provider"
 	"k8s.io/klog"
 	k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-	"k8s.io/kubernetes/pkg/features"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	"k8s.io/kubernetes/pkg/kubelet/nodestatus"

@@ -548,9 +546,9 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
 		nodestatus.Images(kl.nodeStatusMaxImages, kl.imageManager.GetImageList),
 		nodestatus.GoRuntime(),
 	)
-	if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
+	// Volume limits
 	setters = append(setters, nodestatus.VolumeLimits(kl.volumePluginMgr.ListVolumePluginWithLimits))
-	}
 	setters = append(setters,
 		nodestatus.MemoryPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderMemoryPressure, kl.recordNodeStatusEvent),
 		nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent),
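The kubelet change above makes the volume-limits setter unconditional. As background, each entry appended in defaultNodeStatusFuncs is just a func(*v1.Node) error that fills in one slice of node status. The sketch below imitates what a volume-limits setter does (publish per-plugin attach limits into Capacity and Allocatable); the resource name and limit value are made up for illustration, and the real setter lives in pkg/kubelet/nodestatus:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// Setter mirrors the signature that defaultNodeStatusFuncs appends.
type Setter func(node *v1.Node) error

// volumeLimits is a simplified stand-in for nodestatus.VolumeLimits: it
// copies per-plugin attach limits into the node's Capacity and Allocatable,
// which is what the scheduler later reads back as volume limits.
func volumeLimits(listLimits func() map[v1.ResourceName]int64) Setter {
	return func(node *v1.Node) error {
		if node.Status.Capacity == nil {
			node.Status.Capacity = v1.ResourceList{}
		}
		if node.Status.Allocatable == nil {
			node.Status.Allocatable = v1.ResourceList{}
		}
		for name, limit := range listLimits() {
			q := *resource.NewQuantity(limit, resource.DecimalSI)
			node.Status.Capacity[name] = q
			node.Status.Allocatable[name] = q
		}
		return nil
	}
}

func main() {
	node := &v1.Node{}
	setters := []Setter{
		volumeLimits(func() map[v1.ResourceName]int64 {
			// Hypothetical driver limit for illustration only.
			return map[v1.ResourceName]int64{"attachable-volumes-csi-example": 39}
		}),
	}
	for _, s := range setters {
		if err := s(node); err != nil {
			fmt.Println(err)
		}
	}
	fmt.Println(node.Status.Allocatable)
}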
@@ -12,11 +12,9 @@ go_library(
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/storage:go_default_library",
        "//pkg/apis/storage/validation:go_default_library",
-       "//pkg/features:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
-       "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
 )

@@ -43,12 +41,9 @@ go_test(
    embed = [":go_default_library"],
    deps = [
        "//pkg/apis/storage:go_default_library",
-       "//pkg/features:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
-       "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
-       "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
 )
@@ -22,11 +22,9 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/apiserver/pkg/storage/names"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/apis/storage"
 	"k8s.io/kubernetes/pkg/apis/storage/validation"
-	"k8s.io/kubernetes/pkg/features"
 )

 // csiNodeStrategy implements behavior for CSINode objects

@@ -45,12 +43,6 @@ func (csiNodeStrategy) NamespaceScoped() bool {

 // PrepareForCreate clears fields that are not allowed to be set on creation.
 func (csiNodeStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
-	csiNode := obj.(*storage.CSINode)
-	if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
-		for i := range csiNode.Spec.Drivers {
-			csiNode.Spec.Drivers[i].Allocatable = nil
-		}
-	}
 }

 func (csiNodeStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {

@@ -72,31 +64,6 @@ func (csiNodeStrategy) AllowCreateOnUpdate() bool {

 // PrepareForUpdate sets the driver's Allocatable fields that are not allowed to be set by an end user updating a CSINode.
 func (csiNodeStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
-	newCSINode := obj.(*storage.CSINode)
-	oldCSINode := old.(*storage.CSINode)
-
-	inUse := getAllocatablesInUse(oldCSINode)
-
-	if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
-		for i := range newCSINode.Spec.Drivers {
-			if !inUse[newCSINode.Spec.Drivers[i].Name] {
-				newCSINode.Spec.Drivers[i].Allocatable = nil
-			}
-		}
-	}
-}
-
-func getAllocatablesInUse(obj *storage.CSINode) map[string]bool {
-	inUse := make(map[string]bool)
-	if obj == nil {
-		return inUse
-	}
-	for i := range obj.Spec.Drivers {
-		if obj.Spec.Drivers[i].Allocatable != nil {
-			inUse[obj.Spec.Drivers[i].Name] = true
-		}
-	}
-	return inUse
 }

 func (csiNodeStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
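Both deleted bodies implemented the standard registry-strategy pattern for gated fields: on create, drop Allocatable while the gate is off; on update, additionally preserve any Allocatable already persisted on the old object so that toggling the gate off could not silently erase stored data. With the gate locked on, both branches are unreachable. A self-contained sketch of that pre-GA pattern, using illustrative types rather than the real storage.CSINode:

package main

import "fmt"

// Illustrative stand-ins for storage.CSINode's driver list.
type Driver struct {
	Name        string
	Allocatable *int32
}

type Spec struct{ Drivers []Driver }

// prepareForCreate mirrors the deleted create-side logic: gated fields
// are dropped whenever the feature is off.
func prepareForCreate(spec *Spec, gateEnabled bool) {
	if gateEnabled {
		return
	}
	for i := range spec.Drivers {
		spec.Drivers[i].Allocatable = nil
	}
}

// prepareForUpdate mirrors the deleted update-side logic: fields already
// in use on the old object survive, everything else is dropped.
func prepareForUpdate(newSpec, oldSpec *Spec, gateEnabled bool) {
	if gateEnabled {
		return
	}
	inUse := map[string]bool{}
	for _, d := range oldSpec.Drivers {
		if d.Allocatable != nil {
			inUse[d.Name] = true
		}
	}
	for i := range newSpec.Drivers {
		if !inUse[newSpec.Drivers[i].Name] {
			newSpec.Drivers[i].Allocatable = nil
		}
	}
}

func main() {
	n := int32(16)
	oldSpec := &Spec{Drivers: []Driver{{Name: "csi.example.com", Allocatable: &n}}}
	newSpec := &Spec{Drivers: []Driver{{Name: "csi.example.com", Allocatable: &n}}}
	prepareForUpdate(newSpec, oldSpec, false)
	fmt.Println(newSpec.Drivers[0].Allocatable != nil) // true: persisted limits survive
}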
@@ -23,10 +23,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/apis/storage"
-	"k8s.io/kubernetes/pkg/features"
 	utilpointer "k8s.io/utils/pointer"
 )

@@ -47,7 +44,7 @@ func TestPrepareForCreate(t *testing.T) {
 		},
 	}

-	volumeLimitsEnabledCases := []struct {
+	volumeLimitsCases := []struct {
 		name     string
 		obj      *storage.CSINode
 		expected *storage.CSINode

@@ -64,32 +61,7 @@
 		},
 	}

-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-	for _, test := range volumeLimitsEnabledCases {
-		t.Run(test.name, func(t *testing.T) {
-			testPrepareForCreate(t, test.obj, test.expected)
-		})
-	}
-
-	volumeLimitsDisabledCases := []struct {
-		name     string
-		obj      *storage.CSINode
-		expected *storage.CSINode
-	}{
-		{
-			"empty allocatable",
-			emptyAllocatable,
-			emptyAllocatable,
-		},
-		{
-			"drop allocatable",
-			valid,
-			emptyAllocatable,
-		},
-	}
-
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, false)()
-	for _, test := range volumeLimitsDisabledCases {
+	for _, test := range volumeLimitsCases {
 		t.Run(test.name, func(t *testing.T) {
 			testPrepareForCreate(t, test.obj, test.expected)
 		})

@@ -140,7 +112,7 @@ func TestPrepareForUpdate(t *testing.T) {
 		},
 	}

-	volumeLimitsEnabledCases := []struct {
+	volumeLimitsCases := []struct {
 		name     string
 		old      *storage.CSINode
 		new      *storage.CSINode

@@ -166,35 +138,7 @@
 		},
 	}

-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-	for _, test := range volumeLimitsEnabledCases {
-		t.Run(test.name, func(t *testing.T) {
-			testPrepareForUpdate(t, test.new, test.old, test.expected)
-		})
-	}
-
-	volumeLimitsDisabledCases := []struct {
-		name     string
-		old      *storage.CSINode
-		new      *storage.CSINode
-		expected *storage.CSINode
-	}{
-		{
-			"allow empty allocatable when it's not set",
-			emptyAllocatable,
-			emptyAllocatable,
-			emptyAllocatable,
-		},
-		{
-			"drop allocatable when it's not set",
-			emptyAllocatable,
-			valid,
-			emptyAllocatable,
-		},
-	}
-
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, false)()
-	for _, test := range volumeLimitsDisabledCases {
+	for _, test := range volumeLimitsCases {
 		t.Run(test.name, func(t *testing.T) {
 			testPrepareForUpdate(t, test.new, test.old, test.expected)
 		})
@@ -48,13 +48,11 @@ go_library(
        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
-       "//staging/src/k8s.io/client-go/informers/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
-       "//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",

@@ -28,7 +28,6 @@ go_library(
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
-       "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",

@@ -37,7 +36,6 @@ go_library(
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
-       "//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
        "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
        "//staging/src/k8s.io/csi-translation-lib:go_default_library",

@@ -67,7 +65,6 @@ go_test(
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
-       "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -20,16 +20,13 @@ import (
 	"fmt"

 	v1 "k8s.io/api/core/v1"
-	storagev1beta1 "k8s.io/api/storage/v1beta1"
+	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	storagelisters "k8s.io/client-go/listers/storage/v1"
-	v1beta1storagelisters "k8s.io/client-go/listers/storage/v1beta1"
 	csitrans "k8s.io/csi-translation-lib"
 	"k8s.io/klog"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-	"k8s.io/kubernetes/pkg/features"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

@@ -46,7 +43,7 @@ type InTreeToCSITranslator interface {

 // CSIMaxVolumeLimitChecker defines predicate needed for counting CSI volumes
 type CSIMaxVolumeLimitChecker struct {
-	csiNodeLister v1beta1storagelisters.CSINodeLister
+	csiNodeLister storagelisters.CSINodeLister
 	pvLister      corelisters.PersistentVolumeLister
 	pvcLister     corelisters.PersistentVolumeClaimLister
 	scLister      storagelisters.StorageClassLister

@@ -58,7 +55,7 @@ type CSIMaxVolumeLimitChecker struct {

 // NewCSIMaxVolumeLimitPredicate returns a predicate for counting CSI volumes
 func NewCSIMaxVolumeLimitPredicate(
-	csiNodeLister v1beta1storagelisters.CSINodeLister, pvLister corelisters.PersistentVolumeLister, pvcLister corelisters.PersistentVolumeClaimLister, scLister storagelisters.StorageClassLister) FitPredicate {
+	csiNodeLister storagelisters.CSINodeLister, pvLister corelisters.PersistentVolumeLister, pvcLister corelisters.PersistentVolumeClaimLister, scLister storagelisters.StorageClassLister) FitPredicate {
 	c := &CSIMaxVolumeLimitChecker{
 		csiNodeLister: csiNodeLister,
 		pvLister:      pvLister,

@@ -70,7 +67,7 @@ func NewCSIMaxVolumeLimitPredicate(
 	return c.attachableLimitPredicate
 }

-func getVolumeLimits(nodeInfo *schedulernodeinfo.NodeInfo, csiNode *storagev1beta1.CSINode) map[v1.ResourceName]int64 {
+func getVolumeLimits(nodeInfo *schedulernodeinfo.NodeInfo, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 {
 	// TODO: stop getting values from Node object in v1.18
 	nodeVolumeLimits := nodeInfo.VolumeLimits()
 	if csiNode != nil {
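getVolumeLimits, now taking the GA storagev1.CSINode, merges two sources: the limits the node advertises in status.Allocatable and the per-driver counts from the CSINode object. A rough, self-contained sketch of that merge; the resource-name key below is simplified (the real code derives it through an attach-limit helper in pkg/apis/core/v1/helper):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
)

// mergeVolumeLimits approximates getVolumeLimits: start from the
// node-reported limits, then let CSINode's per-driver Allocatable.Count
// take precedence.
func mergeVolumeLimits(nodeLimits map[v1.ResourceName]int64, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 {
	out := make(map[v1.ResourceName]int64, len(nodeLimits))
	for k, v := range nodeLimits {
		out[k] = v
	}
	if csiNode == nil {
		return out
	}
	for _, d := range csiNode.Spec.Drivers {
		if d.Allocatable == nil || d.Allocatable.Count == nil {
			continue
		}
		// Simplified key; assumed shape, not the exact helper output.
		out[v1.ResourceName("attachable-volumes-csi-"+d.Name)] = int64(*d.Allocatable.Count)
	}
	return out
}

func main() {
	count := int32(24)
	csiNode := &storagev1.CSINode{
		Spec: storagev1.CSINodeSpec{Drivers: []storagev1.CSINodeDriver{
			{Name: "csi.example.com", Allocatable: &storagev1.VolumeNodeResources{Count: &count}},
		}},
	}
	limits := map[v1.ResourceName]int64{"attachable-volumes-csi-csi.example.com": 39}
	fmt.Println(mergeVolumeLimits(limits, csiNode)) // CSINode's 24 overrides the node's 39
}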
@@ -93,10 +90,6 @@ func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
 		return true, nil, nil
 	}

-	if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
-		return true, nil, nil
-	}
-
 	node := nodeInfo.Node()
 	if node == nil {
 		return false, nil, fmt.Errorf("node not found")

@@ -160,7 +153,7 @@
 }

 func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
-	csiNode *storagev1beta1.CSINode, volumes []v1.Volume, namespace string, result map[string]string) error {
+	csiNode *storagev1.CSINode, volumes []v1.Volume, namespace string, result map[string]string) error {
 	for _, vol := range volumes {
 		// CSI volumes can only be used as persistent volumes
 		if vol.PersistentVolumeClaim == nil {

@@ -195,7 +188,7 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
 // getCSIDriverInfo returns the CSI driver name and volume ID of a given PVC.
 // If the PVC is from a migrated in-tree plugin, this function will return
 // the information of the CSI driver that the plugin has been migrated to.
-func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1beta1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
+func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
 	pvName := pvc.Spec.VolumeName
 	namespace := pvc.Namespace
 	pvcName := pvc.Name

@@ -250,7 +243,7 @@ func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1beta1.CSIN
 }

 // getCSIDriverInfoFromSC returns the CSI driver name and a random volume ID of a given PVC's StorageClass.
-func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfoFromSC(csiNode *storagev1beta1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
+func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfoFromSC(csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
 	namespace := pvc.Namespace
 	pvcName := pvc.Name
 	scName := v1helper.GetPersistentVolumeClaimClass(pvc)
@@ -23,7 +23,7 @@ import (
 	"testing"

 	v1 "k8s.io/api/core/v1"
-	storagev1beta1 "k8s.io/api/storage/v1beta1"
+	storagev1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"

@@ -434,7 +434,6 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
 		},
 	}

-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
 	// running attachable predicate tests with feature gate and limit present on nodes
 	for _, test := range tests {
 		t.Run(test.test, func(t *testing.T) {

@@ -547,7 +546,7 @@ func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) fakel
 	return pvcLister
 }

-func enableMigrationOnNode(csiNode *storagev1beta1.CSINode, pluginName string) {
+func enableMigrationOnNode(csiNode *storagev1.CSINode, pluginName string) {
 	nodeInfoAnnotations := csiNode.GetAnnotations()
 	if nodeInfoAnnotations == nil {
 		nodeInfoAnnotations = map[string]string{}

@@ -570,7 +569,7 @@ func getFakeCSIStorageClassLister(scName, provisionerName string) fakelisters.St
 	}
 }

-func getFakeCSINodeLister(csiNode *storagev1beta1.CSINode) fakelisters.CSINodeLister {
+func getFakeCSINodeLister(csiNode *storagev1.CSINode) fakelisters.CSINodeLister {
 	if csiNode != nil {
 		return fakelisters.CSINodeLister(*csiNode)
 	}
@@ -19,18 +19,14 @@ package predicates
 import (
 	"os"
 	"reflect"
-	"strconv"
 	"strings"
 	"testing"

 	v1 "k8s.io/api/core/v1"
-	"k8s.io/api/storage/v1beta1"
+	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	csilibplugins "k8s.io/csi-translation-lib/plugins"
-	"k8s.io/kubernetes/pkg/features"
 	fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	volumeutil "k8s.io/kubernetes/pkg/volume/util"

@@ -848,31 +844,6 @@ func TestVolumeCountConflicts(t *testing.T) {

 	expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}

-	// running attachable predicate tests without feature gate and no limit present on nodes
-	for _, test := range tests {
-		os.Setenv(KubeMaxPDVols, strconv.Itoa(test.maxVols))
-		node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
-		pred := NewMaxPDVolumeCountPredicate(test.filterName,
-			getFakeCSINodeLister(csiNode),
-			getFakeStorageClassLister(test.filterName),
-			getFakePVLister(test.filterName),
-			getFakePVCLister(test.filterName))
-
-		factory := &MetadataProducerFactory{}
-		fits, reasons, err := pred(test.newPod, factory.GetPredicateMetadata(test.newPod, nil), node)
-		if err != nil {
-			t.Errorf("[%s]%s: unexpected error: %v", test.filterName, test.test, err)
-		}
-		if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
-			t.Errorf("[%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
-		}
-		if fits != test.fits {
-			t.Errorf("[%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
-		}
-	}
-
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-
 	// running attachable predicate tests with feature gate and limit present on nodes
 	for _, test := range tests {
 		node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
@@ -1104,7 +1075,7 @@ func TestMaxVolumeFuncM4WithBothBetaAndStableLabels(t *testing.T) {
 	}
 }

-func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *v1beta1.CSINode) {
+func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *storagev1.CSINode) {
 	nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
 	node := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},

@@ -1112,7 +1083,7 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 			Allocatable: v1.ResourceList{},
 		},
 	}
-	var csiNode *v1beta1.CSINode
+	var csiNode *storagev1.CSINode

 	addLimitToNode := func() {
 		for _, driver := range driverNames {

@@ -1121,10 +1092,10 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 	}

 	initCSINode := func() {
-		csiNode = &v1beta1.CSINode{
+		csiNode = &storagev1.CSINode{
 			ObjectMeta: metav1.ObjectMeta{Name: "csi-node-for-max-pd-test-1"},
-			Spec: v1beta1.CSINodeSpec{
-				Drivers: []v1beta1.CSINodeDriver{},
+			Spec: storagev1.CSINodeSpec{
+				Drivers: []storagev1.CSINodeDriver{},
 			},
 		}
 	}

@@ -1132,12 +1103,12 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 	addDriversCSINode := func(addLimits bool) {
 		initCSINode()
 		for _, driver := range driverNames {
-			driver := v1beta1.CSINodeDriver{
+			driver := storagev1.CSINodeDriver{
 				Name:   driver,
 				NodeID: "node-for-max-pd-test-1",
 			}
 			if addLimits {
-				driver.Allocatable = &v1beta1.VolumeNodeResources{
+				driver.Allocatable = &storagev1.VolumeNodeResources{
 					Count: utilpointer.Int32Ptr(int32(limit)),
 				}
 			}
@@ -27,7 +27,6 @@ import (

 	v1 "k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1"
-	v1beta1storage "k8s.io/api/storage/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"

@@ -36,7 +35,6 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	storagelisters "k8s.io/client-go/listers/storage/v1"
-	v1beta1storagelisters "k8s.io/client-go/listers/storage/v1beta1"
 	volumehelpers "k8s.io/cloud-provider/volume/helpers"
 	csilibplugins "k8s.io/csi-translation-lib/plugins"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"

@@ -225,7 +223,7 @@ type MaxPDVolumeCountChecker struct {
 	filter         VolumeFilter
 	volumeLimitKey v1.ResourceName
 	maxVolumeFunc  func(node *v1.Node) int
-	csiNodeLister  v1beta1storagelisters.CSINodeLister
+	csiNodeLister  storagelisters.CSINodeLister
 	pvLister       corelisters.PersistentVolumeLister
 	pvcLister      corelisters.PersistentVolumeClaimLister
 	scLister       storagelisters.StorageClassLister

@@ -244,7 +242,7 @@ type VolumeFilter struct {
 	// MatchProvisioner evaluates if the StorageClass provisioner matches the running predicate
 	MatchProvisioner func(sc *storage.StorageClass) (relevant bool)
 	// IsMigrated returns a boolean specifying whether the plugin is migrated to a CSI driver
-	IsMigrated func(csiNode *v1beta1storage.CSINode) bool
+	IsMigrated func(csiNode *storage.CSINode) bool
 }

 // NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the

@@ -257,7 +255,7 @@ type VolumeFilter struct {
 // The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume
 // types, counts the number of unique volumes, and rejects the new pod if it would place the total count over
 // the maximum.
-func NewMaxPDVolumeCountPredicate(filterName string, csiNodeLister v1beta1storagelisters.CSINodeLister, scLister storagelisters.StorageClassLister,
+func NewMaxPDVolumeCountPredicate(filterName string, csiNodeLister storagelisters.CSINodeLister, scLister storagelisters.StorageClassLister,
 	pvLister corelisters.PersistentVolumeLister, pvcLister corelisters.PersistentVolumeClaimLister) FitPredicate {
 	var filter VolumeFilter
 	var volumeLimitKey v1.ResourceName

@@ -441,7 +439,7 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo
 	}

 	var (
-		csiNode *v1beta1storage.CSINode
+		csiNode *storage.CSINode
 		err     error
 	)
 	if c.csiNodeLister != nil {
@@ -477,12 +475,10 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo
 	numNewVolumes := len(newVolumes)
 	maxAttachLimit := c.maxVolumeFunc(node)

-	if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
 	volumeLimits := nodeInfo.VolumeLimits()
 	if maxAttachLimitFromAllocatable, ok := volumeLimits[c.volumeLimitKey]; ok {
 		maxAttachLimit = int(maxAttachLimitFromAllocatable)
 	}
-	}

 	if numExistingVolumes+numNewVolumes > maxAttachLimit {
 		// violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
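With the gate check gone, the limit resolution above is unconditional: the legacy default from maxVolumeFunc (cloud-specific, or the KUBE_MAX_PD_VOLS override) is used only when the node has not published a limit in its Allocatable. A condensed, runnable sketch of that precedence:

package main

import "fmt"

// resolveAttachLimit condenses the post-GA logic: a limit found in the
// node's Allocatable always wins over the legacy per-cloud default.
func resolveAttachLimit(legacyDefault int, allocatable map[string]int64, key string) int {
	if v, ok := allocatable[key]; ok {
		return int(v)
	}
	return legacyDefault
}

func main() {
	// Key shown matches the EBS attach-limit resource name used in this area of the tree.
	limits := map[string]int64{"attachable-volumes-aws-ebs": 25}
	fmt.Println(resolveAttachLimit(39, limits, "attachable-volumes-aws-ebs")) // 25
	fmt.Println(resolveAttachLimit(39, nil, "attachable-volumes-aws-ebs"))    // 39
}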
@@ -520,7 +516,7 @@ var EBSVolumeFilter = VolumeFilter{
 		return false
 	},

-	IsMigrated: func(csiNode *v1beta1storage.CSINode) bool {
+	IsMigrated: func(csiNode *storage.CSINode) bool {
 		return isCSIMigrationOn(csiNode, csilibplugins.AWSEBSInTreePluginName)
 	},
 }

@@ -548,7 +544,7 @@ var GCEPDVolumeFilter = VolumeFilter{
 		return false
 	},

-	IsMigrated: func(csiNode *v1beta1storage.CSINode) bool {
+	IsMigrated: func(csiNode *storage.CSINode) bool {
 		return isCSIMigrationOn(csiNode, csilibplugins.GCEPDInTreePluginName)
 	},
 }

@@ -576,7 +572,7 @@ var AzureDiskVolumeFilter = VolumeFilter{
 		return false
 	},

-	IsMigrated: func(csiNode *v1beta1storage.CSINode) bool {
+	IsMigrated: func(csiNode *storage.CSINode) bool {
 		return isCSIMigrationOn(csiNode, csilibplugins.AzureDiskInTreePluginName)
 	},
 }

@@ -605,7 +601,7 @@ var CinderVolumeFilter = VolumeFilter{
 		return false
 	},

-	IsMigrated: func(csiNode *v1beta1storage.CSINode) bool {
+	IsMigrated: func(csiNode *storage.CSINode) bool {
 		return isCSIMigrationOn(csiNode, csilibplugins.CinderInTreePluginName)
 	},
 }
@@ -19,8 +19,8 @@ package predicates
 import (
 	"strings"

-	"k8s.io/api/core/v1"
-	storagev1beta1 "k8s.io/api/storage/v1beta1"
+	v1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"

@@ -97,7 +97,7 @@ func SetPredicatesOrderingDuringTest(value []string) func() {

 // isCSIMigrationOn returns a boolean value indicating whether
 // the CSI migration has been enabled for a particular storage plugin.
-func isCSIMigrationOn(csiNode *storagev1beta1.CSINode, pluginName string) bool {
+func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string) bool {
 	if csiNode == nil || len(pluginName) == 0 {
 		return false
 	}
@@ -29,7 +29,6 @@ import (
 	corelisters "k8s.io/client-go/listers/core/v1"
 	policylisters "k8s.io/client-go/listers/policy/v1beta1"
 	storagelisters "k8s.io/client-go/listers/storage/v1"
-	v1beta1storagelisters "k8s.io/client-go/listers/storage/v1beta1"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"

@@ -53,7 +52,7 @@ type PluginFactoryArgs struct {
 	ReplicaSetLister   appslisters.ReplicaSetLister
 	StatefulSetLister  appslisters.StatefulSetLister
 	PDBLister          policylisters.PodDisruptionBudgetLister
-	CSINodeLister      v1beta1storagelisters.CSINodeLister
+	CSINodeLister      storagelisters.CSINodeLister
 	PVLister           corelisters.PersistentVolumeLister
 	PVCLister          corelisters.PersistentVolumeClaimLister
 	StorageClassLister storagelisters.StorageClassLister

@@ -1169,30 +1169,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				},
 			},
 		},
-		{
-			name: "disable beta feature AttachVolumeLimit",
-			JSON: `{
-				"kind": "Policy",
-				"apiVersion": "v1",
-				"predicates": [
-					{"name": "MaxCSIVolumeCountPred"},
-					{"name": "CheckVolumeBinding"}
-				],
-				"priorities": [
-				]
-			}`,
-			featureGates: map[featuregate.Feature]bool{
-				features.AttachVolumeLimit: false,
-			},
-			wantPlugins: map[string][]config.Plugin{
-				"FilterPlugin": {
-					{Name: "NodeUnschedulable"},
-					{Name: "TaintToleration"},
-					{Name: "NodeVolumeLimits"},
-					{Name: "VolumeBinding"},
-				},
-			},
-		},
 	}
 	registeredPredicates := sets.NewString(scheduler.ListRegisteredFitPredicates()...)
 	registeredPriorities := sets.NewString(scheduler.ListRegisteredPriorityFunctions()...)
@@ -34,13 +34,11 @@ import (
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	policyinformers "k8s.io/client-go/informers/policy/v1beta1"
 	storageinformersv1 "k8s.io/client-go/informers/storage/v1"
-	storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1"
 	clientset "k8s.io/client-go/kubernetes"
 	appslisters "k8s.io/client-go/listers/apps/v1"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	policylisters "k8s.io/client-go/listers/policy/v1beta1"
 	storagelistersv1 "k8s.io/client-go/listers/storage/v1"
-	storagelistersv1beta1 "k8s.io/client-go/listers/storage/v1beta1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/features"

@@ -93,7 +91,7 @@ type Configurator struct {
 	// a means to list all StorageClasses
 	storageClassLister storagelistersv1.StorageClassLister
 	// a means to list all CSINodes
-	csiNodeLister storagelistersv1beta1.CSINodeLister
+	csiNodeLister storagelistersv1.CSINodeLister
 	// a means to list all Nodes
 	nodeLister corelisters.NodeLister
 	// a means to list all Pods

@@ -154,7 +152,7 @@ type ConfigFactoryArgs struct {
 	ServiceInformer      coreinformers.ServiceInformer
 	PdbInformer          policyinformers.PodDisruptionBudgetInformer
 	StorageClassInformer storageinformersv1.StorageClassInformer
-	CSINodeInformer      storageinformersv1beta1.CSINodeInformer
+	CSINodeInformer      storageinformersv1.CSINodeInformer
 	VolumeBinder         *volumebinder.VolumeBinder
 	SchedulerCache       internalcache.Cache
 	HardPodAffinitySymmetricWeight int32

@@ -184,7 +182,7 @@ func NewConfigFactory(args *ConfigFactoryArgs) *Configurator {
 		storageClassLister = args.StorageClassInformer.Lister()
 	}

-	var csiNodeLister storagelistersv1beta1.CSINodeLister
+	var csiNodeLister storagelistersv1.CSINodeLister
 	if args.CSINodeInformer != nil {
 		csiNodeLister = args.CSINodeInformer.Lister()
 	}

@@ -552,7 +552,7 @@ func newConfigFactoryWithFrameworkRegistry(
 		ServiceInformer:      informerFactory.Core().V1().Services(),
 		PdbInformer:          informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
 		StorageClassInformer: informerFactory.Storage().V1().StorageClasses(),
-		CSINodeInformer:      informerFactory.Storage().V1beta1().CSINodes(),
+		CSINodeInformer:      informerFactory.Storage().V1().CSINodes(),
 		HardPodAffinitySymmetricWeight: hardPodAffinitySymmetricWeight,
 		DisablePreemption:              disablePodPreemption,
 		PercentageOfNodesToScore:       schedulerapi.DefaultPercentageOfNodesToScore,
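The factory hunks swap every v1beta1 CSINode informer and lister for storage/v1; nothing else about the wiring changes. A minimal sketch against a fake clientset (client-go 0.17 or later, where Storage().V1().CSINodes() exists):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)

	// Post-PR wiring: the v1 informer replaces Storage().V1beta1().CSINodes().
	csiNodeLister := factory.Storage().V1().CSINodes().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	csiNodes, err := csiNodeLister.List(labels.Everything())
	fmt.Println(len(csiNodes), err) // 0 <nil> on an empty fake cluster
}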
@@ -22,7 +22,7 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
-       "//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
+       "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
    ],
 )

@@ -44,7 +44,7 @@ go_test(
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
-       "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
+       "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@@ -22,9 +22,6 @@ import (
 	"testing"

 	v1 "k8s.io/api/core/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )

@@ -355,8 +352,6 @@ func TestAzureDiskLimits(t *testing.T) {
 		},
 	}

-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-
 	for _, test := range tests {
 		t.Run(test.test, func(t *testing.T) {
 			node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)

@@ -22,9 +22,6 @@ import (
 	"testing"

 	v1 "k8s.io/api/core/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )

@@ -84,8 +81,6 @@ func TestCinderLimits(t *testing.T) {
 		},
 	}

-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-
 	for _, test := range tests {
 		t.Run(test.test, func(t *testing.T) {
 			node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
|
@ -24,8 +24,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
v1 "k8s.io/api/core/v1"
|
v1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/api/storage/v1beta1"
|
storagev1 "k8s.io/api/storage/v1"
|
||||||
storagev1beta1 "k8s.io/api/storage/v1beta1"
|
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/util/sets"
|
"k8s.io/apimachinery/pkg/util/sets"
|
||||||
@ -447,7 +446,6 @@ func TestCSILimits(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
|
|
||||||
// running attachable predicate tests with feature gate and limit present on nodes
|
// running attachable predicate tests with feature gate and limit present on nodes
|
||||||
for _, test := range tests {
|
for _, test := range tests {
|
||||||
t.Run(test.test, func(t *testing.T) {
|
t.Run(test.test, func(t *testing.T) {
|
||||||
@ -546,7 +544,7 @@ func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) fakel
|
|||||||
return pvcLister
|
return pvcLister
|
||||||
}
|
}
|
||||||
|
|
||||||
func enableMigrationOnNode(csiNode *storagev1beta1.CSINode, pluginName string) {
|
func enableMigrationOnNode(csiNode *storagev1.CSINode, pluginName string) {
|
||||||
nodeInfoAnnotations := csiNode.GetAnnotations()
|
nodeInfoAnnotations := csiNode.GetAnnotations()
|
||||||
if nodeInfoAnnotations == nil {
|
if nodeInfoAnnotations == nil {
|
||||||
nodeInfoAnnotations = map[string]string{}
|
nodeInfoAnnotations = map[string]string{}
|
||||||
@ -569,14 +567,14 @@ func getFakeCSIStorageClassLister(scName, provisionerName string) fakelisters.St
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func getFakeCSINodeLister(csiNode *storagev1beta1.CSINode) fakelisters.CSINodeLister {
|
func getFakeCSINodeLister(csiNode *storagev1.CSINode) fakelisters.CSINodeLister {
|
||||||
if csiNode != nil {
|
if csiNode != nil {
|
||||||
return fakelisters.CSINodeLister(*csiNode)
|
return fakelisters.CSINodeLister(*csiNode)
|
||||||
}
|
}
|
||||||
return fakelisters.CSINodeLister{}
|
return fakelisters.CSINodeLister{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *v1beta1.CSINode) {
|
func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *storagev1.CSINode) {
|
||||||
nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
|
nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
|
||||||
node := &v1.Node{
|
node := &v1.Node{
|
||||||
ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
|
ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
|
||||||
@ -584,7 +582,7 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
|
|||||||
Allocatable: v1.ResourceList{},
|
Allocatable: v1.ResourceList{},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
var csiNode *v1beta1.CSINode
|
var csiNode *storagev1.CSINode
|
||||||
|
|
||||||
addLimitToNode := func() {
|
addLimitToNode := func() {
|
||||||
for _, driver := range driverNames {
|
for _, driver := range driverNames {
|
||||||
@@ -593,10 +591,10 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 	}
 
 	initCSINode := func() {
-		csiNode = &v1beta1.CSINode{
+		csiNode = &storagev1.CSINode{
 			ObjectMeta: metav1.ObjectMeta{Name: "csi-node-for-max-pd-test-1"},
-			Spec: v1beta1.CSINodeSpec{
-				Drivers: []v1beta1.CSINodeDriver{},
+			Spec: storagev1.CSINodeSpec{
+				Drivers: []storagev1.CSINodeDriver{},
 			},
 		}
 	}
@@ -604,12 +602,12 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 	addDriversCSINode := func(addLimits bool) {
 		initCSINode()
 		for _, driver := range driverNames {
-			driver := v1beta1.CSINodeDriver{
+			driver := storagev1.CSINodeDriver{
 				Name:   driver,
 				NodeID: "node-for-max-pd-test-1",
 			}
 			if addLimits {
-				driver.Allocatable = &v1beta1.VolumeNodeResources{
+				driver.Allocatable = &storagev1.VolumeNodeResources{
 					Count: utilpointer.Int32Ptr(int32(limit)),
 				}
 			}
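
Note: the test helpers above now build the GA storage.k8s.io/v1 objects end to end. A minimal sketch of the resulting CSINode shape, using only types that appear in this diff (values illustrative):

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilpointer "k8s.io/utils/pointer"
)

func main() {
	// CSINode as kubelet publishes it: Allocatable.Count per driver is the
	// per-node attach limit the scheduler's volume-limits predicate consumes.
	csiNode := storagev1.CSINode{
		ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
		Spec: storagev1.CSINodeSpec{
			Drivers: []storagev1.CSINodeDriver{{
				Name:        "hostpath.csi.k8s.io",
				NodeID:      "node-for-max-pd-test-1",
				Allocatable: &storagev1.VolumeNodeResources{Count: utilpointer.Int32Ptr(10)},
			}},
		},
	}
	fmt.Printf("%s allows %d volumes\n", csiNode.Spec.Drivers[0].Name, *csiNode.Spec.Drivers[0].Allocatable.Count)
}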
@@ -19,14 +19,14 @@ package nodevolumelimits

 import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
-	v1beta1 "k8s.io/client-go/listers/storage/v1beta1"
+	storagelisters "k8s.io/client-go/listers/storage/v1"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 )
 
 // getCSINodeListerIfEnabled returns the CSINode lister or nil if the feature is disabled
-func getCSINodeListerIfEnabled(factory informers.SharedInformerFactory) v1beta1.CSINodeLister {
+func getCSINodeListerIfEnabled(factory informers.SharedInformerFactory) storagelisters.CSINodeLister {
 	if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CSINodeInfo) {
 		return nil
 	}
-	return factory.Storage().V1beta1().CSINodes().Lister()
+	return factory.Storage().V1().CSINodes().Lister()
 }
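
Note: callers receive the v1 lister, or nil when CSINodeInfo is off. A hedged usage sketch, assuming the helper above is in scope and using a fake client for brevity:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	lister := getCSINodeListerIfEnabled(factory)
	if lister == nil {
		fmt.Println("CSINodeInfo disabled: no CSINode lookups for volume limits")
		return
	}
	csiNode, err := lister.Get("node-1") // *storagev1.CSINode from the v1 informer cache
	fmt.Println(csiNode, err)
}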
@@ -24,10 +24,7 @@ import (

 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	csilibplugins "k8s.io/csi-translation-lib/plugins"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
@@ -469,8 +466,6 @@ func TestEBSLimits(t *testing.T) {
 		},
 	}
 
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-
 	for _, test := range tests {
 		t.Run(test.test, func(t *testing.T) {
 			node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
@@ -22,9 +22,6 @@ import (
 	"testing"

 	v1 "k8s.io/api/core/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
@@ -355,8 +352,6 @@ func TestGCEPDLimits(t *testing.T) {
 		},
 	}
 
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-
 	for _, test := range tests {
 		t.Run(test.test, func(t *testing.T) {
 			node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
@@ -11,13 +11,11 @@ go_library(
 		"//staging/src/k8s.io/api/apps/v1:go_default_library",
 		"//staging/src/k8s.io/api/core/v1:go_default_library",
 		"//staging/src/k8s.io/api/storage/v1:go_default_library",
-		"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
 		"//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
 		"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
 		"//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
-		"//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
 	],
 )
@@ -22,13 +22,11 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
-	storagev1beta1 "k8s.io/api/storage/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	appslisters "k8s.io/client-go/listers/apps/v1"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	storagelisters "k8s.io/client-go/listers/storage/v1"
-	v1beta1storagelisters "k8s.io/client-go/listers/storage/v1beta1"
 	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -280,19 +278,19 @@ func NewNodeInfoLister(nodes []*v1.Node) schedulerlisters.NodeInfoLister {
 	return NodeInfoLister(nodeInfoList)
 }
 
-var _ v1beta1storagelisters.CSINodeLister = CSINodeLister{}
+var _ storagelisters.CSINodeLister = CSINodeLister{}
 
-// CSINodeLister declares a storagev1beta1.CSINode type for testing.
-type CSINodeLister storagev1beta1.CSINode
+// CSINodeLister declares a storagev1.CSINode type for testing.
+type CSINodeLister storagev1.CSINode
 
 // Get returns a fake CSINode object.
-func (n CSINodeLister) Get(name string) (*storagev1beta1.CSINode, error) {
-	csiNode := storagev1beta1.CSINode(n)
+func (n CSINodeLister) Get(name string) (*storagev1.CSINode, error) {
+	csiNode := storagev1.CSINode(n)
 	return &csiNode, nil
 }
 
 // List lists all CSINodes in the indexer.
-func (n CSINodeLister) List(selector labels.Selector) (ret []*storagev1beta1.CSINode, err error) {
+func (n CSINodeLister) List(selector labels.Selector) (ret []*storagev1.CSINode, err error) {
 	return nil, fmt.Errorf("not implemented")
 }
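
Note: the `var _ storagelisters.CSINodeLister = CSINodeLister{}` line is a compile-time conformance check; if the fake drifts from the v1 lister interface, the build fails on that line. The idiom in isolation:

package main

type Lister interface {
	Get(name string) (string, error)
}

type fakeLister struct{}

func (fakeLister) Get(name string) (string, error) { return "csinode/" + name, nil }

// Compile-time assertion: deleting or mis-typing Get above breaks the build
// here, long before any test runs.
var _ Lister = fakeLister{}

func main() {}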
@@ -33,7 +33,7 @@ import (
 	"k8s.io/client-go/informers"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	policyv1beta1informers "k8s.io/client-go/informers/policy/v1beta1"
-	storagev1beta1informers "k8s.io/client-go/informers/storage/v1beta1"
+	storageinformers "k8s.io/client-go/informers/storage/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/events"
@@ -292,9 +292,9 @@ func New(client clientset.Interface,
 		pdbInformer = informerFactory.Policy().V1beta1().PodDisruptionBudgets()
 	}
 
-	var csiNodeInformer storagev1beta1informers.CSINodeInformer
+	var csiNodeInformer storageinformers.CSINodeInformer
 	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CSINodeInfo) {
-		csiNodeInformer = informerFactory.Storage().V1beta1().CSINodes()
+		csiNodeInformer = informerFactory.Storage().V1().CSINodes()
 	}
 
 	// Set up the configurator which can create schedulers from configs.
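
Note: the scheduler now watches storage.k8s.io/v1 CSINodes. A minimal sketch of driving that informer outside the scheduler, with a fake client standing in for a real one:

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	client := fake.NewSimpleClientset()
	stopCh := make(chan struct{})
	defer close(stopCh)

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	// Instantiate the informer before Start so the factory registers it.
	csiNodes := factory.Storage().V1().CSINodes().Informer()
	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, csiNodes.HasSynced)
	// The CSINode cache is now warm; listers served from it are safe to use.
}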
@@ -556,7 +556,6 @@ func (nim *nodeInfoManager) installDriverToCSINode(
 		TopologyKeys: topologyKeys.List(),
 	}
 
-	if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
 	if maxAttachLimit > 0 {
 		if maxAttachLimit > math.MaxInt32 {
 			klog.Warningf("Exceeded max supported attach limit value, truncating it to %d", math.MaxInt32)
@@ -567,7 +566,6 @@ func (nim *nodeInfoManager) installDriverToCSINode(
 	} else {
 		klog.Errorf("Invalid attach limit value %d cannot be added to CSINode object for %q", maxAttachLimit, driverName)
 	}
-	}
 
 	newDriverSpecs = append(newDriverSpecs, driverSpec)
 	nodeInfo.Spec.Drivers = newDriverSpecs
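
Note: with the gate check gone, the attach-limit sanitization always runs before the limit lands on CSINode. The clamping logic in isolation (helper name hypothetical):

package main

import (
	"fmt"
	"math"
)

// clampAttachLimit mirrors the pattern above: non-positive limits are
// rejected (logged upstream) and values beyond int32 range are truncated.
func clampAttachLimit(maxAttachLimit int64) (int32, bool) {
	if maxAttachLimit <= 0 {
		return 0, false // invalid; publish no limit
	}
	if maxAttachLimit > math.MaxInt32 {
		return math.MaxInt32, true // truncated with a warning
	}
	return int32(maxAttachLimit), true
}

func main() {
	fmt.Println(clampAttachLimit(10))                // 10 true
	fmt.Println(clampAttachLimit(math.MaxInt32 + 1)) // 2147483647 true (truncated)
	fmt.Println(clampAttachLimit(-1))                // 0 false (invalid)
}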
@@ -1011,7 +1011,6 @@ func getClientSet(existingNode *v1.Node, existingCSINode *storage.CSINode) *fake
 
 func test(t *testing.T, addNodeInfo bool, csiNodeInfoEnabled bool, testcases []testcase) {
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSINodeInfo, csiNodeInfoEnabled)()
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
 
 	for _, tc := range testcases {
 		t.Logf("test case: %q", tc.name)
@@ -498,10 +498,8 @@ func ClusterRoles() []rbacv1.ClusterRole {
 		// Needed to check API access. These creates are non-mutating
 		rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
 		rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
-	}
-	if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) &&
-		utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
-		kubeSchedulerRules = append(kubeSchedulerRules, rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("csinodes").RuleOrDie())
+		// Needed for volume limits
+		rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("csinodes").RuleOrDie(),
 	}
 	roles = append(roles, rbacv1.ClusterRole{
 		// a role to use for the kube-scheduler
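
Note: the kube-scheduler role now always carries read access to csinodes instead of gating it on two feature checks. Roughly the rule the helper expands to (Read... is the package's get/list/watch shorthand), built standalone for illustration:

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	rule := rbacv1.PolicyRule{
		Verbs:     []string{"get", "list", "watch"},
		APIGroups: []string{"storage.k8s.io"},
		Resources: []string{"csinodes"},
	}
	fmt.Printf("%+v\n", rule)
}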
@@ -106,6 +106,7 @@ func InitHostPathCSIDriver() testsuites.TestDriver {
 		testsuites.CapPVCDataSource:       true,
 		testsuites.CapControllerExpansion: true,
 		testsuites.CapSingleNodeVolume:    true,
+		testsuites.CapVolumeLimits:        true,
 	}
 	return initHostPathCSIDriver("csi-hostpath",
 		capabilities,
@@ -109,7 +109,7 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
 	// And one extra pod with a CSI volume should get Pending with a condition
 	// that says it's unschedulable because of volume limit.
 	// BEWARE: the test may create lot of volumes and it's really slow.
-	ginkgo.It("should support volume limits [Slow][Serial]", func() {
+	ginkgo.It("should support volume limits [Serial]", func() {
 		driverInfo := driver.GetDriverInfo()
 		if !driverInfo.Capabilities[CapVolumeLimits] {
 			ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name))
@@ -124,15 +124,19 @@ func (t *volumeLimitsTestSuite) defineTests(driver TestDriver, pattern testpatte
 		l.config, l.testCleanup = driver.PrepareTest(f)
 		defer l.testCleanup()
 
-		ginkgo.By("Picking a random node")
-		var nodeName string
+		ginkgo.By("Picking a node")
+		// Some CSI drivers are deployed to a single node (e.g csi-hostpath),
+		// so we use that node instead of picking a random one.
+		nodeName := l.config.ClientNodeName
+		if nodeName == "" {
 			node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
 			framework.ExpectNoError(err)
 			nodeName = node.Name
+		}
 		framework.Logf("Selected node %s", nodeName)
 
 		ginkgo.By("Checking node limits")
-		limit, err := getNodeLimits(l.cs, nodeName, driverInfo)
+		limit, err := getNodeLimits(l.cs, l.config, nodeName, driverInfo)
 		framework.ExpectNoError(err)
 
 		framework.Logf("Node %s can handle %d volumes of driver %s", nodeName, limit, driverInfo.Name)
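
Note: the test now prefers the node a single-node driver was deployed to and only falls back to a random schedulable node. The selection pattern in isolation (function names hypothetical):

package main

import "fmt"

// pickNode prefers a pinned node name (e.g. from a test config) and falls
// back to a caller-supplied random pick when none is set.
func pickNode(pinned string, randomNode func() (string, error)) (string, error) {
	if pinned != "" {
		return pinned, nil
	}
	return randomNode()
}

func main() {
	name, err := pickNode("", func() (string, error) { return "node-2", nil })
	fmt.Println(name, err) // node-2 <nil>
	name, _ = pickNode("csi-hostpath-node", nil)
	fmt.Println(name) // csi-hostpath-node
}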
@@ -283,9 +287,9 @@ func waitForAllPVCsPhase(cs clientset.Interface, timeout time.Duration, pvcs []*
 	return pvNames, err
 }
 
-func getNodeLimits(cs clientset.Interface, nodeName string, driverInfo *DriverInfo) (int, error) {
+func getNodeLimits(cs clientset.Interface, config *PerTestConfig, nodeName string, driverInfo *DriverInfo) (int, error) {
 	if len(driverInfo.InTreePluginName) == 0 {
-		return getCSINodeLimits(cs, nodeName, driverInfo)
+		return getCSINodeLimits(cs, config, nodeName, driverInfo)
 	}
 	return getInTreeNodeLimits(cs, nodeName, driverInfo)
 }
@@ -317,7 +321,7 @@ func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *Dr
 	return int(limit.Value()), nil
 }
 
-func getCSINodeLimits(cs clientset.Interface, nodeName string, driverInfo *DriverInfo) (int, error) {
+func getCSINodeLimits(cs clientset.Interface, config *PerTestConfig, nodeName string, driverInfo *DriverInfo) (int, error) {
 	// Wait in a loop, the driver might just have been installed and kubelet takes a while to publish everything.
 	var limit int
 	err := wait.PollImmediate(2*time.Second, csiNodeInfoTimeout, func() (bool, error) {
@@ -328,7 +332,7 @@ func getCSINodeLimits(cs clientset.Interface, nodeName string, driverInfo *Drive
 		}
 		var csiDriver *storagev1.CSINodeDriver
 		for _, c := range csiNode.Spec.Drivers {
-			if c.Name == driverInfo.Name {
+			if c.Name == driverInfo.Name || c.Name == config.GetUniqueDriverName() {
 				csiDriver = &c
 				break
 			}
@@ -62,12 +62,13 @@ spec:
             name: csi-data-dir

         - name: hostpath
-          image: quay.io/k8scsi/hostpathplugin:v1.2.0
+          image: quay.io/k8scsi/hostpathplugin:v1.3.0-rc1
           args:
             - "--drivername=hostpath.csi.k8s.io"
             - "--v=5"
             - "--endpoint=$(CSI_ENDPOINT)"
             - "--nodeid=$(KUBE_NODE_NAME)"
+            - "--maxvolumespernode=10"
           env:
             - name: CSI_ENDPOINT
               value: unix:///csi/csi.sock
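
Note: --maxvolumespernode=10 makes the hostpath driver advertise a small per-node limit for the e2e suite to exhaust; kubelet copies the value into CSINode.spec.drivers[].allocatable.count. A hedged sketch of how a CSI driver typically reports it, assuming the CSI spec's Go bindings:

package main

import (
	"context"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

type nodeServer struct {
	nodeID            string
	maxVolumesPerNode int64 // wired from a --maxvolumespernode flag
}

// NodeGetInfo is the CSI RPC kubelet calls at driver registration; the
// MaxVolumesPerNode field is what surfaces as the CSINode allocatable count.
func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
	return &csi.NodeGetInfoResponse{
		NodeId:            ns.nodeID,
		MaxVolumesPerNode: ns.maxVolumesPerNode,
	}, nil
}

func main() {}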