Promote volume limits to GA

commit affcd0128b
parent dff236e3b2
@@ -282,6 +282,8 @@ const (

     // owner: @gnufied
     // beta : v1.12
+    // GA : v1.17
+
     //
     // Add support for volume plugins to report node specific
     // volume limits
@@ -523,7 +525,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
     ExpandPersistentVolumes:      {Default: true, PreRelease: featuregate.Beta},
     ExpandInUsePersistentVolumes: {Default: true, PreRelease: featuregate.Beta},
     ExpandCSIVolumes:             {Default: true, PreRelease: featuregate.Beta},
-    AttachVolumeLimit:            {Default: true, PreRelease: featuregate.Beta},
+    AttachVolumeLimit:            {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19
     CPUManager:                   {Default: true, PreRelease: featuregate.Beta},
     CPUCFSQuotaPeriod:            {Default: false, PreRelease: featuregate.Alpha},
     TopologyManager:              {Default: false, PreRelease: featuregate.Alpha},
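This line is the core of the promotion: `LockToDefault: true` pins the gate to its default value. A minimal, self-contained sketch of that behavior using `k8s.io/component-base/featuregate` (only the gate name and spec are taken from this commit; the wiring here is illustrative, not the kubelet's actual setup):

```go
package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	// A locked GA gate: enabled by default and pinned there.
	gate := featuregate.NewFeatureGate()
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"AttachVolumeLimit": {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
	}); err != nil {
		panic(err)
	}

	fmt.Println(gate.Enabled("AttachVolumeLimit")) // true

	// Overriding a locked gate is rejected, which is why the
	// gate itself can be deleted entirely in 1.19.
	if err := gate.Set("AttachVolumeLimit=false"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```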
@@ -31,12 +31,10 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
     cloudprovider "k8s.io/cloud-provider"
     "k8s.io/klog"
     k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-    "k8s.io/kubernetes/pkg/features"
     kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
     "k8s.io/kubernetes/pkg/kubelet/events"
     "k8s.io/kubernetes/pkg/kubelet/nodestatus"
@@ -548,9 +546,9 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
        nodestatus.Images(kl.nodeStatusMaxImages, kl.imageManager.GetImageList),
        nodestatus.GoRuntime(),
    )
-   if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
-       setters = append(setters, nodestatus.VolumeLimits(kl.volumePluginMgr.ListVolumePluginWithLimits))
-   }
+   // Volume limits
+   setters = append(setters, nodestatus.VolumeLimits(kl.volumePluginMgr.ListVolumePluginWithLimits))
+
    setters = append(setters,
        nodestatus.MemoryPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderMemoryPressure, kl.recordNodeStatusEvent),
        nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent),
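The kubelet assembles node status by running an ordered list of setter functions; this hunk makes the volume-limits setter unconditional instead of gated. A stand-in sketch of the pattern (the types here are simplified placeholders, not the real `nodestatus` package):

```go
package main

import "fmt"

// Node and Setter are simplified stand-ins for v1.Node and the
// kubelet's nodestatus setter functions.
type Node struct {
	Allocatable map[string]int64
}

type Setter func(*Node) error

// volumeLimits mirrors the idea of nodestatus.VolumeLimits: publish
// each plugin's attach limit into the node's allocatable map.
func volumeLimits(limits map[string]int64) Setter {
	return func(n *Node) error {
		if n.Allocatable == nil {
			n.Allocatable = map[string]int64{}
		}
		for k, v := range limits {
			n.Allocatable[k] = v
		}
		return nil
	}
}

func main() {
	// Unconditional registration, as in the patched defaultNodeStatusFuncs.
	setters := []Setter{
		volumeLimits(map[string]int64{"attachable-volumes-aws-ebs": 39}),
	}
	n := &Node{}
	for _, set := range setters {
		if err := set(n); err != nil {
			fmt.Println("setter failed:", err)
		}
	}
	fmt.Println(n.Allocatable) // map[attachable-volumes-aws-ebs:39]
}
```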
@@ -12,11 +12,9 @@ go_library(
         "//pkg/api/legacyscheme:go_default_library",
         "//pkg/apis/storage:go_default_library",
         "//pkg/apis/storage/validation:go_default_library",
-        "//pkg/features:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
-        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
     ],
 )

@@ -43,12 +41,9 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/apis/storage:go_default_library",
-        "//pkg/features:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
-        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//vendor/k8s.io/utils/pointer:go_default_library",
     ],
 )
@@ -22,11 +22,9 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/validation/field"
     "k8s.io/apiserver/pkg/storage/names"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
     "k8s.io/kubernetes/pkg/apis/storage"
     "k8s.io/kubernetes/pkg/apis/storage/validation"
-    "k8s.io/kubernetes/pkg/features"
 )

 // csiNodeStrategy implements behavior for CSINode objects
@@ -45,12 +43,6 @@ func (csiNodeStrategy) NamespaceScoped() bool {

 // PrepareForCreate clears fields that are not allowed to be set on creation.
 func (csiNodeStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
-   csiNode := obj.(*storage.CSINode)
-   if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
-       for i := range csiNode.Spec.Drivers {
-           csiNode.Spec.Drivers[i].Allocatable = nil
-       }
-   }
 }

 func (csiNodeStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
@@ -72,31 +64,6 @@ func (csiNodeStrategy) AllowCreateOnUpdate() bool {

 // PrepareForUpdate sets the driver's Allocatable fields that are not allowed to be set by an end user updating a CSINode.
 func (csiNodeStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
-   newCSINode := obj.(*storage.CSINode)
-   oldCSINode := old.(*storage.CSINode)
-
-   inUse := getAllocatablesInUse(oldCSINode)
-
-   if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
-       for i := range newCSINode.Spec.Drivers {
-           if !inUse[newCSINode.Spec.Drivers[i].Name] {
-               newCSINode.Spec.Drivers[i].Allocatable = nil
-           }
-       }
-   }
-}
-
-func getAllocatablesInUse(obj *storage.CSINode) map[string]bool {
-   inUse := make(map[string]bool)
-   if obj == nil {
-       return inUse
-   }
-   for i := range obj.Spec.Drivers {
-       if obj.Spec.Drivers[i].Allocatable != nil {
-           inUse[obj.Spec.Drivers[i].Name] = true
-       }
-   }
-   return inUse
 }

 func (csiNodeStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
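These two strategy hunks show the standard lifecycle for a gated API field: while the gate is beta, PrepareForCreate/PrepareForUpdate strip `Allocatable` whenever the gate is off; at GA the stripping disappears and the field is always persisted. A simplified sketch of the pre-GA behavior (stand-in types, not the real `storage.CSINode` API):

```go
package main

import "fmt"

// Stand-ins for the relevant slice of storage.CSINode.
type Driver struct {
	Name        string
	Allocatable *int32 // the gated field
}

type CSINode struct {
	Drivers []Driver
}

// prepareForCreate shows what the deleted code did: with the gate off,
// the gated field was dropped so it could never be persisted. With
// AttachVolumeLimit locked on, the body reduces to a no-op.
func prepareForCreate(gateEnabled bool, obj *CSINode) {
	if !gateEnabled {
		for i := range obj.Drivers {
			obj.Drivers[i].Allocatable = nil
		}
	}
}

func main() {
	limit := int32(16)
	n := &CSINode{Drivers: []Driver{{Name: "ebs.csi.aws.com", Allocatable: &limit}}}
	prepareForCreate(false, n)
	fmt.Println(n.Drivers[0].Allocatable) // <nil>: dropped while gated

	m := &CSINode{Drivers: []Driver{{Name: "ebs.csi.aws.com", Allocatable: &limit}}}
	prepareForCreate(true, m)
	fmt.Println(*m.Drivers[0].Allocatable) // 16: kept once the gate is on
}
```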
@@ -23,10 +23,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/validation/field"
     genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
-    featuregatetesting "k8s.io/component-base/featuregate/testing"
     "k8s.io/kubernetes/pkg/apis/storage"
-    "k8s.io/kubernetes/pkg/features"
     utilpointer "k8s.io/utils/pointer"
 )

@@ -47,7 +44,7 @@ func TestPrepareForCreate(t *testing.T) {
        },
    }

-   volumeLimitsEnabledCases := []struct {
+   volumeLimitsCases := []struct {
        name     string
        obj      *storage.CSINode
        expected *storage.CSINode
@@ -64,32 +61,7 @@ func TestPrepareForCreate(t *testing.T) {
        },
    }

-   defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-   for _, test := range volumeLimitsEnabledCases {
-       t.Run(test.name, func(t *testing.T) {
-           testPrepareForCreate(t, test.obj, test.expected)
-       })
-   }
-
-   volumeLimitsDisabledCases := []struct {
-       name     string
-       obj      *storage.CSINode
-       expected *storage.CSINode
-   }{
-       {
-           "empty allocatable",
-           emptyAllocatable,
-           emptyAllocatable,
-       },
-       {
-           "drop allocatable",
-           valid,
-           emptyAllocatable,
-       },
-   }
-
-   defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, false)()
-   for _, test := range volumeLimitsDisabledCases {
+   for _, test := range volumeLimitsCases {
        t.Run(test.name, func(t *testing.T) {
            testPrepareForCreate(t, test.obj, test.expected)
        })
@@ -140,7 +112,7 @@ func TestPrepareForUpdate(t *testing.T) {
        },
    }

-   volumeLimitsEnabledCases := []struct {
+   volumeLimitsCases := []struct {
        name     string
        old      *storage.CSINode
        new      *storage.CSINode
@@ -166,35 +138,7 @@ func TestPrepareForUpdate(t *testing.T) {
        },
    }

-   defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-   for _, test := range volumeLimitsEnabledCases {
-       t.Run(test.name, func(t *testing.T) {
-           testPrepareForUpdate(t, test.new, test.old, test.expected)
-       })
-   }
-
-   volumeLimitsDisabledCases := []struct {
-       name     string
-       old      *storage.CSINode
-       new      *storage.CSINode
-       expected *storage.CSINode
-   }{
-       {
-           "allow empty allocatable when it's not set",
-           emptyAllocatable,
-           emptyAllocatable,
-           emptyAllocatable,
-       },
-       {
-           "drop allocatable when it's not set",
-           emptyAllocatable,
-           valid,
-           emptyAllocatable,
-       },
-   }
-
-   defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, false)()
-   for _, test := range volumeLimitsDisabledCases {
+   for _, test := range volumeLimitsCases {
        t.Run(test.name, func(t *testing.T) {
            testPrepareForUpdate(t, test.new, test.old, test.expected)
        })
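All of the deleted `defer featuregatetesting.SetFeatureGateDuringTest(...)()` lines in these tests used the same idiom: the helper flips a gate for the test's duration and returns a restore function. With AttachVolumeLimit locked to true, flipping it to false would now fail, so both the helper calls and the disabled-case tables had to go. A sketch of the idiom using a different, still-mutable gate (CSINodeInfo, which this commit leaves in beta); the package name is a placeholder:

```go
package volumelimits_test

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
)

func TestWithGateForcedOn(t *testing.T) {
	// Flip the gate for this test only; the returned func restores the
	// previous value, hence the defer ...() pattern seen in the diff.
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, "CSINodeInfo", true)()

	// ... test body runs with the gate forced on ...
}
```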
@@ -22,14 +22,12 @@ import (
     v1 "k8s.io/api/core/v1"
     storagev1beta1 "k8s.io/api/storage/v1beta1"
     "k8s.io/apimachinery/pkg/util/rand"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
     corelisters "k8s.io/client-go/listers/core/v1"
     storagelisters "k8s.io/client-go/listers/storage/v1"
     v1beta1storagelisters "k8s.io/client-go/listers/storage/v1beta1"
     csitrans "k8s.io/csi-translation-lib"
     "k8s.io/klog"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-    "k8s.io/kubernetes/pkg/features"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )
@@ -93,10 +91,6 @@ func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
        return true, nil, nil
    }

-   if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
-       return true, nil, nil
-   }
-
    node := nodeInfo.Node()
    if node == nil {
        return false, nil, fmt.Errorf("node not found")
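With the early `return true` gone, attachableLimitPredicate always enforces per-driver limits. A toy sketch of the comparison it ultimately performs (maps and key names are illustrative, not the scheduler's real types):

```go
package main

import "fmt"

// exceedsLimits sketches the core check: for each volume-limit key the
// pod's new volumes need, compare attached + requested against the
// node's reported limit. A real implementation would skip keys the
// node does not report.
func exceedsLimits(attached, requested map[string]int, limits map[string]int64) bool {
	for key, n := range requested {
		if int64(attached[key]+n) > limits[key] {
			return true
		}
	}
	return false
}

func main() {
	limits := map[string]int64{"attachable-volumes-csi-ebs.csi.aws.com": 39}
	attached := map[string]int{"attachable-volumes-csi-ebs.csi.aws.com": 39}
	requested := map[string]int{"attachable-volumes-csi-ebs.csi.aws.com": 1}
	fmt.Println(exceedsLimits(attached, requested, limits)) // true: pod does not fit
}
```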
@@ -434,7 +434,6 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
        },
    }

-   defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
    // running attachable predicate tests with feature gate and limit present on nodes
    for _, test := range tests {
        t.Run(test.test, func(t *testing.T) {
@@ -19,7 +19,6 @@ package predicates
 import (
     "os"
     "reflect"
-    "strconv"
     "strings"
     "testing"

@@ -27,10 +26,7 @@ import (
     "k8s.io/api/storage/v1beta1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
-    featuregatetesting "k8s.io/component-base/featuregate/testing"
     csilibplugins "k8s.io/csi-translation-lib/plugins"
-    "k8s.io/kubernetes/pkg/features"
     fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     volumeutil "k8s.io/kubernetes/pkg/volume/util"
@@ -848,31 +844,6 @@ func TestVolumeCountConflicts(t *testing.T) {

    expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}

-   // running attachable predicate tests without feature gate and no limit present on nodes
-   for _, test := range tests {
-       os.Setenv(KubeMaxPDVols, strconv.Itoa(test.maxVols))
-       node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
-       pred := NewMaxPDVolumeCountPredicate(test.filterName,
-           getFakeCSINodeLister(csiNode),
-           getFakeStorageClassLister(test.filterName),
-           getFakePVLister(test.filterName),
-           getFakePVCLister(test.filterName))
-
-       factory := &MetadataProducerFactory{}
-       fits, reasons, err := pred(test.newPod, factory.GetPredicateMetadata(test.newPod, nil), node)
-       if err != nil {
-           t.Errorf("[%s]%s: unexpected error: %v", test.filterName, test.test, err)
-       }
-       if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
-           t.Errorf("[%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
-       }
-       if fits != test.fits {
-           t.Errorf("[%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
-       }
-   }
-
-   defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-
    // running attachable predicate tests with feature gate and limit present on nodes
    for _, test := range tests {
        node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
@@ -477,12 +477,10 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo
    numNewVolumes := len(newVolumes)
    maxAttachLimit := c.maxVolumeFunc(node)

-   if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
    volumeLimits := nodeInfo.VolumeLimits()
    if maxAttachLimitFromAllocatable, ok := volumeLimits[c.volumeLimitKey]; ok {
        maxAttachLimit = int(maxAttachLimitFromAllocatable)
    }
-   }

    if numExistingVolumes+numNewVolumes > maxAttachLimit {
        // violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
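After the unwrap, the checker always prefers the limit the node reports in its allocatable over the plugin's built-in default. A small sketch of that resolution order (the key name is illustrative):

```go
package main

import "fmt"

// resolveMaxAttachLimit mirrors the now-unconditional logic: start from
// the plugin's default limit and override it with the value the node
// reports in its allocatable map, when present.
func resolveMaxAttachLimit(defaultLimit int, volumeLimits map[string]int64, key string) int {
	maxAttachLimit := defaultLimit
	if fromAllocatable, ok := volumeLimits[key]; ok {
		maxAttachLimit = int(fromAllocatable)
	}
	return maxAttachLimit
}

func main() {
	limits := map[string]int64{"attachable-volumes-aws-ebs": 25}
	fmt.Println(resolveMaxAttachLimit(39, limits, "attachable-volumes-aws-ebs")) // 25: node value wins
	fmt.Println(resolveMaxAttachLimit(39, nil, "attachable-volumes-aws-ebs"))    // 39: fallback to default
}
```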
@@ -1169,30 +1169,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
                },
            },
        },
-       {
-           name: "disable beta feature AttachVolumeLimit",
-           JSON: `{
-               "kind": "Policy",
-               "apiVersion": "v1",
-               "predicates": [
-                   {"name": "MaxCSIVolumeCountPred"},
-                   {"name": "CheckVolumeBinding"}
-               ],
-               "priorities": [
-               ]
-           }`,
-           featureGates: map[featuregate.Feature]bool{
-               features.AttachVolumeLimit: false,
-           },
-           wantPlugins: map[string][]config.Plugin{
-               "FilterPlugin": {
-                   {Name: "NodeUnschedulable"},
-                   {Name: "TaintToleration"},
-                   {Name: "NodeVolumeLimits"},
-                   {Name: "VolumeBinding"},
-               },
-           },
-       },
    }
    registeredPredicates := sets.NewString(scheduler.ListRegisteredFitPredicates()...)
    registeredPriorities := sets.NewString(scheduler.ListRegisteredPriorityFunctions()...)
@@ -22,9 +22,6 @@ import (
     "testing"

     v1 "k8s.io/api/core/v1"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
-    featuregatetesting "k8s.io/component-base/featuregate/testing"
-    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
@@ -355,8 +352,6 @@ func TestAzureDiskLimits(t *testing.T) {
        },
    }

-   defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-
    for _, test := range tests {
        t.Run(test.test, func(t *testing.T) {
            node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
@@ -22,9 +22,6 @@ import (
     "testing"

     v1 "k8s.io/api/core/v1"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
-    featuregatetesting "k8s.io/component-base/featuregate/testing"
-    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
@@ -84,8 +81,6 @@ func TestCinderLimits(t *testing.T) {
        },
    }

-   defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-
    for _, test := range tests {
        t.Run(test.test, func(t *testing.T) {
            node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
@@ -447,7 +447,6 @@ func TestCSILimits(t *testing.T) {
        },
    }

-   defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
    // running attachable predicate tests with feature gate and limit present on nodes
    for _, test := range tests {
        t.Run(test.test, func(t *testing.T) {
@@ -24,10 +24,7 @@ import (

     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
-    featuregatetesting "k8s.io/component-base/featuregate/testing"
     csilibplugins "k8s.io/csi-translation-lib/plugins"
-    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
     fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
@@ -469,8 +466,6 @@ func TestEBSLimits(t *testing.T) {
        },
    }

-   defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-
    for _, test := range tests {
        t.Run(test.test, func(t *testing.T) {
            node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
@@ -22,9 +22,6 @@ import (
     "testing"

     v1 "k8s.io/api/core/v1"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
-    featuregatetesting "k8s.io/component-base/featuregate/testing"
-    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
@@ -355,8 +352,6 @@ func TestGCEPDLimits(t *testing.T) {
        },
    }

-   defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
-
    for _, test := range tests {
        t.Run(test.test, func(t *testing.T) {
            node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
@@ -556,7 +556,6 @@ func (nim *nodeInfoManager) installDriverToCSINode(
        TopologyKeys: topologyKeys.List(),
    }

-   if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
    if maxAttachLimit > 0 {
        if maxAttachLimit > math.MaxInt32 {
            klog.Warningf("Exceeded max supported attach limit value, truncating it to %d", math.MaxInt32)
@@ -567,7 +566,6 @@ func (nim *nodeInfoManager) installDriverToCSINode(
    } else {
        klog.Errorf("Invalid attach limit value %d cannot be added to CSINode object for %q", maxAttachLimit, driverName)
    }
-   }

    newDriverSpecs = append(newDriverSpecs, driverSpec)
    nodeInfo.Spec.Drivers = newDriverSpecs
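The code these two hunks ungate validates and clamps the driver-reported attach limit before storing it in the CSINode spec, whose count field is an int32. A sketch of that normalization, assuming the same bounds the diff shows:

```go
package main

import (
	"fmt"
	"math"
)

// normalizeAttachLimit sketches the (now unconditional) handling in
// installDriverToCSINode: oversized values are truncated to MaxInt32
// and non-positive ones rejected. The real code logs instead of
// returning an error.
func normalizeAttachLimit(maxAttachLimit int64) (int32, error) {
	if maxAttachLimit <= 0 {
		return 0, fmt.Errorf("invalid attach limit value %d", maxAttachLimit)
	}
	if maxAttachLimit > math.MaxInt32 {
		// Truncate, with a warning in the real code.
		return math.MaxInt32, nil
	}
	return int32(maxAttachLimit), nil
}

func main() {
	fmt.Println(normalizeAttachLimit(128))                      // 128 <nil>
	fmt.Println(normalizeAttachLimit(int64(math.MaxInt32) + 1)) // 2147483647 <nil>
	fmt.Println(normalizeAttachLimit(0))                        // 0 invalid attach limit value 0
}
```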
@@ -1011,7 +1011,6 @@ func getClientSet(existingNode *v1.Node, existingCSINode *storage.CSINode) *fake

 func test(t *testing.T, addNodeInfo bool, csiNodeInfoEnabled bool, testcases []testcase) {
    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSINodeInfo, csiNodeInfoEnabled)()
-   defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()

    for _, tc := range testcases {
        t.Logf("test case: %q", tc.name)
@@ -498,10 +498,8 @@ func ClusterRoles() []rbacv1.ClusterRole {
        // Needed to check API access. These creates are non-mutating
        rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
        rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
-   }
-   if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) &&
-       utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
-       kubeSchedulerRules = append(kubeSchedulerRules, rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("csinodes").RuleOrDie())
+       // Needed for volume limits
+       rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("csinodes").RuleOrDie(),
    }
    roles = append(roles, rbacv1.ClusterRole{
        // a role to use for the kube-scheduler
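The kube-scheduler role now always carries read access to csinodes instead of gaining it only when both gates were on. Expressed as a plain rbacv1.PolicyRule (assuming `Read...` expands to get/list/watch, as the bootstrap-policy helpers conventionally define it):

```go
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	// Read access to csinodes for the kube-scheduler role, now granted
	// unconditionally; the verb list mirrors the assumed Read... set.
	rule := rbacv1.PolicyRule{
		Verbs:     []string{"get", "list", "watch"},
		APIGroups: []string{"storage.k8s.io"},
		Resources: []string{"csinodes"},
	}
	fmt.Printf("%+v\n", rule)
}
```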