[migration phase 1] CSIMaxVolumeLimitChecker as filter plugin
commit f06925b0ee
parent 534051acec

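In outline: phase 1 of the predicates-to-plugins migration keeps the existing CSIMaxVolumeLimitChecker predicate as the single source of truth and wraps it in a scheduler-framework filter plugin ("NodeVolumeLimits"), translating the predicate's (fit, reasons, err) result into a *framework.Status through the shared migration adapter. The adapter's body is not part of this diff; the following is only a plausible sketch of its shape, assuming the v1alpha1 Status API used elsewhere in the commit:

package migrationsketch

import (
	"strings"

	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// predicateResultToFrameworkStatus sketches what migration.PredicateResultToFrameworkStatus
// plausibly does: a nil status means the pod fits, predicate failure reasons become an
// Unschedulable status, and a hard error becomes an Error status.
func predicateResultToFrameworkStatus(reasons []predicates.PredicateFailureReason, err error) *framework.Status {
	if err != nil {
		return framework.NewStatus(framework.Error, err.Error())
	}
	if len(reasons) == 0 {
		return nil
	}
	msgs := make([]string, 0, len(reasons))
	for _, r := range reasons {
		msgs = append(msgs, r.GetReason())
	}
	return framework.NewStatus(framework.Unschedulable, strings.Join(msgs, ", "))
}
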
@@ -1063,7 +1063,7 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 	addLimitToNode := func() {
 		for _, driver := range driverNames {
-			node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
+			node.Status.Allocatable[GetVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
 		}
 	}

@@ -1111,18 +1111,3 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 	nodeInfo.SetNode(node)
 	return nodeInfo, csiNode
 }
-
-func getVolumeLimitKey(filterType string) v1.ResourceName {
-	switch filterType {
-	case EBSVolumeFilterType:
-		return v1.ResourceName(volumeutil.EBSVolumeLimitKey)
-	case GCEPDVolumeFilterType:
-		return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
-	case AzureDiskVolumeFilterType:
-		return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
-	case CinderVolumeFilterType:
-		return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
-	default:
-		return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
-	}
-}

@@ -22,6 +22,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	storagev1beta1 "k8s.io/api/storage/v1beta1"
+	volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )

 // FakePersistentVolumeClaimInfo declares a []v1.PersistentVolumeClaim type for testing.

@@ -93,3 +94,19 @@ func (classes FakeStorageClassInfo) GetStorageClassInfo(name string) (*storagev1
 	}
 	return nil, fmt.Errorf("Unable to find storage class: %s", name)
 }
+
+// GetVolumeLimitKey returns a ResourceName by filter type
+func GetVolumeLimitKey(filterType string) v1.ResourceName {
+	switch filterType {
+	case EBSVolumeFilterType:
+		return v1.ResourceName(volumeutil.EBSVolumeLimitKey)
+	case GCEPDVolumeFilterType:
+		return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
+	case AzureDiskVolumeFilterType:
+		return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
+	case CinderVolumeFilterType:
+		return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
+	default:
+		return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
+	}
+}

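For reference, the keys returned by GetVolumeLimitKey are the extended-resource names under which a node advertises its attachable-volume limits. Below is a minimal, self-contained sketch of how such a key is set and read on a node; the literal key string ("attachable-volumes-aws-ebs") is assumed from pkg/volume/util for illustration:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Assumed value of volumeutil.EBSVolumeLimitKey, i.e. what
	// GetVolumeLimitKey(EBSVolumeFilterType) would return.
	key := v1.ResourceName("attachable-volumes-aws-ebs")

	node := v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
		Status: v1.NodeStatus{
			Allocatable: v1.ResourceList{
				// The scheduler compares attached-volume counts against this quantity.
				key: *resource.NewQuantity(39, resource.DecimalSI),
			},
		},
	}
	limit := node.Status.Allocatable[key]
	fmt.Printf("%s can attach up to %d volumes\n", node.Name, limit.Value())
}
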
@@ -911,7 +911,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			"MaxEBSVolumeCount",
 			"MaxGCEPDVolumeCount",
 			"MaxAzureDiskVolumeCount",
-			"MaxCSIVolumeCountPred",
 			"MatchInterPodAffinity",
 			"GeneralPredicates",
 			"TestServiceAffinity",

@@ -935,6 +934,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				{Name: "NodeResources"},
 				{Name: "VolumeRestrictions"},
 				{Name: "TaintToleration"},
+				{Name: "NodeVolumeLimits"},
 				{Name: "VolumeBinding"},
 				{Name: "VolumeZone"},
 			},

@@ -1028,7 +1028,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			"MaxEBSVolumeCount",
 			"MaxGCEPDVolumeCount",
 			"MaxAzureDiskVolumeCount",
-			"MaxCSIVolumeCountPred",
 			"MaxCinderVolumeCount",
 			"MatchInterPodAffinity",
 			"GeneralPredicates",

@@ -1053,6 +1052,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				{Name: "NodeResources"},
 				{Name: "VolumeRestrictions"},
 				{Name: "TaintToleration"},
+				{Name: "NodeVolumeLimits"},
 				{Name: "VolumeBinding"},
 				{Name: "VolumeZone"},
 			},

@@ -1150,7 +1150,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			"MaxEBSVolumeCount",
 			"MaxGCEPDVolumeCount",
 			"MaxAzureDiskVolumeCount",
-			"MaxCSIVolumeCountPred",
 			"MaxCinderVolumeCount",
 			"MatchInterPodAffinity",
 			"GeneralPredicates",

@@ -1175,6 +1174,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				{Name: "NodeResources"},
 				{Name: "VolumeRestrictions"},
 				{Name: "TaintToleration"},
+				{Name: "NodeVolumeLimits"},
 				{Name: "VolumeBinding"},
 				{Name: "VolumeZone"},
 			},

@@ -1213,6 +1213,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		"VolumeBinding":      "CheckVolumeBinding",
 		"VolumeRestrictions": "NoDiskConflict",
 		"VolumeZone":         "NoVolumeZoneConflict",
+		"NodeVolumeLimits":   "MaxCSIVolumeCountPred",
 	}
 	scoreToPriorityMap := map[string]string{
 		"ImageLocality": "ImageLocalityPriority",

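The new "NodeVolumeLimits": "MaxCSIVolumeCountPred" entry records that the filter plugin replaced the legacy predicate, and the compatibility test walks that mapping to verify parity. A sketch of the kind of check this enables (the helper name here is hypothetical, not the test's actual code):

package compatsketch

import "testing"

// checkFilterParity asserts that every enabled filter plugin traces back to a
// legacy predicate name, so existing policies keep selecting the same logic.
func checkFilterParity(t *testing.T, enabledFilters []string, filterToPredicate map[string]string) {
	for _, f := range enabledFilters {
		if _, ok := filterToPredicate[f]; !ok {
			t.Errorf("filter plugin %q has no legacy predicate mapping", f)
		}
	}
}
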
@@ -16,6 +16,7 @@ go_library(
         "//pkg/scheduler/framework/plugins/nodeports:go_default_library",
         "//pkg/scheduler/framework/plugins/nodepreferavoidpods:go_default_library",
         "//pkg/scheduler/framework/plugins/noderesources:go_default_library",
+        "//pkg/scheduler/framework/plugins/nodevolumelimits:go_default_library",
         "//pkg/scheduler/framework/plugins/tainttoleration:go_default_library",
         "//pkg/scheduler/framework/plugins/volumebinding:go_default_library",
         "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",

@@ -48,6 +49,7 @@ filegroup(
         "//pkg/scheduler/framework/plugins/nodeports:all-srcs",
         "//pkg/scheduler/framework/plugins/nodepreferavoidpods:all-srcs",
         "//pkg/scheduler/framework/plugins/noderesources:all-srcs",
+        "//pkg/scheduler/framework/plugins/nodevolumelimits:all-srcs",
         "//pkg/scheduler/framework/plugins/tainttoleration:all-srcs",
         "//pkg/scheduler/framework/plugins/volumebinding:all-srcs",
         "//pkg/scheduler/framework/plugins/volumerestrictions:all-srcs",

@@ -32,6 +32,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodepreferavoidpods"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"

@@ -78,6 +79,9 @@ func NewDefaultRegistry(args *RegistryArgs) framework.Registry {
 		volumezone.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
 			return volumezone.New(pvInfo, pvcInfo, classInfo), nil
 		},
+		nodevolumelimits.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
+			return nodevolumelimits.New(args.SchedulerCache, pvInfo, pvcInfo, classInfo), nil
+		},
 	}
 }

@@ -145,6 +149,11 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry {
 			plugins.Filter = appendToPluginSet(plugins.Filter, volumezone.Name, nil)
 			return
 		})
+	registry.RegisterPredicate(predicates.MaxCSIVolumeCountPred,
+		func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
+			plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.Name, nil)
+			return
+		})

 	registry.RegisterPriority(priorities.TaintTolerationPriority,
 		func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {

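Registering a config producer under the legacy predicate name is what keeps old policy files working: a policy that enables MaxCSIVolumeCountPred now yields the NodeVolumeLimits filter plugin. A simplified, self-contained sketch of the idea; the types below are stand-ins for the real scheduler config API:

package producersketch

// Plugin and Plugins are simplified stand-ins for the scheduler's config types.
type Plugin struct{ Name string }
type Plugins struct{ Filter []Plugin }

// A producer maps a legacy predicate name to the framework plugins replacing it.
type configProducer func() Plugins

var producers = map[string]configProducer{
	// The legacy CSI volume-count predicate now enables the NodeVolumeLimits filter.
	"MaxCSIVolumeCountPred": func() Plugins {
		return Plugins{Filter: []Plugin{{Name: "NodeVolumeLimits"}}}
	},
}
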
pkg/scheduler/framework/plugins/nodevolumelimits/BUILD (new file, 50 lines)
@@ -0,0 +1,50 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["node_volume_limits.go"],
+    importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/scheduler/algorithm/predicates:go_default_library",
+        "//pkg/scheduler/framework/plugins/migration:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["node_volume_limits_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//pkg/features:go_default_library",
+        "//pkg/scheduler/algorithm/predicates:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
+        "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
+        "//vendor/k8s.io/utils/pointer:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)

pkg/scheduler/framework/plugins/nodevolumelimits/node_volume_limits.go (new file, 56 lines)
@@ -0,0 +1,56 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodevolumelimits
+
+import (
+	"context"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+)
+
+// NodeVolumeLimits is a plugin that checks node volume limits
+type NodeVolumeLimits struct {
+	predicate predicates.FitPredicate
+}
+
+var _ = framework.FilterPlugin(&NodeVolumeLimits{})
+
+// Name is the name of the plugin used in the plugin registry and configurations.
+const Name = "NodeVolumeLimits"
+
+// Name returns name of the plugin. It is used in logs, etc.
+func (pl *NodeVolumeLimits) Name() string {
+	return Name
+}
+
+// Filter invoked at the filter extension point.
+func (pl *NodeVolumeLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+	// metadata is not needed
+	_, reasons, err := pl.predicate(pod, nil, nodeInfo)
+	return migration.PredicateResultToFrameworkStatus(reasons, err)
+}
+
+// New initializes a new plugin and returns it.
+func New(csiNodeInfo predicates.CSINodeInfo, pvInfo predicates.PersistentVolumeInfo, pvcInfo predicates.PersistentVolumeClaimInfo, classInfo predicates.StorageClassInfo) framework.Plugin {
+	return &NodeVolumeLimits{
+		predicate: predicates.NewCSIMaxVolumeLimitPredicate(csiNodeInfo, pvInfo, pvcInfo, classInfo),
+	}
+}

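A hedged usage sketch of the new plugin, mirroring what the test file below does: build it from the fake info sources in the predicates package and run its Filter against a node. With empty fakes and a volume-less pod the filter trivially succeeds; treating a nil status as success is assumed from the framework API:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func main() {
	// Empty fake informers: no PVs, PVCs, storage classes, or CSINode limits.
	pl := nodevolumelimits.New(
		predicates.FakeCSINodeInfo{},
		predicates.FakePersistentVolumeInfo{},
		predicates.FakePersistentVolumeClaimInfo{},
		predicates.FakeStorageClassInfo{},
	)

	nodeInfo := schedulernodeinfo.NewNodeInfo()
	nodeInfo.SetNode(&v1.Node{})

	// A pod with no volumes fits regardless of limits.
	status := pl.(framework.FilterPlugin).Filter(context.Background(), nil, &v1.Pod{}, nodeInfo)
	fmt.Println("pod fits:", status.IsSuccess()) // nil status means success
}
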
pkg/scheduler/framework/plugins/nodevolumelimits/node_volume_limits_test.go (new file, 622 lines)
@@ -0,0 +1,622 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package nodevolumelimits
+
+import (
+	"context"
+	"reflect"
+	"strings"
+	"testing"
+
+	"fmt"
+	"k8s.io/api/core/v1"
+	"k8s.io/api/storage/v1beta1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	csilibplugins "k8s.io/csi-translation-lib/plugins"
+	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	utilpointer "k8s.io/utils/pointer"
+)
+
+const (
+	ebsCSIDriverName = csilibplugins.AWSEBSDriverName
+	gceCSIDriverName = csilibplugins.GCEPDDriverName
+
+	hostpathInTreePluginName = "kubernetes.io/hostpath"
+)
+
+func TestNodeVolumeLimits(t *testing.T) {
+	runningPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-ebs.csi.aws.com-3",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	pendingVolumePod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-4",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	// Different pod than pendingVolumePod, but using the same unbound PVC
+	unboundPVCPod2 := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-4",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	missingPVPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-6",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	noSCPVCPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-5",
+						},
+					},
+				},
+			},
+		},
+	}
+	gceTwoVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-pd.csi.storage.gke.io-1",
+						},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-pd.csi.storage.gke.io-2",
+						},
+					},
+				},
+			},
+		},
+	}
+	// In-tree volumes
+	inTreeOneVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-kubernetes.io/aws-ebs-0",
+						},
+					},
+				},
+			},
+		},
+	}
+	inTreeTwoVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-kubernetes.io/aws-ebs-1",
+						},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-kubernetes.io/aws-ebs-2",
+						},
+					},
+				},
+			},
+		},
+	}
+	// pods with matching csi driver names
+	csiEBSOneVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-ebs.csi.aws.com-0",
+						},
+					},
+				},
+			},
+		},
+	}
+	csiEBSTwoVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-ebs.csi.aws.com-1",
+						},
+					},
+				},
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-ebs.csi.aws.com-2",
+						},
+					},
+				},
+			},
+		},
+	}
+	inTreeNonMigratableOneVolPod := &v1.Pod{
+		Spec: v1.PodSpec{
+			Volumes: []v1.Volume{
+				{
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: "csi-kubernetes.io/hostpath-0",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	tests := []struct {
+		newPod                *v1.Pod
+		existingPods          []*v1.Pod
+		filterName            string
+		maxVols               int
+		driverNames           []string
+		test                  string
+		migrationEnabled      bool
+		limitSource           string
+		expectedFailureReason *predicates.PredicateFailureError
+		wantStatus            *framework.Status
+	}{
+		{
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
+			filterName:   "csi",
+			maxVols:      4,
+			driverNames:  []string{ebsCSIDriverName},
+			test:         "fits when node volume limit >= new pods CSI volume",
+			limitSource:  "node",
+		},
+		{
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{ebsCSIDriverName},
+			test:         "doesn't when node volume limit <= pods CSI volume",
+			limitSource:  "node",
+			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		},
+		{
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{ebsCSIDriverName},
+			test:         "should when driver does not support volume limits",
+			limitSource:  "csinode-with-no-limit",
+		},
+		// should count pending PVCs
+		{
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{pendingVolumePod, csiEBSTwoVolPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{ebsCSIDriverName},
+			test:         "count pending PVCs towards volume limit <= pods CSI volume",
+			limitSource:  "node",
+			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		},
+		// two same pending PVCs should be counted as 1
+		{
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{pendingVolumePod, unboundPVCPod2, csiEBSTwoVolPod},
+			filterName:   "csi",
+			maxVols:      4,
+			driverNames:  []string{ebsCSIDriverName},
+			test:         "count multiple pending pvcs towards volume limit >= pods CSI volume",
+			limitSource:  "node",
+		},
+		// should count PVCs with invalid PV name but valid SC
+		{
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{missingPVPod, csiEBSTwoVolPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{ebsCSIDriverName},
+			test:         "should count PVCs with invalid PV name but valid SC",
+			limitSource:  "node",
+			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		},
+		// don't count a volume which has storageclass missing
+		{
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{runningPod, noSCPVCPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{ebsCSIDriverName},
+			test:         "don't count pvcs with missing SC towards volume limit",
+			limitSource:  "node",
+		},
+		// don't count multiple volume types
+		{
+			newPod:       csiEBSOneVolPod,
+			existingPods: []*v1.Pod{gceTwoVolPod, csiEBSTwoVolPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{ebsCSIDriverName, gceCSIDriverName},
+			test:         "count pvcs with the same type towards volume limit",
+			limitSource:  "node",
+			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		},
+		{
+			newPod:       gceTwoVolPod,
+			existingPods: []*v1.Pod{csiEBSTwoVolPod, runningPod},
+			filterName:   "csi",
+			maxVols:      2,
+			driverNames:  []string{ebsCSIDriverName, gceCSIDriverName},
+			test:         "don't count pvcs with different type towards volume limit",
+			limitSource:  "node",
+		},
+		// Tests for in-tree volume migration
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should count in-tree volumes if migration is enabled",
+			wantStatus:       framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		},
+		{
+			newPod:           pendingVolumePod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should count unbound in-tree volumes if migration is enabled",
+			wantStatus:       framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		},
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			migrationEnabled: false,
+			limitSource:      "csinode",
+			test:             "should not count in-tree volume if migration is disabled",
+		},
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			migrationEnabled: true,
+			limitSource:      "csinode-with-no-limit",
+			test:             "should not limit pod if volume used does not report limits",
+		},
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			migrationEnabled: false,
+			limitSource:      "csinode-with-no-limit",
+			test:             "should not limit in-tree pod if migration is disabled",
+		},
+		{
+			newPod:           inTreeNonMigratableOneVolPod,
+			existingPods:     []*v1.Pod{csiEBSTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{hostpathInTreePluginName, ebsCSIDriverName},
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should not count non-migratable in-tree volumes",
+		},
+		// mixed volumes
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{csiEBSTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should count in-tree and csi volumes if migration is enabled (when scheduling in-tree volumes)",
+			wantStatus:       framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		},
+		{
+			newPod:           csiEBSOneVolPod,
+			existingPods:     []*v1.Pod{inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			migrationEnabled: true,
+			limitSource:      "csinode",
+			test:             "should count in-tree and csi volumes if migration is enabled (when scheduling csi volumes)",
+			wantStatus:       framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		},
+		{
+			newPod:           csiEBSOneVolPod,
+			existingPods:     []*v1.Pod{csiEBSTwoVolPod, inTreeTwoVolPod},
+			filterName:       "csi",
+			maxVols:          3,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			migrationEnabled: false,
+			limitSource:      "csinode",
+			test:             "should not count in-tree and count csi volumes if migration is disabled (when scheduling csi volumes)",
+		},
+		{
+			newPod:           inTreeOneVolPod,
+			existingPods:     []*v1.Pod{csiEBSTwoVolPod},
+			filterName:       "csi",
+			maxVols:          2,
+			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
+			migrationEnabled: false,
+			limitSource:      "csinode",
+			test:             "should not count in-tree and count csi volumes if migration is disabled (when scheduling in-tree volumes)",
+		},
+	}
+
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
+	// running attachable predicate tests with feature gate and limit present on nodes
+	for _, test := range tests {
+		t.Run(test.test, func(t *testing.T) {
+			node, csiNode := getNodeWithPodAndVolumeLimits(test.limitSource, test.existingPods, int64(test.maxVols), test.driverNames...)
+			if test.migrationEnabled {
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigration, true)()
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationAWS, true)()
+				enableMigrationOnNode(csiNode, csilibplugins.AWSEBSInTreePluginName)
+			} else {
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigration, false)()
+				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationAWS, false)()
+			}
+
+			p := New(getFakeCSINodeInfo(csiNode),
+				getFakeCSIPVInfo(test.filterName, test.driverNames...),
+				getFakeCSIPVCInfo(test.filterName, "csi-sc", test.driverNames...),
+				getFakeCSIStorageClassInfo("csi-sc", test.driverNames[0]))
+			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.newPod, node)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+}
+
+func getFakeCSIPVInfo(volumeName string, driverNames ...string) predicates.FakePersistentVolumeInfo {
+	pvInfos := predicates.FakePersistentVolumeInfo{}
+	for _, driver := range driverNames {
+		for j := 0; j < 4; j++ {
+			volumeHandle := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
+			pv := v1.PersistentVolume{
+				ObjectMeta: metav1.ObjectMeta{Name: volumeHandle},
+				Spec: v1.PersistentVolumeSpec{
+					PersistentVolumeSource: v1.PersistentVolumeSource{
+						CSI: &v1.CSIPersistentVolumeSource{
+							Driver:       driver,
+							VolumeHandle: volumeHandle,
+						},
+					},
+				},
+			}
+
+			switch driver {
+			case csilibplugins.AWSEBSInTreePluginName:
+				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
+					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
+						VolumeID: volumeHandle,
+					},
+				}
+			case hostpathInTreePluginName:
+				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
+					HostPath: &v1.HostPathVolumeSource{
+						Path: "/tmp",
+					},
+				}
+			default:
+				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
+					CSI: &v1.CSIPersistentVolumeSource{
+						Driver:       driver,
+						VolumeHandle: volumeHandle,
+					},
+				}
+			}
+			pvInfos = append(pvInfos, pv)
+		}
+
+	}
+	return pvInfos
+}
+
+func getFakeCSIPVCInfo(volumeName, scName string, driverNames ...string) predicates.FakePersistentVolumeClaimInfo {
+	pvcInfos := predicates.FakePersistentVolumeClaimInfo{}
+	for _, driver := range driverNames {
+		for j := 0; j < 4; j++ {
+			v := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
+			pvc := v1.PersistentVolumeClaim{
+				ObjectMeta: metav1.ObjectMeta{Name: v},
+				Spec:       v1.PersistentVolumeClaimSpec{VolumeName: v},
+			}
+			pvcInfos = append(pvcInfos, pvc)
+		}
+	}
+
+	pvcInfos = append(pvcInfos, v1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-4"},
+		Spec:       v1.PersistentVolumeClaimSpec{StorageClassName: &scName},
+	})
+	pvcInfos = append(pvcInfos, v1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-5"},
+		Spec:       v1.PersistentVolumeClaimSpec{},
+	})
+	// a pvc with missing PV but available storageclass.
+	pvcInfos = append(pvcInfos, v1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-6"},
+		Spec:       v1.PersistentVolumeClaimSpec{StorageClassName: &scName, VolumeName: "missing-in-action"},
+	})
+	return pvcInfos
+}
+
+func enableMigrationOnNode(csiNode *storagev1beta1.CSINode, pluginName string) {
+	nodeInfoAnnotations := csiNode.GetAnnotations()
+	if nodeInfoAnnotations == nil {
+		nodeInfoAnnotations = map[string]string{}
+	}
+
+	newAnnotationSet := sets.NewString()
+	newAnnotationSet.Insert(pluginName)
+	nas := strings.Join(newAnnotationSet.List(), ",")
+	nodeInfoAnnotations[v1.MigratedPluginsAnnotationKey] = nas
+
+	csiNode.Annotations = nodeInfoAnnotations
+}
+
+func getFakeCSIStorageClassInfo(scName, provisionerName string) predicates.FakeStorageClassInfo {
+	return predicates.FakeStorageClassInfo{
+		{
+			ObjectMeta:  metav1.ObjectMeta{Name: scName},
+			Provisioner: provisionerName,
+		},
+	}
+}
+
+func getFakeCSINodeInfo(csiNode *storagev1beta1.CSINode) predicates.FakeCSINodeInfo {
+	if csiNode != nil {
+		return predicates.FakeCSINodeInfo(*csiNode)
+	}
+	return predicates.FakeCSINodeInfo{}
+}
+
+func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *v1beta1.CSINode) {
+	nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
+	node := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
+		Status: v1.NodeStatus{
+			Allocatable: v1.ResourceList{},
+		},
+	}
+	var csiNode *v1beta1.CSINode
+
+	addLimitToNode := func() {
+		for _, driver := range driverNames {
+			node.Status.Allocatable[predicates.GetVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
+		}
+	}
+
+	initCSINode := func() {
+		csiNode = &v1beta1.CSINode{
+			ObjectMeta: metav1.ObjectMeta{Name: "csi-node-for-max-pd-test-1"},
+			Spec: v1beta1.CSINodeSpec{
+				Drivers: []v1beta1.CSINodeDriver{},
+			},
+		}
+	}
+
+	addDriversCSINode := func(addLimits bool) {
+		initCSINode()
+		for _, driver := range driverNames {
+			driver := v1beta1.CSINodeDriver{
+				Name:   driver,
+				NodeID: "node-for-max-pd-test-1",
+			}
+			if addLimits {
+				driver.Allocatable = &v1beta1.VolumeNodeResources{
+					Count: utilpointer.Int32Ptr(int32(limit)),
+				}
+			}
+			csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, driver)
+		}
+	}
+
+	switch limitSource {
+	case "node":
+		addLimitToNode()
+	case "csinode":
+		addDriversCSINode(true)
+	case "both":
+		addLimitToNode()
+		addDriversCSINode(true)
+	case "csinode-with-no-limit":
+		addDriversCSINode(false)
+	case "no-csi-driver":
+		initCSINode()
+	default:
+		// Do nothing.
+	}
+
+	nodeInfo.SetNode(node)
+	return nodeInfo, csiNode
+}

@@ -132,7 +132,6 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 				"GeneralPredicates",
 				"MatchInterPodAffinity",
 				"MaxAzureDiskVolumeCount",
-				"MaxCSIVolumeCountPred",
 				"MaxEBSVolumeCount",
 				"MaxGCEPDVolumeCount",
 			),

@@ -147,6 +146,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 			"FilterPlugin": {
 				{Name: "VolumeRestrictions"},
 				{Name: "TaintToleration"},
+				{Name: "NodeVolumeLimits"},
 				{Name: "VolumeBinding"},
 				{Name: "VolumeZone"},
 			},

@@ -203,7 +203,6 @@ kind: Policy
 				"GeneralPredicates",
 				"MatchInterPodAffinity",
 				"MaxAzureDiskVolumeCount",
-				"MaxCSIVolumeCountPred",
 				"MaxEBSVolumeCount",
 				"MaxGCEPDVolumeCount",
 			),

@@ -218,6 +217,7 @@ kind: Policy
 			"FilterPlugin": {
 				{Name: "VolumeRestrictions"},
 				{Name: "TaintToleration"},
+				{Name: "NodeVolumeLimits"},
 				{Name: "VolumeBinding"},
 				{Name: "VolumeZone"},
 			},