Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 22:17:14 +00:00)

Merge pull request #84148 from gongguan/filter_plugin

[migration phase 1] Add filter plugin for cloud provider storage predicate

Commit ffffd6b6f5
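Every plugin this PR adds follows the same adapter shape: a thin framework.FilterPlugin that wraps one of the existing cloud-provider volume-count predicates and translates its result into a framework.Status. A minimal sketch of the pattern, condensed from the new files in this diff (EBSLimits stands in for any of the four providers; all names are the ones the diff itself introduces):

	// Each plugin holds the legacy predicate it migrates.
	type EBSLimits struct {
		predicate predicates.FitPredicate
	}

	// Filter runs the wrapped predicate at the framework's filter
	// extension point and converts its (fit, reasons, err) result
	// into a framework.Status via the migration helper.
	func (pl *EBSLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
		// metadata is not needed
		_, reasons, err := pl.predicate(pod, nil, nodeInfo)
		return migration.PredicateResultToFrameworkStatus(reasons, err)
	}

The per-provider constructors (NewEBS, NewGCEPD, NewAzureDisk, NewCinder) differ only in the predicates.*VolumeFilterType they pass to NewMaxPDVolumeCountPredicate.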
@@ -194,9 +194,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		]
 	}`,
 			wantPredicates: sets.NewString(
-				"MaxEBSVolumeCount",
-				"MaxGCEPDVolumeCount",
-				"MaxAzureDiskVolumeCount",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -215,6 +212,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResourcesFit"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "EBSLimits"},
+					{Name: "GCEPDLimits"},
+					{Name: "AzureDiskLimits"},
 					{Name: "VolumeZone"},
 				},
 				"ScorePlugin": {
@@ -260,9 +260,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 	}`,
 			wantPredicates: sets.NewString(
 				"CheckNodeMemoryPressure",
-				"MaxEBSVolumeCount",
-				"MaxGCEPDVolumeCount",
-				"MaxAzureDiskVolumeCount",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -280,6 +277,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResourcesFit"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "EBSLimits"},
+					{Name: "GCEPDLimits"},
+					{Name: "AzureDiskLimits"},
 					{Name: "VolumeZone"},
 					{Name: "InterPodAffinity"},
 				},
@@ -331,9 +331,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPredicates: sets.NewString(
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
-				"MaxEBSVolumeCount",
-				"MaxGCEPDVolumeCount",
-				"MaxAzureDiskVolumeCount",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -351,6 +348,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResourcesFit"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "EBSLimits"},
+					{Name: "GCEPDLimits"},
+					{Name: "AzureDiskLimits"},
 					{Name: "VolumeZone"},
 					{Name: "InterPodAffinity"},
 				},
@@ -413,9 +413,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPredicates: sets.NewString(
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
-				"MaxEBSVolumeCount",
-				"MaxGCEPDVolumeCount",
-				"MaxAzureDiskVolumeCount",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -433,6 +430,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResourcesFit"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "EBSLimits"},
+					{Name: "GCEPDLimits"},
+					{Name: "AzureDiskLimits"},
 					{Name: "VolumeZone"},
 					{Name: "InterPodAffinity"},
 				},
@@ -508,9 +508,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
 				"CheckNodeCondition",
-				"MaxEBSVolumeCount",
-				"MaxGCEPDVolumeCount",
-				"MaxAzureDiskVolumeCount",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -528,6 +525,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResourcesFit"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "EBSLimits"},
+					{Name: "GCEPDLimits"},
+					{Name: "AzureDiskLimits"},
 					{Name: "VolumeZone"},
 					{Name: "InterPodAffinity"},
 				},
@@ -604,9 +604,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"CheckNodeMemoryPressure",
 				"CheckNodeDiskPressure",
 				"CheckNodeCondition",
-				"MaxEBSVolumeCount",
-				"MaxGCEPDVolumeCount",
-				"MaxAzureDiskVolumeCount",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -624,6 +621,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResourcesFit"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "EBSLimits"},
+					{Name: "GCEPDLimits"},
+					{Name: "AzureDiskLimits"},
 					{Name: "VolumeBinding"},
 					{Name: "VolumeZone"},
 					{Name: "InterPodAffinity"},
@@ -706,9 +706,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"CheckNodeDiskPressure",
 				"CheckNodePIDPressure",
 				"CheckNodeCondition",
-				"MaxEBSVolumeCount",
-				"MaxGCEPDVolumeCount",
-				"MaxAzureDiskVolumeCount",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -726,6 +723,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResourcesFit"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "EBSLimits"},
+					{Name: "GCEPDLimits"},
+					{Name: "AzureDiskLimits"},
 					{Name: "VolumeBinding"},
 					{Name: "VolumeZone"},
 					{Name: "InterPodAffinity"},
@@ -820,9 +820,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"CheckNodeDiskPressure",
 				"CheckNodePIDPressure",
 				"CheckNodeCondition",
-				"MaxEBSVolumeCount",
-				"MaxGCEPDVolumeCount",
-				"MaxAzureDiskVolumeCount",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -841,6 +838,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResourcesFit"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "EBSLimits"},
+					{Name: "GCEPDLimits"},
+					{Name: "AzureDiskLimits"},
 					{Name: "VolumeBinding"},
 					{Name: "VolumeZone"},
 					{Name: "InterPodAffinity"},
@@ -936,9 +936,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"CheckNodeDiskPressure",
 				"CheckNodePIDPressure",
 				"CheckNodeCondition",
-				"MaxEBSVolumeCount",
-				"MaxGCEPDVolumeCount",
-				"MaxAzureDiskVolumeCount",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -957,7 +954,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResourcesFit"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "EBSLimits"},
+					{Name: "GCEPDLimits"},
 					{Name: "NodeVolumeLimits"},
+					{Name: "AzureDiskLimits"},
 					{Name: "VolumeBinding"},
 					{Name: "VolumeZone"},
 					{Name: "InterPodAffinity"},
@@ -1052,10 +1052,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"CheckNodeDiskPressure",
 				"CheckNodePIDPressure",
 				"CheckNodeCondition",
-				"MaxEBSVolumeCount",
-				"MaxGCEPDVolumeCount",
-				"MaxAzureDiskVolumeCount",
-				"MaxCinderVolumeCount",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -1074,7 +1070,11 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResourcesFit"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "EBSLimits"},
+					{Name: "GCEPDLimits"},
 					{Name: "NodeVolumeLimits"},
+					{Name: "AzureDiskLimits"},
+					{Name: "CinderLimits"},
 					{Name: "VolumeBinding"},
 					{Name: "VolumeZone"},
 					{Name: "InterPodAffinity"},
@@ -1173,10 +1173,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"CheckNodeDiskPressure",
 				"CheckNodePIDPressure",
 				"CheckNodeCondition",
-				"MaxEBSVolumeCount",
-				"MaxGCEPDVolumeCount",
-				"MaxAzureDiskVolumeCount",
-				"MaxCinderVolumeCount",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -1195,7 +1191,11 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 					{Name: "NodeResourcesFit"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
+					{Name: "EBSLimits"},
+					{Name: "GCEPDLimits"},
 					{Name: "NodeVolumeLimits"},
+					{Name: "AzureDiskLimits"},
+					{Name: "CinderLimits"},
 					{Name: "VolumeBinding"},
 					{Name: "VolumeZone"},
 					{Name: "InterPodAffinity"},
@@ -1242,6 +1242,10 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		"VolumeRestrictions": "NoDiskConflict",
 		"VolumeZone":         "NoVolumeZoneConflict",
 		"NodeVolumeLimits":   "MaxCSIVolumeCountPred",
+		"EBSLimits":          "MaxEBSVolumeCount",
+		"GCEPDLimits":        "MaxGCEPDVolumeCount",
+		"AzureDiskLimits":    "MaxAzureDiskVolumeCount",
+		"CinderLimits":       "MaxCinderVolumeCount",
 		"InterPodAffinity":   "MatchInterPodAffinity",
 	}
 	scoreToPriorityMap := map[string]string{
@@ -68,9 +68,13 @@ func NewDefaultRegistry(args *RegistryArgs) framework.Registry {
 		volumebinding.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
 			return volumebinding.NewFromVolumeBinder(args.VolumeBinder), nil
 		},
-		volumerestrictions.Name: volumerestrictions.New,
-		volumezone.Name:         volumezone.New,
-		nodevolumelimits.Name:   nodevolumelimits.New,
+		volumerestrictions.Name:        volumerestrictions.New,
+		volumezone.Name:                volumezone.New,
+		nodevolumelimits.CSIName:       nodevolumelimits.NewCSI,
+		nodevolumelimits.EBSName:       nodevolumelimits.NewEBS,
+		nodevolumelimits.GCEPDName:     nodevolumelimits.NewGCEPD,
+		nodevolumelimits.AzureDiskName: nodevolumelimits.NewAzureDisk,
+		nodevolumelimits.CinderName:    nodevolumelimits.NewCinder,
 		interpodaffinity.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
 			return interpodaffinity.New(args.SchedulerCache, args.SchedulerCache), nil
 		},
@@ -158,7 +162,27 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry {
 		})
 	registry.RegisterPredicate(predicates.MaxCSIVolumeCountPred,
 		func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
-			plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.Name, nil)
+			plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.CSIName, nil)
 			return
 		})
+	registry.RegisterPredicate(predicates.MaxEBSVolumeCountPred,
+		func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
+			plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.EBSName, nil)
+			return
+		})
+	registry.RegisterPredicate(predicates.MaxGCEPDVolumeCountPred,
+		func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
+			plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.GCEPDName, nil)
+			return
+		})
+	registry.RegisterPredicate(predicates.MaxAzureDiskVolumeCountPred,
+		func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
+			plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.AzureDiskName, nil)
+			return
+		})
+	registry.RegisterPredicate(predicates.MaxCinderVolumeCountPred,
+		func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
+			plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.CinderName, nil)
+			return
+		})
 	registry.RegisterPredicate(predicates.MatchInterPodAffinityPred,
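With these producers registered, enabling the legacy MaxEBSVolumeCount predicate in a v1 Policy now yields a Plugins config whose filter list contains EBSLimits (and likewise for the other three providers) — the same mapping the compatibility test above asserts via filterToPredicateMap.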
@@ -2,7 +2,13 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

 go_library(
     name = "go_default_library",
-    srcs = ["node_volume_limits.go"],
+    srcs = [
+        "azure.go",
+        "cinder.go",
+        "csi.go",
+        "ebs.go",
+        "gce.go",
+    ],
     importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits",
     visibility = ["//visibility:public"],
     deps = [
@@ -17,7 +23,13 @@ go_library(

 go_test(
     name = "go_default_test",
-    srcs = ["node_volume_limits_test.go"],
+    srcs = [
+        "azure_test.go",
+        "cinder_test.go",
+        "csi_test.go",
+        "ebs_test.go",
+        "gce_test.go",
+    ],
     embed = [":go_default_library"],
     deps = [
         "//pkg/features:go_default_library",
pkg/scheduler/framework/plugins/nodevolumelimits/azure.go (new file, 71 lines)
@@ -0,0 +1,71 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// AzureDiskLimits is a plugin that checks node volume limits.
type AzureDiskLimits struct {
	predicate predicates.FitPredicate
}

var _ framework.FilterPlugin = &AzureDiskLimits{}

// AzureDiskName is the name of the plugin used in the plugin registry and configurations.
const AzureDiskName = "AzureDiskLimits"

// Name returns name of the plugin. It is used in logs, etc.
func (pl *AzureDiskLimits) Name() string {
	return AzureDiskName
}

// Filter invoked at the filter extension point.
func (pl *AzureDiskLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	// metadata is not needed
	_, reasons, err := pl.predicate(pod, nil, nodeInfo)
	return migration.PredicateResultToFrameworkStatus(reasons, err)
}

// NewAzureDisk initializes a new plugin and returns it.
func NewAzureDisk(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
	informerFactory := handle.SharedInformerFactory()
	csiNodeInfo := &predicates.CachedCSINodeInfo{
		CSINodeLister: informerFactory.Storage().V1beta1().CSINodes().Lister(),
	}
	pvInfo := &predicates.CachedPersistentVolumeInfo{
		PersistentVolumeLister: informerFactory.Core().V1().PersistentVolumes().Lister(),
	}
	pvcInfo := &predicates.CachedPersistentVolumeClaimInfo{
		PersistentVolumeClaimLister: informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
	}
	classInfo := &predicates.CachedStorageClassInfo{
		StorageClassLister: informerFactory.Storage().V1().StorageClasses().Lister(),
	}

	return &AzureDiskLimits{
		predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.AzureDiskVolumeFilterType, csiNodeInfo, classInfo, pvInfo, pvcInfo),
	}, nil
}
pkg/scheduler/framework/plugins/nodevolumelimits/azure_test.go (new file, 372 lines)
@@ -0,0 +1,372 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
	"context"
	"reflect"
	"testing"

	"k8s.io/api/core/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func TestAzureDiskLimits(t *testing.T) {
	oneVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
					},
				},
			},
		},
	}
	twoVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"},
					},
				},
			},
		},
	}
	splitVolsPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"},
					},
				},
			},
		},
	}
	nonApplicablePod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
			},
		},
	}
	deletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
			},
		},
	}
	twoDeletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherDeletedPVC",
						},
					},
				},
			},
		},
	}
	deletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// deletedPVPod2 is a different pod than deletedPVPod but using the same PVC
	deletedPVPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC
	anotherDeletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherPVCWithDeletedPV",
						},
					},
				},
			},
		},
	}
	emptyPod := &v1.Pod{
		Spec: v1.PodSpec{},
	}
	unboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}
	// Different pod than unboundPVCPod, but using the same unbound PVC
	unboundPVCPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}

	// pod with unbound PVC that's different to unboundPVC
	anotherUnboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherUnboundPVC",
						},
					},
				},
			},
		},
	}

	tests := []struct {
		newPod       *v1.Pod
		existingPods []*v1.Pod
		filterName   string
		driverName   string
		maxVols      int
		test         string
		wantStatus   *framework.Status
	}{
		{
			newPod:       oneVolPod,
			existingPods: []*v1.Pod{twoVolPod, oneVolPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      4,
			test:         "fits when node capacity >= new pod's AzureDisk volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{oneVolPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      2,
			test:         "fit when node capacity < new pod's AzureDisk volumes",
		},
		{
			newPod:       splitVolsPod,
			existingPods: []*v1.Pod{twoVolPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      3,
			test:         "new pod's count ignores non-AzureDisk volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      3,
			test:         "existing pods' counts ignore non-AzureDisk volumes",
		},
		{
			newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      3,
			test:         "new pod's count considers PVCs backed by AzureDisk volumes",
		},
		{
			newPod:       splitPVCPod(predicates.AzureDiskVolumeFilterType),
			existingPods: []*v1.Pod{splitVolsPod, oneVolPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      3,
			test:         "new pod's count ignores PVCs not backed by AzureDisk volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{oneVolPod, onePVCPod(predicates.AzureDiskVolumeFilterType)},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      3,
			test:         "existing pods' counts considers PVCs backed by AzureDisk volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(predicates.AzureDiskVolumeFilterType)},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      4,
			test:         "already-mounted AzureDisk volumes are always ok to allow",
		},
		{
			newPod:       splitVolsPod,
			existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(predicates.AzureDiskVolumeFilterType)},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      3,
			test:         "the same AzureDisk volumes are not counted multiple times",
		},
		{
			newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      2,
			test:         "pod with missing PVC is counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      3,
			test:         "pod with missing PVC is counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      3,
			test:         "pod with missing two PVCs is counted towards the PV limit twice",
		},
		{
			newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      2,
			test:         "pod with missing PV is counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      3,
			test:         "pod with missing PV is counted towards the PV limit",
		},
		{
			newPod:       deletedPVPod2,
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      2,
			test:         "two pods missing the same PV are counted towards the PV limit only once",
		},
		{
			newPod:       anotherDeletedPVPod,
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      2,
			test:         "two pods missing different PVs are counted towards the PV limit twice",
		},
		{
			newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      2,
			test:         "pod with unbound PVC is counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.AzureDiskVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      3,
			test:         "pod with unbound PVC is counted towards the PV limit",
		},
		{
			newPod:       unboundPVCPod2,
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      2,
			test:         "the same unbound PVC in multiple pods is counted towards the PV limit only once",
		},
		{
			newPod:       anotherUnboundPVCPod,
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.AzureDiskVolumeFilterType,
			maxVols:      2,
			test:         "two different unbound PVCs are counted towards the PV limit as two volumes",
		},
	}

	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()

	for _, test := range tests {
		t.Run(test.test, func(t *testing.T) {
			node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
			p := &AzureDiskLimits{
				predicate: predicates.NewMaxPDVolumeCountPredicate(test.filterName, getFakeCSINodeInfo(csiNode), getFakeCSIStorageClassInfo(test.filterName, test.driverName), getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName)),
			}
			gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}
}
pkg/scheduler/framework/plugins/nodevolumelimits/cinder.go (new file, 71 lines)
@@ -0,0 +1,71 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// CinderLimits is a plugin that checks node volume limits.
type CinderLimits struct {
	predicate predicates.FitPredicate
}

var _ framework.FilterPlugin = &CinderLimits{}

// CinderName is the name of the plugin used in the plugin registry and configurations.
const CinderName = "CinderLimits"

// Name returns name of the plugin. It is used in logs, etc.
func (pl *CinderLimits) Name() string {
	return CinderName
}

// Filter invoked at the filter extension point.
func (pl *CinderLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	// metadata is not needed
	_, reasons, err := pl.predicate(pod, nil, nodeInfo)
	return migration.PredicateResultToFrameworkStatus(reasons, err)
}

// NewCinder initializes a new plugin and returns it.
func NewCinder(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
	informerFactory := handle.SharedInformerFactory()
	csiNodeInfo := &predicates.CachedCSINodeInfo{
		CSINodeLister: informerFactory.Storage().V1beta1().CSINodes().Lister(),
	}
	pvInfo := &predicates.CachedPersistentVolumeInfo{
		PersistentVolumeLister: informerFactory.Core().V1().PersistentVolumes().Lister(),
	}
	pvcInfo := &predicates.CachedPersistentVolumeClaimInfo{
		PersistentVolumeClaimLister: informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
	}
	classInfo := &predicates.CachedStorageClassInfo{
		StorageClassLister: informerFactory.Storage().V1().StorageClasses().Lister(),
	}

	return &CinderLimits{
		predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.CinderVolumeFilterType, csiNodeInfo, classInfo, pvInfo, pvcInfo),
	}, nil
}
pkg/scheduler/framework/plugins/nodevolumelimits/cinder_test.go (new file, 101 lines)
@@ -0,0 +1,101 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
	"context"
	"reflect"
	"testing"

	"k8s.io/api/core/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func TestCinderLimits(t *testing.T) {
	twoVolCinderPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						Cinder: &v1.CinderVolumeSource{VolumeID: "tvp1"},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						Cinder: &v1.CinderVolumeSource{VolumeID: "tvp2"},
					},
				},
			},
		},
	}
	oneVolCinderPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						Cinder: &v1.CinderVolumeSource{VolumeID: "ovp"},
					},
				},
			},
		},
	}

	tests := []struct {
		newPod       *v1.Pod
		existingPods []*v1.Pod
		filterName   string
		driverName   string
		maxVols      int
		test         string
		wantStatus   *framework.Status
	}{
		{
			newPod:       oneVolCinderPod,
			existingPods: []*v1.Pod{twoVolCinderPod},
			filterName:   predicates.CinderVolumeFilterType,
			maxVols:      4,
			test:         "fits when node capacity >= new pod's Cinder volumes",
		},
		{
			newPod:       oneVolCinderPod,
			existingPods: []*v1.Pod{twoVolCinderPod},
			filterName:   predicates.CinderVolumeFilterType,
			maxVols:      2,
			test:         "not fit when node capacity < new pod's Cinder volumes",
			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
		},
	}

	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()

	for _, test := range tests {
		t.Run(test.test, func(t *testing.T) {
			node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
			p := &CinderLimits{
				predicate: predicates.NewMaxPDVolumeCountPredicate(test.filterName, getFakeCSINodeInfo(csiNode), getFakeCSIStorageClassInfo(test.filterName, test.driverName), getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName)),
			}
			gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}
}
@@ -27,30 +27,30 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )

-// NodeVolumeLimits is a plugin that checks node volume limits
-type NodeVolumeLimits struct {
+// CSILimits is a plugin that checks node volume limits.
+type CSILimits struct {
 	predicate predicates.FitPredicate
 }

-var _ framework.FilterPlugin = &NodeVolumeLimits{}
+var _ framework.FilterPlugin = &CSILimits{}

-// Name is the name of the plugin used in the plugin registry and configurations.
-const Name = "NodeVolumeLimits"
+// CSIName is the name of the plugin used in the plugin registry and configurations.
+const CSIName = "NodeVolumeLimits"

 // Name returns name of the plugin. It is used in logs, etc.
-func (pl *NodeVolumeLimits) Name() string {
-	return Name
+func (pl *CSILimits) Name() string {
+	return CSIName
 }

 // Filter invoked at the filter extension point.
-func (pl *NodeVolumeLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
 	// metadata is not needed
 	_, reasons, err := pl.predicate(pod, nil, nodeInfo)
 	return migration.PredicateResultToFrameworkStatus(reasons, err)
 }

-// New initializes a new plugin and returns it.
-func New(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
+// NewCSI initializes a new plugin and returns it.
+func NewCSI(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
 	informerFactory := handle.SharedInformerFactory()
 	csiNodeInfo := &predicates.CachedCSINodeInfo{
 		CSINodeLister: informerFactory.Storage().V1beta1().CSINodes().Lister(),
@@ -65,7 +65,7 @@ func New(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin
 		StorageClassLister: informerFactory.Storage().V1().StorageClasses().Lister(),
 	}

-	return &NodeVolumeLimits{
+	return &CSILimits{
 		predicate: predicates.NewCSIMaxVolumeLimitPredicate(csiNodeInfo, pvInfo, pvcInfo, classInfo),
 	}, nil
 }
@@ -47,7 +47,7 @@ const (
 	hostpathInTreePluginName = "kubernetes.io/hostpath"
 )

-func TestNodeVolumeLimits(t *testing.T) {
+func TestCSILimits(t *testing.T) {
 	runningPod := &v1.Pod{
 		Spec: v1.PodSpec{
 			Volumes: []v1.Volume{
@@ -444,7 +444,7 @@ func TestNodeVolumeLimits(t *testing.T) {
 				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationAWS, false)()
 			}

-			p := &NodeVolumeLimits{
+			p := &CSILimits{
 				predicate: predicates.NewCSIMaxVolumeLimitPredicate(getFakeCSINodeInfo(csiNode), getFakeCSIPVInfo(test.filterName, test.driverNames...), getFakeCSIPVCInfo(test.filterName, "csi-sc", test.driverNames...), getFakeCSIStorageClassInfo("csi-sc", test.driverNames[0])),
 			}
 			gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
pkg/scheduler/framework/plugins/nodevolumelimits/ebs.go (new file, 71 lines)
@@ -0,0 +1,71 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// EBSLimits is a plugin that checks node volume limits.
type EBSLimits struct {
	predicate predicates.FitPredicate
}

var _ framework.FilterPlugin = &EBSLimits{}

// EBSName is the name of the plugin used in the plugin registry and configurations.
const EBSName = "EBSLimits"

// Name returns name of the plugin. It is used in logs, etc.
func (pl *EBSLimits) Name() string {
	return EBSName
}

// Filter invoked at the filter extension point.
func (pl *EBSLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	// metadata is not needed
	_, reasons, err := pl.predicate(pod, nil, nodeInfo)
	return migration.PredicateResultToFrameworkStatus(reasons, err)
}

// NewEBS initializes a new plugin and returns it.
func NewEBS(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
	informerFactory := handle.SharedInformerFactory()
	csiNodeInfo := &predicates.CachedCSINodeInfo{
		CSINodeLister: informerFactory.Storage().V1beta1().CSINodes().Lister(),
	}
	pvInfo := &predicates.CachedPersistentVolumeInfo{
		PersistentVolumeLister: informerFactory.Core().V1().PersistentVolumes().Lister(),
	}
	pvcInfo := &predicates.CachedPersistentVolumeClaimInfo{
		PersistentVolumeClaimLister: informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
	}
	classInfo := &predicates.CachedStorageClassInfo{
		StorageClassLister: informerFactory.Storage().V1().StorageClasses().Lister(),
	}

	return &EBSLimits{
		predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilterType, csiNodeInfo, classInfo, pvInfo, pvcInfo),
	}, nil
}
565
pkg/scheduler/framework/plugins/nodevolumelimits/ebs_test.go
Normal file
565
pkg/scheduler/framework/plugins/nodevolumelimits/ebs_test.go
Normal file
@ -0,0 +1,565 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nodevolumelimits
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
csilibplugins "k8s.io/csi-translation-lib/plugins"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
func onePVCPod(filterName string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "some" + filterName + "Vol",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func splitPVCPod(filterName string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "someNon" + filterName + "Vol",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "some" + filterName + "Vol",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestEBSLimits(t *testing.T) {
|
||||
oneVolPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
twoVolPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"},
|
||||
},
|
||||
},
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
unboundPVCwithInvalidSCPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "unboundPVCwithInvalidSCPod",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
unboundPVCwithDefaultSCPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "unboundPVCwithDefaultSCPod",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
splitVolsPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{},
|
||||
},
|
||||
},
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
nonApplicablePod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
deletedPVCPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "deletedPVC",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
twoDeletedPVCPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "deletedPVC",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "anotherDeletedPVC",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
deletedPVPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvcWithDeletedPV",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// deletedPVPod2 is a different pod than deletedPVPod but using the same PVC
|
||||
deletedPVPod2 := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "pvcWithDeletedPV",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC
|
||||
anotherDeletedPVPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "anotherPVCWithDeletedPV",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
emptyPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{},
|
||||
}
|
||||
unboundPVCPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "unboundPVC",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// Different pod than unboundPVCPod, but using the same unbound PVC
|
||||
unboundPVCPod2 := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "unboundPVC",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// pod with unbound PVC that's different to unboundPVC
|
||||
anotherUnboundPVCPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "anotherUnboundPVC",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
newPod *v1.Pod
|
||||
existingPods []*v1.Pod
|
||||
filterName string
|
||||
driverName string
|
||||
maxVols int
|
||||
test string
|
||||
wantStatus *framework.Status
|
||||
}{
|
||||
{
|
||||
newPod: oneVolPod,
|
||||
existingPods: []*v1.Pod{twoVolPod, oneVolPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 4,
|
||||
test: "fits when node capacity >= new pod's EBS volumes",
|
||||
},
|
||||
{
|
||||
newPod: twoVolPod,
|
||||
existingPods: []*v1.Pod{oneVolPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 2,
|
||||
test: "doesn't fit when node capacity < new pod's EBS volumes",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
|
||||
},
|
||||
{
|
||||
newPod: splitVolsPod,
|
||||
existingPods: []*v1.Pod{twoVolPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 3,
|
||||
test: "new pod's count ignores non-EBS volumes",
|
||||
},
|
||||
{
|
||||
newPod: twoVolPod,
|
||||
existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 3,
|
||||
test: "existing pods' counts ignore non-EBS volumes",
|
||||
},
|
||||
{
|
||||
newPod: onePVCPod(predicates.EBSVolumeFilterType),
|
||||
existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 3,
|
||||
test: "new pod's count considers PVCs backed by EBS volumes",
|
||||
},
|
||||
{
|
||||
newPod: splitPVCPod(predicates.EBSVolumeFilterType),
|
||||
existingPods: []*v1.Pod{splitVolsPod, oneVolPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 3,
|
||||
test: "new pod's count ignores PVCs not backed by EBS volumes",
|
||||
},
|
||||
{
|
||||
newPod: twoVolPod,
|
||||
existingPods: []*v1.Pod{oneVolPod, onePVCPod(predicates.EBSVolumeFilterType)},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 3,
|
||||
test: "existing pods' counts considers PVCs backed by EBS volumes",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
|
||||
},
|
||||
{
|
||||
newPod: twoVolPod,
|
||||
existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(predicates.EBSVolumeFilterType)},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 4,
|
||||
test: "already-mounted EBS volumes are always ok to allow",
|
||||
},
|
||||
{
|
||||
newPod: splitVolsPod,
|
||||
existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(predicates.EBSVolumeFilterType)},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 3,
|
||||
test: "the same EBS volumes are not counted multiple times",
|
||||
},
|
||||
{
|
||||
newPod: onePVCPod(predicates.EBSVolumeFilterType),
|
||||
existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 1,
|
||||
test: "missing PVC is not counted towards the PV limit",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
|
||||
},
|
||||
{
|
||||
newPod: onePVCPod(predicates.EBSVolumeFilterType),
|
||||
existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 2,
|
||||
test: "missing PVC is not counted towards the PV limit",
|
||||
},
|
||||
{
|
||||
newPod: onePVCPod(predicates.EBSVolumeFilterType),
|
||||
existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 2,
|
||||
test: "two missing PVCs are not counted towards the PV limit twice",
|
||||
},
|
||||
{
|
||||
newPod: unboundPVCwithInvalidSCPod,
|
||||
existingPods: []*v1.Pod{oneVolPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 1,
|
||||
test: "unbound PVC with invalid SC is not counted towards the PV limit",
|
||||
},
|
||||
{
|
||||
newPod: unboundPVCwithDefaultSCPod,
|
||||
existingPods: []*v1.Pod{oneVolPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 1,
|
||||
test: "unbound PVC from different provisioner is not counted towards the PV limit",
|
||||
},
|
||||
|
||||
{
|
||||
newPod: onePVCPod(predicates.EBSVolumeFilterType),
|
||||
existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 2,
|
||||
test: "pod with missing PV is counted towards the PV limit",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
|
||||
},
|
||||
{
|
||||
newPod: onePVCPod(predicates.EBSVolumeFilterType),
|
||||
existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 3,
|
||||
test: "pod with missing PV is counted towards the PV limit",
|
||||
},
|
||||
{
|
||||
newPod: deletedPVPod2,
|
||||
existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 2,
|
||||
test: "two pods missing the same PV are counted towards the PV limit only once",
|
||||
},
|
||||
{
|
||||
newPod: anotherDeletedPVPod,
|
||||
existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 2,
|
||||
test: "two pods missing different PVs are counted towards the PV limit twice",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
|
||||
},
|
||||
{
|
||||
newPod: onePVCPod(predicates.EBSVolumeFilterType),
|
||||
existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 2,
|
||||
test: "pod with unbound PVC is counted towards the PV limit",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
|
||||
},
|
||||
{
|
||||
newPod: onePVCPod(predicates.EBSVolumeFilterType),
|
||||
existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 3,
|
||||
test: "pod with unbound PVC is counted towards the PV limit",
|
||||
},
|
||||
{
|
||||
newPod: unboundPVCPod2,
|
||||
existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 2,
|
||||
test: "the same unbound PVC in multiple pods is counted towards the PV limit only once",
|
||||
},
|
||||
{
|
||||
newPod: anotherUnboundPVCPod,
|
||||
existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
|
||||
filterName: predicates.EBSVolumeFilterType,
|
||||
driverName: csilibplugins.AWSEBSInTreePluginName,
|
||||
maxVols: 2,
|
||||
test: "two different unbound PVCs are counted towards the PV limit as two volumes",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
|
||||
},
|
||||
}
|
||||
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.test, func(t *testing.T) {
|
||||
node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
|
||||
p := &EBSLimits{
|
||||
predicate: predicates.NewMaxPDVolumeCountPredicate(test.filterName, getFakeCSINodeInfo(csiNode), getFakeCSIStorageClassInfo(test.filterName, test.driverName), getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName)),
|
||||
}
|
||||
gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
|
||||
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
|
||||
t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
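
// Editor's sketch (not part of this patch): how a caller outside the test
// harness consumes the same plugin. Assumes this file's imports plus
// "k8s.io/kubernetes/pkg/scheduler/nodeinfo"; ebsPodFits is hypothetical.
func ebsPodFits(pl framework.FilterPlugin, pod *v1.Pod, node *nodeinfo.NodeInfo) bool {
	// Filter returns nil (success) when the pod's EBS volumes fit under the
	// node's attach limit, and an Unschedulable status when they do not;
	// IsSuccess is nil-safe.
	return pl.Filter(context.Background(), nil, pod, node).IsSuccess()
}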

func getFakePVCInfo(filterName string) predicates.FakePersistentVolumeClaimInfo {
	return predicates.FakePersistentVolumeClaimInfo{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "some" + filterName + "Vol",
				StorageClassName: &filterName,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "someNon" + filterName + "Vol",
				StorageClassName: &filterName,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "pvcWithDeletedPV"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "pvcWithDeletedPV",
				StorageClassName: &filterName,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "anotherPVCWithDeletedPV"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "anotherPVCWithDeletedPV",
				StorageClassName: &filterName,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "unboundPVC"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "",
				StorageClassName: &filterName,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "anotherUnboundPVC"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "",
				StorageClassName: &filterName,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCwithDefaultSCPod"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "",
				StorageClassName: utilpointer.StringPtr("standard-sc"),
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "unboundPVCwithInvalidSCPod"},
			Spec: v1.PersistentVolumeClaimSpec{
				VolumeName:       "",
				StorageClassName: utilpointer.StringPtr("invalid-sc"),
			},
		},
	}
}

func getFakePVInfo(filterName string) predicates.FakePersistentVolumeInfo {
	return predicates.FakePersistentVolumeInfo{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
			Spec: v1.PersistentVolumeSpec{
				PersistentVolumeSource: v1.PersistentVolumeSource{
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: strings.ToLower(filterName) + "Vol"},
				},
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "someNon" + filterName + "Vol"},
			Spec: v1.PersistentVolumeSpec{
				PersistentVolumeSource: v1.PersistentVolumeSource{},
			},
		},
	}
}
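
The Fake*Info values above are slice-backed stand-ins for the informer-backed Cached*Info types that the production factory wires up, which is how these subtests run the predicate without an API server. A minimal sketch of that pattern, assuming a get-by-name lookup interface; fakePVLookup and its package are illustrative, not part of this patch:

package sketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// fakePVLookup mirrors the shape of predicates.FakePersistentVolumeInfo: a
// plain slice satisfying a get-by-name interface, so "deleted PV" cases can
// be modeled simply by leaving the PV out of the slice.
type fakePVLookup []v1.PersistentVolume

// GetPersistentVolumeInfo returns the PV with the given name, or an error
// when no such PV exists.
func (f fakePVLookup) GetPersistentVolumeInfo(name string) (*v1.PersistentVolume, error) {
	for i := range f {
		if f[i].Name == name {
			return &f[i], nil
		}
	}
	return nil, fmt.Errorf("unable to find persistent volume: %s", name)
}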
71
pkg/scheduler/framework/plugins/nodevolumelimits/gce.go
Normal file
@ -0,0 +1,71 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// GCEPDLimits is a plugin that checks node volume limits.
type GCEPDLimits struct {
	predicate predicates.FitPredicate
}

var _ framework.FilterPlugin = &GCEPDLimits{}

// GCEPDName is the name of the plugin used in the plugin registry and configurations.
const GCEPDName = "GCEPDLimits"

// Name returns name of the plugin. It is used in logs, etc.
func (pl *GCEPDLimits) Name() string {
	return GCEPDName
}

// Filter invoked at the filter extension point.
func (pl *GCEPDLimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	// metadata is not needed
	_, reasons, err := pl.predicate(pod, nil, nodeInfo)
	return migration.PredicateResultToFrameworkStatus(reasons, err)
}

// NewGCEPD initializes a new plugin and returns it.
func NewGCEPD(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
	informerFactory := handle.SharedInformerFactory()
	csiNodeInfo := &predicates.CachedCSINodeInfo{
		CSINodeLister: informerFactory.Storage().V1beta1().CSINodes().Lister(),
	}
	pvInfo := &predicates.CachedPersistentVolumeInfo{
		PersistentVolumeLister: informerFactory.Core().V1().PersistentVolumes().Lister(),
	}
	pvcInfo := &predicates.CachedPersistentVolumeClaimInfo{
		PersistentVolumeClaimLister: informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
	}
	classInfo := &predicates.CachedStorageClassInfo{
		StorageClassLister: informerFactory.Storage().V1().StorageClasses().Lister(),
	}

	return &GCEPDLimits{
		predicate: predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilterType, csiNodeInfo, classInfo, pvInfo, pvcInfo),
	}, nil
}
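
NewGCEPD already matches the framework's PluginFactory signature (a *runtime.Unknown plus a FrameworkHandle), so registering the plugin is a single map entry. A minimal sketch under that assumption; newRegistry is illustrative, and the actual registration lives elsewhere in this PR:

package sketch

import (
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// newRegistry shows the shape of plugin registration: the factory is keyed
// by the plugin's configured name, and the framework later calls it with
// the plugin args and a FrameworkHandle.
func newRegistry() framework.Registry {
	return framework.Registry{
		nodevolumelimits.GCEPDName: nodevolumelimits.NewGCEPD,
	}
}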
372
pkg/scheduler/framework/plugins/nodevolumelimits/gce_test.go
Normal file
@ -0,0 +1,372 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
	"context"
	"reflect"
	"testing"

	"k8s.io/api/core/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func TestGCEPDLimits(t *testing.T) {
	oneVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
					},
				},
			},
		},
	}
	twoVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"},
					},
				},
			},
		},
	}
	splitVolsPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"},
					},
				},
			},
		},
	}
	nonApplicablePod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{},
					},
				},
			},
		},
	}
	deletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
			},
		},
	}
	twoDeletedPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "deletedPVC",
						},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherDeletedPVC",
						},
					},
				},
			},
		},
	}
	deletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// deletedPVPod2 is a different pod than deletedPVPod but using the same PVC
	deletedPVPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "pvcWithDeletedPV",
						},
					},
				},
			},
		},
	}
	// anotherDeletedPVPod is a different pod than deletedPVPod and uses another PVC
	anotherDeletedPVPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherPVCWithDeletedPV",
						},
					},
				},
			},
		},
	}
	emptyPod := &v1.Pod{
		Spec: v1.PodSpec{},
	}
	unboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}
	// Different pod than unboundPVCPod, but using the same unbound PVC
	unboundPVCPod2 := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "unboundPVC",
						},
					},
				},
			},
		},
	}

	// pod with unbound PVC that's different to unboundPVC
	anotherUnboundPVCPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: "anotherUnboundPVC",
						},
					},
				},
			},
		},
	}

	tests := []struct {
		newPod       *v1.Pod
		existingPods []*v1.Pod
		filterName   string
		driverName   string
		maxVols      int
		test         string
		wantStatus   *framework.Status
	}{
		{
			newPod:       oneVolPod,
			existingPods: []*v1.Pod{twoVolPod, oneVolPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      4,
			test:         "fits when node capacity >= new pod's GCE volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{oneVolPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "fit when node capacity < new pod's GCE volumes",
		},
		{
			newPod:       splitVolsPod,
			existingPods: []*v1.Pod{twoVolPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "new pod's count ignores non-GCE volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "existing pods' counts ignore non-GCE volumes",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "new pod's count considers PVCs backed by GCE volumes",
		},
		{
			newPod:       splitPVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{splitVolsPod, oneVolPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "new pod's count ignores PVCs not backed by GCE volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{oneVolPod, onePVCPod(predicates.GCEPDVolumeFilterType)},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "existing pods' counts considers PVCs backed by GCE volumes",
		},
		{
			newPod:       twoVolPod,
			existingPods: []*v1.Pod{oneVolPod, twoVolPod, onePVCPod(predicates.GCEPDVolumeFilterType)},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      4,
			test:         "already-mounted EBS volumes are always ok to allow",
		},
		{
			newPod:       splitVolsPod,
			existingPods: []*v1.Pod{oneVolPod, oneVolPod, onePVCPod(predicates.GCEPDVolumeFilterType)},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "the same GCE volumes are not counted multiple times",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "pod with missing PVC is counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "pod with missing PVC is counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, twoDeletedPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "pod with missing two PVCs is counted towards the PV limit twice",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "pod with missing PV is counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "pod with missing PV is counted towards the PV limit",
		},
		{
			newPod:       deletedPVPod2,
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "two pods missing the same PV are counted towards the PV limit only once",
		},
		{
			newPod:       anotherDeletedPVPod,
			existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "two pods missing different PVs are counted towards the PV limit twice",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "pod with unbound PVC is counted towards the PV limit",
		},
		{
			newPod:       onePVCPod(predicates.GCEPDVolumeFilterType),
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      3,
			test:         "pod with unbound PVC is counted towards the PV limit",
		},
		{
			newPod:       unboundPVCPod2,
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "the same unbound PVC in multiple pods is counted towards the PV limit only once",
		},
		{
			newPod:       anotherUnboundPVCPod,
			existingPods: []*v1.Pod{oneVolPod, unboundPVCPod},
			filterName:   predicates.GCEPDVolumeFilterType,
			maxVols:      2,
			test:         "two different unbound PVCs are counted towards the PV limit as two volumes",
		},
	}

	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()

	for _, test := range tests {
		t.Run(test.test, func(t *testing.T) {
			node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
			p := &GCEPDLimits{
				predicate: predicates.NewMaxPDVolumeCountPredicate(test.filterName, getFakeCSINodeInfo(csiNode), getFakeCSIStorageClassInfo(test.filterName, test.driverName), getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName)),
			}
			gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}
}
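
Note that these fixtures attach AWSElasticBlockStore sources even though the filter under test is GCEPDVolumeFilterType, so the GCE filter counts none of them and no case above sets wantStatus. A sketch of why, assuming the filter matches on the volume source type; isGCEPD and its package are illustrative, not part of this patch:

package sketch

import v1 "k8s.io/api/core/v1"

// isGCEPD captures why the AWS-backed fixtures above never hit the limit:
// only volumes carrying a GCEPersistentDisk source count against
// GCEPDVolumeFilterType, and every fixture here uses an AWS EBS source.
func isGCEPD(vol v1.Volume) bool {
	return vol.VolumeSource.GCEPersistentDisk != nil
}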
@ -129,11 +129,6 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
				"kind" : "Policy",
				"apiVersion" : "v1"
			}`,
			expectedPredicates: sets.NewString(
				"MaxAzureDiskVolumeCount",
				"MaxEBSVolumeCount",
				"MaxGCEPDVolumeCount",
			),
			expectedPrioritizers: sets.NewString(
				"InterPodAffinityPriority",
				"SelectorSpreadPriority",
@ -147,7 +142,10 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
				{Name: "NodeAffinity"},
				{Name: "VolumeRestrictions"},
				{Name: "TaintToleration"},
				{Name: "EBSLimits"},
				{Name: "GCEPDLimits"},
				{Name: "NodeVolumeLimits"},
				{Name: "AzureDiskLimits"},
				{Name: "VolumeBinding"},
				{Name: "VolumeZone"},
				{Name: "InterPodAffinity"},
@ -209,11 +207,6 @@ priorities:
			policy: `apiVersion: v1
kind: Policy
`,
			expectedPredicates: sets.NewString(
				"MaxAzureDiskVolumeCount",
				"MaxEBSVolumeCount",
				"MaxGCEPDVolumeCount",
			),
			expectedPrioritizers: sets.NewString(
				"InterPodAffinityPriority",
				"SelectorSpreadPriority",
@ -227,7 +220,10 @@ kind: Policy
				{Name: "NodeAffinity"},
				{Name: "VolumeRestrictions"},
				{Name: "TaintToleration"},
				{Name: "EBSLimits"},
				{Name: "GCEPDLimits"},
				{Name: "NodeVolumeLimits"},
				{Name: "AzureDiskLimits"},
				{Name: "VolumeBinding"},
				{Name: "VolumeZone"},
				{Name: "InterPodAffinity"},