Merge pull request #115398 from tangwz/add_NodeVolumeLimits_PreFilter

feat(NodeVolumeLimits): return Skip in PreFilter
Kubernetes Prow Robot, 2023-04-27 01:44:14 -07:00 (committed by GitHub)
commit 87f3acf7f6
8 changed files with 571 additions and 133 deletions
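
The change itself is small: both NodeVolumeLimits implementations (the CSI plugin and the legacy non-CSI one) gain a PreFilter that returns a Skip status when the incoming pod has no volume the plugin could ever count, so the per-node Filter pass is bypassed entirely for such pods. A minimal sketch of that contract, using the scheduler framework types that appear throughout this diff and a hypothetical podIsRelevant helper:

	// Sketch only: how a PreFilter plugin opts out of its own Filter phase.
	func (pl *examplePlugin) PreFilter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
		if !podIsRelevant(pod) { // hypothetical check: pod has no volumes this plugin limits
			// A Skip status tells the framework not to call this plugin's
			// Filter for any node in the current scheduling cycle.
			return nil, framework.NewStatus(framework.Skip)
		}
		return nil, nil // nil status: Filter runs as usual
	}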


@@ -38,6 +38,10 @@ var PluginsV1beta2 = &config.Plugins{
{Name: names.NodeResourcesFit},
{Name: names.NodePorts},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.PodTopologySpread},
{Name: names.InterPodAffinity},
{Name: names.VolumeBinding},
@@ -206,6 +210,10 @@ var ExpandedPluginsV1beta3 = &config.Plugins{
{Name: names.NodePorts},
{Name: names.NodeResourcesFit},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.VolumeBinding},
{Name: names.VolumeZone},
{Name: names.PodTopologySpread},
@@ -385,6 +393,10 @@ var ExpandedPluginsV1 = &config.Plugins{
{Name: names.NodePorts},
{Name: names.NodeResourcesFit},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.VolumeBinding},
{Name: names.VolumeZone},
{Name: names.PodTopologySpread},


@@ -39,6 +39,10 @@ func getDefaultPlugins() *v1beta2.Plugins {
{Name: names.NodeResourcesFit},
{Name: names.NodePorts},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.PodTopologySpread},
{Name: names.InterPodAffinity},
{Name: names.VolumeBinding},


@@ -51,6 +51,10 @@ func TestApplyFeatureGates(t *testing.T) {
{Name: names.NodeResourcesFit},
{Name: names.NodePorts},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.PodTopologySpread},
{Name: names.InterPodAffinity},
{Name: names.VolumeBinding},
@@ -141,6 +145,10 @@ func TestApplyFeatureGates(t *testing.T) {
{Name: names.NodeResourcesFit},
{Name: names.NodePorts},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.PodTopologySpread},
{Name: names.InterPodAffinity},
{Name: names.VolumeBinding},


@@ -341,6 +341,10 @@ func TestSchedulerDefaults(t *testing.T) {
{Name: names.NodeResourcesFit},
{Name: names.NodePorts},
{Name: names.VolumeRestrictions},
{Name: names.EBSLimits},
{Name: names.GCEPDLimits},
{Name: names.NodeVolumeLimits},
{Name: names.AzureDiskLimits},
{Name: names.PodTopologySpread},
{Name: names.InterPodAffinity},
{Name: names.VolumeBinding},


@@ -60,6 +60,7 @@ type CSILimits struct {
translator InTreeToCSITranslator
}
var _ framework.PreFilterPlugin = &CSILimits{}
var _ framework.FilterPlugin = &CSILimits{}
var _ framework.EnqueueExtensions = &CSILimits{}
@@ -80,6 +81,26 @@ func (pl *CSILimits) EventsToRegister() []framework.ClusterEvent {
}
}
// PreFilter invoked at the prefilter extension point
//
// If the pod doesn't have any of those types of volumes, we'll skip the Filter phase
func (pl *CSILimits) PreFilter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
volumes := pod.Spec.Volumes
for i := range volumes {
vol := &volumes[i]
if vol.PersistentVolumeClaim != nil || vol.Ephemeral != nil || pl.translator.IsInlineMigratable(vol) {
return nil, nil
}
}
return nil, framework.NewStatus(framework.Skip)
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *CSILimits) PreFilterExtensions() framework.PreFilterExtensions {
return nil
}
// Filter invoked at the filter extension point.
func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
// If the new pod doesn't have any volume attached to it, the predicate will always be true
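
The practical effect in csi.go: a pod whose volumes are all ConfigMaps or Secrets short-circuits at PreFilter, while PVC-backed, generic-ephemeral, or migratable inline volumes still reach Filter. A hedged illustration (the pods below are constructed for this note, not taken from the diff):

	// Illustration: how the new PreFilter classifies two pods.
	skippable := &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{
		{VolumeSource: v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{}}},
	}}}
	counted := &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{
		{VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "data"}}},
	}}}
	// PreFilter(skippable) -> framework.Skip status; Filter never runs for it.
	// PreFilter(counted)   -> nil status; Filter counts attachments as before.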


@@ -17,13 +17,13 @@ limitations under the License.
package nodevolumelimits
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
@@ -36,6 +36,7 @@ import (
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake"
st "k8s.io/kubernetes/pkg/scheduler/testing"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/utils/ktesting"
"k8s.io/utils/pointer"
)
@@ -189,19 +190,105 @@ func TestCSILimits(t *testing.T) {
},
},
}
onlyConfigmapAndSecretVolPod := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
},
{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
},
},
},
}
pvcPodWithConfigmapAndSecret := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
},
{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
},
{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "csi-ebs.csi.aws.com-0"},
},
},
},
},
}
ephemeralPodWithConfigmapAndSecret := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: ephemeralVolumePod.Namespace,
Name: ephemeralVolumePod.Name,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
},
{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
},
{
Name: "xyz",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{},
},
},
},
},
}
inlineMigratablePodWithConfigmapAndSecret := &v1.Pod{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
},
{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
},
{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: "aws-inline1",
},
},
},
},
},
}
tests := []struct {
newPod *v1.Pod
existingPods []*v1.Pod
extraClaims []v1.PersistentVolumeClaim
filterName string
maxVols int
driverNames []string
test string
migrationEnabled bool
ephemeralEnabled bool
limitSource string
wantStatus *framework.Status
wantPreFilterStatus *framework.Status
}{
{
newPod: csiEBSOneVolPod,
@@ -485,6 +572,42 @@
maxVols: 4,
test: "persistent okay when node volume limit > pods ephemeral CSI volume + persistent volume",
},
{
newPod: onlyConfigmapAndSecretVolPod,
filterName: "csi",
maxVols: 2,
driverNames: []string{ebsCSIDriverName},
test: "skip Filter when the pod only uses secrets and configmaps",
limitSource: "node",
wantPreFilterStatus: framework.NewStatus(framework.Skip),
},
{
newPod: pvcPodWithConfigmapAndSecret,
filterName: "csi",
maxVols: 2,
driverNames: []string{ebsCSIDriverName},
test: "don't skip Filter when the pod has pvcs",
limitSource: "node",
},
{
newPod: ephemeralPodWithConfigmapAndSecret,
filterName: "csi",
ephemeralEnabled: true,
driverNames: []string{ebsCSIDriverName},
test: "don't skip Filter when the pod has ephemeral volumes",
wantStatus: framework.AsStatus(errors.New(`looking up PVC test/abc-xyz: persistentvolumeclaim "abc-xyz" not found`)),
},
{
newPod: inlineMigratablePodWithConfigmapAndSecret,
existingPods: []*v1.Pod{inTreeTwoVolPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
migrationEnabled: true,
limitSource: "csinode",
test: "don't skip Filter when the pod has inline migratable volumes",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
},
}
// running attachable predicate tests with feature gate and limit present on nodes
@@ -503,9 +626,16 @@
randomVolumeIDPrefix: rand.String(32),
translator: csiTranslator,
}
_, ctx := ktesting.NewTestContext(t)
_, gotPreFilterStatus := p.PreFilter(ctx, nil, test.newPod)
if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" {
t.Errorf("PreFilter status does not match (-want, +got): %s", diff)
}
if gotPreFilterStatus.Code() != framework.Skip {
gotStatus := p.Filter(ctx, nil, test.newPod, node)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
}
})
}


@@ -119,6 +119,7 @@ type nonCSILimits struct {
randomVolumeIDPrefix string
}
var _ framework.PreFilterPlugin = &nonCSILimits{}
var _ framework.FilterPlugin = &nonCSILimits{}
var _ framework.EnqueueExtensions = &nonCSILimits{}
@@ -208,6 +209,27 @@ func (pl *nonCSILimits) EventsToRegister() []framework.ClusterEvent {
}
}
// PreFilter invoked at the prefilter extension point
//
// If the pod doesn't have any of those types of volumes, we'll skip the Filter phase
func (pl *nonCSILimits) PreFilter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
volumes := pod.Spec.Volumes
for i := range volumes {
vol := &volumes[i]
_, ok := pl.filter.FilterVolume(vol)
if ok || vol.PersistentVolumeClaim != nil || vol.Ephemeral != nil {
return nil, nil
}
}
return nil, framework.NewStatus(framework.Skip)
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *nonCSILimits) PreFilterExtensions() framework.PreFilterExtensions {
return nil
}
// Filter invoked at the filter extension point.
func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
// If a pod doesn't have any volume attached to it, the predicate will always be true.
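
The non-CSI variant leans on its per-driver VolumeFilter: judging by its use above, FilterVolume reports whether a volume is of the type this instance limits, so the EBS, GCE PD, and Azure Disk plugins each skip pods carrying none of their volumes. A small sketch under that assumption:

	// Sketch: classifying a volume with the EBS-flavored filter (shape assumed
	// from the call site above: FilterVolume(vol) returns (id string, ok bool)).
	vol := &v1.Volume{VolumeSource: v1.VolumeSource{
		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "vol-1"},
	}}
	if id, ok := pl.filter.FilterVolume(vol); ok {
		_ = id // ok: this volume counts toward the plugin's attach limit
	}
	// ConfigMap/Secret volumes yield ok == false; a pod with only such volumes
	// (and no PVC or generic ephemeral volume) gets the Skip status above.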


@@ -25,6 +25,7 @@ import (
"strings"
"testing"
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
csilibplugins "k8s.io/csi-translation-lib/plugins"
@@ -36,34 +37,29 @@
)
var (
nonApplicablePod = st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{},
},
}).Obj()
onlyConfigmapAndSecretPod = st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
}).Obj()
pvcPodWithConfigmapAndSecret = st.MakePod().PVC("pvcWithDeletedPV").Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
}).Obj()
deletedPVCPod = st.MakePod().PVC("deletedPVC").Obj()
twoDeletedPVCPod = st.MakePod().PVC("deletedPVC").PVC("anotherDeletedPVC").Obj()
@@ -114,14 +110,30 @@ func TestEphemeralLimits(t *testing.T) {
conflictingClaim := ephemeralClaim.DeepCopy()
conflictingClaim.OwnerReferences = nil
ephemeralPodWithConfigmapAndSecret := st.MakePod().Name("abc").Namespace("test").UID("12345").Volume(v1.Volume{
Name: "xyz",
VolumeSource: v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
}).Obj()
tests := []struct {
newPod *v1.Pod
existingPods []*v1.Pod
extraClaims []v1.PersistentVolumeClaim
ephemeralEnabled bool
maxVols int
test string
wantStatus *framework.Status
wantPreFilterStatus *framework.Status
}{
{
newPod: ephemeralVolumePod,
@@ -151,6 +163,21 @@
test: "volume unbound, exceeds limit",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
},
{
newPod: onlyConfigmapAndSecretPod,
ephemeralEnabled: true,
extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
maxVols: 1,
test: "skip Filter when the pod only uses secrets and configmaps",
wantPreFilterStatus: framework.NewStatus(framework.Skip),
},
{
newPod: ephemeralPodWithConfigmapAndSecret,
ephemeralEnabled: true,
extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
maxVols: 1,
test: "don't skip Filter when the pods has ephemeral volumes",
},
}
for _, test := range tests {
@@ -158,190 +185,302 @@
fts := feature.Features{}
node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), filterName)
p := newNonCSILimits(filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(filterName, driverName), getFakePVLister(filterName), append(getFakePVCLister(filterName), test.extraClaims...), fts).(framework.FilterPlugin)
_, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(context.Background(), nil, test.newPod)
if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" {
t.Errorf("PreFilter status does not match (-want, +got): %s", diff)
}
if gotPreFilterStatus.Code() != framework.Skip {
gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
}
})
}
}
func TestAzureDiskLimits(t *testing.T) {
oneAzureDiskPod := st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{},
},
}).Obj()
twoAzureDiskPod := st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{},
},
}).Obj()
splitAzureDiskPod := st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{},
},
}).Obj()
AzureDiskPodWithConfigmapAndSecret := st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
}).Obj()
tests := []struct {
newPod *v1.Pod
existingPods []*v1.Pod
filterName string
driverName string
maxVols int
test string
wantStatus *framework.Status
wantPreFilterStatus *framework.Status
}{
{
newPod: oneAzureDiskPod,
existingPods: []*v1.Pod{twoAzureDiskPod, oneAzureDiskPod},
filterName: azureDiskVolumeFilterType,
maxVols: 4,
test: "fits when node capacity >= new pod's AzureDisk volumes",
},
{
newPod: twoAzureDiskPod,
existingPods: []*v1.Pod{oneAzureDiskPod},
filterName: azureDiskVolumeFilterType,
maxVols: 2,
test: "fit when node capacity < new pod's AzureDisk volumes",
},
{
newPod: splitAzureDiskPod,
existingPods: []*v1.Pod{twoAzureDiskPod},
filterName: azureDiskVolumeFilterType,
maxVols: 3,
test: "new pod's count ignores non-AzureDisk volumes",
},
{
newPod: twoAzureDiskPod,
existingPods: []*v1.Pod{splitAzureDiskPod, nonApplicablePod, emptyPod},
filterName: azureDiskVolumeFilterType,
maxVols: 3,
test: "existing pods' counts ignore non-AzureDisk volumes",
},
{
newPod: onePVCPod(azureDiskVolumeFilterType),
existingPods: []*v1.Pod{splitAzureDiskPod, nonApplicablePod, emptyPod},
filterName: azureDiskVolumeFilterType,
maxVols: 3,
test: "new pod's count considers PVCs backed by AzureDisk volumes",
},
{
newPod: splitPVCPod(azureDiskVolumeFilterType),
existingPods: []*v1.Pod{splitAzureDiskPod, oneAzureDiskPod},
filterName: azureDiskVolumeFilterType,
maxVols: 3,
test: "new pod's count ignores PVCs not backed by AzureDisk volumes",
},
{
newPod: twoAzureDiskPod,
existingPods: []*v1.Pod{oneAzureDiskPod, onePVCPod(azureDiskVolumeFilterType)},
filterName: azureDiskVolumeFilterType,
maxVols: 3,
test: "existing pods' counts considers PVCs backed by AzureDisk volumes",
},
{
newPod: twoAzureDiskPod,
existingPods: []*v1.Pod{oneAzureDiskPod, twoAzureDiskPod, onePVCPod(azureDiskVolumeFilterType)},
filterName: azureDiskVolumeFilterType,
maxVols: 4,
test: "already-mounted AzureDisk volumes are always ok to allow",
},
{
newPod: splitAzureDiskPod,
existingPods: []*v1.Pod{oneAzureDiskPod, oneAzureDiskPod, onePVCPod(azureDiskVolumeFilterType)},
filterName: azureDiskVolumeFilterType,
maxVols: 3,
test: "the same AzureDisk volumes are not counted multiple times",
},
{
newPod: onePVCPod(azureDiskVolumeFilterType),
existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVCPod},
filterName: azureDiskVolumeFilterType,
maxVols: 2,
test: "pod with missing PVC is counted towards the PV limit",
},
{
newPod: onePVCPod(azureDiskVolumeFilterType),
existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVCPod},
filterName: azureDiskVolumeFilterType,
maxVols: 3,
test: "pod with missing PVC is counted towards the PV limit",
},
{
newPod: onePVCPod(azureDiskVolumeFilterType),
existingPods: []*v1.Pod{oneAzureDiskPod, twoDeletedPVCPod},
filterName: azureDiskVolumeFilterType,
maxVols: 3,
test: "pod with missing two PVCs is counted towards the PV limit twice",
},
{
newPod: onePVCPod(azureDiskVolumeFilterType),
existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVPod},
filterName: azureDiskVolumeFilterType,
maxVols: 2,
test: "pod with missing PV is counted towards the PV limit",
},
{
newPod: onePVCPod(azureDiskVolumeFilterType),
existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVPod},
filterName: azureDiskVolumeFilterType,
maxVols: 3,
test: "pod with missing PV is counted towards the PV limit",
},
{
newPod: deletedPVPod2,
existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVPod},
filterName: azureDiskVolumeFilterType,
maxVols: 2,
test: "two pods missing the same PV are counted towards the PV limit only once",
},
{
newPod: anotherDeletedPVPod,
existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVPod},
filterName: azureDiskVolumeFilterType,
maxVols: 2,
test: "two pods missing different PVs are counted towards the PV limit twice",
},
{
newPod: onePVCPod(azureDiskVolumeFilterType),
existingPods: []*v1.Pod{oneAzureDiskPod, unboundPVCPod},
filterName: azureDiskVolumeFilterType,
maxVols: 2,
test: "pod with unbound PVC is counted towards the PV limit",
},
{
newPod: onePVCPod(azureDiskVolumeFilterType),
existingPods: []*v1.Pod{oneAzureDiskPod, unboundPVCPod},
filterName: azureDiskVolumeFilterType,
maxVols: 3,
test: "pod with unbound PVC is counted towards the PV limit",
},
{
newPod: unboundPVCPod2,
existingPods: []*v1.Pod{oneAzureDiskPod, unboundPVCPod},
filterName: azureDiskVolumeFilterType,
maxVols: 2,
test: "the same unbound PVC in multiple pods is counted towards the PV limit only once",
},
{
newPod: anotherUnboundPVCPod,
existingPods: []*v1.Pod{oneAzureDiskPod, unboundPVCPod},
filterName: azureDiskVolumeFilterType,
maxVols: 2,
test: "two different unbound PVCs are counted towards the PV limit as two volumes",
},
{
newPod: onlyConfigmapAndSecretPod,
existingPods: []*v1.Pod{twoAzureDiskPod, oneAzureDiskPod},
filterName: azureDiskVolumeFilterType,
maxVols: 4,
test: "skip Filter when the pod only uses secrets and configmaps",
wantPreFilterStatus: framework.NewStatus(framework.Skip),
},
{
newPod: pvcPodWithConfigmapAndSecret,
existingPods: []*v1.Pod{oneAzureDiskPod, deletedPVPod},
filterName: azureDiskVolumeFilterType,
maxVols: 2,
test: "don't skip Filter when the pod has pvcs",
},
{
newPod: AzureDiskPodWithConfigmapAndSecret,
existingPods: []*v1.Pod{twoAzureDiskPod, oneAzureDiskPod},
filterName: azureDiskVolumeFilterType,
maxVols: 4,
test: "don't skip Filter when the pod has AzureDisk volumes",
},
}
for _, test := range tests {
t.Run(test.test, func(t *testing.T) {
node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
p := newNonCSILimits(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName), feature.Features{}).(framework.FilterPlugin)
_, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(context.Background(), nil, test.newPod)
if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" {
t.Errorf("PreFilter status does not match (-want, +got): %s", diff)
}
if gotPreFilterStatus.Code() != framework.Skip {
gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
}
})
}
}
func TestEBSLimits(t *testing.T) {
oneVolPod := st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
},
}).Obj()
twoVolPod := st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"},
},
}).Obj()
splitVolsPod := st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"},
},
}).Obj()
unboundPVCWithInvalidSCPod := st.MakePod().PVC("unboundPVCWithInvalidSCPod").Obj()
unboundPVCWithDefaultSCPod := st.MakePod().PVC("unboundPVCWithDefaultSCPod").Obj()
EBSPodWithConfigmapAndSecret := st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
}).Obj()
tests := []struct {
newPod *v1.Pod
existingPods []*v1.Pod
filterName string
driverName string
maxVols int
test string
wantStatus *framework.Status
wantPreFilterStatus *framework.Status
}{
{
newPod: oneVolPod,
@@ -526,179 +665,277 @@ func TestEBSLimits(t *testing.T) {
test: "two different unbound PVCs are counted towards the PV limit as two volumes",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
},
{
newPod: onlyConfigmapAndSecretPod,
existingPods: []*v1.Pod{twoVolPod, oneVolPod},
filterName: ebsVolumeFilterType,
driverName: csilibplugins.AWSEBSInTreePluginName,
maxVols: 4,
test: "skip Filter when the pod only uses secrets and configmaps",
wantPreFilterStatus: framework.NewStatus(framework.Skip),
},
{
newPod: pvcPodWithConfigmapAndSecret,
existingPods: []*v1.Pod{oneVolPod, deletedPVPod},
filterName: ebsVolumeFilterType,
driverName: csilibplugins.AWSEBSInTreePluginName,
maxVols: 2,
test: "don't skip Filter when the pod has pvcs",
},
{
newPod: EBSPodWithConfigmapAndSecret,
existingPods: []*v1.Pod{twoVolPod, oneVolPod},
filterName: ebsVolumeFilterType,
driverName: csilibplugins.AWSEBSInTreePluginName,
maxVols: 4,
test: "don't skip Filter when the pod has EBS volumes",
},
}
for _, test := range tests {
t.Run(test.test, func(t *testing.T) {
node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
p := newNonCSILimits(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName), feature.Features{}).(framework.FilterPlugin)
_, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(context.Background(), nil, test.newPod)
if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" {
t.Errorf("PreFilter status does not match (-want, +got): %s", diff)
}
if gotPreFilterStatus.Code() != framework.Skip {
gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
}
})
}
}
func TestGCEPDLimits(t *testing.T) {
oneGCEPDPod := st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
},
}).Obj()
twoGCEPDPod := st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
},
}).Obj()
splitGCEPDPod := st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
},
}).Obj()
GCEPDPodWithConfigmapAndSecret := st.MakePod().Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{},
},
}).Volume(v1.Volume{
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{},
},
}).Obj()
tests := []struct {
newPod *v1.Pod
existingPods []*v1.Pod
filterName string
driverName string
maxVols int
test string
wantStatus *framework.Status
wantPreFilterStatus *framework.Status
}{
{
newPod: oneGCEPDPod,
existingPods: []*v1.Pod{twoGCEPDPod, oneGCEPDPod},
filterName: gcePDVolumeFilterType,
maxVols: 4,
test: "fits when node capacity >= new pod's GCE volumes",
},
{
newPod: twoGCEPDPod,
existingPods: []*v1.Pod{oneGCEPDPod},
filterName: gcePDVolumeFilterType,
maxVols: 2,
test: "fit when node capacity < new pod's GCE volumes",
},
{
newPod: splitGCEPDPod,
existingPods: []*v1.Pod{twoGCEPDPod},
filterName: gcePDVolumeFilterType,
maxVols: 3,
test: "new pod's count ignores non-GCE volumes",
},
{
newPod: twoGCEPDPod,
existingPods: []*v1.Pod{splitGCEPDPod, nonApplicablePod, emptyPod},
filterName: gcePDVolumeFilterType,
maxVols: 3,
test: "existing pods' counts ignore non-GCE volumes",
},
{
newPod: onePVCPod(gcePDVolumeFilterType),
existingPods: []*v1.Pod{splitGCEPDPod, nonApplicablePod, emptyPod},
filterName: gcePDVolumeFilterType,
maxVols: 3,
test: "new pod's count considers PVCs backed by GCE volumes",
},
{
newPod: splitPVCPod(gcePDVolumeFilterType),
existingPods: []*v1.Pod{splitGCEPDPod, oneGCEPDPod},
filterName: gcePDVolumeFilterType,
maxVols: 3,
test: "new pod's count ignores PVCs not backed by GCE volumes",
},
{
newPod: twoGCEPDPod,
existingPods: []*v1.Pod{oneGCEPDPod, onePVCPod(gcePDVolumeFilterType)},
filterName: gcePDVolumeFilterType,
maxVols: 3,
test: "existing pods' counts considers PVCs backed by GCE volumes",
},
{
newPod: twoGCEPDPod,
existingPods: []*v1.Pod{oneGCEPDPod, twoGCEPDPod, onePVCPod(gcePDVolumeFilterType)},
filterName: gcePDVolumeFilterType,
maxVols: 4,
test: "already-mounted EBS volumes are always ok to allow",
},
{
newPod: splitGCEPDPod,
existingPods: []*v1.Pod{oneGCEPDPod, oneGCEPDPod, onePVCPod(gcePDVolumeFilterType)},
filterName: gcePDVolumeFilterType,
maxVols: 3,
test: "the same GCE volumes are not counted multiple times",
},
{
newPod: onePVCPod(gcePDVolumeFilterType),
existingPods: []*v1.Pod{oneGCEPDPod, deletedPVCPod},
filterName: gcePDVolumeFilterType,
maxVols: 2,
test: "pod with missing PVC is counted towards the PV limit",
},
{
newPod: onePVCPod(gcePDVolumeFilterType),
existingPods: []*v1.Pod{oneGCEPDPod, deletedPVCPod},
filterName: gcePDVolumeFilterType,
maxVols: 3,
test: "pod with missing PVC is counted towards the PV limit",
},
{
newPod: onePVCPod(gcePDVolumeFilterType),
existingPods: []*v1.Pod{oneGCEPDPod, twoDeletedPVCPod},
filterName: gcePDVolumeFilterType,
maxVols: 3,
test: "pod with missing two PVCs is counted towards the PV limit twice",
},
{
newPod: onePVCPod(gcePDVolumeFilterType),
existingPods: []*v1.Pod{oneGCEPDPod, deletedPVPod},
filterName: gcePDVolumeFilterType,
maxVols: 2,
test: "pod with missing PV is counted towards the PV limit",
},
{
newPod: onePVCPod(gcePDVolumeFilterType),
existingPods: []*v1.Pod{oneGCEPDPod, deletedPVPod},
filterName: gcePDVolumeFilterType,
maxVols: 3,
test: "pod with missing PV is counted towards the PV limit",
},
{
newPod: deletedPVPod2,
existingPods: []*v1.Pod{oneGCEPDPod, deletedPVPod},
filterName: gcePDVolumeFilterType,
maxVols: 2,
test: "two pods missing the same PV are counted towards the PV limit only once",
},
{
newPod: anotherDeletedPVPod,
existingPods: []*v1.Pod{oneGCEPDPod, deletedPVPod},
filterName: gcePDVolumeFilterType,
maxVols: 2,
test: "two pods missing different PVs are counted towards the PV limit twice",
},
{
newPod: onePVCPod(gcePDVolumeFilterType),
existingPods: []*v1.Pod{oneGCEPDPod, unboundPVCPod},
filterName: gcePDVolumeFilterType,
maxVols: 2,
test: "pod with unbound PVC is counted towards the PV limit",
},
{
newPod: onePVCPod(gcePDVolumeFilterType),
existingPods: []*v1.Pod{oneGCEPDPod, unboundPVCPod},
filterName: gcePDVolumeFilterType,
maxVols: 3,
test: "pod with unbound PVC is counted towards the PV limit",
},
{
newPod: unboundPVCPod2,
existingPods: []*v1.Pod{oneGCEPDPod, unboundPVCPod},
filterName: gcePDVolumeFilterType,
maxVols: 2,
test: "the same unbound PVC in multiple pods is counted towards the PV limit only once",
},
{
newPod: anotherUnboundPVCPod,
existingPods: []*v1.Pod{oneGCEPDPod, unboundPVCPod},
filterName: gcePDVolumeFilterType,
maxVols: 2,
test: "two different unbound PVCs are counted towards the PV limit as two volumes",
},
{
newPod: onlyConfigmapAndSecretPod,
existingPods: []*v1.Pod{twoGCEPDPod, oneGCEPDPod},
filterName: gcePDVolumeFilterType,
maxVols: 4,
test: "skip Filter when the pod only uses secrets and configmaps",
wantPreFilterStatus: framework.NewStatus(framework.Skip),
},
{
newPod: pvcPodWithConfigmapAndSecret,
existingPods: []*v1.Pod{oneGCEPDPod, deletedPVPod},
filterName: gcePDVolumeFilterType,
maxVols: 2,
test: "don't skip Filter when the pods has pvcs",
},
{
newPod: GCEPDPodWithConfigmapAndSecret,
existingPods: []*v1.Pod{twoGCEPDPod, oneGCEPDPod},
filterName: gcePDVolumeFilterType,
maxVols: 4,
test: "don't skip Filter when the pods has GCE volumes",
},
}
for _, test := range tests {
t.Run(test.test, func(t *testing.T) {
node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
p := newNonCSILimits(test.filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(test.filterName, test.driverName), getFakePVLister(test.filterName), getFakePVCLister(test.filterName), feature.Features{}).(framework.FilterPlugin)
_, gotPreFilterStatus := p.(*nonCSILimits).PreFilter(context.Background(), nil, test.newPod)
if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" {
t.Errorf("PreFilter status does not match (-want, +got): %s", diff)
}
if gotPreFilterStatus.Code() != framework.Skip {
gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
}
})
}