From aae9a7f7045985efc04cd05773aec5ad44cfe84d Mon Sep 17 00:00:00 2001
From: Haosdent Huang
Date: Mon, 6 Jan 2020 02:30:40 +0800
Subject: [PATCH] Break nodevolumelimits Filter plugins dependency on
 predicates package

---
 pkg/scheduler/algorithm/predicates/error.go        |  2 --
 .../framework/plugins/nodevolumelimits/csi.go      |  3 +--
 .../plugins/nodevolumelimits/csi_test.go           | 16 ++++++++--------
 .../plugins/nodevolumelimits/non_csi.go            |  5 ++++-
 .../plugins/nodevolumelimits/non_csi_test.go       | 16 ++++++++--------
 5 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/pkg/scheduler/algorithm/predicates/error.go b/pkg/scheduler/algorithm/predicates/error.go
index bf1518add33..44e130bc9c4 100644
--- a/pkg/scheduler/algorithm/predicates/error.go
+++ b/pkg/scheduler/algorithm/predicates/error.go
@@ -55,8 +55,6 @@ var (
 	ErrNodeLabelPresenceViolated = NewPredicateFailureError("CheckNodeLabelPresence", "node(s) didn't have the requested labels")
 	// ErrServiceAffinityViolated is used for CheckServiceAffinity predicate error.
 	ErrServiceAffinityViolated = NewPredicateFailureError("CheckServiceAffinity", "node(s) didn't match service affinity")
-	// ErrMaxVolumeCountExceeded is used for MaxVolumeCount predicate error.
-	ErrMaxVolumeCountExceeded = NewPredicateFailureError("MaxVolumeCount", "node(s) exceed max volume count")
 	// ErrNodeUnderMemoryPressure is used for NodeUnderMemoryPressure predicate error.
 	ErrNodeUnderMemoryPressure = NewPredicateFailureError("NodeUnderMemoryPressure", "node(s) had memory pressure")
 	// ErrNodeUnderDiskPressure is used for NodeUnderDiskPressure predicate error.
diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go b/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go
index 1a4e23b32a7..89a17d218cb 100644
--- a/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go
+++ b/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go
@@ -28,7 +28,6 @@ import (
 	storagelisters "k8s.io/client-go/listers/storage/v1"
 	csitrans "k8s.io/csi-translation-lib"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@@ -130,7 +129,7 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v
 		if ok {
 			currentVolumeCount := attachedVolumeCount[volumeLimitKey]
 			if currentVolumeCount+count > int(maxVolumeLimit) {
-				return framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason())
+				return framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded)
 			}
 		}
 	}
diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go
index 5aa4f18487a..9466eb5e250 100644
--- a/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go
+++ b/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go
@@ -267,7 +267,7 @@ func TestCSILimits(t *testing.T) {
 			driverNames:  []string{ebsCSIDriverName},
 			test:         "doesn't when node volume limit <= pods CSI volume",
 			limitSource:  "node",
-			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
 			newPod:       csiEBSOneVolPod,
@@ -287,7 +287,7 @@ func TestCSILimits(t *testing.T) {
 			driverNames:  []string{ebsCSIDriverName},
 			test:         "count pending PVCs towards volume limit <= pods CSI volume",
 			limitSource:  "node",
-			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		// two same pending PVCs should be counted as 1
 		{
@@ -308,7 +308,7 @@ func TestCSILimits(t *testing.T) {
 			driverNames:  []string{ebsCSIDriverName},
 			test:         "should count PVCs with invalid PV name but valid SC",
 			limitSource:  "node",
-			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		// don't count a volume which has storageclass missing
 		{
@@ -329,7 +329,7 @@ func TestCSILimits(t *testing.T) {
 			driverNames:  []string{ebsCSIDriverName, gceCSIDriverName},
 			test:         "count pvcs with the same type towards volume limit",
 			limitSource:  "node",
-			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
 			newPod:       gceTwoVolPod,
@@ -350,7 +350,7 @@ func TestCSILimits(t *testing.T) {
 			migrationEnabled: true,
 			limitSource:      "csinode",
 			test:             "should count in-tree volumes if migration is enabled",
-			wantStatus:       framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
 			newPod:           pendingVolumePod,
@@ -361,7 +361,7 @@ func TestCSILimits(t *testing.T) {
 			migrationEnabled: true,
 			limitSource:      "csinode",
 			test:             "should count unbound in-tree volumes if migration is enabled",
-			wantStatus:       framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
 			newPod:           inTreeOneVolPod,
@@ -413,7 +413,7 @@ func TestCSILimits(t *testing.T) {
 			migrationEnabled: true,
 			limitSource:      "csinode",
 			test:             "should count in-tree and csi volumes if migration is enabled (when scheduling in-tree volumes)",
-			wantStatus:       framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
 			newPod:           csiEBSOneVolPod,
@@ -424,7 +424,7 @@ func TestCSILimits(t *testing.T) {
 			migrationEnabled: true,
 			limitSource:      "csinode",
 			test:             "should count in-tree and csi volumes if migration is enabled (when scheduling csi volumes)",
-			wantStatus:       framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
 			newPod:           csiEBSOneVolPod,
diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go
index c25fc9945ae..26809a77428 100644
--- a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go
+++ b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go
@@ -58,6 +58,9 @@ const (
 	azureDiskVolumeFilterType = "AzureDisk"
 	// cinderVolumeFilterType defines the filter name for cinderVolumeFilter.
 	cinderVolumeFilterType = "Cinder"
+
+	// ErrReasonMaxVolumeCountExceeded is used for MaxVolumeCount predicate error.
+	ErrReasonMaxVolumeCountExceeded = "node(s) exceed max volume count"
 )
 
 // AzureDiskName is the name of the plugin used in the plugin registry and configurations.
@@ -254,7 +257,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod
 
 	if numExistingVolumes+numNewVolumes > maxAttachLimit {
 		// violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
-		return framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason())
+		return framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded)
 	}
 	if nodeInfo != nil && nodeInfo.TransientInfo != nil && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) {
 		nodeInfo.TransientInfo.TransientLock.Lock()
diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go
index b8d48a06671..ba3bea6ed23 100644
--- a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go
+++ b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go
@@ -421,7 +421,7 @@ func TestCinderLimits(t *testing.T) {
 			filterName: cinderVolumeFilterType,
 			maxVols:    2,
 			test:       "not fit when node capacity < new pod's Cinder volumes",
-			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 	}
 
@@ -661,7 +661,7 @@ func TestEBSLimits(t *testing.T) {
 			driverName:   csilibplugins.AWSEBSInTreePluginName,
 			maxVols:      2,
 			test:         "doesn't fit when node capacity < new pod's EBS volumes",
-			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
 			newPod:       splitVolsPod,
@@ -702,7 +702,7 @@ func TestEBSLimits(t *testing.T) {
 			driverName:   csilibplugins.AWSEBSInTreePluginName,
 			maxVols:      3,
 			test:         "existing pods' counts considers PVCs backed by EBS volumes",
-			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
 			newPod:       twoVolPod,
@@ -727,7 +727,7 @@ func TestEBSLimits(t *testing.T) {
 			driverName:   csilibplugins.AWSEBSInTreePluginName,
 			maxVols:      1,
 			test:         "missing PVC is not counted towards the PV limit",
-			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
 			newPod:       onePVCPod(ebsVolumeFilterType),
@@ -769,7 +769,7 @@ func TestEBSLimits(t *testing.T) {
 			driverName:   csilibplugins.AWSEBSInTreePluginName,
 			maxVols:      2,
 			test:         "pod with missing PV is counted towards the PV limit",
-			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
 			newPod:       onePVCPod(ebsVolumeFilterType),
@@ -794,7 +794,7 @@ func TestEBSLimits(t *testing.T) {
 			driverName:   csilibplugins.AWSEBSInTreePluginName,
 			maxVols:      2,
 			test:         "two pods missing different PVs are counted towards the PV limit twice",
-			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
 			newPod:       onePVCPod(ebsVolumeFilterType),
@@ -803,7 +803,7 @@ func TestEBSLimits(t *testing.T) {
 			driverName:   csilibplugins.AWSEBSInTreePluginName,
 			maxVols:      2,
 			test:         "pod with unbound PVC is counted towards the PV limit",
-			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 		{
 			newPod:       onePVCPod(ebsVolumeFilterType),
@@ -828,7 +828,7 @@ func TestEBSLimits(t *testing.T) {
 			driverName:   csilibplugins.AWSEBSInTreePluginName,
 			maxVols:      2,
 			test:         "two different unbound PVCs are counted towards the PV limit as two volumes",
-			wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 		},
 	}
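
The pattern this patch establishes: each nodevolumelimits Filter plugin now owns its
failure-reason string (ErrReasonMaxVolumeCountExceeded) and passes it directly to
framework.NewStatus, instead of importing the legacy predicates package and calling
GetReason() on a shared PredicateFailureError. The standalone Go sketch below
illustrates that status-with-reason shape; Code, Status, and NewStatus here are
simplified stand-ins written for this illustration, not the real scheduler framework API.

package main

import "fmt"

// Code is a simplified stand-in for the scheduler framework's status code type.
type Code int

const (
	Success Code = iota
	Unschedulable
)

// Status pairs a code with human-readable reasons, mirroring the shape of framework.Status.
type Status struct {
	code    Code
	reasons []string
}

// NewStatus mirrors the shape of framework.NewStatus(code, reasons...).
func NewStatus(code Code, reasons ...string) *Status {
	return &Status{code: code, reasons: reasons}
}

// ErrReasonMaxVolumeCountExceeded is declared in the plugin package itself,
// so the plugin no longer needs the predicates package to report this failure.
const ErrReasonMaxVolumeCountExceeded = "node(s) exceed max volume count"

// filterVolumes is a toy version of the limit check in CSILimits.Filter: reject
// the node when attached volumes plus the pod's new volumes exceed the limit.
func filterVolumes(attached, requested, limit int) *Status {
	if attached+requested > limit {
		return NewStatus(Unschedulable, ErrReasonMaxVolumeCountExceeded)
	}
	return nil // a nil status means the node passes the filter
}

func main() {
	if st := filterVolumes(38, 3, 40); st != nil {
		fmt.Println(st.reasons[0]) // prints: node(s) exceed max volume count
	}
}

Because the reason is now a plain string constant exported from the nodevolumelimits
package, the tests in this patch can reference ErrReasonMaxVolumeCountExceeded
directly instead of constructing a PredicateFailureError just to read its reason.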