Break nodevolumelimits Filter plugins' dependency on the predicates package

Haosdent Huang 2020-01-06 02:30:40 +08:00
parent 862c8bf818
commit aae9a7f704
5 changed files with 21 additions and 21 deletions
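The dependency being broken here is narrow: the nodevolumelimits Filter plugins used the predicates package only for the reason string carried by ErrMaxVolumeCountExceeded. The commit inlines that string as a package-local constant and drops the import. A minimal sketch of the before/after call shape (filterVerdict is a hypothetical stand-in for the Filter methods changed below):

```go
package nodevolumelimits

import (
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// ErrReasonMaxVolumeCountExceeded carries the same text that
// predicates.ErrMaxVolumeCountExceeded.GetReason() used to return.
const ErrReasonMaxVolumeCountExceeded = "node(s) exceed max volume count"

// filterVerdict (hypothetical) shows the new call shape; before this commit
// the reason argument was predicates.ErrMaxVolumeCountExceeded.GetReason().
func filterVerdict() *framework.Status {
	return framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded)
}
```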

View File

@@ -55,8 +55,6 @@ var (
 	ErrNodeLabelPresenceViolated = NewPredicateFailureError("CheckNodeLabelPresence", "node(s) didn't have the requested labels")
 	// ErrServiceAffinityViolated is used for CheckServiceAffinity predicate error.
 	ErrServiceAffinityViolated = NewPredicateFailureError("CheckServiceAffinity", "node(s) didn't match service affinity")
-	// ErrMaxVolumeCountExceeded is used for MaxVolumeCount predicate error.
-	ErrMaxVolumeCountExceeded = NewPredicateFailureError("MaxVolumeCount", "node(s) exceed max volume count")
 	// ErrNodeUnderMemoryPressure is used for NodeUnderMemoryPressure predicate error.
 	ErrNodeUnderMemoryPressure = NewPredicateFailureError("NodeUnderMemoryPressure", "node(s) had memory pressure")
 	// ErrNodeUnderDiskPressure is used for NodeUnderDiskPressure predicate error.
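For context, the value removed above was a PredicateFailureError whose GetReason() just returns the description string, which is why a plain string constant is a drop-in replacement at the call sites. A sketch of that type, inferred from the NewPredicateFailureError calls in this hunk (field names are assumptions):

```go
package predicates

// PredicateFailureError pairs a predicate name with a human-readable reason.
type PredicateFailureError struct {
	PredicateName string // e.g. "MaxVolumeCount"
	PredicateDesc string // e.g. "node(s) exceed max volume count"
}

// NewPredicateFailureError builds a failure error for the named predicate.
func NewPredicateFailureError(predicateName, predicateDesc string) *PredicateFailureError {
	return &PredicateFailureError{PredicateName: predicateName, PredicateDesc: predicateDesc}
}

// GetReason returns only the description, so callers that need just the
// message lose nothing by switching to a string constant.
func (e *PredicateFailureError) GetReason() string {
	return e.PredicateDesc
}
```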

View File

@@ -28,7 +28,6 @@ import (
 	storagelisters "k8s.io/client-go/listers/storage/v1"
 	csitrans "k8s.io/csi-translation-lib"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@@ -130,7 +129,7 @@ func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v
 		if ok {
 			currentVolumeCount := attachedVolumeCount[volumeLimitKey]
 			if currentVolumeCount+count > int(maxVolumeLimit) {
-				return framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason())
+				return framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded)
 			}
 		}
 	}
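The substitution is behavior-preserving: Filter returns a status with the same code and the same reason text either way. A small illustrative check (demoEquivalence is not from the commit; it only makes the equivalence explicit):

```go
package nodevolumelimits

import (
	"fmt"
	"reflect"

	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func demoEquivalence() {
	// The reason string the old code fetched via
	// predicates.ErrMaxVolumeCountExceeded.GetReason().
	before := framework.NewStatus(framework.Unschedulable, "node(s) exceed max volume count")
	// The new package-local constant.
	after := framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded)
	fmt.Println(reflect.DeepEqual(before, after)) // true
}
```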

View File

@@ -267,7 +267,7 @@ func TestCSILimits(t *testing.T) {
 		driverNames:  []string{ebsCSIDriverName},
 		test:         "doesn't when node volume limit <= pods CSI volume",
 		limitSource:  "node",
-		wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	{
 		newPod:       csiEBSOneVolPod,
@@ -287,7 +287,7 @@ func TestCSILimits(t *testing.T) {
 		driverNames:  []string{ebsCSIDriverName},
 		test:         "count pending PVCs towards volume limit <= pods CSI volume",
 		limitSource:  "node",
-		wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	// two same pending PVCs should be counted as 1
 	{
@@ -308,7 +308,7 @@ func TestCSILimits(t *testing.T) {
 		driverNames:  []string{ebsCSIDriverName},
 		test:         "should count PVCs with invalid PV name but valid SC",
 		limitSource:  "node",
-		wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	// don't count a volume which has storageclass missing
 	{
@@ -329,7 +329,7 @@ func TestCSILimits(t *testing.T) {
 		driverNames:  []string{ebsCSIDriverName, gceCSIDriverName},
 		test:         "count pvcs with the same type towards volume limit",
 		limitSource:  "node",
-		wantStatus:   framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	{
 		newPod:       gceTwoVolPod,
@@ -350,7 +350,7 @@ func TestCSILimits(t *testing.T) {
 		migrationEnabled: true,
 		limitSource:      "csinode",
 		test:             "should count in-tree volumes if migration is enabled",
-		wantStatus:       framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	{
 		newPod:           pendingVolumePod,
@@ -361,7 +361,7 @@ func TestCSILimits(t *testing.T) {
 		migrationEnabled: true,
 		limitSource:      "csinode",
 		test:             "should count unbound in-tree volumes if migration is enabled",
-		wantStatus:       framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	{
 		newPod:           inTreeOneVolPod,
@@ -413,7 +413,7 @@ func TestCSILimits(t *testing.T) {
 		migrationEnabled: true,
 		limitSource:      "csinode",
 		test:             "should count in-tree and csi volumes if migration is enabled (when scheduling in-tree volumes)",
-		wantStatus:       framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	{
 		newPod:           csiEBSOneVolPod,
@@ -424,7 +424,7 @@ func TestCSILimits(t *testing.T) {
 		migrationEnabled: true,
 		limitSource:      "csinode",
 		test:             "should count in-tree and csi volumes if migration is enabled (when scheduling csi volumes)",
-		wantStatus:       framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	{
 		newPod:           csiEBSOneVolPod,
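Every csi_test.go change above is the same one-line substitution in a test table's wantStatus field; the assertion compares the returned status structurally against the expected one. A hedged sketch of that comparison (checkFilter is a hypothetical helper, not quoted from the test file):

```go
package nodevolumelimits

import (
	"reflect"
	"testing"

	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// checkFilter mirrors how the tables above are asserted: the *framework.Status
// returned by Filter is compared structurally against the expected status.
func checkFilter(t *testing.T, name string, got, want *framework.Status) {
	t.Helper()
	if !reflect.DeepEqual(got, want) {
		t.Errorf("%s: status mismatch: got %v, want %v", name, got, want)
	}
}
```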

View File

@@ -58,6 +58,9 @@ const (
 	azureDiskVolumeFilterType = "AzureDisk"
 	// cinderVolumeFilterType defines the filter name for cinderVolumeFilter.
 	cinderVolumeFilterType = "Cinder"
+
+	// ErrReasonMaxVolumeCountExceeded is used for MaxVolumeCount predicate error.
+	ErrReasonMaxVolumeCountExceeded = "node(s) exceed max volume count"
 )
 
 // AzureDiskName is the name of the plugin used in the plugin registry and configurations.
@@ -254,7 +257,7 @@ func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod
 	if numExistingVolumes+numNewVolumes > maxAttachLimit {
 		// violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
-		return framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason())
+		return framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded)
 	}
 	if nodeInfo != nil && nodeInfo.TransientInfo != nil && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) {
 		nodeInfo.TransientInfo.TransientLock.Lock()

View File

@@ -421,7 +421,7 @@ func TestCinderLimits(t *testing.T) {
 		filterName: cinderVolumeFilterType,
 		maxVols:    2,
 		test:       "not fit when node capacity < new pod's Cinder volumes",
-		wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 }
@@ -661,7 +661,7 @@ func TestEBSLimits(t *testing.T) {
 		driverName: csilibplugins.AWSEBSInTreePluginName,
 		maxVols:    2,
 		test:       "doesn't fit when node capacity < new pod's EBS volumes",
-		wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	{
 		newPod:     splitVolsPod,
@@ -702,7 +702,7 @@ func TestEBSLimits(t *testing.T) {
 		driverName: csilibplugins.AWSEBSInTreePluginName,
 		maxVols:    3,
 		test:       "existing pods' counts considers PVCs backed by EBS volumes",
-		wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	{
 		newPod:     twoVolPod,
@@ -727,7 +727,7 @@ func TestEBSLimits(t *testing.T) {
 		driverName: csilibplugins.AWSEBSInTreePluginName,
 		maxVols:    1,
 		test:       "missing PVC is not counted towards the PV limit",
-		wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	{
 		newPod:     onePVCPod(ebsVolumeFilterType),
@@ -769,7 +769,7 @@ func TestEBSLimits(t *testing.T) {
 		driverName: csilibplugins.AWSEBSInTreePluginName,
 		maxVols:    2,
 		test:       "pod with missing PV is counted towards the PV limit",
-		wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	{
 		newPod:     onePVCPod(ebsVolumeFilterType),
@@ -794,7 +794,7 @@ func TestEBSLimits(t *testing.T) {
 		driverName: csilibplugins.AWSEBSInTreePluginName,
 		maxVols:    2,
 		test:       "two pods missing different PVs are counted towards the PV limit twice",
-		wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
	},
 	{
 		newPod:     onePVCPod(ebsVolumeFilterType),
@@ -803,7 +803,7 @@ func TestEBSLimits(t *testing.T) {
 		driverName: csilibplugins.AWSEBSInTreePluginName,
 		maxVols:    2,
 		test:       "pod with unbound PVC is counted towards the PV limit",
-		wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 	{
 		newPod:     onePVCPod(ebsVolumeFilterType),
@@ -828,7 +828,7 @@ func TestEBSLimits(t *testing.T) {
 		driverName: csilibplugins.AWSEBSInTreePluginName,
 		maxVols:    2,
 		test:       "two different unbound PVCs are counted towards the PV limit as two volumes",
-		wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrMaxVolumeCountExceeded.GetReason()),
+		wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
 	},
 }