feat: remove CheckNodeMemoryPressure/DiskPressure/PIDPressure/Condition predicates

draveness 2019-10-21 18:14:39 +08:00
parent 3ff376923d
commit 09f333940e
8 changed files with 3 additions and 484 deletions

View File

@@ -19,7 +19,6 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/apis/core/v1/helper/qos:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",

View File

@@ -351,7 +351,6 @@ func GetPredicateMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulerno
predicateMetadata := &predicateMetadata{
pod: pod,
podBestEffort: isPodBestEffort(pod),
evenPodsSpreadMetadata: evenPodsSpreadMetadata,
podAffinityMetadata: podAffinityMetadata,
podFitsResourcesMetadata: getPodFitsResourcesMetedata(pod),

View File

@@ -40,7 +40,6 @@ import (
volumehelpers "k8s.io/cloud-provider/volume/helpers"
csilibplugins "k8s.io/csi-translation-lib/plugins"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
@@ -56,8 +55,6 @@ const (
MatchInterPodAffinityPred = "MatchInterPodAffinity"
// CheckVolumeBindingPred defines the name of predicate CheckVolumeBinding.
CheckVolumeBindingPred = "CheckVolumeBinding"
// CheckNodeConditionPred defines the name of predicate CheckNodeCondition.
CheckNodeConditionPred = "CheckNodeCondition"
// GeneralPred defines the name of predicate GeneralPredicates.
GeneralPred = "GeneralPredicates"
// HostNamePred defines the name of predicate HostName.
@@ -100,12 +97,6 @@ const (
MaxCSIVolumeCountPred = "MaxCSIVolumeCountPred"
// NoVolumeZoneConflictPred defines the name of predicate NoVolumeZoneConflict.
NoVolumeZoneConflictPred = "NoVolumeZoneConflict"
// CheckNodeMemoryPressurePred defines the name of predicate CheckNodeMemoryPressure.
CheckNodeMemoryPressurePred = "CheckNodeMemoryPressure"
// CheckNodeDiskPressurePred defines the name of predicate CheckNodeDiskPressure.
CheckNodeDiskPressurePred = "CheckNodeDiskPressure"
// CheckNodePIDPressurePred defines the name of predicate CheckNodePIDPressure.
CheckNodePIDPressurePred = "CheckNodePIDPressure"
// EvenPodsSpreadPred defines the name of predicate EvenPodsSpread.
EvenPodsSpreadPred = "EvenPodsSpread"
@@ -144,13 +135,13 @@ const (
// The order is based on the restrictiveness & complexity of predicates.
// Design doc: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/predicates-ordering.md
var (
predicatesOrdering = []string{CheckNodeConditionPred, CheckNodeUnschedulablePred,
predicatesOrdering = []string{CheckNodeUnschedulablePred,
GeneralPred, HostNamePred, PodFitsHostPortsPred,
MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred,
CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred,
MaxAzureDiskVolumeCountPred, MaxCinderVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
CheckNodeMemoryPressurePred, CheckNodePIDPressurePred, CheckNodeDiskPressurePred, EvenPodsSpreadPred, MatchInterPodAffinityPred}
EvenPodsSpreadPred, MatchInterPodAffinityPred}
)
// Ordering returns the ordering of predicates.
@@ -1639,80 +1630,6 @@ func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, f
return false, []PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
}
// isPodBestEffort checks if pod is scheduled with best-effort QoS.
func isPodBestEffort(pod *v1.Pod) bool {
return v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort
}
// CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
// reporting memory pressure condition.
func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
var podBestEffort bool
if predicateMeta, ok := meta.(*predicateMetadata); ok {
podBestEffort = predicateMeta.podBestEffort
} else {
// We couldn't parse metadata - fallback to computing it.
podBestEffort = isPodBestEffort(pod)
}
// pod is not BestEffort pod
if !podBestEffort {
return true, nil, nil
}
// check if node is under memory pressure
if nodeInfo.MemoryPressureCondition() == v1.ConditionTrue {
return false, []PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil
}
return true, nil, nil
}
// CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
// reporting disk pressure condition.
func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
// check if node is under disk pressure
if nodeInfo.DiskPressureCondition() == v1.ConditionTrue {
return false, []PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
}
return true, nil, nil
}
// CheckNodePIDPressurePredicate checks if a pod can be scheduled on a node
// reporting pid pressure condition.
func CheckNodePIDPressurePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
// check if node is under pid pressure
if nodeInfo.PIDPressureCondition() == v1.ConditionTrue {
return false, []PredicateFailureReason{ErrNodeUnderPIDPressure}, nil
}
return true, nil, nil
}
// CheckNodeConditionPredicate checks if a pod can be scheduled on a node reporting
// network unavailable and not ready condition. Only node conditions are accounted in this predicate.
func CheckNodeConditionPredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
reasons := []PredicateFailureReason{}
if nodeInfo == nil || nodeInfo.Node() == nil {
return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
}
node := nodeInfo.Node()
for _, cond := range node.Status.Conditions {
// We consider the node for scheduling only when its:
// - NodeReady condition status is ConditionTrue,
// - NodeNetworkUnavailable condition status is ConditionFalse.
if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
reasons = append(reasons, ErrNodeNotReady)
} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
reasons = append(reasons, ErrNodeNetworkUnavailable)
}
}
if node.Spec.Unschedulable {
reasons = append(reasons, ErrNodeUnschedulable)
}
return len(reasons) == 0, reasons, nil
}
// VolumeBindingChecker contains information to check a volume binding.
type VolumeBindingChecker struct {
binder *volumebinder.VolumeBinder
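
The predicates deleted above are generally covered by taint-based node-condition handling: pressured or not-ready nodes carry NoSchedule taints such as node.kubernetes.io/memory-pressure, and the surviving PodToleratesNodeTaints predicate filters on those. Below is a minimal, hypothetical sketch (not part of this commit) of a toleration that would let a pod schedule onto a node tainted for memory pressure, assuming the k8s.io/api client types:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Hypothetical illustration: tolerating the memory-pressure condition
	// taint opts the pod back in on such nodes; the scheduler's generic
	// taint predicate, not a dedicated pressure predicate, enforces this.
	tol := v1.Toleration{
		Key:      "node.kubernetes.io/memory-pressure",
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoSchedule,
	}
	fmt.Printf("%+v\n", tol)
}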

View File

@@ -24,7 +24,7 @@ import (
"strings"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -4283,296 +4283,6 @@ func TestPodToleratesTaints(t *testing.T) {
}
}
func makeEmptyNodeInfo(node *v1.Node) *schedulernodeinfo.NodeInfo {
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(node)
return nodeInfo
}
func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) {
// specify best-effort pod
bestEffortPod := &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container",
Image: "image",
ImagePullPolicy: "Always",
// no requirements -> best effort pod
Resources: v1.ResourceRequirements{},
},
},
},
}
// specify non-best-effort pod
nonBestEffortPod := &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container",
Image: "image",
ImagePullPolicy: "Always",
// at least one requirement -> burstable pod
Resources: v1.ResourceRequirements{
Requests: makeAllocatableResources(100, 100, 100, 0, 0, 0),
},
},
},
},
}
// specify a node with no memory pressure condition on
noMemoryPressureNode := &v1.Node{
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: "Ready",
Status: "True",
},
},
},
}
// specify a node with memory pressure condition on
memoryPressureNode := &v1.Node{
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: "MemoryPressure",
Status: "True",
},
},
},
}
tests := []struct {
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
fits bool
name string
}{
{
pod: bestEffortPod,
nodeInfo: makeEmptyNodeInfo(noMemoryPressureNode),
fits: true,
name: "best-effort pod schedulable on node without memory pressure condition on",
},
{
pod: bestEffortPod,
nodeInfo: makeEmptyNodeInfo(memoryPressureNode),
fits: false,
name: "best-effort pod not schedulable on node with memory pressure condition on",
},
{
pod: nonBestEffortPod,
nodeInfo: makeEmptyNodeInfo(memoryPressureNode),
fits: true,
name: "non best-effort pod schedulable on node with memory pressure condition on",
},
{
pod: nonBestEffortPod,
nodeInfo: makeEmptyNodeInfo(noMemoryPressureNode),
fits: true,
name: "non best-effort pod schedulable on node without memory pressure condition on",
},
}
expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderMemoryPressure}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fits, reasons, err := CheckNodeMemoryPressurePredicate(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("unexpected failure reasons: %v, want: %v", reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("expected %v got %v", test.fits, fits)
}
})
}
}
func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) {
pod := &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container",
Image: "image",
ImagePullPolicy: "Always",
},
},
},
}
// specify a node with no disk pressure condition on
noPressureNode := &v1.Node{
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: "Ready",
Status: "True",
},
},
},
}
// specify a node with pressure condition on
pressureNode := &v1.Node{
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: "DiskPressure",
Status: "True",
},
},
},
}
tests := []struct {
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
fits bool
name string
}{
{
pod: pod,
nodeInfo: makeEmptyNodeInfo(noPressureNode),
fits: true,
name: "pod schedulable on node without pressure condition on",
},
{
pod: pod,
nodeInfo: makeEmptyNodeInfo(pressureNode),
fits: false,
name: "pod not schedulable on node with pressure condition on",
},
}
expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderDiskPressure}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fits, reasons, err := CheckNodeDiskPressurePredicate(test.pod, GetPredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("unexpected failure reasons: %v, want: %v", reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("expected %v got %v", test.fits, fits)
}
})
}
}
func TestPodSchedulesOnNodeWithPIDPressureCondition(t *testing.T) {
// specify a node with no pid pressure condition on
noPressureNode := &v1.Node{
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
},
},
},
}
// specify a node with pressure condition on
pressureNode := &v1.Node{
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodePIDPressure,
Status: v1.ConditionTrue,
},
},
},
}
tests := []struct {
nodeInfo *schedulernodeinfo.NodeInfo
fits bool
name string
}{
{
nodeInfo: makeEmptyNodeInfo(noPressureNode),
fits: true,
name: "pod schedulable on node without pressure condition on",
},
{
nodeInfo: makeEmptyNodeInfo(pressureNode),
fits: false,
name: "pod not schedulable on node with pressure condition on",
},
}
expectedFailureReasons := []PredicateFailureReason{ErrNodeUnderPIDPressure}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fits, reasons, err := CheckNodePIDPressurePredicate(&v1.Pod{}, GetPredicateMetadata(&v1.Pod{}, nil), test.nodeInfo)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("unexpected failure reasons: %v, want: %v", reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("expected %v got %v", test.fits, fits)
}
})
}
}
func TestNodeConditionPredicate(t *testing.T) {
tests := []struct {
name string
node *v1.Node
schedulable bool
}{
{
name: "node1 considered",
node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}},
schedulable: true,
},
{
name: "node2 ignored - node not Ready",
node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}}},
schedulable: false,
},
{
name: "node3 ignored - node unschedulable",
node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node9"}, Spec: v1.NodeSpec{Unschedulable: true}},
schedulable: false,
},
{
name: "node4 considered",
node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node10"}, Spec: v1.NodeSpec{Unschedulable: false}},
schedulable: true,
},
{
name: "node5 considered",
node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node11"}},
schedulable: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodeInfo := makeEmptyNodeInfo(test.node)
if fit, reasons, err := CheckNodeConditionPredicate(nil, nil, nodeInfo); fit != test.schedulable {
t.Errorf("%s: expected: %t, got %t; %+v, %v",
test.node.Name, test.schedulable, fit, reasons, err)
}
})
}
}
func createPodWithVolume(pod, pv, pvc string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: pod, Namespace: "default"},

View File

@@ -103,18 +103,6 @@ func init() {
// (e.g. kubelet and all schedulers)
scheduler.RegisterFitPredicate(predicates.GeneralPred, predicates.GeneralPredicates)
// Fit is determined by node memory pressure condition.
scheduler.RegisterFitPredicate(predicates.CheckNodeMemoryPressurePred, predicates.CheckNodeMemoryPressurePredicate)
// Fit is determined by node disk pressure condition.
scheduler.RegisterFitPredicate(predicates.CheckNodeDiskPressurePred, predicates.CheckNodeDiskPressurePredicate)
// Fit is determined by node pid pressure condition.
scheduler.RegisterFitPredicate(predicates.CheckNodePIDPressurePred, predicates.CheckNodePIDPressurePredicate)
// Fit is determined by node conditions: not ready, network unavailable or out of disk.
scheduler.RegisterFitPredicate(predicates.CheckNodeConditionPred, predicates.CheckNodeConditionPredicate)
// Fit is determined based on whether a pod can tolerate all of the node's taints
scheduler.RegisterMandatoryFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints)
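
For reference, a hedged sketch of the registration shape this file keeps using; the predicate name and body below are hypothetical and not part of this commit, but the signature mirrors the FitPredicate form of the functions removed above (assuming the same v1, predicates, and schedulernodeinfo imports):

// Hypothetical example: "AlwaysFitsPred" is an illustrative name only.
// A fit predicate receives the pod, its precomputed metadata, and the
// candidate node's info, and reports fit plus any failure reasons.
scheduler.RegisterFitPredicate("AlwaysFitsPred",
	func(pod *v1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
		return true, nil, nil
	})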

View File

@@ -78,10 +78,6 @@ func TestApplyFeatureGates(t *testing.T) {
t.Fatalf("Error retrieving provider: %v", err)
}
if p.FitPredicateKeys.Has("CheckNodeCondition") {
t.Fatalf("Unexpected predicate: 'CheckNodeCondition'")
}
if !p.FitPredicateKeys.Has("PodToleratesNodeTaints") {
t.Fatalf("Failed to find predicate: 'PodToleratesNodeTaints'")
}
@@ -100,10 +96,6 @@ func TestApplyFeatureGates(t *testing.T) {
if !p.FitPredicateKeys.Has("PodToleratesNodeTaints") {
t.Fatalf("Failed to find predicate: 'PodToleratesNodeTaints'")
}
if p.FitPredicateKeys.Has("CheckNodeCondition") {
t.Fatalf("Unexpected predicate: 'CheckNodeCondition'")
}
})
}
}

View File

@@ -240,7 +240,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
@@ -259,7 +258,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
]
}`,
wantPredicates: sets.NewString(
"CheckNodeMemoryPressure",
"TestServiceAffinity",
"TestLabelsPresence",
),
@@ -307,8 +305,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
@@ -329,8 +325,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
]
}`,
wantPredicates: sets.NewString(
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
"TestServiceAffinity",
"TestLabelsPresence",
),
@@ -379,8 +373,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
@@ -411,8 +403,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
"TestServiceAffinity",
"TestLabelsPresence",
),
@@ -472,9 +462,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "CheckNodeCondition"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
@@ -505,9 +492,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
"CheckNodeCondition",
"TestServiceAffinity",
"TestLabelsPresence",
),
@@ -567,9 +551,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "CheckNodeCondition"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
@@ -601,9 +582,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
"CheckNodeCondition",
"TestServiceAffinity",
"TestLabelsPresence",
),
@@ -665,10 +643,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "CheckNodePIDPressure"},
{"name": "CheckNodeCondition"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
@@ -702,10 +676,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
"CheckNodePIDPressure",
"CheckNodeCondition",
"TestServiceAffinity",
"TestLabelsPresence",
),
@@ -768,10 +738,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "CheckNodePIDPressure"},
{"name": "CheckNodeCondition"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
@@ -816,10 +782,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
"CheckNodePIDPressure",
"CheckNodeCondition",
"TestServiceAffinity",
"TestLabelsPresence",
),
@@ -883,10 +845,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "CheckNodePIDPressure"},
{"name": "CheckNodeCondition"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
@@ -932,10 +890,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
"CheckNodePIDPressure",
"CheckNodeCondition",
"TestServiceAffinity",
"TestLabelsPresence",
),
@@ -998,10 +952,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "CheckNodePIDPressure"},
{"name": "CheckNodeCondition"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
@@ -1048,10 +998,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
"CheckNodePIDPressure",
"CheckNodeCondition",
"TestServiceAffinity",
"TestLabelsPresence",
),
@@ -1115,10 +1061,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "CheckNodePIDPressure"},
{"name": "CheckNodeCondition"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MaxAzureDiskVolumeCount"},
@@ -1169,10 +1111,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
}]
}`,
wantPredicates: sets.NewString(
"CheckNodeMemoryPressure",
"CheckNodeDiskPressure",
"CheckNodePIDPressure",
"CheckNodeCondition",
"TestServiceAffinity",
"TestLabelsPresence",
),

View File

@@ -357,30 +357,6 @@ func (n *NodeInfo) SetTaints(newTaints []v1.Taint) {
n.taints = newTaints
}
// MemoryPressureCondition returns the memory pressure condition status on this node.
func (n *NodeInfo) MemoryPressureCondition() v1.ConditionStatus {
if n == nil {
return v1.ConditionUnknown
}
return n.memoryPressureCondition
}
// DiskPressureCondition returns the disk pressure condition status on this node.
func (n *NodeInfo) DiskPressureCondition() v1.ConditionStatus {
if n == nil {
return v1.ConditionUnknown
}
return n.diskPressureCondition
}
// PIDPressureCondition returns the pid pressure condition status on this node.
func (n *NodeInfo) PIDPressureCondition() v1.ConditionStatus {
if n == nil {
return v1.ConditionUnknown
}
return n.pidPressureCondition
}
// RequestedResource returns aggregated resource request of pods on this node.
func (n *NodeInfo) RequestedResource() Resource {
if n == nil {