diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go
index 5b3c5b6997b..398cdf76631 100644
--- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go
+++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go
@@ -584,7 +584,7 @@ func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.No
 	// We couldn't parse metadata - fallback to computing it.
 	podRequest = GetResourceRequest(pod)
 	}
-	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && len(podRequest.OpaqueIntResources) == 0 {
+	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && podRequest.StorageOverlay == 0 && podRequest.StorageScratch == 0 && len(podRequest.OpaqueIntResources) == 0 {
 		return len(predicateFails) == 0, predicateFails, nil
 	}
 
diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go
index b7a5556640f..1bda90b9f90 100644
--- a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go
+++ b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go
@@ -435,7 +435,7 @@ func TestPodFitsResources(t *testing.T) {
 			nodeInfo: schedulercache.NewNodeInfo(
 				newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 10, StorageOverlay: 20})),
 			fits: false,
-			test: "due to init container scratch disk",
+			test: "due to container scratch disk",
 			reasons: []algorithm.PredicateFailureReason{
 				NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
 				NewInsufficientResourceError(v1.ResourceStorageScratch, 1, 20, 20),
@@ -453,7 +453,17 @@ func TestPodFitsResources(t *testing.T) {
 			nodeInfo: schedulercache.NewNodeInfo(
 				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
 			fits: false,
-			test: "request exceeds allocatable",
+			test: "request exceeds allocatable overlay storage resource",
+			reasons: []algorithm.PredicateFailureReason{
+				NewInsufficientResourceError(v1.ResourceStorageScratch, 18, 5, 20),
+			},
+		},
+		{
+			pod: newResourcePod(schedulercache.Resource{StorageOverlay: 18}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
+			fits: false,
+			test: "request exceeds allocatable overlay storage resource",
 			reasons: []algorithm.PredicateFailureReason{
 				NewInsufficientResourceError(v1.ResourceStorageScratch, 18, 5, 20),
 			},
@@ -470,6 +480,18 @@ func TestPodFitsResources(t *testing.T) {
 				NewInsufficientResourceError(v1.ResourceStorageScratch, 25, 5, 20),
 			},
 		},
+		{
+			pod: newResourcePod(schedulercache.Resource{}),
+			emptyDirLimit: 25,
+			storageMedium: v1.StorageMediumDefault,
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
+			fits: false,
+			test: "storage scratch request exceeds allocatable",
+			reasons: []algorithm.PredicateFailureReason{
+				NewInsufficientResourceError(v1.ResourceStorageScratch, 25, 5, 20),
+			},
+		},
 		{
 			pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1, StorageOverlay: 10}),
 			emptyDirLimit: 15,
@@ -477,10 +499,7 @@ func TestPodFitsResources(t *testing.T) {
 			storageMedium: v1.StorageMediumMemory,
 			nodeInfo: schedulercache.NewNodeInfo(
 				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
 			fits: true,
-			test: "storage scratchrequest exceeds allocatable",
-			reasons: []algorithm.PredicateFailureReason{
-				NewInsufficientResourceError(v1.ResourceStorageScratch, 25, 5, 20),
-			},
+			test: "pod fit with memory medium",
 		},
 	}