Fix local isolation for pod requesting only overlay
Commit: 761e079ed1
Parent: 5404948e7b
@@ -584,7 +584,7 @@ func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo)
 		// We couldn't parse metadata - fallback to computing it.
 		podRequest = GetResourceRequest(pod)
 	}
-	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && len(podRequest.OpaqueIntResources) == 0 {
+	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && podRequest.StorageOverlay == 0 && len(podRequest.OpaqueIntResources) == 0 {
 		return len(predicateFails) == 0, predicateFails, nil
 	}

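The fix targets the early-exit above: before this commit, a pod whose only request was overlay storage satisfied the "requests nothing" condition, so PodFitsResources returned before the scratch-disk check ever ran. A minimal sketch of that behavior, using simplified stand-in types rather than the upstream schedulercache definitions:

package main

import "fmt"

// Resource is a simplified stand-in for schedulercache.Resource.
type Resource struct {
	MilliCPU       int64
	Memory         int64
	NvidiaGPU      int64
	StorageOverlay int64
}

// fits mirrors the post-fix early-exit: a pod requesting only overlay
// storage no longer counts as "requesting nothing".
func fits(request, free Resource) bool {
	if request.MilliCPU == 0 && request.Memory == 0 && request.NvidiaGPU == 0 &&
		request.StorageOverlay == 0 { // the condition added by this commit
		return true // nothing requested, trivially fits
	}
	return request.MilliCPU <= free.MilliCPU &&
		request.Memory <= free.Memory &&
		request.NvidiaGPU <= free.NvidiaGPU &&
		request.StorageOverlay <= free.StorageOverlay
}

func main() {
	// Pod requesting only overlay storage; node has only 5 units free.
	request := Resource{StorageOverlay: 18}
	free := Resource{StorageOverlay: 5}
	// Before the fix the early-exit fired and the pod was admitted.
	fmt.Println(fits(request, free)) // false
}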
@@ -435,7 +435,7 @@ func TestPodFitsResources(t *testing.T) {
 			nodeInfo: schedulercache.NewNodeInfo(
 				newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 10, StorageOverlay: 20})),
 			fits: false,
-			test: "due to init container scratch disk",
+			test: "due to container scratch disk",
 			reasons: []algorithm.PredicateFailureReason{
 				NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
 				NewInsufficientResourceError(v1.ResourceStorageScratch, 1, 20, 20),
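In these failure reasons the three numbers line up with the pod's request, the node's current usage, and the node's capacity: CPU is 1 requested against 10 used of 10, and scratch is 1 requested against 20 used of 20. A one-line check of that reading (the argument order is inferred from the test values, not stated in this diff):

package main

import "fmt"

func main() {
	// used + requested > capacity  =>  the predicate reports insufficiency
	var requested, used, capacity int64 = 1, 20, 20
	fmt.Println(used+requested > capacity) // true
}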
@@ -458,6 +458,16 @@ func TestPodFitsResources(t *testing.T) {
 				NewInsufficientResourceError(v1.ResourceStorageScratch, 18, 5, 20),
 			},
 		},
+		{
+			pod: newResourcePod(schedulercache.Resource{StorageOverlay: 18}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
+			fits: false,
+			test: "request exceeds allocatable",
+			reasons: []algorithm.PredicateFailureReason{
+				NewInsufficientResourceError(v1.ResourceStorageScratch, 18, 5, 20),
+			},
+		},
 		{
 			pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1, StorageOverlay: 10}),
 			emptyDirLimit: 15,
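The added case requests only overlay storage (18 units) on a node where an existing pod already uses 5, and expects an insufficient-scratch failure: exactly the scenario the early-exit used to skip. The emptyDirLimit: 15 context lines below belong to the next, unchanged test case. To run just this table-driven test, assuming the package path of this era's scheduler tree:

go test -run TestPodFitsResources ./plugin/pkg/scheduler/algorithm/predicates/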