Scheduler support for hugepages
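This change threads a hugepage count through the scheduler's resource-fit test helpers (makeResources, makeAllocatableResources) and adds TestPodFitsResources cases asserting that hugepage requests are enforced against node allocatable. Kubernetes names hugepage resources by page size, so the 2Mi class used below surfaces as "hugepages-2Mi"; a minimal standalone sketch of that naming convention (not part of this commit; the helper name here is hypothetical, mirroring v1helper.HugePageResourceName):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// hugePageResourceName mirrors the documented "hugepages-<size>" naming
// convention that v1helper.HugePageResourceName implements in-tree.
func hugePageResourceName(pageSize resource.Quantity) v1.ResourceName {
	return v1.ResourceName(fmt.Sprintf("hugepages-%s", pageSize.String()))
}

func main() {
	fmt.Println(hugePageResourceName(resource.MustParse("2Mi"))) // hugepages-2Mi
}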
@@ -73,11 +73,12 @@ func (pvs FakePersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.Pe
 }
 
 var (
-	opaqueResourceA = v1helper.OpaqueIntResourceName("AAA")
-	opaqueResourceB = v1helper.OpaqueIntResourceName("BBB")
+	opaqueResourceA   = v1helper.OpaqueIntResourceName("AAA")
+	opaqueResourceB   = v1helper.OpaqueIntResourceName("BBB")
+	hugePageResourceA = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
 )
 
-func makeResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA, storage int64) v1.NodeResources {
+func makeResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA, storage, hugePageA int64) v1.NodeResources {
 	return v1.NodeResources{
 		Capacity: v1.ResourceList{
 			v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
@@ -86,11 +87,12 @@ func makeResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA, storage int64) v
 			v1.ResourceNvidiaGPU:        *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI),
 			opaqueResourceA:             *resource.NewQuantity(opaqueA, resource.DecimalSI),
 			v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
+			hugePageResourceA:           *resource.NewQuantity(hugePageA, resource.BinarySI),
 		},
 	}
 }
 
-func makeAllocatableResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA, storage int64) v1.ResourceList {
+func makeAllocatableResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA, storage, hugePageA int64) v1.ResourceList {
 	return v1.ResourceList{
 		v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
 		v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
@@ -98,6 +100,7 @@ func makeAllocatableResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA, stora
 		v1.ResourceNvidiaGPU:        *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI),
 		opaqueResourceA:             *resource.NewQuantity(opaqueA, resource.DecimalSI),
 		v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
+		hugePageResourceA:           *resource.NewQuantity(hugePageA, resource.BinarySI),
 	}
 }
 
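Both helpers now take a trailing hugePageA count and emit a matching entry in the node's ResourceList. A hedged, self-contained sketch (values arbitrary, not taken from the commit) of the kind of list these helpers now build:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// "hugepages-2Mi" is the resource name hugePageResourceA resolves to.
	hugePageResourceA := v1.ResourceName("hugepages-2Mi")
	rl := v1.ResourceList{
		v1.ResourceCPU:    *resource.NewMilliQuantity(10, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
		hugePageResourceA: *resource.NewQuantity(5, resource.BinarySI),
	}
	fmt.Println(rl)
}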
@@ -348,10 +351,38 @@ func TestPodFitsResources(t *testing.T) {
 			test:    "opaque resource allocatable enforced for unknown resource for init container",
 			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceB, 1, 0, 0)},
 		},
+		{
+			pod: newResourcePod(
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
+			fits:    false,
+			test:    "hugepages resource capacity enforced",
+			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}),
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
+			fits:    false,
+			test:    "hugepages resource capacity enforced for init container",
+			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 10, 0, 5)},
+		},
+		{
+			pod: newResourcePod(
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 3}},
+				schedulercache.Resource{MilliCPU: 1, Memory: 1, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
+			nodeInfo: schedulercache.NewNodeInfo(
+				newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, HugePages: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
+			fits:    false,
+			test:    "hugepages resource allocatable enforced for multiple containers",
+			reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(hugePageResourceA, 6, 2, 5)},
+		},
 	}
 
 	for _, test := range enoughPodsTests {
-		node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 5, 20).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 5, 20)}}
+		node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 5, 20, 5)}}
 		test.nodeInfo.SetNode(&node)
 		fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
 		if err != nil {
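The expected failures above encode the predicate's arithmetic: NewInsufficientResourceError takes (resource, requested, used, capacity), and with the node granting an allocatable of 5 hugepages, the single-container cases fail with requested 10 against capacity 5, while the multi-container case fails because its containers request 3+3=6 with 2 already in use. A minimal sketch of that per-resource check (function name hypothetical; the real logic lives in PodFitsResources):

// fitsResource reports whether a request fits: a pod schedules only if
// used + requested stays within the node's allocatable capacity.
// For the multi-container case above: 2 + (3+3) = 8 > 5, so it fails.
func fitsResource(requested, used, capacity int64) bool {
	return used+requested <= capacity
}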
@@ -406,7 +437,7 @@ func TestPodFitsResources(t *testing.T) {
 		},
 	}
 	for _, test := range notEnoughPodsTests {
-		node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 0, 1, 0, 0)}}
+		node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 0, 1, 0, 0, 0)}}
 		test.nodeInfo.SetNode(&node)
 		fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
 		if err != nil {
@@ -464,7 +495,7 @@ func TestPodFitsResources(t *testing.T) {
 	}
 
 	for _, test := range storagePodsTests {
-		node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 5, 20).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 5, 20)}}
+		node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 5, 20, 5)}}
 		test.nodeInfo.SetNode(&node)
 		fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
 		if err != nil {
@@ -1889,7 +1920,7 @@ func TestRunGeneralPredicates(t *testing.T) {
 			newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
 		node: &v1.Node{
 			ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
-			Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0, 0)},
+			Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0, 0, 0)},
 		},
 		fits: true,
 		wErr: nil,
@@ -1901,7 +1932,7 @@ func TestRunGeneralPredicates(t *testing.T) {
 			newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
 		node: &v1.Node{
 			ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
-			Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0, 0)},
+			Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0, 0, 0)},
 		},
 		fits: false,
 		wErr: nil,
@@ -1915,7 +1946,7 @@ func TestRunGeneralPredicates(t *testing.T) {
 		pod: &v1.Pod{},
 		nodeInfo: schedulercache.NewNodeInfo(
 			newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
-		node: &v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 1, 32, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32, 0, 0)}},
+		node: &v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 1, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32, 0, 0, 0)}},
 		fits: true,
 		wErr: nil,
 		test: "no resources/port/host requested always fits on GPU machine",
@@ -1924,7 +1955,7 @@ func TestRunGeneralPredicates(t *testing.T) {
 		pod: newResourcePod(schedulercache.Resource{MilliCPU: 3, Memory: 1, NvidiaGPU: 1}),
 		nodeInfo: schedulercache.NewNodeInfo(
 			newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 10, NvidiaGPU: 1})),
-		node: &v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 1, 32, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32, 0, 0)}},
+		node: &v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 1, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32, 0, 0, 0)}},
 		fits: false,
 		wErr: nil,
 		reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceNvidiaGPU, 1, 1, 1)},
@@ -1934,7 +1965,7 @@ func TestRunGeneralPredicates(t *testing.T) {
 		pod: newResourcePod(schedulercache.Resource{MilliCPU: 3, Memory: 1, NvidiaGPU: 1}),
 		nodeInfo: schedulercache.NewNodeInfo(
 			newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 10, NvidiaGPU: 0})),
-		node: &v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 1, 32, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32, 0, 0)}},
+		node: &v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 1, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32, 0, 0, 0)}},
 		fits: true,
 		wErr: nil,
 		test: "enough GPU resource",
@@ -1948,7 +1979,7 @@ func TestRunGeneralPredicates(t *testing.T) {
 		nodeInfo: schedulercache.NewNodeInfo(),
 		node: &v1.Node{
 			ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
-			Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0, 0)},
+			Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0, 0, 0)},
 		},
 		fits: false,
 		wErr: nil,
@@ -1960,7 +1991,7 @@ func TestRunGeneralPredicates(t *testing.T) {
 		nodeInfo: schedulercache.NewNodeInfo(newPodWithPort(123)),
 		node: &v1.Node{
 			ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
-			Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0, 0)},
+			Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0, 0, 0)},
 		},
 		fits: false,
 		wErr: nil,
@@ -3252,7 +3283,7 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) {
 				ImagePullPolicy: "Always",
 				// at least one requirement -> burstable pod
 				Resources: v1.ResourceRequirements{
-					Requests: makeAllocatableResources(100, 100, 100, 100, 0, 0),
+					Requests: makeAllocatableResources(100, 100, 100, 100, 0, 0, 0),
 				},
 			},
 		},