mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-23 11:50:44 +00:00
Optimize scheduler res scorer on non-requested extended res
This commit is contained in:
parent
042472d02d
commit
20f84b12a1
@ -95,7 +95,8 @@ func NewLeastAllocated(laArgs runtime.Object, h framework.Handle, fts feature.Fe
|
||||
func leastResourceScorer(resToWeightMap resourceToWeightMap) func(resourceToValueMap, resourceToValueMap) int64 {
|
||||
return func(requested, allocable resourceToValueMap) int64 {
|
||||
var nodeScore, weightSum int64
|
||||
for resource, weight := range resToWeightMap {
|
||||
for resource := range requested {
|
||||
weight := resToWeightMap[resource]
|
||||
resourceScore := leastRequestedScore(requested[resource], allocable[resource])
|
||||
nodeScore += resourceScore * weight
|
||||
weightSum += weight
|
||||
|
@ -100,6 +100,14 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
|
||||
{Name: string(v1.ResourceCPU), Weight: 1},
|
||||
{Name: string(v1.ResourceMemory), Weight: 1},
|
||||
}
|
||||
extendedRes := "abc.com/xyz"
|
||||
extendedResourceLeastAllocatedSet := []config.ResourceSpec{
|
||||
{Name: string(v1.ResourceCPU), Weight: 1},
|
||||
{Name: string(v1.ResourceMemory), Weight: 1},
|
||||
{Name: extendedRes, Weight: 1},
|
||||
}
|
||||
cpuMemoryAndExtendedRes := *cpuAndMemory.DeepCopy()
|
||||
cpuMemoryAndExtendedRes.Containers[0].Resources.Requests[v1.ResourceName(extendedRes)] = resource.MustParse("2")
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
@ -270,7 +278,7 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
|
||||
name: "nothing scheduled, resources requested with different weight on CPU and memory, differently sized machines",
|
||||
},
|
||||
{
|
||||
// resource with negtive weight is not allowed
|
||||
// resource with negative weight is not allowed
|
||||
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*v1.Node{makeNode("machine", 4000, 10000)},
|
||||
args: config.NodeResourcesLeastAllocatedArgs{Resources: []config.ResourceSpec{{Name: "memory", Weight: -1}, {Name: "cpu", Weight: 1}}},
|
||||
@ -308,6 +316,40 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
|
||||
}.ToAggregate(),
|
||||
name: "resource weight larger than MaxNodeScore",
|
||||
},
|
||||
{
|
||||
// Bypass extended resource if the pod does not request.
|
||||
// For both nodes: cpuScore and memScore are 50
|
||||
// Given that extended resource scores are intentionally bypassed,
|
||||
// the final scores are:
|
||||
// - node1: (50 + 50) / 2 = 50
|
||||
// - node2: (50 + 50) / 2 = 50
|
||||
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*v1.Node{
|
||||
makeNode("machine1", 6000, 10000),
|
||||
makeNodeWithExtendedResource("machine2", 6000, 10000, map[string]int64{extendedRes: 4}),
|
||||
},
|
||||
args: config.NodeResourcesLeastAllocatedArgs{Resources: extendedResourceLeastAllocatedSet},
|
||||
expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 50}},
|
||||
name: "bypass extended resource if the pod does not request",
|
||||
},
|
||||
{
|
||||
// Honor extended resource if the pod requests.
|
||||
// For both nodes: cpuScore and memScore are 50.
|
||||
// In terms of extended resource score:
|
||||
// - node1 gets: 2 / 4 * 100 = 50
|
||||
// - node2 gets: (10 - 2) / 10 * 100 = 80
|
||||
// So the final scores are:
|
||||
// - node1: (50 + 50 + 50) / 3 = 50
|
||||
// - node2: (50 + 50 + 80) / 3 = 60
|
||||
pod: &v1.Pod{Spec: cpuMemoryAndExtendedRes},
|
||||
nodes: []*v1.Node{
|
||||
makeNodeWithExtendedResource("machine1", 6000, 10000, map[string]int64{extendedRes: 4}),
|
||||
makeNodeWithExtendedResource("machine2", 6000, 10000, map[string]int64{extendedRes: 10}),
|
||||
},
|
||||
args: config.NodeResourcesLeastAllocatedArgs{Resources: extendedResourceLeastAllocatedSet},
|
||||
expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 60}},
|
||||
name: "honor extended resource if the pod requests",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
@ -93,7 +93,8 @@ func NewMostAllocated(maArgs runtime.Object, h framework.Handle, fts feature.Fea
|
||||
func mostResourceScorer(resToWeightMap resourceToWeightMap) func(requested, allocable resourceToValueMap) int64 {
|
||||
return func(requested, allocable resourceToValueMap) int64 {
|
||||
var nodeScore, weightSum int64
|
||||
for resource, weight := range resToWeightMap {
|
||||
for resource := range requested {
|
||||
weight := resToWeightMap[resource]
|
||||
resourceScore := mostRequestedScore(requested[resource], allocable[resource])
|
||||
nodeScore += resourceScore * weight
|
||||
weightSum += weight
|
||||
@ -101,7 +102,7 @@ func mostResourceScorer(resToWeightMap resourceToWeightMap) func(requested, allo
|
||||
if weightSum == 0 {
|
||||
return 0
|
||||
}
|
||||
return (nodeScore / weightSum)
|
||||
return nodeScore / weightSum
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -122,6 +122,14 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
|
||||
{Name: string(v1.ResourceCPU), Weight: 1},
|
||||
{Name: string(v1.ResourceMemory), Weight: 1},
|
||||
}
|
||||
extendedRes := "abc.com/xyz"
|
||||
extendedResourceMostAllocatedSet := []config.ResourceSpec{
|
||||
{Name: string(v1.ResourceCPU), Weight: 1},
|
||||
{Name: string(v1.ResourceMemory), Weight: 1},
|
||||
{Name: extendedRes, Weight: 1},
|
||||
}
|
||||
cpuMemoryAndExtendedRes := *cpuAndMemory.DeepCopy()
|
||||
cpuMemoryAndExtendedRes.Containers[0].Resources.Requests[v1.ResourceName(extendedRes)] = resource.MustParse("2")
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
@ -294,6 +302,40 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
|
||||
}.ToAggregate(),
|
||||
name: "resource weight larger than MaxNodeScore",
|
||||
},
|
||||
{
|
||||
// Bypass extended resource if the pod does not request.
|
||||
// For both nodes: cpuScore and memScore are 50
|
||||
// Given that extended resource scores are intentionally bypassed,
|
||||
// the final scores are:
|
||||
// - node1: (50 + 50) / 2 = 50
|
||||
// - node2: (50 + 50) / 2 = 50
|
||||
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*v1.Node{
|
||||
makeNode("machine1", 6000, 10000),
|
||||
makeNodeWithExtendedResource("machine2", 6000, 10000, map[string]int64{extendedRes: 4}),
|
||||
},
|
||||
args: config.NodeResourcesMostAllocatedArgs{Resources: extendedResourceMostAllocatedSet},
|
||||
expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 50}},
|
||||
name: "bypass extended resource if the pod does not request",
|
||||
},
|
||||
{
|
||||
// Honor extended resource if the pod requests.
|
||||
// For both nodes: cpuScore and memScore are 50.
|
||||
// In terms of extended resource score:
|
||||
// - node1 gets: 2 / 4 * 100 = 50
|
||||
// - node2 gets: 2 / 10 * 100 = 20
|
||||
// So the final scores are:
|
||||
// - node1: (50 + 50 + 50) / 3 = 50
|
||||
// - node2: (50 + 50 + 20) / 3 = 40
|
||||
pod: &v1.Pod{Spec: cpuMemoryAndExtendedRes},
|
||||
nodes: []*v1.Node{
|
||||
makeNodeWithExtendedResource("machine1", 6000, 10000, map[string]int64{extendedRes: 4}),
|
||||
makeNodeWithExtendedResource("machine2", 6000, 10000, map[string]int64{extendedRes: 10}),
|
||||
},
|
||||
args: config.NodeResourcesMostAllocatedArgs{Resources: extendedResourceMostAllocatedSet},
|
||||
expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 40}},
|
||||
name: "honor extended resource if the pod requests",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
@ -125,7 +125,8 @@ func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape helper.Fun
|
||||
}
|
||||
return func(requested, allocable resourceToValueMap) int64 {
|
||||
var nodeScore, weightSum int64
|
||||
for resource, weight := range resourceToWeightMap {
|
||||
for resource := range requested {
|
||||
weight := resourceToWeightMap[resource]
|
||||
resourceScore := resourceScoringFunction(requested[resource], allocable[resource])
|
||||
if resourceScore > 0 {
|
||||
nodeScore += resourceScore * weight
|
||||
|
@ -191,7 +191,6 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
|
||||
extendedResource1 := map[string]int64{
|
||||
"intel.com/foo": 4,
|
||||
}
|
||||
|
||||
extendedResource2 := map[string]int64{
|
||||
"intel.com/foo": 8,
|
||||
}
|
||||
@ -231,27 +230,13 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
|
||||
name string
|
||||
}{
|
||||
{
|
||||
|
||||
// Node1 scores (used resources) on 0-MaxNodeScore scale
|
||||
// Node1 Score:
|
||||
// rawScoringFunction(used + requested / available)
|
||||
// resourceScoringFunction((0+0),8)
|
||||
// = 0/8 * maxUtilization = 0 = rawScoringFunction(0)
|
||||
// Node1 Score: 0
|
||||
// Node2 scores (used resources) on 0-MaxNodeScore scale
|
||||
// rawScoringFunction(used + requested / available)
|
||||
// resourceScoringFunction((0+0),4)
|
||||
// = 0/4 * maxUtilization = 0 = rawScoringFunction(0)
|
||||
// Node2 Score: 0
|
||||
|
||||
// Node1 Score = Node2 Score = 0 as the incoming Pod doesn't request extended resource.
|
||||
pod: &v1.Pod{Spec: noResources},
|
||||
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
|
||||
expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
|
||||
name: "nothing scheduled, nothing requested",
|
||||
},
|
||||
|
||||
{
|
||||
|
||||
// Node1 scores (used resources) on 0-MaxNodeScore scale
|
||||
// Node1 Score:
|
||||
// rawScoringFunction(used + requested / available)
|
||||
@ -263,7 +248,6 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
|
||||
// resourceScoringFunction((0+2),4)
|
||||
// = 2/4 * maxUtilization = 50 = rawScoringFunction(50)
|
||||
// Node2 Score: 5
|
||||
|
||||
pod: &v1.Pod{Spec: extendedResourcePod1},
|
||||
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
|
||||
expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 5}},
|
||||
@ -272,9 +256,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
|
||||
{Spec: noResources},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
|
||||
// Node1 scores (used resources) on 0-MaxNodeScore scale
|
||||
// Node1 Score:
|
||||
// rawScoringFunction(used + requested / available)
|
||||
@ -286,7 +268,6 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
|
||||
// resourceScoringFunction((2+2),4)
|
||||
// = 4/4 * maxUtilization = maxUtilization = rawScoringFunction(maxUtilization)
|
||||
// Node2 Score: 10
|
||||
|
||||
pod: &v1.Pod{Spec: extendedResourcePod1},
|
||||
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
|
||||
expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 10}},
|
||||
@ -295,9 +276,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
|
||||
{Spec: machine2Pod},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
|
||||
// Node1 scores (used resources) on 0-MaxNodeScore scale
|
||||
// Node1 Score:
|
||||
// rawScoringFunction(used + requested / available)
|
||||
@ -309,7 +288,6 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
|
||||
// resourceScoringFunction((0+4),4)
|
||||
// = 4/4 * maxUtilization = maxUtilization = rawScoringFunction(maxUtilization)
|
||||
// Node2 Score: 10
|
||||
|
||||
pod: &v1.Pod{Spec: extendedResourcePod2},
|
||||
nodes: []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
|
||||
expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 10}},
|
||||
|
@ -52,14 +52,17 @@ func (r *resourceAllocationScorer) score(
|
||||
if r.resourceToWeightMap == nil {
|
||||
return 0, framework.NewStatus(framework.Error, "resources not found")
|
||||
}
|
||||
requested := make(resourceToValueMap, len(r.resourceToWeightMap))
|
||||
allocatable := make(resourceToValueMap, len(r.resourceToWeightMap))
|
||||
requested := make(resourceToValueMap)
|
||||
allocatable := make(resourceToValueMap)
|
||||
for resource := range r.resourceToWeightMap {
|
||||
allocatable[resource], requested[resource] = calculateResourceAllocatableRequest(nodeInfo, pod, resource, r.enablePodOverhead)
|
||||
alloc, req := calculateResourceAllocatableRequest(nodeInfo, pod, resource, r.enablePodOverhead)
|
||||
if alloc != 0 {
|
||||
// Only fill the extended resource entry when it's non-zero.
|
||||
allocatable[resource], requested[resource] = alloc, req
|
||||
}
|
||||
}
|
||||
var score int64
|
||||
|
||||
score = r.scorer(requested, allocatable)
|
||||
score := r.scorer(requested, allocatable)
|
||||
|
||||
if klog.V(10).Enabled() {
|
||||
klog.Infof(
|
||||
@ -72,15 +75,22 @@ func (r *resourceAllocationScorer) score(
|
||||
return score, nil
|
||||
}
|
||||
|
||||
// calculateResourceAllocatableRequest returns resources Allocatable and Requested values
|
||||
// calculateResourceAllocatableRequest returns 2 parameters:
|
||||
// - 1st param: quantity of allocatable resource on the node.
|
||||
// - 2nd param: aggregated quantity of requested resource on the node.
|
||||
// Note: if it's an extended resource, and the pod doesn't request it, (0, 0) is returned.
|
||||
func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName, enablePodOverhead bool) (int64, int64) {
|
||||
podRequest := calculatePodResourceRequest(pod, resource, enablePodOverhead)
|
||||
// If it's an extended resource, and the pod doesn't request it, we return (0, 0)
|
||||
// as an implication to bypass scoring on this resource.
|
||||
if podRequest == 0 && schedutil.IsScalarResourceName(resource) {
|
||||
return 0, 0
|
||||
}
|
||||
switch resource {
|
||||
case v1.ResourceCPU:
|
||||
return nodeInfo.Allocatable.MilliCPU, (nodeInfo.NonZeroRequested.MilliCPU + podRequest)
|
||||
case v1.ResourceMemory:
|
||||
return nodeInfo.Allocatable.Memory, (nodeInfo.NonZeroRequested.Memory + podRequest)
|
||||
|
||||
case v1.ResourceEphemeralStorage:
|
||||
return nodeInfo.Allocatable.EphemeralStorage, (nodeInfo.Requested.EphemeralStorage + podRequest)
|
||||
default:
|
||||
@ -89,9 +99,7 @@ func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.P
|
||||
}
|
||||
}
|
||||
if klog.V(10).Enabled() {
|
||||
klog.Infof("requested resource %v not considered for node score calculation",
|
||||
resource,
|
||||
)
|
||||
klog.Infof("requested resource %v not considered for node score calculation", resource)
|
||||
}
|
||||
return 0, 0
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user