Merge pull request #102925 from alculquicondor/fix-node-resources
Fix Node Resources plugins score when there are pods with no requests
Commit: f899bbe445
@@ -82,12 +82,16 @@ func NewBalancedAllocation(_ runtime.Object, h framework.Handle, fts feature.Fea
 // todo: use resource weights in the scorer function
 func balancedResourceScorer(requested, allocable resourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
-	// This to find a node which has most balanced CPU, memory and volume usage.
 	cpuFraction := fractionOfCapacity(requested[v1.ResourceCPU], allocable[v1.ResourceCPU])
 	memoryFraction := fractionOfCapacity(requested[v1.ResourceMemory], allocable[v1.ResourceMemory])
-	if cpuFraction >= 1 || memoryFraction >= 1 {
-		// if requested >= capacity, the corresponding host should never be preferred.
-		return 0
+	// This to find a node which has most balanced CPU, memory and volume usage.
+	// fractions might be greater than 1 because pods with no requests get minimum
+	// values.
+	if cpuFraction > 1 {
+		cpuFraction = 1
+	}
+	if memoryFraction > 1 {
+		memoryFraction = 1
 	}

 	// includeVolumes is only true when BalanceAttachedNodeVolumes feature gate is enabled (see resource_allocation.go#score())
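To make the behavioural change concrete, here is a minimal standalone Go sketch, not the plugin code itself: the function and constant names are mine, and the final balance formula is taken from the arithmetic in the test comments below (MaxNodeScore - |cpuFraction - memoryFraction| * MaxNodeScore), not from this hunk. It contrasts the old and new scorer for a node whose default-charged CPU requests exceed capacity.

package main

import (
	"fmt"
	"math"
)

const maxNodeScore = 100 // framework.MaxNodeScore

// oldBalancedScore mirrors the removed branch: any fraction >= 1 zeroes the node.
func oldBalancedScore(cpuFraction, memoryFraction float64) int64 {
	if cpuFraction >= 1 || memoryFraction >= 1 {
		// if requested >= capacity, the corresponding host should never be preferred.
		return 0
	}
	return int64((1 - math.Abs(cpuFraction-memoryFraction)) * maxNodeScore)
}

// newBalancedScore mirrors the added clamping: pods with no requests are charged
// minimum defaults, so fractions can exceed 1 and are capped instead of rejected.
func newBalancedScore(cpuFraction, memoryFraction float64) int64 {
	if cpuFraction > 1 {
		cpuFraction = 1
	}
	if memoryFraction > 1 {
		memoryFraction = 1
	}
	return int64((1 - math.Abs(cpuFraction-memoryFraction)) * maxNodeScore)
}

func main() {
	// machine1 in the new test case below: CPU fraction 300/250 = 1.2, memory fraction 0.6.
	fmt.Println(oldBalancedScore(1.2, 0.6)) // 0  -> the node used to be ruled out entirely
	fmt.Println(newBalancedScore(1.2, 0.6)) // 60 -> matches the expected machine1 score
}

Before the fix such a node was zeroed outright; after it, the clamped CPU fraction still counts toward balance, so machine1 in the new test case scores 60 instead of 0.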
@@ -214,6 +214,13 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
 			},
 		},
 	}
+	nonZeroContainer := v1.PodSpec{
+		Containers: []v1.Container{{}},
+	}
+	nonZeroContainer1 := v1.PodSpec{
+		NodeName:   "machine1",
+		Containers: []v1.Container{{}},
+	}
 	tests := []struct {
 		pod          *v1.Pod
 		pods         []*v1.Pod
@@ -269,6 +276,24 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
 				{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
 			},
 		},
+		{
+			// Node1 scores on 0-MaxNodeScore scale
+			// CPU Fraction: 300 / 250 = 100%
+			// Memory Fraction: 600 / 10000 = 60%
+			// Node1 Score: MaxNodeScore - (100-60)*MaxNodeScore = 60
+			// Node2 scores on 0-MaxNodeScore scale
+			// CPU Fraction: 100 / 250 = 40%
+			// Memory Fraction: 200 / 10000 = 20%
+			// Node2 Score: MaxNodeScore - (40-20)*MaxNodeScore = 80
+			pod:          &v1.Pod{Spec: nonZeroContainer},
+			nodes:        []*v1.Node{makeNode("machine1", 250, 1000*1024*1024), makeNode("machine2", 250, 1000*1024*1024)},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 60}, {Name: "machine2", Score: 80}},
+			name:         "no resources requested, pods scheduled",
+			pods: []*v1.Pod{
+				{Spec: nonZeroContainer1},
+				{Spec: nonZeroContainer1},
+			},
+		},
 		{
 			// Node1 scores on 0-MaxNodeScore scale
 			// CPU Fraction: 6000 / 10000 = 60%
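The 300/250 CPU and 600 memory figures in the comments above come from the minimum requests the scheduler charges containers that declare none. A small sketch of that arithmetic, assuming the usual defaults of 100 milliCPU and 200 MB per container (the exact constants live in the scheduler's util package and are an assumption here, not part of this diff):

package main

import "fmt"

// Minimum requests charged to containers with no requests (assumed values,
// taken from the scheduler's util defaults; not part of this diff).
const (
	defaultMilliCPU = 100               // 0.1 CPU
	defaultMemory   = 200 * 1024 * 1024 // 200 MB
)

func main() {
	// machine1 holds the scored pod plus two nonZeroContainer1 pods: three empty containers.
	cpu := 3 * defaultMilliCPU // 300 milliCPU against a 250 milliCPU node -> fraction 1.2, clamped to 1.0
	mem := 3 * defaultMemory   // 600 MB against a 1000 MB node -> fraction 0.6
	fmt.Println(cpu, mem/(1024*1024)) // 300 600
	// Balanced score for machine1: 100 - (1.0-0.6)*100 = 60, the expected value above.
}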
@@ -327,27 +352,17 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
 		},
 		{
 			// Node1 scores on 0-MaxNodeScore scale
-			// CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
+			// CPU Fraction: 6000 / 6000 = 1
 			// Memory Fraction: 0 / 10000 = 0
-			// Node1 Score: 0
+			// Node1 Score: MaxNodeScore - (1 - 0) * MaxNodeScore = 0
 			// Node2 scores on 0-MaxNodeScore scale
-			// CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
+			// CPU Fraction: 6000 / 6000 = 1
 			// Memory Fraction 5000 / 10000 = 50%
-			// Node2 Score: 0
+			// Node2 Score: MaxNodeScore - (1 - 0.5) * MaxNodeScore = 50
 			pod: &v1.Pod{Spec: cpuOnly},
-			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
-			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
-			name:         "requested resources exceed node capacity",
-			pods: []*v1.Pod{
-				{Spec: cpuOnly},
-				{Spec: cpuAndMemory},
-			},
-		},
-		{
-			pod:          &v1.Pod{Spec: noResources},
-			nodes:        []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
-			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
-			name:         "zero node resources, pods scheduled with resources",
+			nodes:        []*v1.Node{makeNode("machine1", 6000, 10000), makeNode("machine2", 6000, 10000)},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}},
+			name:         "requested resources at node capacity",
 			pods: []*v1.Pod{
 				{Spec: cpuOnly},
 				{Spec: cpuAndMemory},
@@ -399,7 +414,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
 				t.Errorf("unexpected error: %v", err)
 			}
 			if !reflect.DeepEqual(test.expectedList[i].Score, hostResult) {
-				t.Errorf("expected %#v, got %#v", test.expectedList[i].Score, hostResult)
+				t.Errorf("got score %v for host %v, expected %v", hostResult, test.nodes[i].Name, test.expectedList[i].Score)
 			}
 		}
 	})
@@ -115,7 +115,9 @@ func mostRequestedScore(requested, capacity int64) int64 {
 		return 0
 	}
 	if requested > capacity {
-		return 0
+		// `requested` might be greater than `capacity` because pods with no
+		// requests get minimum values.
+		requested = capacity
 	}

 	return (requested * framework.MaxNodeScore) / capacity
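For reference, a minimal standalone sketch of the most-requested score before and after this change. oldMostRequestedScore and newMostRequestedScore are my names, not the plugin's; the equal-weight average in main mirrors the defaultResourceMostAllocatedSet (CPU weight 1, memory weight 1) used by the tests below.

package main

import "fmt"

const maxNodeScore = 100 // framework.MaxNodeScore

// Old behaviour: a resource requested beyond capacity zeroed that resource's score.
func oldMostRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return (requested * maxNodeScore) / capacity
}

// New behaviour: clamp, because pods with no requests are charged minimum
// default requests and can push `requested` past `capacity`.
func newMostRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 {
		return 0
	}
	if requested > capacity {
		requested = capacity
	}
	return (requested * maxNodeScore) / capacity
}

func main() {
	// machine1 from the updated test below: CPU 5000/5000 and memory 9000/10000.
	cpu := newMostRequestedScore(5000, 5000)  // 100 (was 0 when the node only had 4000)
	mem := newMostRequestedScore(9000, 10000) // 90
	fmt.Println((cpu + mem) / 2)              // 95, the expected machine1 score
	fmt.Println(oldMostRequestedScore(5000, 4000)) // 0, the old outcome
}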
@@ -111,6 +111,13 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
 			},
 		},
 	}
+	nonZeroContainer := v1.PodSpec{
+		Containers: []v1.Container{{}},
+	}
+	nonZeroContainer1 := v1.PodSpec{
+		NodeName:   "machine1",
+		Containers: []v1.Container{{}},
+	}
 	defaultResourceMostAllocatedSet := []config.ResourceSpec{
 		{Name: string(v1.ResourceCPU), Weight: 1},
 		{Name: string(v1.ResourceMemory), Weight: 1},
@@ -203,18 +210,18 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
 		},
 		{
 			// Node1 scores on 0-MaxNodeScore scale
-			// CPU Score: 5000 > 4000 return 0
+			// CPU Score: 5000 * MaxNodeScore / 5000 return 100
 			// Memory Score: (9000 * MaxNodeScore) / 10000 = 90
-			// Node1 Score: (0 + 90) / 2 = 45
+			// Node1 Score: (100 + 90) / 2 = 95
 			// Node2 scores on 0-MaxNodeScore scale
 			// CPU Score: (5000 * MaxNodeScore) / 10000 = 50
-			// Memory Score: 9000 > 8000 return 0
-			// Node2 Score: (50 + 0) / 2 = 25
+			// Memory Score: 8000 *MaxNodeScore / 8000 return 100
+			// Node2 Score: (50 + 100) / 2 = 75
 			pod: &v1.Pod{Spec: bigCPUAndMemory},
-			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 10000, 8000)},
+			nodes:        []*v1.Node{makeNode("machine1", 5000, 10000), makeNode("machine2", 10000, 9000)},
 			args:         config.NodeResourcesMostAllocatedArgs{Resources: defaultResourceMostAllocatedSet},
-			expectedList: []framework.NodeScore{{Name: "machine1", Score: 45}, {Name: "machine2", Score: 25}},
-			name:         "resources requested with more than the node, pods scheduled with resources",
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 95}, {Name: "machine2", Score: 75}},
+			name:         "resources requested equal node capacity",
 		},
 		{
 			// CPU Score: (3000 *100) / 4000 = 75
@@ -229,6 +236,25 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 58}, {Name: "machine2", Score: 50}},
 			name:         "nothing scheduled, resources requested, differently sized machines",
 		},
+		{
+			// Node1 scores on 0-MaxNodeScore scale
+			// CPU Fraction: 300 / 250 = 100%
+			// Memory Fraction: 600 / 10000 = 60%
+			// Node1 Score: (100 + 60) / 2 = 80
+			// Node2 scores on 0-MaxNodeScore scale
+			// CPU Fraction: 100 / 250 = 40%
+			// Memory Fraction: 200 / 10000 = 20%
+			// Node2 Score: (20 + 40) / 2 = 30
+			pod:          &v1.Pod{Spec: nonZeroContainer},
+			nodes:        []*v1.Node{makeNode("machine1", 250, 1000*1024*1024), makeNode("machine2", 250, 1000*1024*1024)},
+			args:         config.NodeResourcesMostAllocatedArgs{Resources: []config.ResourceSpec{{Name: "memory", Weight: 1}, {Name: "cpu", Weight: 1}}},
+			expectedList: []framework.NodeScore{{Name: "machine1", Score: 80}, {Name: "machine2", Score: 30}},
+			name:         "no resources requested, pods scheduled",
+			pods: []*v1.Pod{
+				{Spec: nonZeroContainer1},
+				{Spec: nonZeroContainer1},
+			},
+		},
 		{
 			// resource with negtive weight is not allowed
 			pod: &v1.Pod{Spec: cpuAndMemory},
@@ -298,7 +324,7 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
 				t.Errorf("unexpected error: %v", err)
 			}
 			if !reflect.DeepEqual(test.expectedList[i].Score, hostResult) {
-				t.Errorf("expected %#v, got %#v", test.expectedList[i].Score, hostResult)
+				t.Errorf("got score %v for host %v, expected %v", hostResult, test.nodes[i].Name, test.expectedList[i].Score)
 			}
 		}
 	})