Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)
Merge pull request #62934 from wackxu/scto
Automatic merge from submit-queue (batch tested with PRs 62354, 62934, 63502). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Refactor GetResourceRequest and GetResourceLimit

**What this PR does / why we need it**:

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:
/assign @bsalamat

**Release note**:
```release-note
NONE
```
Commit aea6addb82
@@ -672,29 +672,7 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
 	// take max_resource(sum_pod, any_init_container)
 	for _, container := range pod.Spec.InitContainers {
-		for rName, rQuantity := range container.Resources.Requests {
-			switch rName {
-			case v1.ResourceMemory:
-				if mem := rQuantity.Value(); mem > result.Memory {
-					result.Memory = mem
-				}
-			case v1.ResourceEphemeralStorage:
-				if ephemeralStorage := rQuantity.Value(); ephemeralStorage > result.EphemeralStorage {
-					result.EphemeralStorage = ephemeralStorage
-				}
-			case v1.ResourceCPU:
-				if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU {
-					result.MilliCPU = cpu
-				}
-			default:
-				if v1helper.IsScalarResourceName(rName) {
-					value := rQuantity.Value()
-					if value > result.ScalarResources[rName] {
-						result.SetScalar(rName, value)
-					}
-				}
-			}
-		}
+		result.SetMaxResource(container.Resources.Requests)
 	}

 	return result
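The `// take max_resource(sum_pod, any_init_container)` comment describes the effective-resource rule for pods with init containers: regular containers run concurrently, so their requests are summed, while init containers run one at a time, so only the largest single init container has to fit. Per the `sum_pod` part of the comment, the summation over regular containers happens earlier in the function, outside this hunk; the loop shown here only applies the max step, which the PR now delegates to `SetMaxResource`. Below is a minimal self-contained sketch of that rule, using a simplified, made-up `simpleResource` type and example numbers rather than the scheduler's real `Resource` struct:

```go
package main

import "fmt"

// simpleResource is a stripped-down, hypothetical stand-in for the scheduler's
// Resource struct, tracking only milli-CPU and bytes of memory.
type simpleResource struct {
	MilliCPU int64
	Memory   int64
}

// effectiveRequest mirrors the max_resource(sum_pod, any_init_container) rule:
// sum the regular containers, then raise each dimension to the largest init container.
func effectiveRequest(containers, initContainers []simpleResource) simpleResource {
	var result simpleResource
	for _, c := range containers {
		result.MilliCPU += c.MilliCPU
		result.Memory += c.Memory
	}
	for _, ic := range initContainers {
		if ic.MilliCPU > result.MilliCPU {
			result.MilliCPU = ic.MilliCPU
		}
		if ic.Memory > result.Memory {
			result.Memory = ic.Memory
		}
	}
	return result
}

func main() {
	containers := []simpleResource{{MilliCPU: 100, Memory: 200 << 20}, {MilliCPU: 200, Memory: 100 << 20}}
	initContainers := []simpleResource{{MilliCPU: 1000, Memory: 50 << 20}}
	// The init container dominates on CPU, the regular containers dominate on memory.
	fmt.Printf("%+v\n", effectiveRequest(containers, initContainers)) // {MilliCPU:1000 Memory:314572800}
}
```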
@@ -20,7 +20,6 @@ import (
 	"fmt"

 	"k8s.io/api/core/v1"
-	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	"k8s.io/kubernetes/pkg/scheduler/schedulercache"

@@ -93,31 +92,7 @@ func getResourceLimits(pod *v1.Pod) *schedulercache.Resource {
 	// take max_resource(sum_pod, any_init_container)
 	for _, container := range pod.Spec.InitContainers {
-		for rName, rQuantity := range container.Resources.Limits {
-			switch rName {
-			case v1.ResourceMemory:
-				if mem := rQuantity.Value(); mem > result.Memory {
-					result.Memory = mem
-				}
-			case v1.ResourceCPU:
-				if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU {
-					result.MilliCPU = cpu
-				}
-			// keeping these resources though score computation in other priority functions and in this
-			// are only computed based on cpu and memory only.
-			case v1.ResourceEphemeralStorage:
-				if ephemeralStorage := rQuantity.Value(); ephemeralStorage > result.EphemeralStorage {
-					result.EphemeralStorage = ephemeralStorage
-				}
-			default:
-				if v1helper.IsScalarResourceName(rName) {
-					value := rQuantity.Value()
-					if value > result.ScalarResources[rName] {
-						result.SetScalar(rName, value)
-					}
-				}
-			}
-		}
+		result.SetMaxResource(container.Resources.Limits)
 	}

 	return result
@@ -219,6 +219,37 @@ func (r *Resource) SetScalar(name v1.ResourceName, quantity int64) {
 	r.ScalarResources[name] = quantity
 }

+// SetMaxResource compares with ResourceList and takes max value for each Resource.
+func (r *Resource) SetMaxResource(rl v1.ResourceList) {
+	if r == nil {
+		return
+	}
+
+	for rName, rQuantity := range rl {
+		switch rName {
+		case v1.ResourceMemory:
+			if mem := rQuantity.Value(); mem > r.Memory {
+				r.Memory = mem
+			}
+		case v1.ResourceCPU:
+			if cpu := rQuantity.MilliValue(); cpu > r.MilliCPU {
+				r.MilliCPU = cpu
+			}
+		case v1.ResourceEphemeralStorage:
+			if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage {
+				r.EphemeralStorage = ephemeralStorage
+			}
+		default:
+			if v1helper.IsScalarResourceName(rName) {
+				value := rQuantity.Value()
+				if value > r.ScalarResources[rName] {
+					r.SetScalar(rName, value)
+				}
+			}
+		}
+	}
+}
+
 // NewNodeInfo returns a ready to use empty NodeInfo object.
 // If any pods are given in arguments, their information will be aggregated in
 // the returned object.
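For callers outside this diff, the new helper can be exercised directly. The sketch below assumes the package path shown in the import hunk above (`k8s.io/kubernetes/pkg/scheduler/schedulercache`, as of this PR) and only the `Resource` fields visible in the test, with made-up example values; treat it as illustrative rather than a drop-in program, since compiling against the kubernetes repo needs its own module setup.

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)

func main() {
	// Start from a partially filled Resource, e.g. the running sum over regular containers.
	r := &schedulercache.Resource{MilliCPU: 500, Memory: 1 << 30} // 0.5 CPU, 1Gi (example values)

	// Requests (or limits) of a single init container.
	initRequests := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("1"),     // 1000 milli-CPU, larger than 500, so it wins
		v1.ResourceMemory: resource.MustParse("512Mi"), // smaller than 1Gi, so Memory is kept
	}

	r.SetMaxResource(initRequests)
	fmt.Println(r.MilliCPU, r.Memory) // 1000 1073741824
}
```

Note the guard at the top of the new method: calling `SetMaxResource` on a nil `*Resource` is simply a no-op.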
@@ -185,6 +185,56 @@ func TestResourceAddScalar(t *testing.T) {
 	}
 }

+func TestSetMaxResource(t *testing.T) {
+	tests := []struct {
+		resource     *Resource
+		resourceList v1.ResourceList
+		expected     *Resource
+	}{
+		{
+			resource: &Resource{},
+			resourceList: map[v1.ResourceName]resource.Quantity{
+				v1.ResourceCPU:              *resource.NewScaledQuantity(4, -3),
+				v1.ResourceMemory:           *resource.NewQuantity(2000, resource.BinarySI),
+				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
+			},
+			expected: &Resource{
+				MilliCPU:         4,
+				Memory:           2000,
+				EphemeralStorage: 5000,
+			},
+		},
+		{
+			resource: &Resource{
+				MilliCPU:         4,
+				Memory:           4000,
+				EphemeralStorage: 5000,
+				ScalarResources:  map[v1.ResourceName]int64{"scalar.test/scalar1": 1, "hugepages-test": 2},
+			},
+			resourceList: map[v1.ResourceName]resource.Quantity{
+				v1.ResourceCPU:                      *resource.NewScaledQuantity(4, -3),
+				v1.ResourceMemory:                   *resource.NewQuantity(2000, resource.BinarySI),
+				v1.ResourceEphemeralStorage:         *resource.NewQuantity(7000, resource.BinarySI),
+				"scalar.test/scalar1":               *resource.NewQuantity(4, resource.DecimalSI),
+				v1.ResourceHugePagesPrefix + "test": *resource.NewQuantity(5, resource.BinarySI),
+			},
+			expected: &Resource{
+				MilliCPU:         4,
+				Memory:           4000,
+				EphemeralStorage: 7000,
+				ScalarResources:  map[v1.ResourceName]int64{"scalar.test/scalar1": 4, "hugepages-test": 5},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		test.resource.SetMaxResource(test.resourceList)
+		if !reflect.DeepEqual(test.expected, test.resource) {
+			t.Errorf("expected: %#v, got: %#v", test.expected, test.resource)
+		}
+	}
+}
+
 func TestNewNodeInfo(t *testing.T) {
 	nodeName := "test-node"
 	pods := []*v1.Pod{
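The expected values in the first test case follow from how `resource.Quantity` reads back: `NewScaledQuantity(4, -3)` is 4×10⁻³ of a unit, which `MilliValue()` reports as 4, and `NewQuantity(2000, resource.BinarySI)` reports a `Value()` of 2000; the hugepages key in the second case is plain prefix concatenation. A small standalone check, assuming only the `k8s.io/api` and `k8s.io/apimachinery` packages this test file already imports:

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.NewScaledQuantity(4, -3)              // 0.004 cores
	mem := resource.NewQuantity(2000, resource.BinarySI)  // 2000 bytes

	fmt.Println(cpu.MilliValue()) // 4    -> matches the expected MilliCPU
	fmt.Println(mem.Value())      // 2000 -> matches the expected Memory

	// The hugepages key in the second case is just prefix concatenation.
	fmt.Println(v1.ResourceHugePagesPrefix + "test") // "hugepages-test"
}
```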