From adea3733a8c077a1fb4371df32a7bb90fb1bbe73 Mon Sep 17 00:00:00 2001 From: wackxu Date: Sat, 21 Apr 2018 16:13:58 +0800 Subject: [PATCH] add SetMaxResource for Resource --- .../algorithm/predicates/predicates.go | 24 +-------- .../algorithm/priorities/resource_limits.go | 27 +--------- pkg/scheduler/schedulercache/node_info.go | 31 ++++++++++++ .../schedulercache/node_info_test.go | 50 +++++++++++++++++++ 4 files changed, 83 insertions(+), 49 deletions(-) diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go index 1055a55528d..101f3baf758 100644 --- a/pkg/scheduler/algorithm/predicates/predicates.go +++ b/pkg/scheduler/algorithm/predicates/predicates.go @@ -668,29 +668,7 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource { // take max_resource(sum_pod, any_init_container) for _, container := range pod.Spec.InitContainers { - for rName, rQuantity := range container.Resources.Requests { - switch rName { - case v1.ResourceMemory: - if mem := rQuantity.Value(); mem > result.Memory { - result.Memory = mem - } - case v1.ResourceEphemeralStorage: - if ephemeralStorage := rQuantity.Value(); ephemeralStorage > result.EphemeralStorage { - result.EphemeralStorage = ephemeralStorage - } - case v1.ResourceCPU: - if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU { - result.MilliCPU = cpu - } - default: - if v1helper.IsScalarResourceName(rName) { - value := rQuantity.Value() - if value > result.ScalarResources[rName] { - result.SetScalar(rName, value) - } - } - } - } + result.SetMaxResource(container.Resources.Requests) } return result diff --git a/pkg/scheduler/algorithm/priorities/resource_limits.go b/pkg/scheduler/algorithm/priorities/resource_limits.go index 6f440d8f1dd..5a02ff8dccf 100644 --- a/pkg/scheduler/algorithm/priorities/resource_limits.go +++ b/pkg/scheduler/algorithm/priorities/resource_limits.go @@ -20,7 +20,6 @@ import ( "fmt" "k8s.io/api/core/v1" - v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/pkg/scheduler/schedulercache" @@ -93,31 +92,7 @@ func getResourceLimits(pod *v1.Pod) *schedulercache.Resource { // take max_resource(sum_pod, any_init_container) for _, container := range pod.Spec.InitContainers { - for rName, rQuantity := range container.Resources.Limits { - switch rName { - case v1.ResourceMemory: - if mem := rQuantity.Value(); mem > result.Memory { - result.Memory = mem - } - case v1.ResourceCPU: - if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU { - result.MilliCPU = cpu - } - // keeping these resources though score computation in other priority functions and in this - // are only computed based on cpu and memory only. 
- case v1.ResourceEphemeralStorage: - if ephemeralStorage := rQuantity.Value(); ephemeralStorage > result.EphemeralStorage { - result.EphemeralStorage = ephemeralStorage - } - default: - if v1helper.IsScalarResourceName(rName) { - value := rQuantity.Value() - if value > result.ScalarResources[rName] { - result.SetScalar(rName, value) - } - } - } - } + result.SetMaxResource(container.Resources.Limits) } return result diff --git a/pkg/scheduler/schedulercache/node_info.go b/pkg/scheduler/schedulercache/node_info.go index a98e9cdcabc..9ed6e0dc4e9 100644 --- a/pkg/scheduler/schedulercache/node_info.go +++ b/pkg/scheduler/schedulercache/node_info.go @@ -202,6 +202,37 @@ func (r *Resource) SetScalar(name v1.ResourceName, quantity int64) { r.ScalarResources[name] = quantity } +// SetMaxResource compares with ResourceList and takes max value for each Resource. +func (r *Resource) SetMaxResource(rl v1.ResourceList) { + if r == nil { + return + } + + for rName, rQuantity := range rl { + switch rName { + case v1.ResourceMemory: + if mem := rQuantity.Value(); mem > r.Memory { + r.Memory = mem + } + case v1.ResourceCPU: + if cpu := rQuantity.MilliValue(); cpu > r.MilliCPU { + r.MilliCPU = cpu + } + case v1.ResourceEphemeralStorage: + if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage { + r.EphemeralStorage = ephemeralStorage + } + default: + if v1helper.IsScalarResourceName(rName) { + value := rQuantity.Value() + if value > r.ScalarResources[rName] { + r.SetScalar(rName, value) + } + } + } + } +} + // NewNodeInfo returns a ready to use empty NodeInfo object. // If any pods are given in arguments, their information will be aggregated in // the returned object. diff --git a/pkg/scheduler/schedulercache/node_info_test.go b/pkg/scheduler/schedulercache/node_info_test.go index 40a9e5afbac..ea47a965b3f 100644 --- a/pkg/scheduler/schedulercache/node_info_test.go +++ b/pkg/scheduler/schedulercache/node_info_test.go @@ -185,6 +185,56 @@ func TestResourceAddScalar(t *testing.T) { } } +func TestSetMaxResource(t *testing.T) { + tests := []struct { + resource *Resource + resourceList v1.ResourceList + expected *Resource + }{ + { + resource: &Resource{}, + resourceList: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: *resource.NewScaledQuantity(4, -3), + v1.ResourceMemory: *resource.NewQuantity(2000, resource.BinarySI), + v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI), + }, + expected: &Resource{ + MilliCPU: 4, + Memory: 2000, + EphemeralStorage: 5000, + }, + }, + { + resource: &Resource{ + MilliCPU: 4, + Memory: 4000, + EphemeralStorage: 5000, + ScalarResources: map[v1.ResourceName]int64{"scalar.test/scalar1": 1, "hugepages-test": 2}, + }, + resourceList: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: *resource.NewScaledQuantity(4, -3), + v1.ResourceMemory: *resource.NewQuantity(2000, resource.BinarySI), + v1.ResourceEphemeralStorage: *resource.NewQuantity(7000, resource.BinarySI), + "scalar.test/scalar1": *resource.NewQuantity(4, resource.DecimalSI), + v1.ResourceHugePagesPrefix + "test": *resource.NewQuantity(5, resource.BinarySI), + }, + expected: &Resource{ + MilliCPU: 4, + Memory: 4000, + EphemeralStorage: 7000, + ScalarResources: map[v1.ResourceName]int64{"scalar.test/scalar1": 4, "hugepages-test": 5}, + }, + }, + } + + for _, test := range tests { + test.resource.SetMaxResource(test.resourceList) + if !reflect.DeepEqual(test.expected, test.resource) { + t.Errorf("expected: %#v, got: %#v", test.expected, test.resource) + } + } +} + 
func TestNewNodeInfo(t *testing.T) { nodeName := "test-node" pods := []*v1.Pod{
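
Note (not part of the patch): a minimal, self-contained sketch of how a caller such as GetResourceRequest or getResourceLimits now uses the new helper to apply the max_resource(sum_pod, any_init_container) rule. The package main wrapper and the concrete quantities below are illustrative assumptions only; the import paths match this tree at the time of the patch.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/scheduler/schedulercache"
)

func main() {
	// Pretend this already holds the summed requests of the pod's regular
	// containers (values chosen only for illustration).
	r := &schedulercache.Resource{
		MilliCPU: 250,
		Memory:   512 * 1024 * 1024,
	}

	// Requests of a hypothetical init container.
	initRequests := v1.ResourceList{
		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(256*1024*1024, resource.BinarySI),
	}

	// SetMaxResource keeps, per resource name, the larger of the current
	// value and the value from the ResourceList, so after the call r holds
	// max(sum of regular containers, init container) for each resource.
	r.SetMaxResource(initRequests)

	fmt.Printf("MilliCPU=%d Memory=%d\n", r.MilliCPU, r.Memory)
	// MilliCPU=500 (the init container wins), Memory=536870912 (the pod sum wins).
}

Extended resources go through the same path: names accepted by v1helper.IsScalarResourceName land in ScalarResources via SetScalar, as exercised by the scalar.test/scalar1 and hugepages-test cases in TestSetMaxResource above.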