From 9d2d37be270224fc23ebd4ce9b4bff4578d70174 Mon Sep 17 00:00:00 2001
From: Eric Ernst
Date: Fri, 2 Aug 2019 14:11:19 -0700
Subject: [PATCH 1/4] resource: modify resource helpers for better reuse

Update the GetResourceRequest function to utilize a new helper,
GetResourceRequestQuantity. This logic was duplicated in a couple of
areas in the K/K codebase, so consolidate it for better test coverage.

Signed-off-by: Eric Ernst
---
 pkg/api/v1/resource/helpers.go | 61 +++++++++++++++++++++-------------
 1 file changed, 38 insertions(+), 23 deletions(-)

diff --git a/pkg/api/v1/resource/helpers.go b/pkg/api/v1/resource/helpers.go
index b983d924843..1c18d56af0c 100644
--- a/pkg/api/v1/resource/helpers.go
+++ b/pkg/api/v1/resource/helpers.go
@@ -23,6 +23,8 @@ import (
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
+    "k8s.io/kubernetes/pkg/features"
 )

 // addResourceList adds the resources in newList to list
@@ -68,34 +70,47 @@ func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
     return
 }

-// GetResourceRequest finds and returns the request for a specific resource.
+// GetResourceRequestQuantity finds and returns the request quantity for a specific resource.
+func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
+
+    requestQuantity := resource.Quantity{Format: resource.BinarySI}
+
+    if resourceName == v1.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+        // if the local storage capacity isolation feature gate is disabled, pods request 0 disk
+        return requestQuantity
+    }
+
+    for _, container := range pod.Spec.Containers {
+        if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
+            requestQuantity.Add(rQuantity)
+        }
+    }
+
+    for _, container := range pod.Spec.InitContainers {
+        if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
+            if requestQuantity.Cmp(rQuantity) < 0 {
+                requestQuantity = rQuantity
+            }
+        }
+    }
+
+    return requestQuantity
+}
+
+// GetResourceRequest finds and returns the request value for a specific resource.
 func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
     if resource == v1.ResourcePods {
         return 1
     }
-    totalResources := int64(0)
-    for _, container := range pod.Spec.Containers {
-        if rQuantity, ok := container.Resources.Requests[resource]; ok {
-            if resource == v1.ResourceCPU {
-                totalResources += rQuantity.MilliValue()
-            } else {
-                totalResources += rQuantity.Value()
-            }
-        }
+
+    requestQuantity := GetResourceRequestQuantity(pod, resource)
+
+    if resource == v1.ResourceCPU {
+        return requestQuantity.MilliValue()
+    } else {
+        return requestQuantity.Value()
     }
-    // take max_resource(sum_pod, any_init_container)
-    for _, container := range pod.Spec.InitContainers {
-        if rQuantity, ok := container.Resources.Requests[resource]; ok {
-            if resource == v1.ResourceCPU {
-                if rQuantity.MilliValue() > totalResources {
-                    totalResources = rQuantity.MilliValue()
-                }
-            } else if rQuantity.Value() > totalResources {
-                totalResources = rQuantity.Value()
-            }
-        }
-    }
-    return totalResources
+
 }

 // ExtractResourceValueByContainerName extracts the value of a resource

From 476c1c7a2bfa5c6f9637aaf2fa98b67249ee8ba8 Mon Sep 17 00:00:00 2001
From: Eric Ernst
Date: Fri, 2 Aug 2019 11:07:24 -0700
Subject: [PATCH 2/4] kube-eviction: use common resource summation functions

Utilize the resource helpers' GetResourceRequestQuantity instead of
duplicating the logic here.
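For illustration only (not part of the change): the rule the shared helper encodes is
"effective request = max(sum of app container requests, largest single init container
request)". A minimal plain-Go sketch, with int64 values standing in for resource.Quantity
and a hypothetical effectiveRequest helper:

    package main

    import "fmt"

    // effectiveRequest mirrors the summation rule used by GetResourceRequestQuantity:
    // init containers run one at a time, so only the largest init container request
    // competes with the sum of the long-running app container requests.
    func effectiveRequest(appRequests, initRequests []int64) int64 {
        effective := int64(0)
        for _, r := range appRequests {
            effective += r
        }
        for _, r := range initRequests {
            if r > effective {
                effective = r
            }
        }
        return effective
    }

    func main() {
        // Two app containers (100m + 200m CPU) and one init container (500m CPU):
        // the pod-level request is max(300, 500) = 500 millicores.
        fmt.Println(effectiveRequest([]int64{100, 200}, []int64{500}))
    }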
Signed-off-by: Eric Ernst
---
 pkg/kubelet/eviction/helpers.go | 54 ++++++---------------------------
 1 file changed, 9 insertions(+), 45 deletions(-)

diff --git a/pkg/kubelet/eviction/helpers.go b/pkg/kubelet/eviction/helpers.go
index 70b3714e4d8..dfdb8ce3b60 100644
--- a/pkg/kubelet/eviction/helpers.go
+++ b/pkg/kubelet/eviction/helpers.go
@@ -25,9 +25,8 @@ import (
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/klog"
-    "k8s.io/kubernetes/pkg/features"
+    v1resource "k8s.io/kubernetes/pkg/api/v1/resource"
     statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
     evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
     kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
@@ -536,8 +535,8 @@ func exceedMemoryRequests(stats statsFunc) cmpFunc {
         p1Memory := memoryUsage(p1Stats.Memory)
         p2Memory := memoryUsage(p2Stats.Memory)
-        p1ExceedsRequests := p1Memory.Cmp(podRequest(p1, v1.ResourceMemory)) == 1
-        p2ExceedsRequests := p2Memory.Cmp(podRequest(p2, v1.ResourceMemory)) == 1
+        p1ExceedsRequests := p1Memory.Cmp(v1resource.GetResourceRequestQuantity(p1, v1.ResourceMemory)) == 1
+        p2ExceedsRequests := p2Memory.Cmp(v1resource.GetResourceRequestQuantity(p2, v1.ResourceMemory)) == 1
         // prioritize evicting the pod which exceeds its requests
         return cmpBool(p1ExceedsRequests, p2ExceedsRequests)
     }
@@ -555,11 +554,11 @@ func memory(stats statsFunc) cmpFunc {
         // adjust p1, p2 usage relative to the request (if any)
         p1Memory := memoryUsage(p1Stats.Memory)
-        p1Request := podRequest(p1, v1.ResourceMemory)
+        p1Request := v1resource.GetResourceRequestQuantity(p1, v1.ResourceMemory)
         p1Memory.Sub(p1Request)

         p2Memory := memoryUsage(p2Stats.Memory)
-        p2Request := podRequest(p2, v1.ResourceMemory)
+        p2Request := v1resource.GetResourceRequestQuantity(p2, v1.ResourceMemory)
         p2Memory.Sub(p2Request)

         // prioritize evicting the pod which has the larger consumption of memory
         return p2Memory.Cmp(p1Memory)
     }
 }

-// podRequest returns the total resource request of a pod which is the
-// max(max of init container requests, sum of container requests)
-func podRequest(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
-    containerValue := resource.Quantity{Format: resource.BinarySI}
-    if resourceName == v1.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
-        // if the local storage capacity isolation feature gate is disabled, pods request 0 disk
-        return containerValue
-    }
-    for i := range pod.Spec.Containers {
-        switch resourceName {
-        case v1.ResourceMemory:
-            containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.Memory())
-        case v1.ResourceEphemeralStorage:
-            containerValue.Add(*pod.Spec.Containers[i].Resources.Requests.StorageEphemeral())
-        }
-    }
-    initValue := resource.Quantity{Format: resource.BinarySI}
-    for i := range pod.Spec.InitContainers {
-        switch resourceName {
-        case v1.ResourceMemory:
-            if initValue.Cmp(*pod.Spec.InitContainers[i].Resources.Requests.Memory()) < 0 {
-                initValue = *pod.Spec.InitContainers[i].Resources.Requests.Memory()
-            }
-        case v1.ResourceEphemeralStorage:
-            if initValue.Cmp(*pod.Spec.InitContainers[i].Resources.Requests.StorageEphemeral()) < 0 {
-                initValue = *pod.Spec.InitContainers[i].Resources.Requests.StorageEphemeral()
-            }
-        }
-    }
-    if containerValue.Cmp(initValue) > 0 {
-        return containerValue
-    }
-    return initValue
-}
-
 // exceedDiskRequests compares whether or not pods' disk usage exceeds their requests
 func exceedDiskRequests(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) cmpFunc {
     return func(p1, p2 *v1.Pod) int {
@@ -621,8 +585,8 @@ func exceedDiskRequests(stats statsFunc, fsStatsToMeasure []fsStatsType, diskRes
         p1Disk := p1Usage[diskResource]
         p2Disk := p2Usage[diskResource]
-        p1ExceedsRequests := p1Disk.Cmp(podRequest(p1, diskResource)) == 1
-        p2ExceedsRequests := p2Disk.Cmp(podRequest(p2, diskResource)) == 1
+        p1ExceedsRequests := p1Disk.Cmp(v1resource.GetResourceRequestQuantity(p1, diskResource)) == 1
+        p2ExceedsRequests := p2Disk.Cmp(v1resource.GetResourceRequestQuantity(p2, diskResource)) == 1
         // prioritize evicting the pod which exceeds its requests
         return cmpBool(p1ExceedsRequests, p2ExceedsRequests)
     }
@@ -647,9 +611,9 @@ func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.Resou
         // adjust p1, p2 usage relative to the request (if any)
         p1Disk := p1Usage[diskResource]
         p2Disk := p2Usage[diskResource]
-        p1Request := podRequest(p1, v1.ResourceEphemeralStorage)
+        p1Request := v1resource.GetResourceRequestQuantity(p1, v1.ResourceEphemeralStorage)
         p1Disk.Sub(p1Request)
-        p2Request := podRequest(p2, v1.ResourceEphemeralStorage)
+        p2Request := v1resource.GetResourceRequestQuantity(p2, v1.ResourceEphemeralStorage)
         p2Disk.Sub(p2Request)
         // prioritize evicting the pod which has the larger consumption of disk
         return p2Disk.Cmp(p1Disk)

From f137a9cdb9686cbeea1aea1d0851fa587f178d82 Mon Sep 17 00:00:00 2001
From: Eric Ernst
Date: Tue, 13 Aug 2019 15:49:54 -0700
Subject: [PATCH 3/4] resource: cleanup helpers_test.go

No test content changes - just improvements for readability.

Signed-off-by: Eric Ernst
---
 pkg/api/v1/resource/helpers_test.go | 67 ++++++++++++++++-------------
 1 file changed, 36 insertions(+), 31 deletions(-)

diff --git a/pkg/api/v1/resource/helpers_test.go b/pkg/api/v1/resource/helpers_test.go
index 30697045bfb..5dc3d4776ee 100644
--- a/pkg/api/v1/resource/helpers_test.go
+++ b/pkg/api/v1/resource/helpers_test.go
@@ -71,12 +71,12 @@ func TestGetResourceRequest(t *testing.T) {
         expectedError error
     }{
         {
-            pod:           getPod("foo", "9", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "9"}),
             res:           v1.ResourceCPU,
             expectedValue: 9000,
         },
         {
-            pod:           getPod("foo", "", "", "90Mi", ""),
+            pod:           getPod("foo", podResources{memoryRequest: "90Mi"}),
             res:           v1.ResourceMemory,
             expectedValue: 94371840,
         },
@@ -101,7 +101,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "limits.cpu",
             },
             cName:         "foo",
-            pod:           getPod("foo", "", "9", "", ""),
+            pod:           getPod("foo", podResources{cpuLimit: "9"}),
             expectedValue: "9",
         },
         {
@@ -109,7 +109,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.cpu",
             },
             cName:         "foo",
-            pod:           getPod("foo", "", "", "", ""),
+            pod:           getPod("foo", podResources{}),
             expectedValue: "0",
         },
         {
@@ -117,7 +117,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.cpu",
             },
             cName:         "foo",
-            pod:           getPod("foo", "8", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "8"}),
             expectedValue: "8",
         },
         {
@@ -125,7 +125,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.cpu",
             },
             cName:         "foo",
-            pod:           getPod("foo", "100m", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "100m"}),
             expectedValue: "1",
         },
         {
@@ -134,7 +134,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Divisor:  resource.MustParse("100m"),
             },
             cName:         "foo",
-            pod:           getPod("foo", "1200m", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "1200m"}),
             expectedValue: "12",
         },
         {
@@ -142,7 +142,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.memory",
             },
             cName:         "foo",
-            pod:           getPod("foo", "", "", "100Mi", ""),
+            pod:           getPod("foo", podResources{memoryRequest: "100Mi"}),
             expectedValue: "104857600",
         },
         {
@@ -151,7 +151,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Divisor:  resource.MustParse("1Mi"),
             },
             cName:         "foo",
-            pod:           getPod("foo", "", "", "100Mi", "1Gi"),
+            pod:           getPod("foo", podResources{memoryRequest: "100Mi", memoryLimit: "1Gi"}),
             expectedValue: "100",
         },
         {
@@ -159,7 +159,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "limits.memory",
             },
             cName:         "foo",
-            pod:           getPod("foo", "", "", "10Mi", "100Mi"),
+            pod:           getPod("foo", podResources{memoryRequest: "10Mi", memoryLimit: "100Mi"}),
             expectedValue: "104857600",
         },
         {
@@ -167,7 +167,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "limits.cpu",
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "", "9", "", ""),
+            pod:           getPod("foo", podResources{cpuLimit: "9"}),
             expectedValue: "9",
         },
         {
@@ -175,7 +175,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.cpu",
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "", "", "", ""),
+            pod:           getPod("foo", podResources{}),
             expectedValue: "0",
         },
         {
@@ -183,7 +183,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.cpu",
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "8", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "8"}),
             expectedValue: "8",
         },
         {
@@ -191,7 +191,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.cpu",
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "100m", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "100m"}),
             expectedValue: "1",
         },
         {
@@ -200,7 +200,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Divisor:  resource.MustParse("100m"),
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "1200m", "", "", ""),
+            pod:           getPod("foo", podResources{cpuRequest: "1200m"}),
             expectedValue: "12",
         },
         {
@@ -208,7 +208,7 @@ func TestExtractResourceValue(t *testing.T) {
                 Resource: "requests.memory",
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "", "", "100Mi", ""),
+            pod:           getPod("foo", podResources{memoryRequest: "100Mi"}),
             expectedValue: "104857600",
         },
         {
@@ -217,15 +217,16 @@ func TestExtractResourceValue(t *testing.T) {
                 Divisor:  resource.MustParse("1Mi"),
             },
             cName:         "init-foo",
-            pod:           getPod("foo", "", "", "100Mi", "1Gi"),
+            pod:           getPod("foo", podResources{memoryRequest: "100Mi", memoryLimit: "1Gi"}),
             expectedValue: "100",
         },
         {
             fs: &v1.ResourceFieldSelector{
                 Resource: "limits.memory",
             },
-            cName: "init-foo",
-            pod:   getPod("foo", "", "", "10Mi", "100Mi"),
+            cName:         "init-foo",
+            pod:           getPod("foo", podResources{memoryRequest: "10Mi", memoryLimit: "100Mi"}),
+            expectedValue: "104857600",
         },
     }

@@ -241,35 +242,39 @@ func TestExtractResourceValue(t *testing.T) {
     }
 }

-func getPod(cname, cpuRequest, cpuLimit, memoryRequest, memoryLimit string) *v1.Pod {
-    resources := v1.ResourceRequirements{
+type podResources struct {
+    cpuRequest, cpuLimit, memoryRequest, memoryLimit, cpuOverhead, memoryOverhead string
+}
+
+func getPod(cname string, resources podResources) *v1.Pod {
+    r := v1.ResourceRequirements{
         Limits:   make(v1.ResourceList),
         Requests: make(v1.ResourceList),
     }
-    if cpuLimit != "" {
-        resources.Limits[v1.ResourceCPU] = resource.MustParse(cpuLimit)
+    if resources.cpuLimit != "" {
+        r.Limits[v1.ResourceCPU] = resource.MustParse(resources.cpuLimit)
     }
-    if memoryLimit != "" {
-        resources.Limits[v1.ResourceMemory] = resource.MustParse(memoryLimit)
+    if resources.memoryLimit != "" {
+        r.Limits[v1.ResourceMemory] = resource.MustParse(resources.memoryLimit)
     }
-    if cpuRequest != "" {
-        resources.Requests[v1.ResourceCPU] = resource.MustParse(cpuRequest)
+    if resources.cpuRequest != "" {
+        r.Requests[v1.ResourceCPU] = resource.MustParse(resources.cpuRequest)
     }
-    if memoryRequest != "" {
-        resources.Requests[v1.ResourceMemory] = resource.MustParse(memoryRequest)
+    if resources.memoryRequest != "" {
+        r.Requests[v1.ResourceMemory] = resource.MustParse(resources.memoryRequest)
     }
     return &v1.Pod{
         Spec: v1.PodSpec{
             Containers: []v1.Container{
                 {
                     Name:      cname,
-                    Resources: resources,
+                    Resources: r,
                 },
             },
             InitContainers: []v1.Container{
                 {
                     Name:      "init-" + cname,
-                    Resources: resources,
+                    Resources: r,
                 },
             },
         },

From 80ee072b85fb8616b5209cfcafc291ccbf868657 Mon Sep 17 00:00:00 2001
From: Eric Ernst
Date: Thu, 20 Jun 2019 14:50:09 -0700
Subject: [PATCH 4/4] pod-overhead: utilize pod overhead for cgroup sizing, eviction handling

Pod and burstable QoS cgroups should take the overhead of running a sandbox
into account if the PodOverhead feature is enabled.

These helper functions are utilized by Kubelet for sizing the pod and
burstable QoS cgroups.

Pod overhead is added to resource requests, regardless of the initial
request values. A particular resource's pod overhead is only added to a
resource limit if a non-zero limit already existed.

This commit updates eviction handling to also take Pod Overhead into
account (if the feature is enabled).

Signed-off-by: Eric Ernst
---
 pkg/api/v1/resource/BUILD           |   6 ++
 pkg/api/v1/resource/helpers.go      |  45 ++++++++--
 pkg/api/v1/resource/helpers_test.go | 125 ++++++++++++++++++++++++++--
 3 files changed, 163 insertions(+), 13 deletions(-)

diff --git a/pkg/api/v1/resource/BUILD b/pkg/api/v1/resource/BUILD
index 7cbc50774db..dca77a4d625 100644
--- a/pkg/api/v1/resource/BUILD
+++ b/pkg/api/v1/resource/BUILD
@@ -11,8 +11,12 @@ go_test(
     srcs = ["helpers_test.go"],
     embed = [":go_default_library"],
     deps = [
+        "//pkg/features:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
     ],
 )
@@ -22,8 +26,10 @@ go_library(
     srcs = ["helpers.go"],
     importpath = "k8s.io/kubernetes/pkg/api/v1/resource",
     deps = [
+        "//pkg/features:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
     ],
 )

diff --git a/pkg/api/v1/resource/helpers.go b/pkg/api/v1/resource/helpers.go
index 1c18d56af0c..60a7478da00 100644
--- a/pkg/api/v1/resource/helpers.go
+++ b/pkg/api/v1/resource/helpers.go
@@ -28,8 +28,8 @@ import (
 )

 // addResourceList adds the resources in newList to list
-func addResourceList(list, new v1.ResourceList) {
-    for name, quantity := range new {
+func addResourceList(list, newList v1.ResourceList) {
+    for name, quantity := range newList {
         if value, ok := list[name]; !ok {
             list[name] = *quantity.Copy()
         } else {
@@ -55,7 +55,9 @@ func maxResourceList(list, new v1.ResourceList) {
 }

 // PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
-// containers of the pod.
+// containers of the pod. If the PodOverhead feature is enabled, pod overhead is added to the
+// total container resource requests and to the total container limits which have a
+// non-zero quantity.
 func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
     reqs, limits = v1.ResourceList{}, v1.ResourceList{}
     for _, container := range pod.Spec.Containers {
@@ -67,13 +69,35 @@ func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
         maxResourceList(reqs, container.Resources.Requests)
         maxResourceList(limits, container.Resources.Limits)
     }
+
+    // if PodOverhead feature is supported, add overhead for running a pod
+    // to the sum of requests and to non-zero limits:
+    if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+        addResourceList(reqs, pod.Spec.Overhead)
+
+        for name, quantity := range pod.Spec.Overhead {
+            if value, ok := limits[name]; ok && !value.IsZero() {
+                value.Add(quantity)
+                limits[name] = value
+            }
+        }
+    }
+
     return
 }

 // GetResourceRequestQuantity finds and returns the request quantity for a specific resource.
 func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
+    requestQuantity := resource.Quantity{}

-    requestQuantity := resource.Quantity{Format: resource.BinarySI}
+    switch resourceName {
+    case v1.ResourceCPU:
+        requestQuantity = resource.Quantity{Format: resource.DecimalSI}
+    case v1.ResourceMemory, v1.ResourceStorage, v1.ResourceEphemeralStorage:
+        requestQuantity = resource.Quantity{Format: resource.BinarySI}
+    default:
+        requestQuantity = resource.Quantity{Format: resource.DecimalSI}
+    }

     if resourceName == v1.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
         // if the local storage capacity isolation feature gate is disabled, pods request 0 disk
@@ -89,11 +113,19 @@ func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resou
     for _, container := range pod.Spec.InitContainers {
         if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
             if requestQuantity.Cmp(rQuantity) < 0 {
-                requestQuantity = rQuantity
+                requestQuantity = rQuantity.DeepCopy()
             }
         }
     }

+    // if PodOverhead feature is supported, add overhead for running a pod
+    // to the total requests if the resource total is non-zero
+    if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+        if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
+            requestQuantity.Add(podOverhead)
+        }
+    }
+
     return requestQuantity
 }

@@ -107,10 +139,9 @@ func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {

     if resource == v1.ResourceCPU {
         return requestQuantity.MilliValue()
-    } else {
-        return requestQuantity.Value()
     }
+
+    return requestQuantity.Value()
 }

 // ExtractResourceValueByContainerName extracts the value of a resource

diff --git a/pkg/api/v1/resource/helpers_test.go b/pkg/api/v1/resource/helpers_test.go
index 5dc3d4776ee..0c3835d77d8 100644
--- a/pkg/api/v1/resource/helpers_test.go
+++ b/pkg/api/v1/resource/helpers_test.go
@@ -22,7 +22,11 @@ import (
     "github.com/stretchr/testify/assert"

     "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/equality"
     "k8s.io/apimachinery/pkg/api/resource"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
+    featuregatetesting "k8s.io/component-base/featuregate/testing"
+    "k8s.io/kubernetes/pkg/features"
 )

 func TestResourceHelpers(t *testing.T) {
@@ -64,27 +68,53 @@ func TestDefaultResourceHelpers(t *testing.T) {
 }

 func TestGetResourceRequest(t *testing.T) {
+    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
+
     cases := []struct {
         pod           *v1.Pod
-        res           v1.ResourceName
+        cName         string
+        resourceName  v1.ResourceName
         expectedValue int64
-        expectedError error
     }{
         {
             pod:           getPod("foo", podResources{cpuRequest: "9"}),
-            res:           v1.ResourceCPU,
+            resourceName:  v1.ResourceCPU,
             expectedValue: 9000,
         },
         {
             pod:           getPod("foo", podResources{memoryRequest: "90Mi"}),
-            res:           v1.ResourceMemory,
+            resourceName:  v1.ResourceMemory,
             expectedValue: 94371840,
         },
+        {
+            cName:         "just-overhead for cpu",
+            pod:           getPod("foo", podResources{cpuOverhead: "5", memoryOverhead: "5"}),
+            resourceName:  v1.ResourceCPU,
+            expectedValue: 0,
+        },
+        {
+            cName:         "just-overhead for memory",
+            pod:           getPod("foo", podResources{memoryOverhead: "5"}),
+            resourceName:  v1.ResourceMemory,
+            expectedValue: 0,
+        },
+        {
+            cName:         "cpu overhead and req",
+            pod:           getPod("foo", podResources{cpuRequest: "2", cpuOverhead: "5", memoryOverhead: "5"}),
+            resourceName:  v1.ResourceCPU,
+            expectedValue: 7000,
+        },
+        {
+            cName:         "mem overhead and req",
+            pod:           getPod("foo", podResources{cpuRequest: "2", memoryRequest: "1024", cpuOverhead: "5", memoryOverhead: "5"}),
+            resourceName:  v1.ResourceMemory,
+            expectedValue: 1029,
+        },
     }
     as := assert.New(t)
     for idx, tc := range cases {
-        actual := GetResourceRequest(tc.pod, tc.res)
-        as.Equal(actual, tc.expectedValue, "expected test case [%d] to return %q; got %q instead", idx, tc.expectedValue, actual)
+        actual := GetResourceRequest(tc.pod, tc.resourceName)
+        as.Equal(actual, tc.expectedValue, "expected test case [%d] %v: to return %q; got %q instead", idx, tc.cName, tc.expectedValue, actual)
     }
 }

@@ -242,6 +272,78 @@ func TestExtractResourceValue(t *testing.T) {
     }
 }

+func TestPodRequestsAndLimits(t *testing.T) {
+    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
+
+    cases := []struct {
+        pod              *v1.Pod
+        cName            string
+        expectedRequests v1.ResourceList
+        expectedLimits   v1.ResourceList
+    }{
+        {
+            cName:            "just-limit-no-overhead",
+            pod:              getPod("foo", podResources{cpuLimit: "9"}),
+            expectedRequests: v1.ResourceList{},
+            expectedLimits: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU): resource.MustParse("9"),
+            },
+        },
+        {
+            cName: "just-overhead",
+            pod:   getPod("foo", podResources{cpuOverhead: "5", memoryOverhead: "5"}),
+            expectedRequests: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU):    resource.MustParse("5"),
+                v1.ResourceName(v1.ResourceMemory): resource.MustParse("5"),
+            },
+            expectedLimits: v1.ResourceList{},
+        },
+        {
+            cName: "req-and-overhead",
+            pod:   getPod("foo", podResources{cpuRequest: "1", memoryRequest: "10", cpuOverhead: "5", memoryOverhead: "5"}),
+            expectedRequests: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU):    resource.MustParse("6"),
+                v1.ResourceName(v1.ResourceMemory): resource.MustParse("15"),
+            },
+            expectedLimits: v1.ResourceList{},
+        },
+        {
+            cName: "all-req-lim-and-overhead",
+            pod:   getPod("foo", podResources{cpuRequest: "1", cpuLimit: "2", memoryRequest: "10", memoryLimit: "12", cpuOverhead: "5", memoryOverhead: "5"}),
+            expectedRequests: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU):    resource.MustParse("6"),
+                v1.ResourceName(v1.ResourceMemory): resource.MustParse("15"),
+            },
+            expectedLimits: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU):    resource.MustParse("7"),
+                v1.ResourceName(v1.ResourceMemory): resource.MustParse("17"),
+            },
+        },
+        {
+            cName: "req-some-lim-and-overhead",
+            pod:   getPod("foo", podResources{cpuRequest: "1", cpuLimit: "2", memoryRequest: "10", cpuOverhead: "5", memoryOverhead: "5"}),
+            expectedRequests: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU):    resource.MustParse("6"),
+                v1.ResourceName(v1.ResourceMemory): resource.MustParse("15"),
+            },
+            expectedLimits: v1.ResourceList{
+                v1.ResourceName(v1.ResourceCPU): resource.MustParse("7"),
+            },
+        },
+    }
+    for idx, tc := range cases {
+        resRequests, resLimits := PodRequestsAndLimits(tc.pod)
+
+        if !equality.Semantic.DeepEqual(tc.expectedRequests, resRequests) {
+            t.Errorf("test case failure[%d]: %v, requests:\n expected:\t%v\ngot\t\t%v", idx, tc.cName, tc.expectedRequests, resRequests)
+        }
+
+        if !equality.Semantic.DeepEqual(tc.expectedLimits, resLimits) {
+            t.Errorf("test case failure[%d]: %v, limits:\n expected:\t%v\ngot\t\t%v", idx, tc.cName, tc.expectedLimits, resLimits)
+        }
+    }
+}
+
 type podResources struct {
     cpuRequest, cpuLimit, memoryRequest, memoryLimit, cpuOverhead, memoryOverhead string
 }

@@ -251,6 +353,9 @@ func getPod(cname string, resources podResources) *v1.Pod {
         Limits:   make(v1.ResourceList),
         Requests: make(v1.ResourceList),
     }
+
+    overhead := make(v1.ResourceList)
+
     if resources.cpuLimit != "" {
         r.Limits[v1.ResourceCPU] = resource.MustParse(resources.cpuLimit)
     }
@@ -263,6 +368,13 @@ func getPod(cname string, resources podResources) *v1.Pod {
     if resources.memoryRequest != "" {
         r.Requests[v1.ResourceMemory] = resource.MustParse(resources.memoryRequest)
     }
+    if resources.cpuOverhead != "" {
+        overhead[v1.ResourceCPU] = resource.MustParse(resources.cpuOverhead)
+    }
+    if resources.memoryOverhead != "" {
+        overhead[v1.ResourceMemory] = resource.MustParse(resources.memoryOverhead)
+    }
+
     return &v1.Pod{
         Spec: v1.PodSpec{
             Containers: []v1.Container{
@@ -277,6 +389,7 @@ func getPod(cname string, resources podResources) *v1.Pod {
                     Resources: r,
                 },
             },
+            Overhead: overhead,
         },
     }
 }
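As a usage-oriented sketch of the overhead accounting described in PATCH 4/4: overhead is
folded into the summed requests unconditionally, but into a limit only where the containers
already set a non-zero limit for that resource. The snippet below is illustrative only; it
mirrors that rule using the public k8s.io/api and k8s.io/apimachinery types instead of
calling the in-tree PodRequestsAndLimits helper, and all quantities are made-up examples.

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        // Totals summed from the pod's containers (arbitrary example values).
        requests := v1.ResourceList{
            v1.ResourceCPU:    resource.MustParse("1"),
            v1.ResourceMemory: resource.MustParse("100Mi"),
        }
        limits := v1.ResourceList{
            v1.ResourceCPU: resource.MustParse("2"), // note: no memory limit was set
        }
        // Sandbox overhead declared on the pod (pod.Spec.Overhead).
        overhead := v1.ResourceList{
            v1.ResourceCPU:    resource.MustParse("250m"),
            v1.ResourceMemory: resource.MustParse("120Mi"),
        }

        // Requests: overhead is always added, regardless of the initial request values.
        for name, quantity := range overhead {
            q := requests[name]
            q.Add(quantity)
            requests[name] = q
        }

        // Limits: overhead is only added where a non-zero limit already exists,
        // so the memory limit stays unset here.
        for name, quantity := range overhead {
            if limit, ok := limits[name]; ok && !limit.IsZero() {
                limit.Add(quantity)
                limits[name] = limit
            }
        }

        cpuReq := requests[v1.ResourceCPU]
        memReq := requests[v1.ResourceMemory]
        cpuLim := limits[v1.ResourceCPU]
        _, memLimitSet := limits[v1.ResourceMemory]
        fmt.Println(cpuReq.String(), memReq.String()) // 1250m 220Mi
        fmt.Println(cpuLim.String(), memLimitSet)     // 2250m false
    }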