pod-overhead: utilize pod overhead for cgroup sizing, eviction handling
Pod and burstable QoS cgroups should take the overhead of running a sandbox into account if the PodOverhead feature is enabled.

These helper functions are used by the Kubelet to size the pod and burstable QoS cgroups. Pod overhead is added to resource requests regardless of the initial request values; for a given resource, overhead is only added to the limit if a non-zero limit already exists. This commit also updates eviction handling to take pod overhead into account (if the feature is enabled).

Signed-off-by: Eric Ernst <eric.ernst@intel.com>
parent f137a9cdb9 · commit 80ee072b85
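The sizing rule described above can be sketched in isolation: overhead is always folded into the summed requests, but only into limits that are already non-zero, so a resource left unlimited stays unlimited. The applyOverhead helper below is a minimal, hypothetical illustration of that rule, not the code added by this commit; the real helpers appear in the diff that follows.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// applyOverhead folds sandbox overhead into already-summed container
// requests and limits, following the rule from the commit message.
func applyOverhead(reqs, limits, overhead v1.ResourceList) {
	for name, quantity := range overhead {
		// Requests: always add the overhead, even if no container requested the resource.
		if existing, ok := reqs[name]; ok {
			existing.Add(quantity)
			reqs[name] = existing
		} else {
			reqs[name] = quantity.DeepCopy()
		}
		// Limits: only add the overhead where a non-zero limit already exists.
		if existing, ok := limits[name]; ok && !existing.IsZero() {
			existing.Add(quantity)
			limits[name] = existing
		}
	}
}

func main() {
	reqs := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("1"),
		v1.ResourceMemory: resource.MustParse("10"),
	}
	limits := v1.ResourceList{
		v1.ResourceCPU: resource.MustParse("2"), // no memory limit set
	}
	overhead := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("5"),
		v1.ResourceMemory: resource.MustParse("5"),
	}

	applyOverhead(reqs, limits, overhead)

	cpuReq, memReq, cpuLim := reqs[v1.ResourceCPU], reqs[v1.ResourceMemory], limits[v1.ResourceCPU]
	_, memLimitSet := limits[v1.ResourceMemory]
	// Prints: requests cpu=6 memory=15, limits cpu=7, memory limit set: false
	fmt.Printf("requests cpu=%s memory=%s, limits cpu=%s, memory limit set: %v\n",
		cpuReq.String(), memReq.String(), cpuLim.String(), memLimitSet)
}

These numbers correspond to the "req-some-lim-and-overhead" test case added in helpers_test.go below.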
pkg/api/v1/resource/BUILD
@@ -11,8 +11,12 @@ go_test(
     srcs = ["helpers_test.go"],
     embed = [":go_default_library"],
     deps = [
+        "//pkg/features:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
     ],
 )
@@ -22,8 +26,10 @@ go_library(
     srcs = ["helpers.go"],
     importpath = "k8s.io/kubernetes/pkg/api/v1/resource",
     deps = [
+        "//pkg/features:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
     ],
 )
pkg/api/v1/resource/helpers.go
@@ -28,8 +28,8 @@ import (
 )
 
 // addResourceList adds the resources in newList to list
-func addResourceList(list, new v1.ResourceList) {
-	for name, quantity := range new {
+func addResourceList(list, newList v1.ResourceList) {
+	for name, quantity := range newList {
 		if value, ok := list[name]; !ok {
 			list[name] = *quantity.Copy()
 		} else {
@@ -55,7 +55,9 @@ func maxResourceList(list, new v1.ResourceList) {
 }
 
 // PodRequestsAndLimits returns a dictionary of all defined resources summed up for all
-// containers of the pod.
+// containers of the pod. If PodOverhead feature is enabled, pod overhead is added to the
+// total container resource requests and to the total container limits which have a
+// non-zero quantity.
 func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
 	reqs, limits = v1.ResourceList{}, v1.ResourceList{}
 	for _, container := range pod.Spec.Containers {
@@ -67,13 +69,35 @@ func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {
 		maxResourceList(reqs, container.Resources.Requests)
 		maxResourceList(limits, container.Resources.Limits)
 	}
 
+	// if PodOverhead feature is supported, add overhead for running a pod
+	// to the sum of requests and to non-zero limits:
+	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+		addResourceList(reqs, pod.Spec.Overhead)
+
+		for name, quantity := range pod.Spec.Overhead {
+			if value, ok := limits[name]; ok && !value.IsZero() {
+				value.Add(quantity)
+				limits[name] = value
+			}
+		}
+	}
+
 	return
 }
 
 // GetResourceRequestQuantity finds and returns the request quantity for a specific resource.
 func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
-	requestQuantity := resource.Quantity{Format: resource.BinarySI}
+	requestQuantity := resource.Quantity{}
 
+	switch resourceName {
+	case v1.ResourceCPU:
+		requestQuantity = resource.Quantity{Format: resource.DecimalSI}
+	case v1.ResourceMemory, v1.ResourceStorage, v1.ResourceEphemeralStorage:
+		requestQuantity = resource.Quantity{Format: resource.BinarySI}
+	default:
+		requestQuantity = resource.Quantity{Format: resource.DecimalSI}
+	}
+
 	if resourceName == v1.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
 		// if the local storage capacity isolation feature gate is disabled, pods request 0 disk
@@ -89,11 +113,19 @@ func GetResourceRequestQuantity(pod *v1.Pod, resourceName v1.ResourceName) resource.Quantity {
 	for _, container := range pod.Spec.InitContainers {
 		if rQuantity, ok := container.Resources.Requests[resourceName]; ok {
 			if requestQuantity.Cmp(rQuantity) < 0 {
-				requestQuantity = rQuantity
+				requestQuantity = rQuantity.DeepCopy()
 			}
 		}
 	}
 
+	// if PodOverhead feature is supported, add overhead for running a pod
+	// to the total requests if the resource total is non-zero
+	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+		if podOverhead, ok := pod.Spec.Overhead[resourceName]; ok && !requestQuantity.IsZero() {
+			requestQuantity.Add(podOverhead)
+		}
+	}
+
 	return requestQuantity
 }
 
@@ -107,10 +139,9 @@ func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
 
 	if resource == v1.ResourceCPU {
 		return requestQuantity.MilliValue()
-	} else {
-		return requestQuantity.Value()
 	}
+
+	return requestQuantity.Value()
 }
 
 // ExtractResourceValueByContainerName extracts the value of a resource
pkg/api/v1/resource/helpers_test.go
@@ -22,7 +22,11 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/resource"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	"k8s.io/kubernetes/pkg/features"
 )
 
 func TestResourceHelpers(t *testing.T) {
@@ -64,27 +68,53 @@ func TestDefaultResourceHelpers(t *testing.T) {
 }
 
 func TestGetResourceRequest(t *testing.T) {
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
+
 	cases := []struct {
 		pod           *v1.Pod
-		res           v1.ResourceName
+		cName         string
+		resourceName  v1.ResourceName
 		expectedValue int64
 		expectedError error
 	}{
 		{
 			pod:           getPod("foo", podResources{cpuRequest: "9"}),
-			res:           v1.ResourceCPU,
+			resourceName:  v1.ResourceCPU,
 			expectedValue: 9000,
 		},
 		{
 			pod:           getPod("foo", podResources{memoryRequest: "90Mi"}),
-			res:           v1.ResourceMemory,
+			resourceName:  v1.ResourceMemory,
 			expectedValue: 94371840,
 		},
+		{
+			cName:         "just-overhead for cpu",
+			pod:           getPod("foo", podResources{cpuOverhead: "5", memoryOverhead: "5"}),
+			resourceName:  v1.ResourceCPU,
+			expectedValue: 0,
+		},
+		{
+			cName:         "just-overhead for memory",
+			pod:           getPod("foo", podResources{memoryOverhead: "5"}),
+			resourceName:  v1.ResourceMemory,
+			expectedValue: 0,
+		},
+		{
+			cName:         "cpu overhead and req",
+			pod:           getPod("foo", podResources{cpuRequest: "2", cpuOverhead: "5", memoryOverhead: "5"}),
+			resourceName:  v1.ResourceCPU,
+			expectedValue: 7000,
+		},
+		{
+			cName:         "mem overhead and req",
+			pod:           getPod("foo", podResources{cpuRequest: "2", memoryRequest: "1024", cpuOverhead: "5", memoryOverhead: "5"}),
+			resourceName:  v1.ResourceMemory,
+			expectedValue: 1029,
+		},
 	}
 	as := assert.New(t)
 	for idx, tc := range cases {
-		actual := GetResourceRequest(tc.pod, tc.res)
-		as.Equal(actual, tc.expectedValue, "expected test case [%d] to return %q; got %q instead", idx, tc.expectedValue, actual)
+		actual := GetResourceRequest(tc.pod, tc.resourceName)
+		as.Equal(actual, tc.expectedValue, "expected test case [%d] %v: to return %q; got %q instead", idx, tc.cName, tc.expectedValue, actual)
 	}
 }
 
@@ -242,6 +272,78 @@ func TestExtractResourceValue(t *testing.T) {
 	}
 }
 
+func TestPodRequestsAndLimits(t *testing.T) {
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
+
+	cases := []struct {
+		pod              *v1.Pod
+		cName            string
+		expectedRequests v1.ResourceList
+		expectedLimits   v1.ResourceList
+	}{
+		{
+			cName:            "just-limit-no-overhead",
+			pod:              getPod("foo", podResources{cpuLimit: "9"}),
+			expectedRequests: v1.ResourceList{},
+			expectedLimits: v1.ResourceList{
+				v1.ResourceName(v1.ResourceCPU): resource.MustParse("9"),
+			},
+		},
+		{
+			cName: "just-overhead",
+			pod:   getPod("foo", podResources{cpuOverhead: "5", memoryOverhead: "5"}),
+			expectedRequests: v1.ResourceList{
+				v1.ResourceName(v1.ResourceCPU):    resource.MustParse("5"),
+				v1.ResourceName(v1.ResourceMemory): resource.MustParse("5"),
+			},
+			expectedLimits: v1.ResourceList{},
+		},
+		{
+			cName: "req-and-overhead",
+			pod:   getPod("foo", podResources{cpuRequest: "1", memoryRequest: "10", cpuOverhead: "5", memoryOverhead: "5"}),
+			expectedRequests: v1.ResourceList{
+				v1.ResourceName(v1.ResourceCPU):    resource.MustParse("6"),
+				v1.ResourceName(v1.ResourceMemory): resource.MustParse("15"),
+			},
+			expectedLimits: v1.ResourceList{},
+		},
+		{
+			cName: "all-req-lim-and-overhead",
+			pod:   getPod("foo", podResources{cpuRequest: "1", cpuLimit: "2", memoryRequest: "10", memoryLimit: "12", cpuOverhead: "5", memoryOverhead: "5"}),
+			expectedRequests: v1.ResourceList{
+				v1.ResourceName(v1.ResourceCPU):    resource.MustParse("6"),
+				v1.ResourceName(v1.ResourceMemory): resource.MustParse("15"),
+			},
+			expectedLimits: v1.ResourceList{
+				v1.ResourceName(v1.ResourceCPU):    resource.MustParse("7"),
+				v1.ResourceName(v1.ResourceMemory): resource.MustParse("17"),
+			},
+		},
+		{
+			cName: "req-some-lim-and-overhead",
+			pod:   getPod("foo", podResources{cpuRequest: "1", cpuLimit: "2", memoryRequest: "10", cpuOverhead: "5", memoryOverhead: "5"}),
+			expectedRequests: v1.ResourceList{
+				v1.ResourceName(v1.ResourceCPU):    resource.MustParse("6"),
+				v1.ResourceName(v1.ResourceMemory): resource.MustParse("15"),
+			},
+			expectedLimits: v1.ResourceList{
+				v1.ResourceName(v1.ResourceCPU): resource.MustParse("7"),
+			},
+		},
+	}
+	for idx, tc := range cases {
+		resRequests, resLimits := PodRequestsAndLimits(tc.pod)
+
+		if !equality.Semantic.DeepEqual(tc.expectedRequests, resRequests) {
+			t.Errorf("test case failure[%d]: %v, requests:\n expected:\t%v\ngot\t\t%v", idx, tc.cName, tc.expectedRequests, resRequests)
+		}
+
+		if !equality.Semantic.DeepEqual(tc.expectedLimits, resLimits) {
+			t.Errorf("test case failure[%d]: %v, limits:\n expected:\t%v\ngot\t\t%v", idx, tc.cName, tc.expectedLimits, resLimits)
+		}
+	}
+}
+
 type podResources struct {
-	cpuRequest, cpuLimit, memoryRequest, memoryLimit string
+	cpuRequest, cpuLimit, memoryRequest, memoryLimit, cpuOverhead, memoryOverhead string
 }
@@ -251,6 +353,9 @@ func getPod(cname string, resources podResources) *v1.Pod {
 		Limits:   make(v1.ResourceList),
 		Requests: make(v1.ResourceList),
 	}
+
+	overhead := make(v1.ResourceList)
+
 	if resources.cpuLimit != "" {
 		r.Limits[v1.ResourceCPU] = resource.MustParse(resources.cpuLimit)
 	}
@@ -263,6 +368,13 @@ func getPod(cname string, resources podResources) *v1.Pod {
 	if resources.memoryRequest != "" {
 		r.Requests[v1.ResourceMemory] = resource.MustParse(resources.memoryRequest)
 	}
+	if resources.cpuOverhead != "" {
+		overhead[v1.ResourceCPU] = resource.MustParse(resources.cpuOverhead)
+	}
+	if resources.memoryOverhead != "" {
+		overhead[v1.ResourceMemory] = resource.MustParse(resources.memoryOverhead)
+	}
+
 	return &v1.Pod{
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{
@@ -277,6 +389,7 @@ func getPod(cname string, resources podResources) *v1.Pod {
 					Resources: r,
 				},
 			},
+			Overhead: overhead,
 		},
 	}
 }
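For completeness, a hypothetical out-of-tree test (not part of this commit) could exercise the public helper end to end. The package and test names below are illustrative; the feature gate is flipped the same way the tests above do, and the expected value matches the "cpu overhead and req" case.

package resourcehelpers_test

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
	"k8s.io/kubernetes/pkg/features"
)

// With PodOverhead enabled, a container CPU request of 2 plus a sandbox
// overhead of 5 should be reported as 7000 millicores.
func TestOverheadCountedInCPURequest(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()

	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
				},
			}},
			Overhead: v1.ResourceList{v1.ResourceCPU: resource.MustParse("5")},
		},
	}

	if got := resourcehelper.GetResourceRequest(pod, v1.ResourceCPU); got != 7000 {
		t.Fatalf("expected 7000 millicores of CPU request, got %d", got)
	}
}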