Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-05 10:19:50 +00:00)
Cgroup configuration changes:
1. Pod cgroup configured to use resources from the pod spec if the feature is enabled and resources are set at pod level.
2. Container cgroup limits defaulted to pod-level limits if container limits are not set.
This commit is contained in:
parent 26f11c4586
commit 5ea57fb3b4
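To make the two behaviors concrete before the diff, here is a minimal sketch (not part of the commit) of the kind of pod this change targets, using types from k8s.io/api/core/v1; the function name is illustrative:

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// examplePod declares resources only at the pod level; its container sets
// none of its own. With the PodLevelResources feature gate enabled, (1) the
// pod cgroup is sized from Spec.Resources, and (2) the container's cgroup
// limits default to the pod-level limits.
func examplePod() *v1.Pod {
	return &v1.Pod{
		Spec: v1.PodSpec{
			Resources: &v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				},
				Limits: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("200m"),
					v1.ResourceMemory: resource.MustParse("200Mi"),
				},
			},
			Containers: []v1.Container{
				{Name: "app"}, // no container-level requests or limits
			},
		},
	}
}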
@@ -118,16 +118,20 @@ func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 {
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
 func ResourceConfigForPod(allocatedPod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig {
+	podLevelResourcesEnabled := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources)
 	// sum requests and limits.
 	reqs := resource.PodRequests(allocatedPod, resource.PodResourcesOptions{
-		// pod is already configured to the allocated resources, and we explicitly don't want to use
-		// the actual resources if we're instantiating a resize.
+		// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
+		SkipPodLevelResources: !podLevelResourcesEnabled,
 		UseStatusResources: false,
 	})
 	// track if limits were applied for each resource.
 	memoryLimitsDeclared := true
 	cpuLimitsDeclared := true

 	limits := resource.PodLimits(allocatedPod, resource.PodResourcesOptions{
+		// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
+		SkipPodLevelResources: !podLevelResourcesEnabled,
 		ContainerFn: func(res v1.ResourceList, containerType resource.ContainerType) {
 			if res.Cpu().IsZero() {
 				cpuLimitsDeclared = false
@@ -137,6 +141,16 @@ func ResourceConfigForPod(allocatedPod *v1.Pod, enforceCPULimits bool, cpuPeriod
 			}
 		},
 	})

+	if podLevelResourcesEnabled && resource.IsPodLevelResourcesSet(allocatedPod) {
+		if !allocatedPod.Spec.Resources.Limits.Cpu().IsZero() {
+			cpuLimitsDeclared = true
+		}
+
+		if !allocatedPod.Spec.Resources.Limits.Memory().IsZero() {
+			memoryLimitsDeclared = true
+		}
+	}
 	// map hugepage pagesize (bytes) to limits (bytes)
 	hugePageLimits := HugePageLimits(reqs)
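Once a pod-level CPU limit marks cpuLimitsDeclared as true, the kubelet converts milli-CPUs into a CFS quota over the enforcement period. A sketch of that conversion (mirroring the kubelet's MilliCPUToQuota helper; the minimum-quota clamp is omitted for brevity):

// quota (µs) = milliCPU * period / 1000; e.g. the 200m pod-level limit used
// in the tests below, with the default 100000µs period, yields a 20000µs quota.
func milliCPUToQuota(milliCPU, period int64) (quota int64) {
	if milliCPU == 0 {
		return // no limit declared, no quota
	}
	return milliCPU * period / 1000
}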
@@ -70,10 +70,11 @@ func TestResourceConfigForPod(t *testing.T) {
 	cpuNoLimit := int64(-1)
 	guaranteedMemory := memoryQuantity.Value()
 	testCases := map[string]struct {
 		pod                      *v1.Pod
 		expected                 *ResourceConfig
 		enforceCPULimits         bool
 		quotaPeriod              uint64 // in microseconds
+		podLevelResourcesEnabled bool
 	}{
 		"besteffort": {
 			pod: &v1.Pod{
@@ -274,12 +275,126 @@ func TestResourceConfigForPod(t *testing.T) {
 			quotaPeriod: tunedQuotaPeriod,
 			expected:    &ResourceConfig{CPUShares: &burstablePartialShares},
 		},
+		"burstable-with-pod-level-requests": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Resources: &v1.ResourceRequirements{
+						Requests: getResourceList("100m", "100Mi"),
+					},
+					Containers: []v1.Container{
+						{
+							Name: "Container with no resources",
+						},
+					},
+				},
+			},
+			podLevelResourcesEnabled: true,
+			enforceCPULimits:         true,
+			quotaPeriod:              defaultQuotaPeriod,
+			expected:                 &ResourceConfig{CPUShares: &burstableShares},
+		},
+		"burstable-with-pod-and-container-level-requests": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Resources: &v1.ResourceRequirements{
+						Requests: getResourceList("100m", "100Mi"),
+					},
+					Containers: []v1.Container{
+						{
+							Name:      "Container with resources",
+							Resources: getResourceRequirements(getResourceList("10m", "50Mi"), getResourceList("", "")),
+						},
+					},
+				},
+			},
+			podLevelResourcesEnabled: true,
+			enforceCPULimits:         true,
+			quotaPeriod:              defaultQuotaPeriod,
+			expected:                 &ResourceConfig{CPUShares: &burstableShares},
+		},
+		"burstable-with-pod-level-resources": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Resources: &v1.ResourceRequirements{
+						Requests: getResourceList("100m", "100Mi"),
+						Limits:   getResourceList("200m", "200Mi"),
+					},
+					Containers: []v1.Container{
+						{
+							Name: "Container with no resources",
+						},
+					},
+				},
+			},
+			podLevelResourcesEnabled: true,
+			enforceCPULimits:         true,
+			quotaPeriod:              defaultQuotaPeriod,
+			expected:                 &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
+		},
+		"burstable-with-pod-and-container-level-resources": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Resources: &v1.ResourceRequirements{
+						Requests: getResourceList("100m", "100Mi"),
+						Limits:   getResourceList("200m", "200Mi"),
+					},
+					Containers: []v1.Container{
+						{
+							Name:      "Container with resources",
+							Resources: getResourceRequirements(getResourceList("10m", "50Mi"), getResourceList("50m", "100Mi")),
+						},
+					},
+				},
+			},
+			podLevelResourcesEnabled: true,
+			enforceCPULimits:         true,
+			quotaPeriod:              defaultQuotaPeriod,
+			expected:                 &ResourceConfig{CPUShares: &burstableShares, CPUQuota: &burstableQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &burstableMemory},
+		},
+		"guaranteed-with-pod-level-resources": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Resources: &v1.ResourceRequirements{
+						Requests: getResourceList("100m", "100Mi"),
+						Limits:   getResourceList("100m", "100Mi"),
+					},
+					Containers: []v1.Container{
+						{
+							Name: "Container with no resources",
+						},
+					},
+				},
+			},
+			podLevelResourcesEnabled: true,
+			enforceCPULimits:         true,
+			quotaPeriod:              defaultQuotaPeriod,
+			expected:                 &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+		},
+		"guaranteed-with-pod-and-container-level-resources": {
+			pod: &v1.Pod{
+				Spec: v1.PodSpec{
+					Resources: &v1.ResourceRequirements{
+						Requests: getResourceList("100m", "100Mi"),
+						Limits:   getResourceList("100m", "100Mi"),
+					},
+					Containers: []v1.Container{
+						{
+							Name:      "Container with resources",
+							Resources: getResourceRequirements(getResourceList("10m", "50Mi"), getResourceList("50m", "100Mi")),
+						},
+					},
+				},
+			},
+			podLevelResourcesEnabled: true,
+			enforceCPULimits:         true,
+			quotaPeriod:              defaultQuotaPeriod,
+			expected:                 &ResourceConfig{CPUShares: &guaranteedShares, CPUQuota: &guaranteedQuota, CPUPeriod: &defaultQuotaPeriod, Memory: &guaranteedMemory},
+		},
 	}

 	for testName, testCase := range testCases {
+		featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.PodLevelResources, testCase.podLevelResourcesEnabled)
 		actual := ResourceConfigForPod(testCase.pod, testCase.enforceCPULimits, testCase.quotaPeriod, false)

 		if !reflect.DeepEqual(actual.CPUPeriod, testCase.expected.CPUPeriod) {
 			t.Errorf("unexpected result, test: %v, cpu period not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUPeriod, *actual.CPUPeriod)
 		}
@@ -287,7 +402,7 @@ func TestResourceConfigForPod(t *testing.T) {
 			t.Errorf("unexpected result, test: %v, cpu quota not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUQuota, *actual.CPUQuota)
 		}
 		if !reflect.DeepEqual(actual.CPUShares, testCase.expected.CPUShares) {
-			t.Errorf("unexpected result, test: %v, cpu shares not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUShares, &actual.CPUShares)
+			t.Errorf("unexpected result, test: %v, cpu shares not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.CPUShares, *actual.CPUShares)
 		}
 		if !reflect.DeepEqual(actual.Memory, testCase.expected.Memory) {
 			t.Errorf("unexpected result, test: %v, memory not as expected. Expected: %v, Actual:%v", testName, *testCase.expected.Memory, *actual.Memory)
@@ -179,7 +179,11 @@ func (m *qosContainerManagerImpl) setCPUCgroupConfig(configs map[v1.PodQOSClass]
 			// we only care about the burstable qos tier
 			continue
 		}
-		req := resource.PodRequests(pod, resource.PodResourcesOptions{Reuse: reuseReqs})
+		req := resource.PodRequests(pod, resource.PodResourcesOptions{
+			Reuse: reuseReqs,
+			// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
+			SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources),
+		})
 		if request, found := req[v1.ResourceCPU]; found {
 			burstablePodCPURequest += request.MilliValue()
 		}
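The summed burstable milli-CPU request is later converted to cpu.shares; a sketch of that conversion (mirroring the kubelet's MilliCPUToShares helper; 1024 shares per core and a floor of 2 are the cgroup v1 conventions):

// shares = milliCPU * 1024 / 1000, floored at the kernel minimum of 2.
func milliCPUToShares(milliCPU int64) uint64 {
	shares := milliCPU * 1024 / 1000
	if shares < 2 {
		shares = 2
	}
	return uint64(shares)
}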
@@ -35,6 +35,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	resourcehelper "k8s.io/component-helpers/resource"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
 	"k8s.io/klog/v2"

@@ -95,6 +96,34 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.C
 	return lc, nil
 }

+// getCPULimit returns the CPU limit for the container to be used to calculate
+// Linux Container Resources.
+func getCPULimit(pod *v1.Pod, container *v1.Container) *resource.Quantity {
+	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
+		// When the container-level CPU limit is not set, the pod-level limit
+		// is used in the calculation for components relying on linux resource
+		// limits to be set.
+		if container.Resources.Limits.Cpu().IsZero() {
+			return pod.Spec.Resources.Limits.Cpu()
+		}
+	}
+	return container.Resources.Limits.Cpu()
+}
+
+// getMemoryLimit returns the memory limit for the container to be used to calculate
+// Linux Container Resources.
+func getMemoryLimit(pod *v1.Pod, container *v1.Container) *resource.Quantity {
+	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources) && resourcehelper.IsPodLevelResourcesSet(pod) {
+		// When the container-level memory limit is not set, the pod-level limit
+		// is used in the calculation for components relying on linux resource
+		// limits to be set.
+		if container.Resources.Limits.Memory().IsZero() {
+			return pod.Spec.Resources.Limits.Memory()
+		}
+	}
+	return container.Resources.Limits.Memory()
+}
+
 // generateLinuxContainerResources generates linux container resources config for runtime
 func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod, container *v1.Container, enforceMemoryQoS bool) *runtimeapi.LinuxContainerResources {
 	// set linux container resources
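A usage sketch of the new helpers (feature gate assumed enabled): when the container-level limit is unset, the pod-level quantity is returned; otherwise the container's own limit wins. The values mirror the new test case further below:

// Illustration only: the container sets no limits, so both helpers fall
// back to the pod-level limits.
pod := &v1.Pod{
	Spec: v1.PodSpec{
		Resources: &v1.ResourceRequirements{
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("3"),
				v1.ResourceMemory: resource.MustParse("256Mi"),
			},
		},
		Containers: []v1.Container{{Name: "app"}},
	},
}
cpuLimit := getCPULimit(pod, &pod.Spec.Containers[0])    // → 3 CPUs (pod-level)
memLimit := getMemoryLimit(pod, &pod.Spec.Containers[0]) // → 256Mi (pod-level)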
@@ -102,7 +131,10 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerResources(pod *v1.Pod,
 	if _, cpuRequestExists := container.Resources.Requests[v1.ResourceCPU]; cpuRequestExists {
 		cpuRequest = container.Resources.Requests.Cpu()
 	}
-	lcr := m.calculateLinuxResources(cpuRequest, container.Resources.Limits.Cpu(), container.Resources.Limits.Memory())
+
+	memoryLimit := getMemoryLimit(pod, container)
+	cpuLimit := getCPULimit(pod, container)
+	lcr := m.calculateLinuxResources(cpuRequest, cpuLimit, memoryLimit)

 	lcr.OomScoreAdj = int64(qos.GetContainerOOMScoreAdjust(pod, container,
 		int64(m.machineInfo.MemoryCapacity)))
@@ -167,13 +167,14 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) {
 	assert.NoError(t, err)

 	tests := []struct {
 		name               string
-		podResources       v1.ResourceRequirements
-		expected           *runtimeapi.LinuxContainerResources
+		containerResources v1.ResourceRequirements
+		podResources       *v1.ResourceRequirements
+		expected           *runtimeapi.LinuxContainerResources
 	}{
 		{
 			name: "Request 128M/1C, Limit 256M/3C",
-			podResources: v1.ResourceRequirements{
+			containerResources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
 					v1.ResourceMemory: resource.MustParse("128Mi"),
 					v1.ResourceCPU:    resource.MustParse("1"),
@@ -192,7 +193,7 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) {
 		},
 		{
 			name: "Request 128M/2C, No Limit",
-			podResources: v1.ResourceRequirements{
+			containerResources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
 					v1.ResourceMemory: resource.MustParse("128Mi"),
 					v1.ResourceCPU:    resource.MustParse("2"),
@@ -205,6 +206,27 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) {
 				MemoryLimitInBytes: 0,
 			},
 		},
+		{
+			name: "Container Level Request 128M/1C, Pod Level Limit 256M/3C",
+			containerResources: v1.ResourceRequirements{
+				Requests: v1.ResourceList{
+					v1.ResourceMemory: resource.MustParse("128Mi"),
+					v1.ResourceCPU:    resource.MustParse("1"),
+				},
+			},
+			podResources: &v1.ResourceRequirements{
+				Limits: v1.ResourceList{
+					v1.ResourceMemory: resource.MustParse("256Mi"),
+					v1.ResourceCPU:    resource.MustParse("3"),
+				},
+			},
+			expected: &runtimeapi.LinuxContainerResources{
+				CpuPeriod:          100000,
+				CpuQuota:           300000,
+				CpuShares:          1024,
+				MemoryLimitInBytes: 256 * 1024 * 1024,
+			},
+		},
 	}

 	for _, test := range tests {
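The expected values in the new test case follow from the standard conversions (pure arithmetic; the constants are the cgroup v1 defaults used throughout the kubelet):

const defaultCFSPeriod = 100000 // µs

var (
	cpuQuota    = 3000 * defaultCFSPeriod / 1000 // pod-level 3-CPU limit → 300000
	cpuShares   = 1000 * 1024 / 1000             // container 1-CPU request → 1024
	memoryLimit = 256 * 1024 * 1024              // pod-level 256Mi → 268435456
)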
@@ -222,12 +244,17 @@ func TestGenerateLinuxContainerConfigResources(t *testing.T) {
 					ImagePullPolicy: v1.PullIfNotPresent,
 					Command:         []string{"testCommand"},
 					WorkingDir:      "testWorkingDir",
-					Resources:       test.podResources,
+					Resources:       test.containerResources,
 				},
 			},
 		},
 	}

+	if test.podResources != nil {
+		featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, true)
+		pod.Spec.Resources = test.podResources
+	}
+
 	linuxConfig, err := m.generateLinuxContainerConfig(&pod.Spec.Containers[0], pod, new(int64), "", nil, false)
 	assert.NoError(t, err)
 	assert.Equal(t, test.expected.CpuPeriod, linuxConfig.GetResources().CpuPeriod, test.name)
@@ -22,7 +22,9 @@ package kuberuntime
 import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	"k8s.io/kubernetes/pkg/features"

 	resourcehelper "k8s.io/component-helpers/resource"
 )
@@ -44,6 +46,8 @@ func (m *kubeGenericRuntimeManager) convertOverheadToLinuxResources(pod *v1.Pod)
 func (m *kubeGenericRuntimeManager) calculateSandboxResources(pod *v1.Pod) *runtimeapi.LinuxContainerResources {
 	opts := resourcehelper.PodResourcesOptions{
 		ExcludeOverhead: true,
+		// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
+		SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
 	}
 	req := resourcehelper.PodRequests(pod, opts)
 	lim := resourcehelper.PodLimits(pod, opts)
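For sandbox sizing, the aggregation helpers treat pod-level values as authoritative when SkipPodLevelResources is false. A simplified sketch of the assumed semantics (the real helpers also account for overhead, init containers, and restartable sidecars; the function name is illustrative):

// effectiveCPURequest: pod-level request when present, else the container sum.
func effectiveCPURequest(pod *v1.Pod, skipPodLevel bool) *resource.Quantity {
	if !skipPodLevel && pod.Spec.Resources != nil {
		if q, ok := pod.Spec.Resources.Requests[v1.ResourceCPU]; ok {
			return &q
		}
	}
	total := resource.NewMilliQuantity(0, resource.DecimalSI)
	for i := range pod.Spec.Containers {
		total.Add(*pod.Spec.Containers[i].Resources.Requests.Cpu())
	}
	return total
}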