change doPodResizeAction to call updatePodSandboxResources inside setPodCgroupConfig

Filipe Xavier 2025-02-28 10:48:27 -03:00
parent dc8dc31442
commit 41e3efdb60
12 changed files with 935 additions and 85 deletions
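
At a high level, this change makes the kubelet feed the pod-level cm.ResourceConfig it actually writes to the pod cgroup into the CRI UpdatePodSandboxResources call, and it issues that call every time the pod-level cgroup is reconfigured during a resize rather than once at the end. A condensed sketch of the setPodCgroupConfig closure in doPodResizeAction after this commit, assembled from the diff below (error logging trimmed; not the verbatim committed code):

setPodCgroupConfig := func(rName v1.ResourceName, setLimitValue bool) error {
    resizedResources := &cm.ResourceConfig{}
    switch rName {
    case v1.ResourceCPU:
        if setLimitValue {
            resizedResources.CPUPeriod = podResources.CPUPeriod
            resizedResources.CPUQuota = podResources.CPUQuota
        } else {
            resizedResources.CPUShares = podResources.CPUShares
        }
    case v1.ResourceMemory:
        if !setLimitValue {
            return nil // memory requests aren't written to cgroups
        }
        resizedResources.Memory = podResources.Memory
    }
    if err := pcm.SetPodCgroupConfig(pod, resizedResources); err != nil {
        return err
    }
    // Track what has actually been applied to the pod cgroup and hand the
    // merged view to the runtime; the sandbox update is best-effort.
    currentPodResources = mergeResourceConfig(currentPodResources, resizedResources)
    if err := m.updatePodSandboxResources(podContainerChanges.SandboxID, pod, currentPodResources); err != nil {
        klog.ErrorS(err, "Failed to notify runtime for UpdatePodSandboxResources", "pod", klog.KObj(pod))
    }
    return nil
}

An error from the runtime, including Unimplemented from runtimes that do not support the RPC, is logged but does not fail the resize.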


@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/types"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/security/apparmor"
)
@@ -337,3 +338,86 @@ func getAppArmorProfile(pod *v1.Pod, container *v1.Container) (*runtimeapi.Secur
return securityProfile, deprecatedProfile, nil
}
func mergeResourceConfig(source, update *cm.ResourceConfig) *cm.ResourceConfig {
if source == nil {
return update
}
if update == nil {
return source
}
merged := *source
if update.Memory != nil {
merged.Memory = update.Memory
}
if update.CPUSet.Size() > 0 {
merged.CPUSet = update.CPUSet
}
if update.CPUShares != nil {
merged.CPUShares = update.CPUShares
}
if update.CPUQuota != nil {
merged.CPUQuota = update.CPUQuota
}
if update.CPUPeriod != nil {
merged.CPUPeriod = update.CPUPeriod
}
if update.PidsLimit != nil {
merged.PidsLimit = update.PidsLimit
}
if update.HugePageLimit != nil {
if merged.HugePageLimit == nil {
merged.HugePageLimit = make(map[int64]int64)
}
for k, v := range update.HugePageLimit {
merged.HugePageLimit[k] = v
}
}
if update.Unified != nil {
if merged.Unified == nil {
merged.Unified = make(map[string]string)
}
for k, v := range update.Unified {
merged.Unified[k] = v
}
}
return &merged
}
func convertResourceConfigToLinuxContainerResources(rc *cm.ResourceConfig) *runtimeapi.LinuxContainerResources {
if rc == nil {
return nil
}
lcr := &runtimeapi.LinuxContainerResources{}
if rc.CPUPeriod != nil {
lcr.CpuPeriod = int64(*rc.CPUPeriod)
}
if rc.CPUQuota != nil {
lcr.CpuQuota = *rc.CPUQuota
}
if rc.CPUShares != nil {
lcr.CpuShares = int64(*rc.CPUShares)
}
if rc.Memory != nil {
lcr.MemoryLimitInBytes = *rc.Memory
}
if rc.CPUSet.Size() > 0 {
lcr.CpusetCpus = rc.CPUSet.String()
}
if rc.Unified != nil {
lcr.Unified = make(map[string]string, len(rc.Unified))
for k, v := range rc.Unified {
lcr.Unified[k] = v
}
}
return lcr
}
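
The merge is last-writer-wins for scalar fields and key-wise union for the HugePageLimit and Unified maps. An illustrative snippet, written as if inside the kuberuntime package (values mirror the TestMergeResourceConfig cases later in this commit; int64Ptr and uint64Ptr are the small test helpers added there):

source := &cm.ResourceConfig{Memory: int64Ptr(1024), CPUShares: uint64Ptr(2)}
update := &cm.ResourceConfig{Memory: int64Ptr(2048), CPUQuota: int64Ptr(5000)}
merged := mergeResourceConfig(source, update)
// merged.Memory    -> 2048 (update overrides source)
// merged.CPUShares -> 2    (kept from source; update left it nil)
// merged.CPUQuota  -> 5000 (taken from update)

lcr := convertResourceConfigToLinuxContainerResources(merged)
// lcr.MemoryLimitInBytes -> 2048, lcr.CpuShares -> 2, lcr.CpuQuota -> 5000, lcr.CpuPeriod -> 0 (unset)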


@@ -20,8 +20,10 @@ limitations under the License.
package kuberuntime
import (
"k8s.io/kubernetes/pkg/kubelet/cm"
"math"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/kubelet/cm"
)
const (
@@ -77,3 +79,40 @@ func quotaToMilliCPU(quota int64, period int64) int64 {
}
return (quota * milliCPUToCPU) / period
}
func subtractOverheadFromResourceConfig(resCfg *cm.ResourceConfig, pod *v1.Pod) *cm.ResourceConfig {
if resCfg == nil {
return nil
}
rc := *resCfg
if pod.Spec.Overhead != nil {
if cpu, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
if rc.CPUPeriod != nil {
cpuPeriod := int64(*rc.CPUPeriod)
cpuQuota := *rc.CPUQuota - cm.MilliCPUToQuota(cpu.MilliValue(), cpuPeriod)
rc.CPUQuota = &cpuQuota
}
if rc.CPUShares != nil {
totalCPUMilli := sharesToMilliCPU(int64(*rc.CPUShares))
cpuShares := cm.MilliCPUToShares(totalCPUMilli - cpu.MilliValue())
rc.CPUShares = &cpuShares
}
}
if memory, found := pod.Spec.Overhead[v1.ResourceMemory]; found {
if rc.Memory != nil {
currMemory := *rc.Memory
if mem, ok := memory.AsInt64(); ok {
currMemory -= mem
}
rc.Memory = &currMemory
}
}
}
return &rc
}
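
A worked example of the subtraction, using the same fixtures as TestSubtractOverheadFromResourceConfig in the next file (the exact share value assumes the integer rounding done by the cm conversion helpers):

// Pod-level config: Memory 335544320 (320Mi), CPUShares 306, CPUPeriod 100000, CPUQuota 30000
// Pod overhead:     cpu: 100m, memory: 64Mi
//
// Memory:    335544320 - 67108864 (64Mi)             = 268435456
// CPUQuota:  30000 - cm.MilliCPUToQuota(100, 100000) = 30000 - 10000 = 20000
// CPUShares: sharesToMilliCPU(306) ≈ 299 milli-CPU, so
//            cm.MilliCPUToShares(299 - 100)          = 203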


@@ -24,6 +24,7 @@ import (
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
@@ -494,3 +495,230 @@ func TestQuotaToMilliCPU(t *testing.T) {
})
}
}
func TestSubtractOverheadFromResourceConfig(t *testing.T) {
podCPUMilli := resource.MustParse("200m")
podMemory := resource.MustParse("256Mi")
podOverheadCPUMilli := resource.MustParse("100m")
podOverheadMemory := resource.MustParse("64Mi")
resCfg := &cm.ResourceConfig{
Memory: int64Ptr(335544320),
CPUShares: uint64Ptr(306),
CPUPeriod: uint64Ptr(100000),
CPUQuota: int64Ptr(30000),
}
for _, tc := range []struct {
name string
cfgInput *cm.ResourceConfig
pod *v1.Pod
expected *cm.ResourceConfig
}{
{
name: "withoutOverhead",
cfgInput: resCfg,
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podCPUMilli,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podCPUMilli,
v1.ResourceMemory: podMemory,
},
},
},
},
},
},
expected: &cm.ResourceConfig{
Memory: int64Ptr(335544320),
CPUShares: uint64Ptr(306),
CPUPeriod: uint64Ptr(100000),
CPUQuota: int64Ptr(30000),
},
},
{
name: "withoutCPUOverhead",
cfgInput: resCfg,
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podCPUMilli,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podCPUMilli,
v1.ResourceMemory: podMemory,
},
},
},
},
Overhead: v1.ResourceList{
v1.ResourceMemory: podOverheadMemory,
},
},
},
expected: &cm.ResourceConfig{
Memory: int64Ptr(268435456),
CPUShares: uint64Ptr(306),
CPUPeriod: uint64Ptr(100000),
CPUQuota: int64Ptr(30000),
},
},
{
name: "withoutMemoryOverhead",
cfgInput: resCfg,
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podCPUMilli,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podCPUMilli,
v1.ResourceMemory: podMemory,
},
},
},
},
Overhead: v1.ResourceList{
v1.ResourceCPU: podOverheadCPUMilli,
},
},
},
expected: &cm.ResourceConfig{
Memory: int64Ptr(335544320),
CPUShares: uint64Ptr(203),
CPUPeriod: uint64Ptr(100000),
CPUQuota: int64Ptr(20000),
},
},
{
name: "withoutCPUPeriod",
cfgInput: &cm.ResourceConfig{
Memory: int64Ptr(335544320),
CPUShares: uint64Ptr(306),
},
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podCPUMilli,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podCPUMilli,
v1.ResourceMemory: podMemory,
},
},
},
},
Overhead: v1.ResourceList{
v1.ResourceCPU: podOverheadCPUMilli,
},
},
},
expected: &cm.ResourceConfig{
Memory: int64Ptr(335544320),
CPUShares: uint64Ptr(203),
},
},
{
name: "withoutCPUShares",
cfgInput: &cm.ResourceConfig{
Memory: int64Ptr(335544320),
CPUPeriod: uint64Ptr(100000),
CPUQuota: int64Ptr(30000),
},
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podCPUMilli,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podCPUMilli,
v1.ResourceMemory: podMemory,
},
},
},
},
Overhead: v1.ResourceList{
v1.ResourceCPU: podOverheadCPUMilli,
},
},
},
expected: &cm.ResourceConfig{
Memory: int64Ptr(335544320),
CPUPeriod: uint64Ptr(100000),
CPUQuota: int64Ptr(20000),
},
},
{
name: "withOverhead",
cfgInput: resCfg,
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podCPUMilli,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podCPUMilli,
v1.ResourceMemory: podMemory,
},
},
},
},
Overhead: v1.ResourceList{
v1.ResourceCPU: podOverheadCPUMilli,
v1.ResourceMemory: podOverheadMemory,
},
},
},
expected: &cm.ResourceConfig{
Memory: int64Ptr(268435456),
CPUShares: uint64Ptr(203),
CPUPeriod: uint64Ptr(100000),
CPUQuota: int64Ptr(20000),
},
},
} {
t.Run(tc.name, func(t *testing.T) {
gotCfg := subtractOverheadFromResourceConfig(tc.cfgInput, tc.pod)
if tc.expected.CPUPeriod != nil && *gotCfg.CPUPeriod != *tc.expected.CPUPeriod {
t.Errorf("Test %s: expected CPUPeriod %v, but got %v", tc.name, *tc.expected.CPUPeriod, *gotCfg.CPUPeriod)
}
if tc.expected.CPUQuota != nil && *gotCfg.CPUQuota != *tc.expected.CPUQuota {
t.Errorf("Test %s: expected CPUQuota %v, but got %v", tc.name, *tc.expected.CPUQuota, *gotCfg.CPUQuota)
}
if tc.expected.CPUShares != nil && *gotCfg.CPUShares != *tc.expected.CPUShares {
t.Errorf("Test %s: expected CPUShares %v, but got %v", tc.name, *tc.expected.CPUShares, *gotCfg.CPUShares)
}
if tc.expected.Memory != nil && *gotCfg.Memory != *tc.expected.Memory {
t.Errorf("Test %s: expected Memory %v, but got %v", tc.name, *tc.expected.Memory, *gotCfg.Memory)
}
})
}
}


@@ -30,6 +30,7 @@ import (
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
runtimetesting "k8s.io/cri-api/pkg/apis/testing"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/utils/ptr"
)
@@ -40,6 +41,14 @@ func (f podStatusProviderFunc) GetPodStatus(_ context.Context, uid types.UID, na
return f(uid, name, namespace)
}
func int64Ptr(i int64) *int64 {
return &i
}
func uint64Ptr(i uint64) *uint64 {
return &i
}
func TestIsInitContainerFailed(t *testing.T) {
tests := []struct {
status *kubecontainer.Status
@@ -441,3 +450,83 @@ func TestGetAppArmorProfile(t *testing.T) {
})
}
}
func TestMergeResourceConfig(t *testing.T) {
tests := []struct {
name string
source *cm.ResourceConfig
update *cm.ResourceConfig
expected *cm.ResourceConfig
}{
{
name: "merge all fields",
source: &cm.ResourceConfig{Memory: int64Ptr(1024), CPUShares: uint64Ptr(2)},
update: &cm.ResourceConfig{Memory: int64Ptr(2048), CPUQuota: int64Ptr(5000)},
expected: &cm.ResourceConfig{
Memory: int64Ptr(2048),
CPUShares: uint64Ptr(2),
CPUQuota: int64Ptr(5000),
},
},
{
name: "merge HugePageLimit and Unified",
source: &cm.ResourceConfig{HugePageLimit: map[int64]int64{2048: 1024}, Unified: map[string]string{"key1": "value1"}},
update: &cm.ResourceConfig{HugePageLimit: map[int64]int64{4096: 2048}, Unified: map[string]string{"key1": "newValue1", "key2": "value2"}},
expected: &cm.ResourceConfig{
HugePageLimit: map[int64]int64{2048: 1024, 4096: 2048},
Unified: map[string]string{"key1": "newValue1", "key2": "value2"},
},
},
{
name: "update nil source",
source: nil,
update: &cm.ResourceConfig{Memory: int64Ptr(4096)},
expected: &cm.ResourceConfig{
Memory: int64Ptr(4096),
},
},
{
name: "update nil update",
source: &cm.ResourceConfig{Memory: int64Ptr(1024)},
update: nil,
expected: &cm.ResourceConfig{
Memory: int64Ptr(1024),
},
},
{
name: "update empty source",
source: &cm.ResourceConfig{},
update: &cm.ResourceConfig{Memory: int64Ptr(8192)},
expected: &cm.ResourceConfig{
Memory: int64Ptr(8192),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
merged := mergeResourceConfig(tt.source, tt.update)
assert.Equal(t, tt.expected, merged)
})
}
}
func TestConvertResourceConfigToLinuxContainerResources(t *testing.T) {
resCfg := &cm.ResourceConfig{
Memory: int64Ptr(2048),
CPUShares: uint64Ptr(2),
CPUPeriod: uint64Ptr(10000),
CPUQuota: int64Ptr(5000),
HugePageLimit: map[int64]int64{4096: 2048},
Unified: map[string]string{"key1": "value1"},
}
lcr := convertResourceConfigToLinuxContainerResources(resCfg)
assert.Equal(t, int64(*resCfg.CPUPeriod), lcr.CpuPeriod)
assert.Equal(t, *resCfg.CPUQuota, lcr.CpuQuota)
assert.Equal(t, int64(*resCfg.CPUShares), lcr.CpuShares)
assert.Equal(t, *resCfg.Memory, lcr.MemoryLimitInBytes)
assert.Equal(t, resCfg.Unified, lcr.Unified)
}

View File

@@ -273,7 +273,7 @@ func (in instrumentedRuntimeService) PortForward(ctx context.Context, req *runti
}
func (in instrumentedRuntimeService) UpdatePodSandboxResources(ctx context.Context, req *runtimeapi.UpdatePodSandboxResourcesRequest) (*runtimeapi.UpdatePodSandboxResourcesResponse, error) {
const operation = "update_podsandbox"
const operation = "update_podsandbox_resources"
defer recordOperation(operation, time.Now())
resp, err := in.service.UpdatePodSandboxResources(ctx, req)


@@ -53,6 +53,7 @@ import (
kubelettypes "k8s.io/kubelet/pkg/types"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
@@ -412,8 +413,8 @@ func (m *kubeGenericRuntimeManager) updateContainerResources(pod *v1.Pod, contai
return err
}
func (m *kubeGenericRuntimeManager) updatePodSandboxResources(sandboxID string, pod *v1.Pod) error {
podResourcesRequest := m.generateUpdatePodSandboxResourcesRequest(sandboxID, pod)
func (m *kubeGenericRuntimeManager) updatePodSandboxResources(sandboxID string, pod *v1.Pod, podResources *cm.ResourceConfig) error {
podResourcesRequest := m.generateUpdatePodSandboxResourcesRequest(sandboxID, pod, podResources)
if podResourcesRequest == nil {
return fmt.Errorf("sandboxID %q updatePodSandboxResources failed: cannot generate resources config", sandboxID)
}
@@ -423,10 +424,10 @@ func (m *kubeGenericRuntimeManager) updatePodSandboxResources(sandboxID string,
if err != nil {
stat, _ := grpcstatus.FromError(err)
if stat.Code() == codes.Unimplemented {
klog.InfoS("updatePodSandboxResources failed: method unimplemented on runtime service", "sandboxID", sandboxID)
klog.V(3).InfoS("updatePodSandboxResources failed: method unimplemented on runtime service; this call is best-effort, proceeding with resize", "sandboxID", sandboxID)
return nil
}
klog.ErrorS(err, "updatePodSandboxResources failed", "sandboxID", sandboxID)
return fmt.Errorf("updatePodSandboxResources failed for sanboxID %q: %w", sandboxID, err)
}
return nil
}
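
The unimplemented check above relies on the standard gRPC status helpers. A minimal, self-contained sketch of that pattern (isUnimplemented is a hypothetical helper, not part of this commit):

import (
    "google.golang.org/grpc/codes"
    grpcstatus "google.golang.org/grpc/status"
)

// isUnimplemented reports whether err carries the gRPC Unimplemented code,
// i.e. the runtime does not support the UpdatePodSandboxResources RPC.
func isUnimplemented(err error) bool {
    stat, _ := grpcstatus.FromError(err)
    return stat.Code() == codes.Unimplemented
}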


@@ -249,11 +249,13 @@ func (m *kubeGenericRuntimeManager) generateContainerResources(pod *v1.Pod, cont
}
// generateUpdatePodSandboxResourcesRequest generates the platform-specific (linux) pod sandbox resources config for the runtime
func (m *kubeGenericRuntimeManager) generateUpdatePodSandboxResourcesRequest(sandboxID string, pod *v1.Pod) *runtimeapi.UpdatePodSandboxResourcesRequest {
func (m *kubeGenericRuntimeManager) generateUpdatePodSandboxResourcesRequest(sandboxID string, pod *v1.Pod, podResources *cm.ResourceConfig) *runtimeapi.UpdatePodSandboxResourcesRequest {
podResourcesWithoutOverhead := subtractOverheadFromResourceConfig(podResources, pod)
return &runtimeapi.UpdatePodSandboxResourcesRequest{
PodSandboxId: sandboxID,
Overhead: m.convertOverheadToLinuxResources(pod),
Resources: m.calculateSandboxResources(pod),
Resources: convertResourceConfigToLinuxContainerResources(podResourcesWithoutOverhead),
}
}


@@ -27,6 +27,7 @@ import (
"reflect"
"strconv"
"testing"
"time"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/types"
@@ -34,6 +35,7 @@ import (
"github.com/google/go-cmp/cmp"
libcontainercgroups "github.com/opencontainers/cgroups"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1259,6 +1261,448 @@ func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) {
}
}
func TestGenerateUpdatePodSandboxResourcesRequest(t *testing.T) {
_, _, m, err := createTestRuntimeManager()
require.NoError(t, err)
podRequestCPU := resource.MustParse("400m")
podLimitCPU := resource.MustParse("800m")
podRequestMemory := resource.MustParse("128Mi")
podLimitMemory := resource.MustParse("256Mi")
podOverheadCPU := resource.MustParse("100m")
podOverheadMemory := resource.MustParse("64Mi")
enforceCPULimits := true
m.cpuCFSQuota = true
for _, tc := range []struct {
name string
qosClass v1.PodQOSClass
pod *v1.Pod
sandboxID string
enforceCPULimits bool
}{
// Best effort pod (no resources defined)
{
name: "Best effort",
qosClass: v1.PodQOSBestEffort,
enforceCPULimits: enforceCPULimits,
sandboxID: "1",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{},
Limits: v1.ResourceList{},
},
},
},
Overhead: v1.ResourceList{},
},
},
},
{
name: "Best effort with overhead",
qosClass: v1.PodQOSBestEffort,
enforceCPULimits: enforceCPULimits,
sandboxID: "2",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{},
Limits: v1.ResourceList{},
},
},
},
Overhead: v1.ResourceList{
v1.ResourceCPU: podOverheadCPU,
v1.ResourceMemory: podOverheadMemory,
},
},
},
},
// Guaranteed pod
{
name: "Guaranteed",
qosClass: v1.PodQOSGuaranteed,
enforceCPULimits: enforceCPULimits,
sandboxID: "3",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
v1.ResourceMemory: podLimitMemory,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
v1.ResourceMemory: podLimitMemory,
},
},
},
},
},
},
},
{
name: "Guaranteed with overhead",
qosClass: v1.PodQOSGuaranteed,
enforceCPULimits: enforceCPULimits,
sandboxID: "4",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
v1.ResourceMemory: podLimitMemory,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
v1.ResourceMemory: podLimitMemory,
},
},
},
},
Overhead: v1.ResourceList{
v1.ResourceCPU: podOverheadCPU,
v1.ResourceMemory: podOverheadMemory,
},
},
},
},
// Burstable pods that leave some resources unspecified (e.g. only CPU/Mem requests)
{
name: "Burstable only cpu",
qosClass: v1.PodQOSBurstable,
enforceCPULimits: enforceCPULimits,
sandboxID: "5",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podLimitCPU,
},
},
},
},
},
},
},
{
name: "Burstable only cpu with overhead",
qosClass: v1.PodQOSBurstable,
enforceCPULimits: enforceCPULimits,
sandboxID: "6",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podLimitCPU,
},
},
},
},
Overhead: v1.ResourceList{
v1.ResourceCPU: podOverheadCPU,
},
},
},
},
{
name: "Burstable only memory",
qosClass: v1.PodQOSBurstable,
enforceCPULimits: enforceCPULimits,
sandboxID: "7",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: podRequestMemory,
},
Limits: v1.ResourceList{
v1.ResourceMemory: podLimitMemory,
},
},
},
},
},
},
},
{
name: "Burstable only memory with overhead",
qosClass: v1.PodQOSBurstable,
enforceCPULimits: enforceCPULimits,
sandboxID: "8",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: podRequestMemory,
},
Limits: v1.ResourceList{
v1.ResourceMemory: podLimitMemory,
},
},
},
},
Overhead: v1.ResourceList{
v1.ResourceMemory: podOverheadMemory,
},
},
},
},
// With init container
{
name: "Pod with init container",
qosClass: v1.PodQOSGuaranteed,
enforceCPULimits: enforceCPULimits,
sandboxID: "9",
pod: &v1.Pod{
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: "init",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podOverheadCPU,
v1.ResourceMemory: podOverheadMemory,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podOverheadCPU,
v1.ResourceMemory: podOverheadMemory,
},
},
},
},
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
v1.ResourceMemory: podLimitMemory,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
v1.ResourceMemory: podLimitMemory,
},
},
},
},
},
},
},
{
name: "Pod with init container and overhead",
qosClass: v1.PodQOSGuaranteed,
enforceCPULimits: enforceCPULimits,
sandboxID: "10",
pod: &v1.Pod{
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: "init",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podOverheadCPU,
v1.ResourceMemory: podOverheadMemory,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podOverheadCPU,
v1.ResourceMemory: podOverheadMemory,
},
},
},
},
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
v1.ResourceMemory: podLimitMemory,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
v1.ResourceMemory: podLimitMemory,
},
},
},
},
Overhead: v1.ResourceList{
v1.ResourceCPU: podOverheadCPU,
v1.ResourceMemory: podOverheadMemory,
},
},
},
},
// With a sidecar container
{
name: "Pod with sidecar container",
qosClass: v1.PodQOSBurstable,
enforceCPULimits: enforceCPULimits,
sandboxID: "11",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
v1.ResourceMemory: podRequestMemory,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podLimitCPU,
v1.ResourceMemory: podLimitMemory,
},
},
},
{
Name: "bar",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
v1.ResourceMemory: podRequestMemory,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podLimitCPU,
v1.ResourceMemory: podLimitMemory,
},
},
},
},
},
},
},
{
name: "Pod with sidecar container and overhead",
qosClass: v1.PodQOSBurstable,
enforceCPULimits: enforceCPULimits,
sandboxID: "11",
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
v1.ResourceMemory: podRequestMemory,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podLimitCPU,
v1.ResourceMemory: podLimitMemory,
},
},
},
{
Name: "bar",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: podRequestCPU,
v1.ResourceMemory: podRequestMemory,
},
Limits: v1.ResourceList{
v1.ResourceCPU: podLimitCPU,
v1.ResourceMemory: podLimitMemory,
},
},
},
},
Overhead: v1.ResourceList{
v1.ResourceCPU: podOverheadCPU,
v1.ResourceMemory: podOverheadMemory,
},
},
},
},
} {
t.Run(tc.name, func(t *testing.T) {
expectedLcr := m.calculateSandboxResources(tc.pod)
expectedLcrOverhead := m.convertOverheadToLinuxResources(tc.pod)
podResourcesCfg := cm.ResourceConfigForPod(tc.pod, tc.enforceCPULimits, uint64((m.cpuCFSQuotaPeriod.Duration)/time.Microsecond), false)
assert.NotNil(t, podResourcesCfg, "podResourcesCfg is expected to be not nil")
if podResourcesCfg.CPUPeriod == nil {
expectedLcr.CpuPeriod = 0
}
updatePodSandboxRequest := m.generateUpdatePodSandboxResourcesRequest("123", tc.pod, podResourcesCfg)
assert.NotNil(t, updatePodSandboxRequest, "updatePodSandboxRequest is expected to be not nil")
assert.Equal(t, expectedLcr, updatePodSandboxRequest.Resources, "expectedLcr must equal updatePodSandboxRequest.Resources")
assert.Equal(t, expectedLcrOverhead, updatePodSandboxRequest.Overhead, "expectedLcrOverhead must equal updatePodSandboxRequest.Overhead")
})
}
}
func TestUpdatePodSandboxResources(t *testing.T) {
fakeRuntime, _, m, errCreate := createTestRuntimeManager()
require.NoError(t, errCreate)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},
}
// Create fake sandbox and container
fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
assert.Len(t, fakeContainers, 1)
ctx := context.Background()
_, err := m.getPodContainerStatuses(ctx, pod.UID, pod.Name, pod.Namespace)
require.NoError(t, err)
resourceConfig := &cm.ResourceConfig{}
err = m.updatePodSandboxResources(fakeSandbox.Id, pod, resourceConfig)
require.NoError(t, err)
// Verify sandbox is updated
assert.Contains(t, fakeRuntime.Called, "UpdatePodSandboxResources")
}
type CgroupVersion string
const (


@@ -969,39 +969,3 @@ func TestUpdateContainerResources(t *testing.T) {
// Verify container is updated
assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources")
}
// TestUpdatePodSandboxResources tests updating pod sandbox resources.
func TestUpdatePodSandboxResources(t *testing.T) {
fakeRuntime, _, m, errCreate := createTestRuntimeManager()
require.NoError(t, errCreate)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "busybox",
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},
}
// Create fake sandbox and container
fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
assert.Len(t, fakeContainers, 1)
ctx := context.Background()
_, err := m.getPodContainerStatuses(ctx, pod.UID, pod.Name, pod.Namespace)
require.NoError(t, err)
err = m.updatePodSandboxResources(fakeSandbox.Id, pod)
require.NoError(t, err)
// Verify sandbox is updated
assert.Contains(t, fakeRuntime.Called, "UpdatePodSandboxResources")
}


@@ -22,6 +22,7 @@ package kuberuntime
import (
v1 "k8s.io/api/core/v1"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
@@ -36,7 +37,7 @@ func (m *kubeGenericRuntimeManager) generateContainerResources(pod *v1.Pod, cont
}
// generateUpdatePodSandboxResourcesRequest generates the platform-specific pod sandbox resources config for the runtime
func (m *kubeGenericRuntimeManager) generateUpdatePodSandboxResourcesRequest(sandboxID string, pod *v1.Pod) *runtimeapi.UpdatePodSandboxResourcesRequest {
func (m *kubeGenericRuntimeManager) generateUpdatePodSandboxResourcesRequest(sandboxID string, pod *v1.Pod, podResources *cm.ResourceConfig) *runtimeapi.UpdatePodSandboxResourcesRequest {
return nil
}


@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/winstats"
"k8s.io/kubernetes/pkg/securitycontext"
@@ -48,10 +49,8 @@ func (m *kubeGenericRuntimeManager) generateContainerResources(pod *v1.Pod, cont
}
// generateUpdatePodSandboxResourcesRequest generates the platform-specific pod sandbox resources config for the runtime
func (m *kubeGenericRuntimeManager) generateUpdatePodSandboxResourcesRequest(sandboxID string, pod *v1.Pod) *runtimeapi.UpdatePodSandboxResourcesRequest {
return &runtimeapi.UpdatePodSandboxResourcesRequest{
PodSandboxId: sandboxID,
}
func (m *kubeGenericRuntimeManager) generateUpdatePodSandboxResourcesRequest(sandboxID string, pod *v1.Pod, podResources *cm.ResourceConfig) *runtimeapi.UpdatePodSandboxResourcesRequest {
return nil
}
// generateWindowsContainerResources generates windows container resources config for runtime


@@ -675,36 +675,56 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podContainerC
}
podResources := cm.ResourceConfigForPod(pod, enforceCPULimits, uint64((m.cpuCFSQuotaPeriod.Duration)/time.Microsecond), false)
if podResources == nil {
klog.ErrorS(nil, "Unable to get resource configuration", "pod", pod.Name)
klog.ErrorS(nil, "Unable to get resource configuration", "pod", klog.KObj(pod))
result.Fail(fmt.Errorf("unable to get resource configuration processing resize for pod %s", pod.Name))
return
}
currentPodMemoryConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceMemory)
if err != nil {
klog.ErrorS(nil, "Unable to get pod cgroup memory config", "pod", klog.KObj(pod))
result.Fail(fmt.Errorf("unable to get pod cgroup memory config for pod %s", pod.Name))
return
}
currentPodCPUConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceCPU)
if err != nil {
klog.ErrorS(nil, "Unable to get pod cgroup cpu config", "pod", klog.KObj(pod))
result.Fail(fmt.Errorf("unable to get pod cgroup cpu config for pod %s", pod.Name))
return
}
currentPodResources := podResources
currentPodResources = mergeResourceConfig(currentPodResources, currentPodMemoryConfig)
currentPodResources = mergeResourceConfig(currentPodResources, currentPodCPUConfig)
setPodCgroupConfig := func(rName v1.ResourceName, setLimitValue bool) error {
var err error
resizedResources := &cm.ResourceConfig{}
switch rName {
case v1.ResourceCPU:
podCPUResources := &cm.ResourceConfig{}
if setLimitValue {
podCPUResources.CPUPeriod = podResources.CPUPeriod
podCPUResources.CPUQuota = podResources.CPUQuota
resizedResources.CPUPeriod = podResources.CPUPeriod
resizedResources.CPUQuota = podResources.CPUQuota
} else {
podCPUResources.CPUShares = podResources.CPUShares
resizedResources.CPUShares = podResources.CPUShares
}
err = pcm.SetPodCgroupConfig(pod, podCPUResources)
case v1.ResourceMemory:
if !setLimitValue {
// Memory requests aren't written to cgroups.
return nil
}
podMemoryResources := &cm.ResourceConfig{
Memory: podResources.Memory,
}
err = pcm.SetPodCgroupConfig(pod, podMemoryResources)
resizedResources.Memory = podResources.Memory
}
err = pcm.SetPodCgroupConfig(pod, resizedResources)
if err != nil {
klog.ErrorS(err, "Failed to set cgroup config", "resource", rName, "pod", pod.Name)
klog.ErrorS(err, "Failed to set cgroup config", "resource", rName, "pod", klog.KObj(pod))
return err
}
return err
currentPodResources = mergeResourceConfig(currentPodResources, resizedResources)
if err = m.updatePodSandboxResources(podContainerChanges.SandboxID, pod, currentPodResources); err != nil {
klog.ErrorS(err, "Failed to notify runtime for UpdatePodSandboxResources", "resource", rName, "pod", klog.KObj(pod))
// Don't propagate the error since the updatePodSandboxResources call is best-effort.
}
return nil
}
// Memory and CPU are updated separately because memory resizes may be ordered differently than CPU resizes.
// If resize results in net pod resource increase, set pod cgroup config before resizing containers.
@@ -747,14 +767,7 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podContainerC
// partially actuated.
defer m.runtimeHelper.SetPodWatchCondition(pod.UID, "doPodResizeAction", func(*kubecontainer.PodStatus) bool { return true })
anyResizeDone := false
if len(podContainerChanges.ContainersToUpdate[v1.ResourceMemory]) > 0 || podContainerChanges.UpdatePodResources {
currentPodMemoryConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceMemory)
if err != nil {
klog.ErrorS(err, "GetPodCgroupConfig for memory failed", "pod", pod.Name)
result.Fail(err)
return
}
if podResources.Memory != nil {
currentPodMemoryUsage, err := pcm.GetPodCgroupMemoryUsage(pod)
if err != nil {
@@ -776,7 +789,6 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podContainerC
result.Fail(errResize)
return
}
anyResizeDone = true
}
if len(podContainerChanges.ContainersToUpdate[v1.ResourceCPU]) > 0 || podContainerChanges.UpdatePodResources {
if podResources.CPUShares == nil {
@@ -785,31 +797,18 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podContainerC
result.Fail(fmt.Errorf("podResources.CPUShares is nil for pod %s", pod.Name))
return
}
currentPodCpuConfig, err := pcm.GetPodCgroupConfig(pod, v1.ResourceCPU)
if err != nil {
klog.ErrorS(err, "GetPodCgroupConfig for CPU failed", "pod", pod.Name)
result.Fail(err)
return
}
// Default pod CPUQuota to the current CPUQuota if no limit is set to prevent the pod limit
// from updating.
// TODO(#128675): This does not support removing limits.
if podResources.CPUQuota == nil {
podResources.CPUQuota = currentPodCpuConfig.CPUQuota
podResources.CPUQuota = currentPodCPUConfig.CPUQuota
}
if errResize := resizeContainers(v1.ResourceCPU, *currentPodCpuConfig.CPUQuota, *podResources.CPUQuota,
int64(*currentPodCpuConfig.CPUShares), int64(*podResources.CPUShares)); errResize != nil {
if errResize := resizeContainers(v1.ResourceCPU, *currentPodCPUConfig.CPUQuota, *podResources.CPUQuota,
int64(*currentPodCPUConfig.CPUShares), int64(*podResources.CPUShares)); errResize != nil {
result.Fail(errResize)
return
}
anyResizeDone = true
}
if anyResizeDone {
if errUpdate := m.updatePodSandboxResources(podContainerChanges.SandboxID, pod); errUpdate != nil {
klog.ErrorS(errUpdate, "updatePodSandboxResources failed", "pod", pod.Name)
}
}
}