diff --git a/pkg/api/pod/util.go b/pkg/api/pod/util.go index 8aa1fb47ed6..93d0076f279 100644 --- a/pkg/api/pod/util.go +++ b/pkg/api/pod/util.go @@ -1314,7 +1314,7 @@ func MarkPodProposedForResize(oldPod, newPod *api.Pod) { continue } newPod.Status.Resize = api.PodResizeStatusProposed - break + return } } } diff --git a/pkg/apis/core/v1/defaults.go b/pkg/apis/core/v1/defaults.go index 3f0777cb462..0241dbc2a66 100644 --- a/pkg/apis/core/v1/defaults.go +++ b/pkg/apis/core/v1/defaults.go @@ -245,11 +245,20 @@ func setDefaultResizePolicy(obj *v1.Container) { }) } } - if _, exists := obj.Resources.Requests[v1.ResourceCPU]; exists { - defaultResizePolicy(v1.ResourceCPU) + if !resizePolicySpecified[v1.ResourceCPU] { + if _, exists := obj.Resources.Requests[v1.ResourceCPU]; exists { + defaultResizePolicy(v1.ResourceCPU) + } else if _, exists := obj.Resources.Limits[v1.ResourceCPU]; exists { + defaultResizePolicy(v1.ResourceCPU) + } } - if _, exists := obj.Resources.Requests[v1.ResourceMemory]; exists { - defaultResizePolicy(v1.ResourceMemory) + + if !resizePolicySpecified[v1.ResourceMemory] { + if _, exists := obj.Resources.Requests[v1.ResourceMemory]; exists { + defaultResizePolicy(v1.ResourceMemory) + } else if _, exists := obj.Resources.Limits[v1.ResourceMemory]; exists { + defaultResizePolicy(v1.ResourceMemory) + } } } diff --git a/pkg/apis/core/v1/defaults_test.go b/pkg/apis/core/v1/defaults_test.go index fa39fcf489d..df77211af1f 100644 --- a/pkg/apis/core/v1/defaults_test.go +++ b/pkg/apis/core/v1/defaults_test.go @@ -3195,13 +3195,12 @@ func TestSetDefaultResizePolicy(t *testing.T) { } output := roundTrip(t, runtime.Object(&testPod)) pod2 := output.(*v1.Pod) - if !cmp.Equal(pod2.Spec.Containers[0].ResizePolicy, tc.expectedResizePolicy) { - t.Errorf("expected resize policy %+v, but got %+v for normal containers", tc.expectedResizePolicy, pod2.Spec.Containers[0].ResizePolicy) - } if isSidecarContainer { if !cmp.Equal(pod2.Spec.InitContainers[0].ResizePolicy, 
tc.expectedResizePolicy) { t.Errorf("expected resize policy %+v, but got %+v for restartable init containers", tc.expectedResizePolicy, pod2.Spec.InitContainers[0].ResizePolicy) } + } else if !cmp.Equal(pod2.Spec.Containers[0].ResizePolicy, tc.expectedResizePolicy) { + t.Errorf("expected resize policy %+v, but got %+v for normal containers", tc.expectedResizePolicy, pod2.Spec.Containers[0].ResizePolicy) } }) } diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index d51abf3c0e9..fb6d94f05e3 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -5289,7 +5289,6 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel for ix, container := range mungedPodSpec.Containers { container.Image = oldPod.Spec.Containers[ix].Image // +k8s:verify-mutation:reason=clone newContainers = append(newContainers, container) - } mungedPodSpec.Containers = newContainers // munge spec.initContainers[*].image @@ -5644,10 +5643,7 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel originalCPUMemPodSpec := *newPod.Spec.DeepCopy() var newContainers []core.Container for ix, container := range originalCPUMemPodSpec.Containers { - lim := dropCPUMemoryUpdates(container.Resources.Limits, oldPod.Spec.Containers[ix].Resources.Limits) - req := dropCPUMemoryUpdates(container.Resources.Requests, oldPod.Spec.Containers[ix].Resources.Requests) - container.Resources = core.ResourceRequirements{Limits: lim, Requests: req} - container.ResizePolicy = oldPod.Spec.Containers[ix].ResizePolicy // +k8s:verify-mutation:reason=clone + dropCPUMemoryResourcesFromContainer(&container, &oldPod.Spec.Containers[ix]) newContainers = append(newContainers, container) } originalCPUMemPodSpec.Containers = newContainers @@ -5657,10 +5653,7 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel if 
utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) { for ix, container := range originalCPUMemPodSpec.InitContainers { if container.RestartPolicy != nil && *container.RestartPolicy == core.ContainerRestartPolicyAlways { // restartable init container - lim := dropCPUMemoryUpdates(container.Resources.Limits, oldPod.Spec.InitContainers[ix].Resources.Limits) - req := dropCPUMemoryUpdates(container.Resources.Requests, oldPod.Spec.InitContainers[ix].Resources.Requests) - container.Resources = core.ResourceRequirements{Limits: lim, Requests: req} - container.ResizePolicy = oldPod.Spec.InitContainers[ix].ResizePolicy // +k8s:verify-mutation:reason=clone + dropCPUMemoryResourcesFromContainer(&container, &oldPod.Spec.InitContainers[ix]) } newInitContainers = append(newInitContainers, container) } @@ -5683,25 +5676,32 @@ func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel return allErrs } -func dropCPUMemoryUpdates(resourceList, oldResourceList core.ResourceList) core.ResourceList { - if oldResourceList == nil { - return nil +// dropCPUMemoryResourcesFromContainer deletes the cpu and memory resources from the container, and copies them from the old pod container resources if present. 
+func dropCPUMemoryResourcesFromContainer(container *core.Container, oldPodSpecContainer *core.Container) { + dropCPUMemoryUpdates := func(resourceList, oldResourceList core.ResourceList) core.ResourceList { + if oldResourceList == nil { + return nil + } + var mungedResourceList core.ResourceList + if resourceList == nil { + mungedResourceList = make(core.ResourceList) + } else { + mungedResourceList = resourceList.DeepCopy() + } + delete(mungedResourceList, core.ResourceCPU) + delete(mungedResourceList, core.ResourceMemory) + if cpu, found := oldResourceList[core.ResourceCPU]; found { + mungedResourceList[core.ResourceCPU] = cpu + } + if mem, found := oldResourceList[core.ResourceMemory]; found { + mungedResourceList[core.ResourceMemory] = mem + } + return mungedResourceList } - var mungedResourceList core.ResourceList - if resourceList == nil { - mungedResourceList = make(core.ResourceList) - } else { - mungedResourceList = resourceList.DeepCopy() - } - delete(mungedResourceList, core.ResourceCPU) - delete(mungedResourceList, core.ResourceMemory) - if cpu, found := oldResourceList[core.ResourceCPU]; found { - mungedResourceList[core.ResourceCPU] = cpu - } - if mem, found := oldResourceList[core.ResourceMemory]; found { - mungedResourceList[core.ResourceMemory] = mem - } - return mungedResourceList + lim := dropCPUMemoryUpdates(container.Resources.Limits, oldPodSpecContainer.Resources.Limits) + req := dropCPUMemoryUpdates(container.Resources.Requests, oldPodSpecContainer.Resources.Requests) + container.Resources = core.ResourceRequirements{Limits: lim, Requests: req} + container.ResizePolicy = oldPodSpecContainer.ResizePolicy // +k8s:verify-mutation:reason=clone } // isPodResizeRequestSupported checks whether the pod is running on a node with InPlacePodVerticalScaling enabled. 
@@ -8630,3 +8630,11 @@ func validateImageVolumeSource(imageVolume *core.ImageVolumeSource, fldPath *fie allErrs = append(allErrs, validatePullPolicy(imageVolume.PullPolicy, fldPath.Child("pullPolicy"))...) return allErrs } + +// isRestartableInitContainer returns true if the container has ContainerRestartPolicyAlways. +func isRestartableInitContainer(initContainer *core.Container) bool { + if initContainer == nil || initContainer.RestartPolicy == nil { + return false + } + return *initContainer.RestartPolicy == core.ContainerRestartPolicyAlways +} diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index 30c68a56635..4791534f98b 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -26053,12 +26053,6 @@ func TestValidatePodResize(t *testing.T) { test.new.Namespace = "namespace" test.old.Namespace = "namespace" } - if test.isSideCarCtr { - if test.new.Spec.InitContainers == nil && test.old.Spec.InitContainers == nil { - test.new.Spec.InitContainers = []core.Container{{Name: "autoadded", Image: "image", TerminationMessagePolicy: "File", ImagePullPolicy: "Always"}} - test.old.Spec.InitContainers = []core.Container{{Name: "autoadded", Image: "image", TerminationMessagePolicy: "File", ImagePullPolicy: "Always"}} - } - } if test.new.Spec.Containers == nil && test.old.Spec.Containers == nil { test.new.Spec.Containers = []core.Container{{Name: "autoadded", Image: "image", TerminationMessagePolicy: "File", ImagePullPolicy: "Always"}} test.old.Spec.Containers = []core.Container{{Name: "autoadded", Image: "image", TerminationMessagePolicy: "File", ImagePullPolicy: "Always"}} diff --git a/pkg/kubelet/apis/podresources/server_v1.go b/pkg/kubelet/apis/podresources/server_v1.go index 1ed7bf1aa08..7d10449ad18 100644 --- a/pkg/kubelet/apis/podresources/server_v1.go +++ b/pkg/kubelet/apis/podresources/server_v1.go @@ -27,7 +27,6 @@ import ( 
"k8s.io/kubernetes/pkg/kubelet/metrics" podresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" ) // v1PodResourcesServer implements PodResourcesListerServer diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index a9850b73781..9364cbef5cf 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -1780,6 +1780,7 @@ func allocatedResourcesMatchStatus(allocatedPod *v1.Pod, podStatus *kubecontaine return true } +// allocatedContainerResourcesMatchStatus returns true if the allocated container resources match the resources in the container status. func allocatedContainerResourcesMatchStatus(allocatedPod *v1.Pod, c *v1.Container, podStatus *kubecontainer.PodStatus) bool { if cs := podStatus.FindContainerStatusByName(c.Name); cs != nil { if cs.State != kubecontainer.ContainerStateRunning { diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index b823a2230e4..2b2032055e8 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -51,7 +51,6 @@ import ( remote "k8s.io/cri-client/pkg" kubelettypes "k8s.io/kubelet/pkg/types" podutil "k8s.io/kubernetes/pkg/api/v1/pod" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/features" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/events" @@ -1072,14 +1071,12 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod // If the container is previously initialized but its status is not // found, it means its last status is removed for some reason. // Restart it if it is a restartable init container.
- if isPreviouslyInitialized && podutil.IsRestartableInitContainer(container) { if isPreviouslyInitialized && podutil.IsRestartableInitContainer(container) { changes.InitContainersToStart = append(changes.InitContainersToStart, i) } continue } - if isPreviouslyInitialized && !podutil.IsRestartableInitContainer(container) { if isPreviouslyInitialized && !podutil.IsRestartableInitContainer(container) { // after initialization, only restartable init containers need to be kept // running @@ -1096,7 +1093,6 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod changes.InitContainersToStart = append(changes.InitContainersToStart, i) case kubecontainer.ContainerStateRunning: - if !podutil.IsRestartableInitContainer(container) { if !podutil.IsRestartableInitContainer(container) { break } @@ -1173,7 +1169,6 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod // If the init container failed and the restart policy is Never, the pod is terminal. // Otherwise, restart the init container. case kubecontainer.ContainerStateExited: - if podutil.IsRestartableInitContainer(container) { if podutil.IsRestartableInitContainer(container) { changes.InitContainersToStart = append(changes.InitContainersToStart, i) } else { // init container @@ -1197,7 +1192,6 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod } default: // kubecontainer.ContainerStatusUnknown or other unknown states - if podutil.IsRestartableInitContainer(container) { if podutil.IsRestartableInitContainer(container) { // If the restartable init container is in unknown state, restart it. 
changes.ContainersToKill[status.ID] = containerToKillInfo{ diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 87e1553daf6..c84b649fac2 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -750,9 +750,12 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podContainerC } } if len(podContainerChanges.InitContainersToUpdate[rName]) > 0 { - if err = m.updatePodContainersResources(pod, rName, podContainerChanges.InitContainersToUpdate[rName], true); err != nil { - klog.ErrorS(err, "updatePodContainersResources failed for init containers", "pod", format.Pod(pod), "resource", rName) - return err + updateContainerResults, errUpdate := m.updatePodContainerResources(ctx, pod, rName, podContainerChanges.InitContainersToUpdate[rName], true) + for _, containerResult := range updateContainerResults { + result.AddSyncResult(containerResult) + } + if errUpdate != nil { + return errUpdate } } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go index 47b83019fab..c199e2ae161 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go @@ -2821,9 +2821,11 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) { apiSpecResources []v1.ResourceRequirements apiStatusResources []v1.ResourceRequirements requiresRestart []bool + injectedError error invokeUpdateResources bool expectedCurrentLimits []v1.ResourceList expectedCurrentRequests []v1.ResourceList + expectedResults []*kubecontainer.SyncResult }{ "Guaranteed QoS Pod - CPU & memory resize requested, update CPU": { resourceName: v1.ResourceCPU, @@ -2841,6 +2843,20 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) { invokeUpdateResources: true, expectedCurrentLimits: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi}, 
expectedCurrentRequests: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi}, + expectedResults: []*kubecontainer.SyncResult{ + { + Action: kubecontainer.UpdateContainerCPU, + Target: pod.Spec.InitContainers[0].Name, + }, + { + Action: kubecontainer.UpdateContainerCPU, + Target: pod.Spec.InitContainers[1].Name, + }, + { + Action: kubecontainer.UpdateContainerCPU, + Target: pod.Spec.InitContainers[2].Name, + }, + }, }, "Guaranteed QoS Pod - CPU & memory resize requested, update memory": { resourceName: v1.ResourceMemory, @@ -2858,12 +2874,27 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) { invokeUpdateResources: true, expectedCurrentLimits: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi}, expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi}, + expectedResults: []*kubecontainer.SyncResult{ + { + Action: kubecontainer.UpdateContainerMemory, + Target: pod.Spec.InitContainers[0].Name, + }, + { + Action: kubecontainer.UpdateContainerMemory, + Target: pod.Spec.InitContainers[1].Name, + }, + { + Action: kubecontainer.UpdateContainerMemory, + Target: pod.Spec.InitContainers[2].Name, + }, + }, }, } { var initContainersToUpdate []containerToUpdateInfo for idx := range pod.Spec.InitContainers { // default resize policy when pod resize feature is enabled pod.Spec.InitContainers[idx].Resources = tc.apiSpecResources[idx] + pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx] cInfo := containerToUpdateInfo{ apiContainerIdx: idx, kubeContainerID: kubecontainer.ContainerID{}, @@ -2883,8 +2914,14 @@ func TestUpdatePodRestartableInitContainerResources(t *testing.T) { initContainersToUpdate = append(initContainersToUpdate, cInfo) } fakeRuntime.Called = []string{} - err := m.updatePodContainersResources(pod, tc.resourceName, initContainersToUpdate, true) - require.NoError(t, err, dsc) + + updateContainerResults, err := m.updatePodContainerResources(context.TODO(), pod, tc.resourceName, 
initContainersToUpdate, true) + assert.ElementsMatch(t, tc.expectedResults, updateContainerResults) + if tc.injectedError == nil { + require.NoError(t, err, dsc) + } else { + require.EqualError(t, err, tc.injectedError.Error(), dsc) + } if tc.invokeUpdateResources { assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc) diff --git a/pkg/registry/core/pod/strategy.go b/pkg/registry/core/pod/strategy.go index 76a78c0c369..9c9cd8d82be 100644 --- a/pkg/registry/core/pod/strategy.go +++ b/pkg/registry/core/pod/strategy.go @@ -323,7 +323,6 @@ func dropNonResizeUpdates(newPod, oldPod *api.Pod) *api.Pod { pod.Spec.InitContainers[idx].ResizePolicy = ctr.ResizePolicy } } - return pod }